-rw-r--r--Documentation/00-INDEX27
-rw-r--r--Documentation/ABI/testing/sysfs-firmware-acpi99
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-uids2
-rw-r--r--Documentation/BUG-HUNTING17
-rw-r--r--Documentation/DocBook/genericirq.tmpl26
-rw-r--r--Documentation/DocBook/kernel-api.tmpl2
-rw-r--r--Documentation/DocBook/kernel-locking.tmpl32
-rw-r--r--Documentation/DocBook/lsm.tmpl2
-rw-r--r--Documentation/DocBook/mtdnand.tmpl58
-rw-r--r--Documentation/DocBook/procfs-guide.tmpl16
-rw-r--r--Documentation/DocBook/rapidio.tmpl18
-rw-r--r--Documentation/DocBook/s390-drivers.tmpl21
-rw-r--r--Documentation/DocBook/scsi.tmpl2
-rw-r--r--Documentation/DocBook/videobook.tmpl34
-rw-r--r--Documentation/DocBook/z8530book.tmpl12
-rw-r--r--Documentation/Smack.txt493
-rw-r--r--Documentation/acpi/dsdt-override.txt15
-rwxr-xr-xDocumentation/acpi/initramfs-add-dsdt.sh43
-rw-r--r--Documentation/acpi/method-tracing.txt26
-rw-r--r--Documentation/cgroups.txt22
-rw-r--r--Documentation/controllers/memory.txt279
-rw-r--r--Documentation/cpusets.txt23
-rw-r--r--Documentation/edac.txt (renamed from Documentation/drivers/edac/edac.txt)0
-rw-r--r--Documentation/email-clients.txt1
-rw-r--r--Documentation/fb/deferred_io.txt6
-rw-r--r--Documentation/feature-removal-schedule.txt15
-rw-r--r--Documentation/filesystems/00-INDEX4
-rw-r--r--Documentation/filesystems/Locking3
-rw-r--r--Documentation/filesystems/dnotify.txt (renamed from Documentation/dnotify.txt)10
-rw-r--r--Documentation/filesystems/porting30
-rw-r--r--Documentation/filesystems/proc.txt103
-rw-r--r--Documentation/filesystems/sharedsubtree.txt (renamed from Documentation/sharedsubtree.txt)0
-rw-r--r--Documentation/filesystems/vfs.txt17
-rw-r--r--Documentation/gpio.txt133
-rw-r--r--Documentation/i2c/chips/pca95393
-rw-r--r--Documentation/ia64/aliasing-test.c15
-rw-r--r--Documentation/input/input-programming.txt2
-rw-r--r--Documentation/kernel-parameters.txt12
-rw-r--r--Documentation/kprobes.txt81
-rw-r--r--Documentation/kref.txt20
-rw-r--r--Documentation/leds-class.txt29
-rw-r--r--Documentation/md.txt10
-rw-r--r--Documentation/pcmcia/driver-changes.txt4
-rw-r--r--Documentation/pm_qos_interface.txt59
-rw-r--r--Documentation/power/swsusp.txt5
-rw-r--r--Documentation/powerpc/booting-without-of.txt42
-rw-r--r--Documentation/rtc.txt42
-rw-r--r--Documentation/scheduler/00-INDEX16
-rw-r--r--Documentation/scheduler/sched-arch.txt (renamed from Documentation/sched-arch.txt)0
-rw-r--r--Documentation/scheduler/sched-coding.txt (renamed from Documentation/sched-coding.txt)0
-rw-r--r--Documentation/scheduler/sched-design-CFS.txt (renamed from Documentation/sched-design-CFS.txt)0
-rw-r--r--Documentation/scheduler/sched-design.txt (renamed from Documentation/sched-design.txt)0
-rw-r--r--Documentation/scheduler/sched-domains.txt (renamed from Documentation/sched-domains.txt)0
-rw-r--r--Documentation/scheduler/sched-nice-design.txt (renamed from Documentation/sched-nice-design.txt)0
-rw-r--r--Documentation/scheduler/sched-stats.txt (renamed from Documentation/sched-stats.txt)0
-rw-r--r--Documentation/scsi/ChangeLog.arcmsr41
-rw-r--r--Documentation/scsi/scsi_mid_low_api.txt2
-rw-r--r--Documentation/sysctl/fs.txt10
-rw-r--r--Documentation/sysctl/vm.txt29
-rw-r--r--Documentation/thermal/sysfs-api.txt246
-rw-r--r--Documentation/thinkpad-acpi.txt116
-rw-r--r--Documentation/unaligned-memory-access.txt226
-rw-r--r--Documentation/vm/slabinfo.c149
-rw-r--r--Documentation/w1/masters/00-INDEX2
-rw-r--r--Documentation/w1/masters/w1-gpio33
-rw-r--r--MAINTAINERS84
-rw-r--r--REPORTING-BUGS11
-rw-r--r--arch/alpha/Kconfig.debug9
-rw-r--r--arch/alpha/defconfig1
-rw-r--r--arch/alpha/kernel/core_irongate.c3
-rw-r--r--arch/alpha/kernel/osf_sys.c2
-rw-r--r--arch/alpha/kernel/pci-noop.c4
-rw-r--r--arch/alpha/kernel/pci_iommu.c24
-rw-r--r--arch/alpha/kernel/setup.c6
-rw-r--r--arch/alpha/kernel/smp.c4
-rw-r--r--arch/alpha/kernel/systbls.S2
-rw-r--r--arch/alpha/mm/numa.c5
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/smp.c2
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c1
-rw-r--r--arch/arm/mach-at91/board-sam9263ek.c1
-rw-r--r--arch/arm/mach-ixp4xx/dsmg600-setup.c4
-rw-r--r--arch/arm/mach-ixp4xx/nas100d-setup.c6
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c8
-rw-r--r--arch/arm/mach-pxa/Makefile3
-rw-r--r--arch/arm/mach-pxa/generic.c93
-rw-r--r--arch/arm/mach-pxa/generic.h1
-rw-r--r--arch/arm/mach-pxa/gpio.c197
-rw-r--r--arch/arm/mach-pxa/irq.c2
-rw-r--r--arch/arm/mach-pxa/tosa.c43
-rw-r--r--arch/arm/mach-rpc/riscpc.c2
-rw-r--r--arch/arm/mm/init.c4
-rw-r--r--arch/arm/mm/ioremap.c2
-rw-r--r--arch/arm/mm/mmu.c17
-rw-r--r--arch/arm/mm/nommu.c9
-rw-r--r--arch/arm/mm/pgd.c8
-rw-r--r--arch/arm/plat-omap/fb.c2
-rw-r--r--arch/avr32/Kconfig1
-rw-r--r--arch/avr32/kernel/setup.c6
-rw-r--r--arch/avr32/kernel/syscall_table.S2
-rw-r--r--arch/avr32/lib/delay.c4
-rw-r--r--arch/avr32/mach-at32ap/pio.c172
-rw-r--r--arch/avr32/mach-at32ap/pio.h2
-rw-r--r--arch/blackfin/kernel/setup.c2
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c2
-rw-r--r--arch/blackfin/mach-bf533/boards/H8606.c2
-rw-r--r--arch/blackfin/mach-bf533/boards/cm_bf533.c2
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c2
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/cm_bf537.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/generic_board.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/minotaur.c2
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c2
-rw-r--r--arch/blackfin/mach-bf561/boards/cm_bf561.c2
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c2
-rw-r--r--arch/blackfin/mach-common/entry.S2
-rw-r--r--arch/cris/Kconfig5
-rw-r--r--arch/cris/arch-v10/Kconfig4
-rw-r--r--arch/cris/arch-v10/drivers/Kconfig4
-rw-r--r--arch/cris/arch-v10/kernel/entry.S2
-rw-r--r--arch/cris/arch-v32/Kconfig4
-rw-r--r--arch/cris/arch-v32/drivers/Kconfig4
-rw-r--r--arch/cris/arch-v32/drivers/pci/dma.c4
-rw-r--r--arch/cris/kernel/setup.c2
-rw-r--r--arch/frv/Kconfig9
-rw-r--r--arch/frv/kernel/entry.S2
-rw-r--r--arch/frv/kernel/setup.c18
-rw-r--r--arch/frv/kernel/vmlinux.lds.S2
-rw-r--r--arch/frv/mm/mmu-context.c2
-rw-r--r--arch/frv/mm/pgalloc.c2
-rw-r--r--arch/h8300/kernel/irq.c1
-rw-r--r--arch/h8300/kernel/setup.c2
-rw-r--r--arch/ia64/hp/common/sba_iommu.c10
-rw-r--r--arch/ia64/ia32/ia32_support.c5
-rw-r--r--arch/ia64/kernel/acpi-processor.c6
-rw-r--r--arch/ia64/kernel/acpi.c32
-rw-r--r--arch/ia64/kernel/efi.c502
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/kernel/fsyscall_gtod_data.h4
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c3
-rw-r--r--arch/ia64/kernel/kprobes.c7
-rw-r--r--arch/ia64/kernel/machine_kexec.c7
-rw-r--r--arch/ia64/kernel/mca.c66
-rw-r--r--arch/ia64/kernel/mca_asm.S46
-rw-r--r--arch/ia64/kernel/mca_drv.c2
-rw-r--r--arch/ia64/kernel/mca_drv.h2
-rw-r--r--arch/ia64/kernel/mca_drv_asm.S2
-rw-r--r--arch/ia64/kernel/perfmon.c6
-rw-r--r--arch/ia64/kernel/sal.c14
-rw-r--r--arch/ia64/kernel/setup.c2
-rw-r--r--arch/ia64/kernel/smpboot.c14
-rw-r--r--arch/ia64/kernel/traps.c35
-rw-r--r--arch/ia64/kernel/unaligned.c13
-rw-r--r--arch/ia64/mm/contig.c2
-rw-r--r--arch/ia64/mm/discontig.c4
-rw-r--r--arch/ia64/mm/fault.c8
-rw-r--r--arch/ia64/sn/kernel/sn2/sn2_smp.c2
-rw-r--r--arch/ia64/sn/kernel/sn2/sn_hwperf.c11
-rw-r--r--arch/ia64/sn/pci/pcibr/pcibr_provider.c6
-rw-r--r--arch/m32r/boot/compressed/m32r_sio.c4
-rw-r--r--arch/m32r/kernel/setup.c11
-rw-r--r--arch/m32r/kernel/smpboot.c1
-rw-r--r--arch/m32r/kernel/syscall_table.S2
-rw-r--r--arch/m32r/mm/discontig.c5
-rw-r--r--arch/m68k/Kconfig14
-rw-r--r--arch/m68k/Makefile11
-rw-r--r--arch/m68k/amiga/Makefile2
-rw-r--r--arch/m68k/amiga/amiga_ksyms.c33
-rw-r--r--arch/m68k/amiga/amisound.c5
-rw-r--r--arch/m68k/amiga/chipram.c9
-rw-r--r--arch/m68k/amiga/cia.c2
-rw-r--r--arch/m68k/amiga/config.c12
-rw-r--r--arch/m68k/amiga/pcmcia.c9
-rw-r--r--arch/m68k/atari/Makefile2
-rw-r--r--arch/m68k/atari/ataints.c3
-rw-r--r--arch/m68k/atari/atari_ksyms.c35
-rw-r--r--arch/m68k/atari/atasound.c2
-rw-r--r--arch/m68k/atari/config.c11
-rw-r--r--arch/m68k/atari/debug.c6
-rw-r--r--arch/m68k/atari/hades-pci.c54
-rw-r--r--arch/m68k/atari/stdma.c5
-rw-r--r--arch/m68k/atari/stram.c5
-rw-r--r--arch/m68k/configs/mac_defconfig1
-rw-r--r--arch/m68k/hp300/Makefile2
-rw-r--r--arch/m68k/hp300/ksyms.c9
-rw-r--r--arch/m68k/kernel/entry.S2
-rw-r--r--arch/m68k/kernel/process.c2
-rw-r--r--arch/m68k/kernel/setup.c3
-rw-r--r--arch/m68k/mac/Makefile2
-rw-r--r--arch/m68k/mac/config.c2
-rw-r--r--arch/m68k/mac/mac_ksyms.c8
-rw-r--r--arch/m68k/mac/via.c5
-rw-r--r--arch/m68k/mvme16x/Makefile2
-rw-r--r--arch/m68k/mvme16x/config.c2
-rw-r--r--arch/m68k/mvme16x/mvme16x_ksyms.c6
-rw-r--r--arch/m68knommu/Kconfig.debug7
-rw-r--r--arch/m68knommu/defconfig1
-rw-r--r--arch/m68knommu/kernel/m68k_ksyms.c11
-rw-r--r--arch/m68knommu/kernel/setup.c5
-rw-r--r--arch/m68knommu/kernel/syscalltable.S2
-rw-r--r--arch/m68knommu/lib/memcpy.c1
-rw-r--r--arch/mips/au1000/common/gpio.c1
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/setup.c4
-rw-r--r--arch/mips/kernel/smp.c1
-rw-r--r--arch/mips/kernel/sysirix.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c3
-rw-r--r--arch/parisc/Kconfig.debug9
-rw-r--r--arch/parisc/configs/a500_defconfig1
-rw-r--r--arch/parisc/mm/init.c14
-rw-r--r--arch/powerpc/Kconfig25
-rw-r--r--arch/powerpc/Makefile9
-rw-r--r--arch/powerpc/boot/Makefile126
-rw-r--r--arch/powerpc/boot/cuboot-mpc7448hpc2.c (renamed from arch/powerpc/boot/cuboot-hpc2.c)0
-rw-r--r--arch/powerpc/boot/dts/adder875-redboot.dts1
-rw-r--r--arch/powerpc/boot/dts/adder875-uboot.dts1
-rw-r--r--arch/powerpc/boot/dts/mpc5121ads.dts122
-rw-r--r--arch/powerpc/boot/dts/mpc8313erdb.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8315erdb.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc834x_mds.dts2
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds.dts12
-rw-r--r--arch/powerpc/boot/dts/mpc885ads.dts1
-rw-r--r--arch/powerpc/boot/dts/sequoia.dts8
-rw-r--r--arch/powerpc/boot/dts/storcenter.dts12
-rwxr-xr-xarch/powerpc/boot/wrapper23
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig10
-rw-r--r--arch/powerpc/kernel/Makefile4
-rw-r--r--arch/powerpc/kernel/asm-offsets.c3
-rw-r--r--arch/powerpc/kernel/binfmt_elf32.c69
-rw-r--r--arch/powerpc/kernel/cputable.c10
-rw-r--r--arch/powerpc/kernel/dma_64.c8
-rw-r--r--arch/powerpc/kernel/iommu.c93
-rw-r--r--arch/powerpc/kernel/legacy_serial.c3
-rw-r--r--arch/powerpc/kernel/pmc.c2
-rw-r--r--arch/powerpc/kernel/ptrace.c727
-rw-r--r--arch/powerpc/kernel/ptrace32.c161
-rw-r--r--arch/powerpc/kernel/time.c12
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/kernel/vio.c2
-rw-r--r--arch/powerpc/mm/mem.c15
-rw-r--r--arch/powerpc/mm/numa.c68
-rw-r--r--arch/powerpc/mm/pgtable_32.c6
-rw-r--r--arch/powerpc/oprofile/Makefile2
-rw-r--r--arch/powerpc/oprofile/common.c6
-rw-r--r--arch/powerpc/oprofile/op_model_fsl_emb.c (renamed from arch/powerpc/oprofile/op_model_fsl_booke.c)28
-rw-r--r--arch/powerpc/platforms/40x/Kconfig1
-rw-r--r--arch/powerpc/platforms/40x/virtex.c2
-rw-r--r--arch/powerpc/platforms/40x/walnut.c1
-rw-r--r--arch/powerpc/platforms/44x/warp.c2
-rw-r--r--arch/powerpc/platforms/512x/Kconfig20
-rw-r--r--arch/powerpc/platforms/512x/Makefile4
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads.c104
-rw-r--r--arch/powerpc/platforms/82xx/mpc8272_ads.c3
-rw-r--r--arch/powerpc/platforms/82xx/pq2fads.c3
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h2
-rw-r--r--arch/powerpc/platforms/83xx/usb.c17
-rw-r--r--arch/powerpc/platforms/8xx/adder875.c6
-rw-r--r--arch/powerpc/platforms/8xx/ep88xc.c1
-rw-r--r--arch/powerpc/platforms/Kconfig2
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype10
-rw-r--r--arch/powerpc/platforms/Makefile1
-rw-r--r--arch/powerpc/platforms/cell/Kconfig7
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c99
-rw-r--r--arch/powerpc/platforms/cell/setup.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/Makefile2
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c6
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c29
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c7
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c28
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h5
-rw-r--r--arch/powerpc/platforms/cell/spufs/sputrace.c250
-rw-r--r--arch/powerpc/platforms/embedded6xx/storcenter.c25
-rw-r--r--arch/powerpc/platforms/iseries/iommu.c4
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_32.c2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c2
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c2
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c1
-rw-r--r--arch/powerpc/platforms/pseries/xics.c118
-rw-r--r--arch/powerpc/platforms/pseries/xics.h3
-rw-r--r--arch/powerpc/sysdev/dcr.c5
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c2
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c10
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c10
-rw-r--r--arch/ppc/8260_io/enet.c2
-rw-r--r--arch/ppc/8260_io/fcc_enet.c2
-rw-r--r--arch/ppc/kernel/vmlinux.lds.S5
-rw-r--r--arch/ppc/mm/pgtable.c6
-rw-r--r--arch/ppc/platforms/prep_setup.c81
-rw-r--r--arch/s390/Kconfig8
-rw-r--r--arch/s390/Kconfig.debug8
-rw-r--r--arch/s390/kernel/compat_wrapper.S8
-rw-r--r--arch/s390/kernel/entry.S7
-rw-r--r--arch/s390/kernel/entry64.S7
-rw-r--r--arch/s390/kernel/ipl.c27
-rw-r--r--arch/s390/kernel/setup.c25
-rw-r--r--arch/s390/kernel/smp.c13
-rw-r--r--arch/s390/kernel/stacktrace.c31
-rw-r--r--arch/s390/kernel/syscalls.S2
-rw-r--r--arch/s390/kernel/traps.c5
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/mm/init.c27
-rw-r--r--arch/s390/mm/vmem.c5
-rw-r--r--arch/sh/boards/landisk/setup.c2
-rw-r--r--arch/sh/boards/lboxre2/setup.c2
-rw-r--r--arch/sh/boards/renesas/r7780rp/setup.c2
-rw-r--r--arch/sh/boards/renesas/rts7751r2d/setup.c2
-rw-r--r--arch/sh/boards/renesas/sdk7780/setup.c2
-rw-r--r--arch/sh/boards/se/7722/setup.c2
-rw-r--r--arch/sh/kernel/setup.c35
-rw-r--r--arch/sh/kernel/syscalls_32.S2
-rw-r--r--arch/sh/kernel/syscalls_64.S2
-rw-r--r--arch/sh/mm/numa.c4
-rw-r--r--arch/sparc/kernel/entry.S17
-rw-r--r--arch/sparc/kernel/ptrace.c813
-rw-r--r--arch/sparc/kernel/sun4d_smp.c4
-rw-r--r--arch/sparc/kernel/sun4m_smp.c5
-rw-r--r--arch/sparc/kernel/systbls.S6
-rw-r--r--arch/sparc/mm/init.c6
-rw-r--r--arch/sparc64/defconfig83
-rw-r--r--arch/sparc64/kernel/Makefile2
-rw-r--r--arch/sparc64/kernel/binfmt_elf32.c31
-rw-r--r--arch/sparc64/kernel/entry.S4
-rw-r--r--arch/sparc64/kernel/head.S25
-rw-r--r--arch/sparc64/kernel/iommu.c142
-rw-r--r--arch/sparc64/kernel/iommu_common.c244
-rw-r--r--arch/sparc64/kernel/iommu_common.h29
-rw-r--r--arch/sparc64/kernel/pci_sun4v.c176
-rw-r--r--arch/sparc64/kernel/ptrace.c1095
-rw-r--r--arch/sparc64/kernel/smp.c2
-rw-r--r--arch/sparc64/kernel/sparc64_ksyms.c1
-rw-r--r--arch/sparc64/kernel/systbls.S9
-rw-r--r--arch/sparc64/kernel/time.c5
-rw-r--r--arch/sparc64/mm/init.c8
-rw-r--r--arch/sparc64/prom/init.c3
-rw-r--r--arch/sparc64/solaris/fs.c2
-rw-r--r--arch/sparc64/solaris/timod.c6
-rw-r--r--arch/um/Kconfig26
-rw-r--r--arch/um/Kconfig.char2
-rw-r--r--arch/um/Kconfig.debug6
-rw-r--r--arch/um/Kconfig.net12
-rw-r--r--arch/um/Makefile12
-rw-r--r--arch/um/Makefile-tt5
-rw-r--r--arch/um/defconfig6
-rw-r--r--arch/um/drivers/line.c28
-rw-r--r--arch/um/drivers/mconsole_kern.c55
-rw-r--r--arch/um/drivers/mconsole_user.c5
-rw-r--r--arch/um/drivers/net_kern.c16
-rw-r--r--arch/um/drivers/net_user.c2
-rw-r--r--arch/um/drivers/port_kern.c7
-rw-r--r--arch/um/drivers/random.c1
-rw-r--r--arch/um/drivers/slip_user.c2
-rw-r--r--arch/um/drivers/slirp_user.c2
-rw-r--r--arch/um/drivers/ssl.c1
-rw-r--r--arch/um/drivers/stdio_console.c1
-rw-r--r--arch/um/drivers/ubd_kern.c30
-rw-r--r--arch/um/drivers/ubd_user.c1
-rw-r--r--arch/um/drivers/vde_user.c2
-rw-r--r--arch/um/include/arch.h2
-rw-r--r--arch/um/include/as-layout.h34
-rw-r--r--arch/um/include/chan_user.h2
-rw-r--r--arch/um/include/common-offsets.h1
-rw-r--r--arch/um/include/init.h25
-rw-r--r--arch/um/include/irq_user.h1
-rw-r--r--arch/um/include/kern_util.h120
-rw-r--r--arch/um/include/mem_user.h5
-rw-r--r--arch/um/include/misc_constants.h6
-rw-r--r--arch/um/include/os.h41
-rw-r--r--arch/um/include/ptrace_user.h11
-rw-r--r--arch/um/include/registers.h7
-rw-r--r--arch/um/include/signal_kern.h22
-rw-r--r--arch/um/include/skas/mode-skas.h11
-rw-r--r--arch/um/include/sysdep-i386/syscalls.h5
-rw-r--r--arch/um/include/sysdep-x86_64/kernel-offsets.h9
-rw-r--r--arch/um/include/sysdep-x86_64/syscalls.h2
-rw-r--r--arch/um/include/um_mmu.h4
-rw-r--r--arch/um/include/um_uaccess.h4
-rw-r--r--arch/um/kernel/exec.c5
-rw-r--r--arch/um/kernel/exitcode.c31
-rw-r--r--arch/um/kernel/gmon_syms.c11
-rw-r--r--arch/um/kernel/gprof_syms.c13
-rw-r--r--arch/um/kernel/initrd.c30
-rw-r--r--arch/um/kernel/irq.c6
-rw-r--r--arch/um/kernel/ksyms.c5
-rw-r--r--arch/um/kernel/mem.c147
-rw-r--r--arch/um/kernel/physmem.c16
-rw-r--r--arch/um/kernel/process.c154
-rw-r--r--arch/um/kernel/reboot.c7
-rw-r--r--arch/um/kernel/sigio.c18
-rw-r--r--arch/um/kernel/signal.c16
-rw-r--r--arch/um/kernel/skas/clone.c32
-rw-r--r--arch/um/kernel/skas/mmu.c127
-rw-r--r--arch/um/kernel/skas/process.c20
-rw-r--r--arch/um/kernel/skas/syscall.c6
-rw-r--r--arch/um/kernel/skas/uaccess.c140
-rw-r--r--arch/um/kernel/smp.c14
-rw-r--r--arch/um/kernel/syscall.c3
-rw-r--r--arch/um/kernel/sysrq.c44
-rw-r--r--arch/um/kernel/time.c14
-rw-r--r--arch/um/kernel/tlb.c51
-rw-r--r--arch/um/kernel/trap.c33
-rw-r--r--arch/um/kernel/uaccess.c11
-rw-r--r--arch/um/kernel/um_arch.c100
-rw-r--r--arch/um/kernel/umid.c15
-rw-r--r--arch/um/os-Linux/Makefile4
-rw-r--r--arch/um/os-Linux/aio.c1
-rw-r--r--arch/um/os-Linux/drivers/ethertap_user.c2
-rw-r--r--arch/um/os-Linux/drivers/tuntap_user.c5
-rw-r--r--arch/um/os-Linux/file.c287
-rw-r--r--arch/um/os-Linux/helper.c74
-rw-r--r--arch/um/os-Linux/irq.c39
-rw-r--r--arch/um/os-Linux/main.c14
-rw-r--r--arch/um/os-Linux/mem.c13
-rw-r--r--arch/um/os-Linux/process.c5
-rw-r--r--arch/um/os-Linux/registers.c34
-rw-r--r--arch/um/os-Linux/sigio.c244
-rw-r--r--arch/um/os-Linux/signal.c102
-rw-r--r--arch/um/os-Linux/skas/Makefile6
-rw-r--r--arch/um/os-Linux/skas/process.c281
-rw-r--r--arch/um/os-Linux/skas/trap.c69
-rw-r--r--arch/um/os-Linux/start_up.c17
-rw-r--r--arch/um/os-Linux/trap.c23
-rw-r--r--arch/um/os-Linux/tty.c57
-rw-r--r--arch/um/os-Linux/tty_log.c1
-rw-r--r--arch/um/os-Linux/util.c15
-rw-r--r--arch/um/sys-i386/bug.c1
-rw-r--r--arch/um/sys-i386/bugs.c201
-rw-r--r--arch/um/sys-i386/ldt.c19
-rw-r--r--arch/um/sys-i386/ptrace.c6
-rw-r--r--arch/um/sys-i386/ptrace_user.c14
-rw-r--r--arch/um/sys-i386/signal.c18
-rw-r--r--arch/um/sys-i386/stub.S8
-rw-r--r--arch/um/sys-i386/stub_segv.c19
-rw-r--r--arch/um/sys-i386/sys_call_table.S5
-rw-r--r--arch/um/sys-i386/tls.c41
-rw-r--r--arch/um/sys-ppc/Makefile22
-rw-r--r--arch/um/sys-x86_64/bug.c3
-rw-r--r--arch/um/sys-x86_64/bugs.c7
-rw-r--r--arch/um/sys-x86_64/ptrace.c29
-rw-r--r--arch/um/sys-x86_64/ptrace_user.c46
-rw-r--r--arch/um/sys-x86_64/signal.c4
-rw-r--r--arch/um/sys-x86_64/stub.S8
-rw-r--r--arch/um/sys-x86_64/stub_segv.c39
-rw-r--r--arch/um/sys-x86_64/syscall_table.c43
-rw-r--r--arch/um/sys-x86_64/syscalls.c10
-rw-r--r--arch/um/sys-x86_64/sysrq.c29
-rw-r--r--arch/um/sys-x86_64/um_module.c8
-rw-r--r--arch/v850/kernel/anna.c3
-rw-r--r--arch/v850/kernel/as85ep1.c3
-rw-r--r--arch/v850/kernel/rte_ma1_cb.c6
-rw-r--r--arch/v850/kernel/setup.c12
-rw-r--r--arch/x86/Kconfig13
-rw-r--r--arch/x86/Kconfig.debug4
-rw-r--r--arch/x86/ia32/ia32_aout.c3
-rw-r--r--arch/x86/ia32/ia32entry.S4
-rw-r--r--arch/x86/kernel/acpi/boot.c47
-rw-r--r--arch/x86/kernel/acpi/processor.c6
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/Kconfig4
-rw-r--r--arch/x86/kernel/cpu/cpufreq/e_powersaver.c61
-rw-r--r--arch/x86/kernel/cpu/cpufreq/gx-suspmod.c4
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k7.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c10
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.h2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/speedstep-lib.c5
-rw-r--r--arch/x86/kernel/cpu/cyrix.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c19
-rw-r--r--arch/x86/kernel/entry_64.S24
-rw-r--r--arch/x86/kernel/head_64.S15
-rw-r--r--arch/x86/kernel/machine_kexec_32.c2
-rw-r--r--arch/x86/kernel/machine_kexec_64.c5
-rw-r--r--arch/x86/kernel/mpparse_32.c6
-rw-r--r--arch/x86/kernel/pci-calgary_64.c34
-rw-r--r--arch/x86/kernel/pci-gart_64.c48
-rw-r--r--arch/x86/kernel/ptrace.c25
-rw-r--r--arch/x86/kernel/quirks.c26
-rw-r--r--arch/x86/kernel/setup_32.c18
-rw-r--r--arch/x86/kernel/setup_64.c37
-rw-r--r--arch/x86/kernel/smpboot_32.c2
-rw-r--r--arch/x86/kernel/srat_32.c2
-rw-r--r--arch/x86/kernel/syscall_table_32.S4
-rw-r--r--arch/x86/kernel/test_nx.c2
-rw-r--r--arch/x86/kernel/traps_32.c15
-rw-r--r--arch/x86/kvm/x86.c8
-rw-r--r--arch/x86/lib/Makefile2
-rw-r--r--arch/x86/lib/bitstr_64.c28
-rw-r--r--arch/x86/lib/delay_32.c4
-rw-r--r--arch/x86/lib/delay_64.c4
-rw-r--r--arch/x86/mach-voyager/voyager_smp.c2
-rw-r--r--arch/x86/mm/discontig_32.c3
-rw-r--r--arch/x86/mm/fault.c28
-rw-r--r--arch/x86/mm/init_64.c13
-rw-r--r--arch/x86/mm/numa_64.c5
-rw-r--r--arch/x86/mm/pageattr-test.c65
-rw-r--r--arch/x86/mm/pageattr.c15
-rw-r--r--arch/x86/mm/pgtable_32.c12
-rw-r--r--arch/x86/mm/srat_64.c3
-rw-r--r--arch/x86/pci/i386.c2
-rw-r--r--arch/xtensa/kernel/time.c2
-rw-r--r--crypto/async_tx/async_memcpy.c38
-rw-r--r--crypto/async_tx/async_memset.c28
-rw-r--r--crypto/async_tx/async_tx.c9
-rw-r--r--crypto/async_tx/async_xor.c124
-rw-r--r--crypto/cbc.c2
-rw-r--r--crypto/cryptd.c4
-rw-r--r--crypto/ecb.c2
-rw-r--r--crypto/hmac.c2
-rw-r--r--crypto/lrw.c2
-rw-r--r--crypto/pcbc.c2
-rw-r--r--crypto/xcbc.c2
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig40
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/asus_acpi.c55
-rw-r--r--drivers/acpi/battery.c5
-rw-r--r--drivers/acpi/bay.c10
-rw-r--r--drivers/acpi/blacklist.c58
-rw-r--r--drivers/acpi/bus.c26
-rw-r--r--drivers/acpi/debug.c57
-rw-r--r--drivers/acpi/dispatcher/dsopcode.c4
-rw-r--r--drivers/acpi/dock.c14
-rw-r--r--drivers/acpi/ec.c19
-rw-r--r--drivers/acpi/event.c28
-rw-r--r--drivers/acpi/events/evevent.c2
-rw-r--r--drivers/acpi/events/evgpe.c27
-rw-r--r--drivers/acpi/fan.c92
-rw-r--r--drivers/acpi/glue.c4
-rw-r--r--drivers/acpi/hardware/hwsleep.c2
-rw-r--r--drivers/acpi/namespace/nsxfeval.c10
-rw-r--r--drivers/acpi/numa.c3
-rw-r--r--drivers/acpi/osl.c372
-rw-r--r--drivers/acpi/pci_bind.c4
-rw-r--r--drivers/acpi/pci_irq.c7
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/power.c6
-rw-r--r--drivers/acpi/processor_core.c42
-rw-r--r--drivers/acpi/processor_idle.c65
-rw-r--r--drivers/acpi/processor_perflib.c16
-rw-r--r--drivers/acpi/processor_thermal.c134
-rw-r--r--drivers/acpi/processor_throttling.c346
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/sbshc.c4
-rw-r--r--drivers/acpi/scan.c110
-rw-r--r--drivers/acpi/sleep/main.c17
-rw-r--r--drivers/acpi/sleep/proc.c46
-rw-r--r--drivers/acpi/system.c208
-rw-r--r--drivers/acpi/tables/Makefile2
-rw-r--r--drivers/acpi/tables/tbxfroot.c4
-rw-r--r--drivers/acpi/thermal.c663
-rw-r--r--drivers/acpi/utilities/utglobal.c2
-rw-r--r--drivers/acpi/video.c262
-rw-r--r--drivers/acpi/wmi.c710
-rw-r--r--drivers/ata/ahci.c35
-rw-r--r--drivers/ata/ata_piix.c3
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/pata_of_platform.c2
-rw-r--r--drivers/ata/pata_platform.c2
-rw-r--r--drivers/ata/sata_fsl.c4
-rw-r--r--drivers/ata/sata_inic162x.c25
-rw-r--r--drivers/ata/sata_mv.c366
-rw-r--r--drivers/ata/sata_nv.c78
-rw-r--r--drivers/ata/sata_via.c2
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/dmapool.c481
-rw-r--r--drivers/base/firmware_class.c3
-rw-r--r--drivers/block/ataflop.c16
-rw-r--r--drivers/block/cciss.c10
-rw-r--r--drivers/block/loop.c8
-rw-r--r--drivers/block/paride/pt.c2
-rw-r--r--drivers/block/pktcdvd.c4
-rw-r--r--drivers/block/rd.c3
-rw-r--r--drivers/block/xsysace.c6
-rw-r--r--drivers/bluetooth/bpa10x.c1
-rw-r--r--drivers/bluetooth/bt3c_cs.c2
-rw-r--r--drivers/bluetooth/btsdio.c4
-rw-r--r--drivers/bluetooth/btuart_cs.c2
-rw-r--r--drivers/bluetooth/hci_usb.c1
-rw-r--r--drivers/cdrom/cdrom.c17
-rw-r--r--drivers/cdrom/viocd.c2
-rw-r--r--drivers/char/Kconfig27
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/agp.h6
-rw-r--r--drivers/char/agp/alpha-agp.c17
-rw-r--r--drivers/char/agp/amd-k7-agp.c4
-rw-r--r--drivers/char/agp/backend.c2
-rw-r--r--drivers/char/agp/compat_ioctl.c4
-rw-r--r--drivers/char/agp/compat_ioctl.h2
-rw-r--r--drivers/char/agp/frontend.c13
-rw-r--r--drivers/char/agp/generic.c7
-rw-r--r--drivers/char/agp/intel-agp.c305
-rw-r--r--drivers/char/drm/Kconfig9
-rw-r--r--drivers/char/drm/Makefile2
-rw-r--r--drivers/char/drm/README.drm1
-rw-r--r--drivers/char/drm/ati_pcigart.c6
-rw-r--r--drivers/char/drm/drm.h3
-rw-r--r--drivers/char/drm/drmP.h68
-rw-r--r--drivers/char/drm/drm_agpsupport.c3
-rw-r--r--drivers/char/drm/drm_bufs.c23
-rw-r--r--drivers/char/drm/drm_context.c2
-rw-r--r--drivers/char/drm/drm_drv.c39
-rw-r--r--drivers/char/drm/drm_hashtab.c5
-rw-r--r--drivers/char/drm/drm_hashtab.h1
-rw-r--r--drivers/char/drm/drm_ioc32.c6
-rw-r--r--drivers/char/drm/drm_ioctl.c25
-rw-r--r--drivers/char/drm/drm_irq.c4
-rw-r--r--drivers/char/drm/drm_memory.c1
-rw-r--r--drivers/char/drm/drm_mm.c1
-rw-r--r--drivers/char/drm/drm_os_linux.h4
-rw-r--r--drivers/char/drm/drm_pciids.h97
-rw-r--r--drivers/char/drm/drm_proc.c4
-rw-r--r--drivers/char/drm/drm_sarea.h2
-rw-r--r--drivers/char/drm/drm_scatter.c10
-rw-r--r--drivers/char/drm/drm_stub.c18
-rw-r--r--drivers/char/drm/drm_sysfs.c146
-rw-r--r--drivers/char/drm/drm_vm.c4
-rw-r--r--drivers/char/drm/i810_dma.c24
-rw-r--r--drivers/char/drm/i810_drv.h52
-rw-r--r--drivers/char/drm/i830_dma.c2
-rw-r--r--drivers/char/drm/i830_drm.h8
-rw-r--r--drivers/char/drm/i830_drv.h51
-rw-r--r--drivers/char/drm/i830_irq.c2
-rw-r--r--drivers/char/drm/i915_dma.c124
-rw-r--r--drivers/char/drm/i915_drv.c464
-rw-r--r--drivers/char/drm/i915_drv.h848
-rw-r--r--drivers/char/drm/i915_irq.c26
-rw-r--r--drivers/char/drm/i915_mem.c11
-rw-r--r--drivers/char/drm/mga_dma.c10
-rw-r--r--drivers/char/drm/mga_drv.h123
-rw-r--r--drivers/char/drm/mga_state.c24
-rw-r--r--drivers/char/drm/r128_cce.c6
-rw-r--r--drivers/char/drm/r128_drv.h5
-rw-r--r--drivers/char/drm/r128_state.c43
-rw-r--r--drivers/char/drm/r300_cmdbuf.c75
-rw-r--r--drivers/char/drm/r300_reg.h32
-rw-r--r--drivers/char/drm/radeon_cp.c166
-rw-r--r--drivers/char/drm/radeon_drm.h13
-rw-r--r--drivers/char/drm/radeon_drv.h91
-rw-r--r--drivers/char/drm/radeon_irq.c6
-rw-r--r--drivers/char/drm/radeon_mem.c6
-rw-r--r--drivers/char/drm/radeon_state.c18
-rw-r--r--drivers/char/drm/savage_state.c6
-rw-r--r--drivers/char/drm/sis_mm.c6
-rw-r--r--drivers/char/drm/via_dma.c20
-rw-r--r--drivers/char/drm/via_dmablit.c184
-rw-r--r--drivers/char/drm/via_dmablit.h84
-rw-r--r--drivers/char/drm/via_drm.h4
-rw-r--r--drivers/char/drm/via_drv.c2
-rw-r--r--drivers/char/drm/via_irq.c26
-rw-r--r--drivers/char/drm/via_map.c5
-rw-r--r--drivers/char/drm/via_mm.c6
-rw-r--r--drivers/char/drm/via_video.c4
-rw-r--r--drivers/char/epca.h1
-rw-r--r--drivers/char/esp.c61
-rw-r--r--drivers/char/hvc_console.c2
-rw-r--r--drivers/char/hvcs.c2
-rw-r--r--drivers/char/hw_random/core.c10
-rw-r--r--drivers/char/hw_random/via-rng.c14
-rw-r--r--drivers/char/i8k.c43
-rw-r--r--drivers/char/ip2/ip2main.c3
-rw-r--r--drivers/char/ip27-rtc.c9
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c6
-rw-r--r--drivers/char/istallion.c23
-rw-r--r--drivers/char/keyboard.c5
-rw-r--r--drivers/char/lp.c10
-rw-r--r--drivers/char/mbcs.c19
-rw-r--r--drivers/char/mbcs.h6
-rw-r--r--drivers/char/misc.c13
-rw-r--r--drivers/char/mxser.c3868
-rw-r--r--drivers/char/mxser.h478
-rw-r--r--drivers/char/mxser_new.c2817
-rw-r--r--drivers/char/mxser_new.h293
-rw-r--r--drivers/char/n_tty.c27
-rw-r--r--drivers/char/pcmcia/Kconfig9
-rw-r--r--drivers/char/pcmcia/Makefile2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c17
-rw-r--r--drivers/char/pcmcia/ipwireless/Makefile10
-rw-r--r--drivers/char/pcmcia/ipwireless/hardware.c1787
-rw-r--r--drivers/char/pcmcia/ipwireless/hardware.h64
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c501
-rw-r--r--drivers/char/pcmcia/ipwireless/main.h70
-rw-r--r--drivers/char/pcmcia/ipwireless/network.c512
-rw-r--r--drivers/char/pcmcia/ipwireless/network.h55
-rw-r--r--drivers/char/pcmcia/ipwireless/setup_protocol.h108
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.c688
-rw-r--r--drivers/char/pcmcia/ipwireless/tty.h48
-rw-r--r--drivers/char/pcmcia/synclink_cs.c3
-rw-r--r--drivers/char/random.c1
-rw-r--r--drivers/char/riscom8.c198
-rw-r--r--drivers/char/riscom8.h3
-rw-r--r--drivers/char/rocket.c108
-rw-r--r--drivers/char/rocket_int.h24
-rw-r--r--drivers/char/ser_a2232.c2
-rw-r--r--drivers/char/serial167.c78
-rw-r--r--drivers/char/specialix.c72
-rw-r--r--drivers/char/specialix_io8.h3
-rw-r--r--drivers/char/stallion.c78
-rw-r--r--drivers/char/sx.h2
-rw-r--r--drivers/char/synclink.c3
-rw-r--r--drivers/char/synclink_gt.c71
-rw-r--r--drivers/char/synclinkmp.c3
-rw-r--r--drivers/char/tpm/tpm.c44
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm_infineon.c6
-rw-r--r--drivers/char/tty_io.c25
-rw-r--r--drivers/char/vt.c8
-rw-r--r--drivers/char/xilinx_hwicap/Makefile7
-rw-r--r--drivers/char/xilinx_hwicap/buffer_icap.c380
-rw-r--r--drivers/char/xilinx_hwicap/buffer_icap.h57
-rw-r--r--drivers/char/xilinx_hwicap/fifo_icap.c381
-rw-r--r--drivers/char/xilinx_hwicap/fifo_icap.h62
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c904
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.h193
-rw-r--r--drivers/cpufreq/Kconfig3
-rw-r--r--drivers/cpufreq/cpufreq.c33
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c40
-rw-r--r--drivers/cpufreq/freq_table.c2
-rw-r--r--drivers/cpuidle/Kconfig4
-rw-r--r--drivers/cpuidle/cpuidle.c48
-rw-r--r--drivers/cpuidle/governors/ladder.c5
-rw-r--r--drivers/cpuidle/governors/menu.c4
-rw-r--r--drivers/dio/dio.c4
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/dmaengine.c49
-rw-r--r--drivers/dma/ioat_dma.c43
-rw-r--r--drivers/dma/iop-adma.c138
-rw-r--r--drivers/edac/Kconfig23
-rw-r--r--drivers/edac/Makefile3
-rw-r--r--drivers/edac/cell_edac.c258
-rw-r--r--drivers/edac/edac_core.h2
-rw-r--r--drivers/edac/edac_device.c8
-rw-r--r--drivers/edac/edac_mc_sysfs.c3
-rw-r--r--drivers/edac/edac_pci.c2
-rw-r--r--drivers/edac/edac_pci_sysfs.c12
-rw-r--r--drivers/edac/i3000_edac.c267
-rw-r--r--drivers/edac/mpc85xx_edac.c1043
-rw-r--r--drivers/edac/mpc85xx_edac.h162
-rw-r--r--drivers/edac/mv64x60_edac.c855
-rw-r--r--drivers/edac/mv64x60_edac.h114
-rw-r--r--drivers/firmware/dcdbas.c3
-rw-r--r--drivers/firmware/dmi-id.c1
-rw-r--r--drivers/firmware/dmi_scan.c9
-rw-r--r--drivers/gpio/Kconfig73
-rw-r--r--drivers/gpio/Makefile9
-rw-r--r--drivers/gpio/gpiolib.c567
-rw-r--r--drivers/gpio/mcp23s08.c357
-rw-r--r--drivers/gpio/pca953x.c308
-rw-r--r--drivers/gpio/pcf857x.c330
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/i2c/chips/Kconfig7
-rw-r--r--drivers/ide/Kconfig9
-rw-r--r--drivers/ide/arm/Makefile1
-rw-r--r--drivers/ide/arm/icside.c2
-rw-r--r--drivers/ide/arm/palm_bk3710.c395
-rw-r--r--drivers/ide/cris/ide-cris.c33
-rw-r--r--drivers/ide/ide-acpi.c2
-rw-r--r--drivers/ide/ide-cd.c9
-rw-r--r--drivers/ide/ide-dma.c3
-rw-r--r--drivers/ide/ide-floppy.c6
-rw-r--r--drivers/ide/ide-generic.c10
-rw-r--r--drivers/ide/ide-io.c14
-rw-r--r--drivers/ide/ide-iops.c45
-rw-r--r--drivers/ide/ide-lib.c2
-rw-r--r--drivers/ide/ide-probe.c53
-rw-r--r--drivers/ide/ide-proc.c1
-rw-r--r--drivers/ide/ide-tape.c2400
-rw-r--r--drivers/ide/ide-taskfile.c35
-rw-r--r--drivers/ide/ide.c54
-rw-r--r--drivers/ide/legacy/buddha.c72
-rw-r--r--drivers/ide/legacy/falconide.c42
-rw-r--r--drivers/ide/legacy/gayle.c39
-rw-r--r--drivers/ide/legacy/hd.c9
-rw-r--r--drivers/ide/legacy/ide_platform.c2
-rw-r--r--drivers/ide/legacy/macide.c57
-rw-r--r--drivers/ide/legacy/q40ide.c9
-rw-r--r--drivers/ide/pci/Makefile3
-rw-r--r--drivers/ide/pci/generic.c13
-rw-r--r--drivers/ide/pci/siimage.c3
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/core/cm.c89
-rw-r--r--drivers/infiniband/core/fmr_pool.c7
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h5
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c91
-rw-r--r--drivers/infiniband/hw/mlx4/main.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c11
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c22
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c13
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c47
-rw-r--r--drivers/infiniband/hw/nes/Kconfig16
-rw-r--r--drivers/infiniband/hw/nes/Makefile3
-rw-r--r--drivers/infiniband/hw/nes/nes.c1152
-rw-r--r--drivers/infiniband/hw/nes/nes.h560
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c3088
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h433
-rw-r--r--drivers/infiniband/hw/nes/nes_context.h193
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c3080
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h1206
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c1703
-rw-r--r--drivers/infiniband/hw/nes/nes_user.h112
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c917
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c3917
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h169
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c19
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c53
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h1
-rw-r--r--drivers/input/Kconfig12
-rw-r--r--drivers/input/Makefile1
-rw-r--r--drivers/input/apm-power.c131
-rw-r--r--drivers/input/evdev.c6
-rw-r--r--drivers/input/input-polldev.c18
-rw-r--r--drivers/input/input.c85
-rw-r--r--drivers/input/joystick/amijoy.c1
-rw-r--r--drivers/input/joystick/analog.c1
-rw-r--r--drivers/input/joystick/db9.c1
-rw-r--r--drivers/input/joystick/gamecon.c1
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c17
-rw-r--r--drivers/input/joystick/turbografx.c1
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/keyboard/Kconfig29
-rw-r--r--drivers/input/keyboard/Makefile3
-rw-r--r--drivers/input/keyboard/atkbd.c91
-rw-r--r--drivers/input/keyboard/lkkbd.c1
-rw-r--r--drivers/input/keyboard/pxa27x_keyboard.c274
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c572
-rw-r--r--drivers/input/keyboard/tosakbd.c415
-rw-r--r--drivers/input/misc/Kconfig14
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/apanel.c378
-rw-r--r--drivers/input/misc/ati_remote.c1
-rw-r--r--drivers/input/misc/atlas_btns.c39
-rw-r--r--drivers/input/misc/cobalt_btns.c73
-rw-r--r--drivers/input/misc/keyspan_remote.c119
-rw-r--r--drivers/input/misc/wistron_btns.c4
-rw-r--r--drivers/input/mouse/inport.c1
-rw-r--r--drivers/input/mouse/logibm.c1
-rw-r--r--drivers/input/mouse/psmouse-base.c1
-rw-r--r--drivers/input/mouse/trackpoint.c1
-rw-r--r--drivers/input/mousedev.c3
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h67
-rw-r--r--drivers/input/serio/i8042.c26
-rw-r--r--drivers/input/serio/libps2.c1
-rw-r--r--drivers/input/touchscreen/ads7846.c8
-rw-r--r--drivers/input/touchscreen/h3600_ts_input.c4
-rw-r--r--drivers/input/touchscreen/mk712.c1
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c1
-rw-r--r--drivers/isdn/act2000/module.c22
-rw-r--r--drivers/isdn/gigaset/asyncdata.c8
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c423
-rw-r--r--drivers/isdn/gigaset/common.c135
-rw-r--r--drivers/isdn/gigaset/ev-layer.c106
-rw-r--r--drivers/isdn/gigaset/gigaset.h45
-rw-r--r--drivers/isdn/gigaset/interface.c19
-rw-r--r--drivers/isdn/gigaset/isocdata.c51
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c20
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c206
-rw-r--r--drivers/isdn/hardware/eicon/debug.c2
-rw-r--r--drivers/isdn/hardware/eicon/diva.c5
-rw-r--r--drivers/isdn/hardware/eicon/message.c2
-rw-r--r--drivers/isdn/hisax/avm_pci.c8
-rw-r--r--drivers/isdn/i4l/isdn_tty.c1
-rw-r--r--drivers/isdn/i4l/isdn_ttyfax.c2
-rw-r--r--drivers/isdn/icn/icn.c22
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c16
-rw-r--r--drivers/leds/Kconfig48
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/led-class.c13
-rw-r--r--drivers/leds/leds-ams-delta.c12
-rw-r--r--drivers/leds/leds-clevo-mail.c219
-rw-r--r--drivers/leds/leds-corgi.c4
-rw-r--r--drivers/leds/leds-gpio.c2
-rw-r--r--drivers/leds/leds-hp6xx.c120
-rw-r--r--drivers/leds/leds-ixp4xx-gpio.c214
-rw-r--r--drivers/leds/leds-locomo.c4
-rw-r--r--drivers/leds/leds-net48xx.c2
-rw-r--r--drivers/leds/leds-spitz.c8
-rw-r--r--drivers/leds/leds-tosa.c4
-rw-r--r--drivers/leds/leds-wrap.c47
-rw-r--r--drivers/leds/ledtrig-timer.c41
-rw-r--r--drivers/macintosh/mediabay.c46
-rw-r--r--drivers/macintosh/via-macii.c2
-rw-r--r--drivers/md/bitmap.c39
-rw-r--r--drivers/md/faulty.c2
-rw-r--r--drivers/md/linear.c2
-rw-r--r--drivers/md/md.c395
-rw-r--r--drivers/md/mktables.c187
-rw-r--r--drivers/md/multipath.c2
-rw-r--r--drivers/md/raid0.c8
-rw-r--r--drivers/md/raid1.c5
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5.c48
-rw-r--r--drivers/md/raid6test/test.c117
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/tvmixer.c336
-rw-r--r--drivers/media/video/usbvideo/konicawc.c2
-rw-r--r--drivers/media/video/usbvideo/quickcam_messenger.c2
-rw-r--r--drivers/mfd/Kconfig7
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/asic3.c588
-rw-r--r--drivers/misc/Kconfig62
-rw-r--r--drivers/misc/Makefile4
-rw-r--r--drivers/misc/acer-wmi.c1109
-rw-r--r--drivers/misc/asus-laptop.c29
-rw-r--r--drivers/misc/enclosure.c484
-rw-r--r--drivers/misc/fujitsu-laptop.c1
-rw-r--r--drivers/misc/intel_menlow.c526
-rw-r--r--drivers/misc/lkdtm.c24
-rw-r--r--drivers/misc/msi-laptop.c1
-rw-r--r--drivers/misc/phantom.c7
-rw-r--r--drivers/misc/sony-laptop.c445
-rw-r--r--drivers/misc/tc1100-wmi.c290
-rw-r--r--drivers/misc/thinkpad_acpi.c3256
-rw-r--r--drivers/misc/thinkpad_acpi.h606
-rw-r--r--drivers/mtd/Kconfig11
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c78
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c14
-rw-r--r--drivers/mtd/chips/cfi_probe.c12
-rw-r--r--drivers/mtd/chips/gen_probe.c2
-rw-r--r--drivers/mtd/chips/jedec_probe.c1376
-rw-r--r--drivers/mtd/cmdlinepart.c9
-rw-r--r--drivers/mtd/devices/block2mtd.c4
-rw-r--r--drivers/mtd/devices/doc2000.c4
-rw-r--r--drivers/mtd/devices/doc2001plus.c2
-rw-r--r--drivers/mtd/devices/lart.c2
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c2
-rw-r--r--drivers/mtd/devices/phram.c4
-rw-r--r--drivers/mtd/maps/Kconfig9
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/mtx-1_flash.c2
-rw-r--r--drivers/mtd/maps/physmap.c168
-rw-r--r--drivers/mtd/maps/physmap_of.c88
-rw-r--r--drivers/mtd/maps/pnc2000.c93
-rw-r--r--drivers/mtd/maps/scb2_flash.c2
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdchar.c8
-rw-r--r--drivers/mtd/mtdcore.c2
-rw-r--r--drivers/mtd/mtdoops.c185
-rw-r--r--drivers/mtd/mtdpart.c17
-rw-r--r--drivers/mtd/nand/Kconfig26
-rw-r--r--drivers/mtd/nand/Makefile3
-rw-r--r--drivers/mtd/nand/at91_nand.c12
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c39
-rw-r--r--drivers/mtd/nand/cafe_nand.c19
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c1244
-rw-r--r--drivers/mtd/nand/nand_base.c8
-rw-r--r--drivers/mtd/nand/orion_nand.c171
-rw-r--r--drivers/mtd/nand/pasemi_nand.c243
-rw-r--r--drivers/mtd/nand/plat_nand.c2
-rw-r--r--drivers/mtd/nand/s3c2410.c48
-rw-r--r--drivers/mtd/ofpart.c74
-rw-r--r--drivers/mtd/onenand/onenand_base.c199
-rw-r--r--drivers/mtd/redboot.c25
-rw-r--r--drivers/mtd/ubi/build.c674
-rw-r--r--drivers/mtd/ubi/cdev.c244
-rw-r--r--drivers/mtd/ubi/debug.h21
-rw-r--r--drivers/mtd/ubi/eba.c321
-rw-r--r--drivers/mtd/ubi/gluebi.c9
-rw-r--r--drivers/mtd/ubi/io.c10
-rw-r--r--drivers/mtd/ubi/kapi.c177
-rw-r--r--drivers/mtd/ubi/misc.c2
-rw-r--r--drivers/mtd/ubi/scan.c12
-rw-r--r--drivers/mtd/ubi/ubi.h171
-rw-r--r--drivers/mtd/ubi/upd.c185
-rw-r--r--drivers/mtd/ubi/vmt.c208
-rw-r--r--drivers/mtd/ubi/vtbl.c45
-rw-r--r--drivers/mtd/ubi/wl.c338
-rw-r--r--drivers/net/Kconfig10
-rw-r--r--drivers/net/cpmac.c55
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c4
-rw-r--r--drivers/net/forcedeth.c49
-rw-r--r--drivers/net/gianfar_mii.c4
-rw-r--r--drivers/net/hamradio/dmascc.c4
-rw-r--r--drivers/net/iseries_veth.c2
-rw-r--r--drivers/net/ixgbe/ixgbe.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c91
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c305
-rw-r--r--drivers/net/mlx4/fw.c6
-rw-r--r--drivers/net/mlx4/fw.h3
-rw-r--r--drivers/net/mlx4/main.c11
-rw-r--r--drivers/net/mlx4/mr.c2
-rw-r--r--drivers/net/mv643xx_eth.c11
-rw-r--r--drivers/net/pcmcia/3c574_cs.c51
-rw-r--r--drivers/net/pcmcia/3c589_cs.c30
-rw-r--r--drivers/net/pcmcia/axnet_cs.c34
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c26
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c25
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c62
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c62
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c51
-rw-r--r--drivers/net/pppol2tp.c3
-rw-r--r--drivers/net/sky2.c17
-rw-r--r--drivers/net/sky2.h2
-rw-r--r--drivers/net/tlan.c25
-rw-r--r--drivers/net/tulip/xircom_cb.c2
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/ucc_geth_mii.c4
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/wan/hdlc.c24
-rw-r--r--drivers/net/wan/hdlc_cisco.c5
-rw-r--r--drivers/net/wan/hdlc_fr.c53
-rw-r--r--drivers/net/wan/hdlc_ppp.c2
-rw-r--r--drivers/net/wan/hdlc_raw.c2
-rw-r--r--drivers/net/wan/hdlc_raw_eth.c6
-rw-r--r--drivers/net/wan/hdlc_x25.c10
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/dma.c137
-rw-r--r--drivers/net/wireless/b43/dma.h20
-rw-r--r--drivers/net/wireless/b43/leds.c13
-rw-r--r--drivers/net/wireless/b43/main.c26
-rw-r--r--drivers/net/wireless/b43legacy/dma.c23
-rw-r--r--drivers/net/wireless/b43legacy/main.c9
-rw-r--r--drivers/net/wireless/b43legacy/pio.c21
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c15
-rw-r--r--drivers/net/wireless/b43legacy/xmit.h2
-rw-r--r--drivers/net/wireless/ipw2100.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c8
-rw-r--r--drivers/net/wireless/netwave_cs.c24
-rw-r--r--drivers/net/wireless/wavelan_cs.c56
-rw-r--r--drivers/nubus/Makefile1
-rw-r--r--drivers/nubus/nubus.c13
-rw-r--r--drivers/nubus/nubus_syms.c28
-rw-r--r--drivers/nubus/proc.c4
-rw-r--r--drivers/of/base.c25
-rw-r--r--drivers/of/platform.c10
-rw-r--r--drivers/parisc/ccio-dma.c2
-rw-r--r--drivers/parisc/iommu-helpers.h7
-rw-r--r--drivers/parisc/sba_iommu.c2
-rw-r--r--drivers/parport/parport_pc.c50
-rw-r--r--drivers/parport/parport_serial.c4
-rw-r--r--drivers/pci/dmar.c1
-rw-r--r--drivers/pci/intel-iommu.c4
-rw-r--r--drivers/pci/intel-iommu.h14
-rw-r--r--drivers/pci/iova.c8
-rw-r--r--drivers/pci/iova.h16
-rw-r--r--drivers/pci/pci.c16
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/pcmcia/at91_cf.c62
-rw-r--r--drivers/pcmcia/cardbus.c4
-rw-r--r--drivers/pcmcia/ds.c15
-rw-r--r--drivers/pcmcia/i82092.c2
-rw-r--r--drivers/pcmcia/i82365.c18
-rw-r--r--drivers/pcmcia/m32r_cfc.c7
-rw-r--r--drivers/pcmcia/m32r_pcc.c7
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c6
-rw-r--r--drivers/pcmcia/pcmcia_resource.c14
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c11
-rw-r--r--drivers/pcmcia/tcic.c2
-rw-r--r--drivers/pnp/driver.c12
-rw-r--r--drivers/pnp/interface.c13
-rw-r--r--drivers/pnp/manager.c27
-rw-r--r--drivers/pnp/pnpacpi/core.c2
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c44
-rw-r--r--drivers/pnp/pnpbios/core.c2
-rw-r--r--drivers/pnp/pnpbios/rsparser.c33
-rw-r--r--drivers/pnp/quirks.c43
-rw-r--r--drivers/power/power_supply_sysfs.c2
-rw-r--r--drivers/ps3/ps3av.c97
-rw-r--r--drivers/rtc/Kconfig126
-rw-r--r--drivers/rtc/Makefile4
-rw-r--r--drivers/rtc/rtc-at91sam9.c520
-rw-r--r--drivers/rtc/rtc-bfin.c351
-rw-r--r--drivers/rtc/rtc-cmos.c221
-rw-r--r--drivers/rtc/rtc-dev.c9
-rw-r--r--drivers/rtc/rtc-ds1302.c262
-rw-r--r--drivers/rtc/rtc-ds1307.c27
-rw-r--r--drivers/rtc/rtc-ds1511.c656
-rw-r--r--drivers/rtc/rtc-pcf8583.c24
-rw-r--r--drivers/rtc/rtc-r9701.c178
-rw-r--r--drivers/rtc/rtc-s3c.c5
-rw-r--r--drivers/rtc/rtc-sa1100.c16
-rw-r--r--drivers/rtc/rtc-sysfs.c19
-rw-r--r--drivers/s390/block/dasd.c19
-rw-r--r--drivers/s390/block/dasd_3990_erp.c62
-rw-r--r--drivers/s390/block/dcssblk.c5
-rw-r--r--drivers/s390/char/sclp_tty.c2
-rw-r--r--drivers/s390/char/sclp_vt220.c2
-rw-r--r--drivers/s390/cio/ccwgroup.c12
-rw-r--r--drivers/s390/cio/chsc.c147
-rw-r--r--drivers/s390/cio/device_id.c107
-rw-r--r--drivers/s390/sysinfo.c2
-rw-r--r--drivers/scsi/Kconfig93
-rw-r--r--drivers/scsi/Makefile12
-rw-r--r--drivers/scsi/NCR53C9x.c3654
-rw-r--r--drivers/scsi/NCR53C9x.h668
-rw-r--r--drivers/scsi/a2091.c3
-rw-r--r--drivers/scsi/a3000.c3
-rw-r--r--drivers/scsi/aacraid/aachba.c81
-rw-r--r--drivers/scsi/aacraid/commctrl.c26
-rw-r--r--drivers/scsi/aacraid/linit.c25
-rw-r--r--drivers/scsi/advansys.c6
-rw-r--r--drivers/scsi/aic7xxx_old.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr.h4
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c87
-rw-r--r--drivers/scsi/arm/acornscsi.c14
-rw-r--r--drivers/scsi/arm/scsi.h87
-rw-r--r--drivers/scsi/blz1230.c353
-rw-r--r--drivers/scsi/blz2060.c306
-rw-r--r--drivers/scsi/cyberstorm.c377
-rw-r--r--drivers/scsi/cyberstormII.c314
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/dec_esp.c687
-rw-r--r--drivers/scsi/fastlane.c421
-rw-r--r--drivers/scsi/gvp11.c3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c4
-rw-r--r--drivers/scsi/ide-scsi.c4
-rw-r--r--drivers/scsi/iscsi_tcp.c57
-rw-r--r--drivers/scsi/libiscsi.c137
-rw-r--r--drivers/scsi/mac_esp.c751
-rw-r--r--drivers/scsi/mca_53c9x.c520
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c1
-rw-r--r--drivers/scsi/oktagon_esp.c606
-rw-r--r--drivers/scsi/oktagon_io.S194
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c2
-rw-r--r--drivers/scsi/ps3rom.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c24
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c87
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c27
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c404
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c36
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c1
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c75
-rw-r--r--drivers/scsi/scsi.c5
-rw-r--r--drivers/scsi/scsi_lib.c5
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c238
-rw-r--r--drivers/scsi/sd.c34
-rw-r--r--drivers/scsi/ses.c689
-rw-r--r--drivers/scsi/sg.c23
-rw-r--r--drivers/scsi/sr.c49
-rw-r--r--drivers/scsi/sr.h1
-rw-r--r--drivers/scsi/sr_ioctl.c3
-rw-r--r--drivers/scsi/sun3x_esp.c546
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c2
-rw-r--r--drivers/scsi/u14-34f.c2
-rw-r--r--drivers/serial/68328serial.c3
-rw-r--r--drivers/serial/8250.c42
-rw-r--r--drivers/serial/8250_pci.c133
-rw-r--r--drivers/serial/8250_pnp.c10
-rw-r--r--drivers/serial/Kconfig51
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/serial/crisv10.c5
-rw-r--r--drivers/serial/dz.c543
-rw-r--r--drivers/serial/dz.h11
-rw-r--r--drivers/serial/imx.c4
-rw-r--r--drivers/serial/mcf.c2
-rw-r--r--drivers/serial/mpc52xx_uart.c424
-rw-r--r--drivers/serial/mpsc.c1
-rw-r--r--drivers/serial/s3c2410.c2
-rw-r--r--drivers/serial/sc26xx.c755
-rw-r--r--drivers/serial/serial_core.c20
-rw-r--r--drivers/serial/serial_cs.c6
-rw-r--r--drivers/serial/uartlite.c55
-rw-r--r--drivers/spi/Kconfig13
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/atmel_spi.c173
-rw-r--r--drivers/spi/omap2_mcspi.c37
-rw-r--r--drivers/spi/pxa2xx_spi.c17
-rw-r--r--drivers/spi/spi.c35
-rw-r--r--drivers/spi/spi_bfin5xx.c131
-rw-r--r--drivers/spi/spi_imx.c11
-rw-r--r--drivers/spi/spi_s3c24xx.c12
-rw-r--r--drivers/spi/spi_s3c24xx_gpio.c12
-rw-r--r--drivers/spi/spi_sh_sci.c205
-rw-r--r--drivers/thermal/Kconfig15
-rw-r--r--drivers/thermal/Makefile5
-rw-r--r--drivers/thermal/thermal.c714
-rw-r--r--drivers/uio/uio.c14
-rw-r--r--drivers/video/atmel_lcdfb.c135
-rw-r--r--drivers/video/backlight/Kconfig22
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/backlight.c12
-rw-r--r--drivers/video/backlight/omap1_bl.c210
-rw-r--r--drivers/video/bf54x-lq043fb.c3
-rw-r--r--drivers/video/console/bitblit.c4
-rw-r--r--drivers/video/console/fbcon.c29
-rw-r--r--drivers/video/console/fbcon.h47
-rw-r--r--drivers/video/console/fbcon_ccw.c4
-rw-r--r--drivers/video/console/fbcon_cw.c4
-rw-r--r--drivers/video/console/fbcon_ud.c4
-rw-r--r--drivers/video/console/fonts.c4
-rw-r--r--drivers/video/console/tileblit.c4
-rw-r--r--drivers/video/console/vgacon.c2
-rw-r--r--drivers/video/fb_defio.c17
-rw-r--r--drivers/video/fb_draw.h1
-rw-r--r--drivers/video/fbmon.c118
-rw-r--r--drivers/video/geode/lxfb_core.c2
-rw-r--r--drivers/video/hpfb.c3
-rw-r--r--drivers/video/i810/i810_main.c2
-rw-r--r--drivers/video/igafb.c5
-rw-r--r--drivers/video/intelfb/intelfbhw.c2
-rw-r--r--drivers/video/neofb.c27
-rw-r--r--drivers/video/nvidia/nvidia.c22
-rw-r--r--drivers/video/pm2fb.c13
-rw-r--r--drivers/video/pm3fb.c2
-rw-r--r--drivers/video/pmag-aa-fb.c2
-rw-r--r--drivers/video/ps3fb.c451
-rw-r--r--drivers/video/s3c2410fb.c88
-rw-r--r--drivers/video/s3c2410fb.h7
-rw-r--r--drivers/video/sis/sis_main.c4
-rw-r--r--drivers/video/sm501fb.c64
-rw-r--r--drivers/video/tdfxfb.c2
-rw-r--r--drivers/video/uvesafb.c8
-rw-r--r--drivers/video/vermilion/vermilion.c5
-rw-r--r--drivers/video/xilinxfb.c4
-rw-r--r--drivers/virtio/virtio_balloon.c1
-rw-r--r--drivers/w1/masters/Kconfig10
-rw-r--r--drivers/w1/masters/Makefile1
-rw-r--r--drivers/w1/masters/ds1wm.c9
-rw-r--r--drivers/w1/masters/w1-gpio.c124
-rw-r--r--drivers/w1/slaves/w1_therm.c3
-rw-r--r--drivers/w1/w1.c4
-rw-r--r--fs/9p/fid.c4
-rw-r--r--fs/9p/v9fs.c51
-rw-r--r--fs/9p/v9fs.h5
-rw-r--r--fs/9p/vfs_file.c4
-rw-r--r--fs/9p/vfs_inode.c7
-rw-r--r--fs/Kconfig128
-rw-r--r--fs/affs/affs.h3
-rw-r--r--fs/affs/amigaffs.c6
-rw-r--r--fs/affs/inode.c20
-rw-r--r--fs/affs/namei.c12
-rw-r--r--fs/affs/super.c12
-rw-r--r--fs/afs/dir.c4
-rw-r--r--fs/afs/inode.c5
-rw-r--r--fs/afs/security.c2
-rw-r--r--fs/autofs/autofs_i.h1
-rw-r--r--fs/autofs/inode.c27
-rw-r--r--fs/autofs/root.c22
-rw-r--r--fs/bad_inode.c14
-rw-r--r--fs/befs/linuxvfs.c39
-rw-r--r--fs/bfs/bfs.h2
-rw-r--r--fs/bfs/dir.c6
-rw-r--r--fs/bfs/inode.c32
-rw-r--r--fs/binfmt_aout.c2
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/block_dev.c1
-rw-r--r--fs/buffer.c62
-rw-r--r--fs/cifs/cifsfs.c8
-rw-r--r--fs/cifs/cifsfs.h1
-rw-r--r--fs/cifs/inode.c24
-rw-r--r--fs/compat.c81
-rw-r--r--fs/compat_ioctl.c2
-rw-r--r--fs/dcache.c5
-rw-r--r--fs/direct-io.c4
-rw-r--r--fs/dquot.c15
-rw-r--r--fs/ecryptfs/crypto.c191
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h17
-rw-r--r--fs/ecryptfs/file.c5
-rw-r--r--fs/ecryptfs/inode.c16
-rw-r--r--fs/ecryptfs/keystore.c8
-rw-r--r--fs/ecryptfs/main.c40
-rw-r--r--fs/ecryptfs/mmap.c28
-rw-r--r--fs/ecryptfs/read_write.c2
-rw-r--r--fs/ecryptfs/super.c48
-rw-r--r--fs/efs/inode.c25
-rw-r--r--fs/efs/namei.c23
-rw-r--r--fs/efs/super.c16
-rw-r--r--fs/eventfd.c1
-rw-r--r--fs/eventpoll.c2
-rw-r--r--fs/exec.c16
-rw-r--r--fs/ext2/balloc.c98
-rw-r--r--fs/ext2/dir.c2
-rw-r--r--fs/ext2/ext2.h5
-rw-r--r--fs/ext2/file.c4
-rw-r--r--fs/ext2/inode.c37
-rw-r--r--fs/ext2/ioctl.c12
-rw-r--r--fs/ext2/namei.c12
-rw-r--r--fs/ext2/super.c63
-rw-r--r--fs/ext3/balloc.c94
-rw-r--r--fs/ext3/ialloc.c58
-rw-r--r--fs/ext3/inode.c143
-rw-r--r--fs/ext3/namei.c33
-rw-r--r--fs/ext3/resize.c7
-rw-r--r--fs/ext3/super.c69
-rw-r--r--fs/ext4/balloc.c8
-rw-r--r--fs/ext4/ialloc.c58
-rw-r--r--fs/ext4/inode.c38
-rw-r--r--fs/ext4/namei.c29
-rw-r--r--fs/ext4/resize.c7
-rw-r--r--fs/ext4/super.c50
-rw-r--r--fs/fat/file.c47
-rw-r--r--fs/fat/inode.c14
-rw-r--r--fs/fat/misc.c5
-rw-r--r--fs/file.c8
-rw-r--r--fs/freevxfs/vxfs_extern.h2
-rw-r--r--fs/freevxfs/vxfs_inode.c45
-rw-r--r--fs/freevxfs/vxfs_lookup.c6
-rw-r--r--fs/freevxfs/vxfs_super.c10
-rw-r--r--fs/fs-writeback.c38
-rw-r--r--fs/fuse/dev.c113
-rw-r--r--fs/fuse/dir.c7
-rw-r--r--fs/fuse/file.c14
-rw-r--r--fs/fuse/fuse_i.h18
-rw-r--r--fs/fuse/inode.c7
-rw-r--r--fs/gfs2/bmap.c2
-rw-r--r--fs/gfs2/dir.c2
-rw-r--r--fs/gfs2/glock.c37
-rw-r--r--fs/gfs2/glock.h4
-rw-r--r--fs/gfs2/incore.h4
-rw-r--r--fs/gfs2/inode.c2
-rw-r--r--fs/gfs2/ops_address.c2
-rw-r--r--fs/gfs2/ops_export.c2
-rw-r--r--fs/gfs2/ops_inode.c2
-rw-r--r--fs/hfs/bfind.c11
-rw-r--r--fs/hfs/brec.c4
-rw-r--r--fs/hfs/btree.c26
-rw-r--r--fs/hfs/hfs.h2
-rw-r--r--fs/hfs/super.c2
-rw-r--r--fs/hfsplus/btree.c6
-rw-r--r--fs/hfsplus/dir.c6
-rw-r--r--fs/hfsplus/hfsplus_fs.h3
-rw-r--r--fs/hfsplus/super.c47
-rw-r--r--fs/hostfs/hostfs_kern.c57
-rw-r--r--fs/hostfs/hostfs_user.c8
-rw-r--r--fs/hppfs/hppfs_kern.c27
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/inode.c4
-rw-r--r--fs/inotify.c30
-rw-r--r--fs/inotify_user.c29
-rw-r--r--fs/ioctl.c223
-rw-r--r--fs/isofs/export.c14
-rw-r--r--fs/isofs/inode.c68
-rw-r--r--fs/isofs/namei.c4
-rw-r--r--fs/isofs/rock.c4
-rw-r--r--fs/jbd/journal.c2
-rw-r--r--fs/jbd/recovery.c2
-rw-r--r--fs/jbd2/recovery.c2
-rw-r--r--fs/jffs2/acl.c6
-rw-r--r--fs/jffs2/acl.h2
-rw-r--r--fs/jffs2/dir.c6
-rw-r--r--fs/jffs2/fs.c62
-rw-r--r--fs/jffs2/nodelist.c9
-rw-r--r--fs/jffs2/os-linux.h2
-rw-r--r--fs/jffs2/readinode.c31
-rw-r--r--fs/jffs2/super.c1
-rw-r--r--fs/jffs2/write.c32
-rw-r--r--fs/jfs/file.c5
-rw-r--r--fs/jfs/inode.c20
-rw-r--r--fs/jfs/ioctl.c31
-rw-r--r--fs/jfs/jfs_dinode.h2
-rw-r--r--fs/jfs/jfs_inode.h6
-rw-r--r--fs/jfs/namei.c39
-rw-r--r--fs/jfs/super.c15
-rw-r--r--fs/libfs.c11
-rw-r--r--fs/minix/inode.c43
-rw-r--r--fs/minix/minix.h1
-rw-r--r--fs/minix/namei.c7
-rw-r--r--fs/mpage.c7
-rw-r--r--fs/namei.c3
-rw-r--r--fs/namespace.c45
-rw-r--r--fs/ncpfs/inode.c4
-rw-r--r--fs/nfs/getroot.c4
-rw-r--r--fs/nfs/read.c10
-rw-r--r--fs/nfs/write.c24
-rw-r--r--fs/nfsd/auth.c10
-rw-r--r--fs/nfsd/export.c4
-rw-r--r--fs/ntfs/aops.c20
-rw-r--r--fs/ntfs/compress.c2
-rw-r--r--fs/ntfs/file.c32
-rw-r--r--fs/ntfs/malloc.h3
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/aops.c6
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h11
-rw-r--r--fs/ocfs2/dlm/dlmapi.h7
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h24
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c195
-rw-r--r--fs/ocfs2/dlm/dlmfs.c15
-rw-r--r--fs/ocfs2/dlm/userdlm.c5
-rw-r--r--fs/ocfs2/dlm/userdlm.h3
-rw-r--r--fs/ocfs2/dlmglue.c29
-rw-r--r--fs/ocfs2/dlmglue.h1
-rw-r--r--fs/ocfs2/ocfs2.h1
-rw-r--r--fs/ocfs2/ocfs2_lockingver.h30
-rw-r--r--fs/ocfs2/super.c1
-rw-r--r--fs/openpromfs/inode.c45
-rw-r--r--fs/partitions/Kconfig2
-rw-r--r--fs/partitions/check.c17
-rw-r--r--fs/pnode.c2
-rw-r--r--fs/proc/array.c21
-rw-r--r--fs/proc/base.c51
-rw-r--r--fs/proc/inode.c60
-rw-r--r--fs/proc/internal.h8
-rw-r--r--fs/proc/kcore.c2
-rw-r--r--fs/proc/proc_misc.c150
-rw-r--r--fs/proc/task_mmu.c676
-rw-r--r--fs/qnx4/inode.c47
-rw-r--r--fs/qnx4/namei.c8
-rw-r--r--fs/quota.c4
-rw-r--r--fs/reiserfs/inode.c6
-rw-r--r--fs/reiserfs/prints.c2
-rw-r--r--fs/reiserfs/xattr.c6
-rw-r--r--fs/romfs/inode.c46
-rw-r--r--fs/select.c2
-rw-r--r--fs/signalfd.c1
-rw-r--r--fs/smbfs/inode.c7
-rw-r--r--fs/smbfs/sock.c5
-rw-r--r--fs/sysfs/group.c7
-rw-r--r--fs/sysv/inode.c25
-rw-r--r--fs/sysv/namei.c6
-rw-r--r--fs/sysv/super.c4
-rw-r--r--fs/sysv/sysv.h1
-rw-r--r--fs/timerfd.c207
-rw-r--r--fs/ufs/inode.c34
-rw-r--r--fs/ufs/namei.c6
-rw-r--r--fs/ufs/super.c14
-rw-r--r--fs/ufs/ufs.h2
-rw-r--r--fs/utimes.c1
-rw-r--r--fs/vfat/namei.c2
-rw-r--r--fs/xattr.c45
-rw-r--r--fs/xfs/linux-2.6/kmem.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_lrw.c2
-rw-r--r--include/acpi/acglobal.h4
-rw-r--r--include/acpi/acmacros.h6
-rw-r--r--include/acpi/acpi_bus.h5
-rw-r--r--include/acpi/acpi_drivers.h2
-rw-r--r--include/acpi/acpi_numa.h1
-rw-r--r--include/acpi/acpiosxf.h7
-rw-r--r--include/acpi/processor.h13
-rw-r--r--include/asm-alpha/atomic.h2
-rw-r--r--include/asm-alpha/elf.h3
-rw-r--r--include/asm-alpha/page.h3
-rw-r--r--include/asm-alpha/pci.h1
-rw-r--r--include/asm-alpha/pgalloc.h8
-rw-r--r--include/asm-alpha/system.h15
-rw-r--r--include/asm-alpha/tlb.h4
-rw-r--r--include/asm-alpha/tlbflush.h6
-rw-r--r--include/asm-alpha/unistd.h1
-rw-r--r--include/asm-alpha/user.h2
-rw-r--r--include/asm-arm/arch-iop13xx/adma.h18
-rw-r--r--include/asm-arm/arch-pxa/gpio.h48
-rw-r--r--include/asm-arm/arch-pxa/pxa-regs.h13
-rw-r--r--include/asm-arm/arch-pxa/pxa27x_keyboard.h13
-rw-r--r--include/asm-arm/arch-pxa/pxa27x_keypad.h56
-rw-r--r--include/asm-arm/arch-pxa/tosa.h30
-rw-r--r--include/asm-arm/arch-s3c2410/regs-lcd.h11
-rw-r--r--include/asm-arm/arch-s3c2410/spi-gpio.h6
-rw-r--r--include/asm-arm/arch-s3c2410/spi.h6
-rw-r--r--include/asm-arm/elf.h3
-rw-r--r--include/asm-arm/hardware/iop3xx-adma.h30
-rw-r--r--include/asm-arm/page.h5
-rw-r--r--include/asm-arm/pgalloc.h10
-rw-r--r--include/asm-arm/system.h15
-rw-r--r--include/asm-arm/tlb.h4
-rw-r--r--include/asm-arm/user.h2
-rw-r--r--include/asm-avr32/arch-at32ap/at32ap700x.h2
-rw-r--r--include/asm-avr32/arch-at32ap/gpio.h34
-rw-r--r--include/asm-avr32/arch-at32ap/irq.h4
-rw-r--r--include/asm-avr32/delay.h2
-rw-r--r--include/asm-avr32/elf.h2
-rw-r--r--include/asm-avr32/page.h4
-rw-r--r--include/asm-avr32/pgalloc.h6
-rw-r--r--include/asm-avr32/system.h23
-rw-r--r--include/asm-avr32/timex.h3
-rw-r--r--include/asm-avr32/unistd.h2
-rw-r--r--include/asm-avr32/user.h2
-rw-r--r--include/asm-blackfin/elf.h2
-rw-r--r--include/asm-blackfin/io.h2
-rw-r--r--include/asm-blackfin/page.h3
-rw-r--r--include/asm-blackfin/system.h57
-rw-r--r--include/asm-blackfin/user.h2
-rw-r--r--include/asm-cris/bitops.h7
-rw-r--r--include/asm-cris/elf.h3
-rw-r--r--include/asm-cris/page.h4
-rw-r--r--include/asm-cris/pgalloc.h6
-rw-r--r--include/asm-cris/system.h15
-rw-r--r--include/asm-cris/user.h2
-rw-r--r--include/asm-frv/Kbuild1
-rw-r--r--include/asm-frv/dma-mapping.h10
-rw-r--r--include/asm-frv/elf.h2
-rw-r--r--include/asm-frv/page.h8
-rw-r--r--include/asm-frv/pgalloc.h8
-rw-r--r--include/asm-frv/pgtable.h2
-rw-r--r--include/asm-frv/scatterlist.h10
-rw-r--r--include/asm-frv/system.h24
-rw-r--r--include/asm-frv/unistd.h2
-rw-r--r--include/asm-generic/4level-fixup.h2
-rw-r--r--include/asm-generic/Kbuild.asm5
-rw-r--r--include/asm-generic/cmpxchg-local.h65
-rw-r--r--include/asm-generic/cmpxchg.h22
-rw-r--r--include/asm-generic/cputime.h1
-rw-r--r--include/asm-generic/gpio.h98
-rw-r--r--include/asm-generic/pgtable-nopmd.h2
-rw-r--r--include/asm-generic/pgtable-nopud.h2
-rw-r--r--include/asm-generic/sections.h2
-rw-r--r--include/asm-h8300/elf.h2
-rw-r--r--include/asm-h8300/io.h2
-rw-r--r--include/asm-h8300/page.h4
-rw-r--r--include/asm-h8300/system.h15
-rw-r--r--include/asm-h8300/user.h3
-rw-r--r--include/asm-h8300/virtconvert.h2
-rw-r--r--include/asm-ia64/bitops.h50
-rw-r--r--include/asm-ia64/elf.h3
-rw-r--r--include/asm-ia64/gcc_intrin.h2
-rw-r--r--include/asm-ia64/intrinsics.h12
-rw-r--r--include/asm-ia64/mca.h6
-rw-r--r--include/asm-ia64/mca_asm.h3
-rw-r--r--include/asm-ia64/page.h3
-rw-r--r--include/asm-ia64/pgalloc.h16
-rw-r--r--include/asm-ia64/processor.h5
-rw-r--r--include/asm-ia64/sal.h14
-rw-r--r--include/asm-ia64/user.h2
-rw-r--r--include/asm-m32r/delay.h2
-rw-r--r--include/asm-m32r/elf.h2
-rw-r--r--include/asm-m32r/irq.h2
-rw-r--r--include/asm-m32r/local.h362
-rw-r--r--include/asm-m32r/m32700ut/m32700ut_pld.h4
-rw-r--r--include/asm-m32r/page.h2
-rw-r--r--include/asm-m32r/pgalloc.h10
-rw-r--r--include/asm-m32r/system.h115
-rw-r--r--include/asm-m32r/unistd.h2
-rw-r--r--include/asm-m32r/user.h2
-rw-r--r--include/asm-m68k/elf.h2
-rw-r--r--include/asm-m68k/macintosh.h2
-rw-r--r--include/asm-m68k/motorola_pgalloc.h10
-rw-r--r--include/asm-m68k/motorola_pgtable.h3
-rw-r--r--include/asm-m68k/page.h5
-rw-r--r--include/asm-m68k/pgtable.h2
-rw-r--r--include/asm-m68k/sun3_pgalloc.h8
-rw-r--r--include/asm-m68k/system.h27
-rw-r--r--include/asm-m68k/user.h3
-rw-r--r--include/asm-m68knommu/elf.h2
-rw-r--r--include/asm-m68knommu/io.h2
-rw-r--r--include/asm-m68knommu/mcfne.h27
-rw-r--r--include/asm-m68knommu/mcfsim.h4
-rw-r--r--include/asm-m68knommu/mcftimer.h2
-rw-r--r--include/asm-m68knommu/mcfuart.h2
-rw-r--r--include/asm-m68knommu/page.h4
-rw-r--r--include/asm-m68knommu/system.h43
-rw-r--r--include/asm-mips/cmpxchg.h17
-rw-r--r--include/asm-mips/elf.h4
-rw-r--r--include/asm-mips/page.h5
-rw-r--r--include/asm-mips/pgalloc.h12
-rw-r--r--include/asm-mips/processor.h2
-rw-r--r--include/asm-mips/user.h6
-rw-r--r--include/asm-parisc/atomic.h33
-rw-r--r--include/asm-parisc/elf.h3
-rw-r--r--include/asm-parisc/page.h4
-rw-r--r--include/asm-parisc/pgalloc.h10
-rw-r--r--include/asm-parisc/processor.h3
-rw-r--r--include/asm-parisc/tlb.h4
-rw-r--r--include/asm-powerpc/cputable.h2
-rw-r--r--include/asm-powerpc/cputime.h14
-rw-r--r--include/asm-powerpc/dcr-native.h47
-rw-r--r--include/asm-powerpc/dma.h39
-rw-r--r--include/asm-powerpc/elf.h2
-rw-r--r--include/asm-powerpc/iommu.h12
-rw-r--r--include/asm-powerpc/mediabay.h8
-rw-r--r--include/asm-powerpc/mpc512x.h22
-rw-r--r--include/asm-powerpc/mpc52xx_psc.h48
-rw-r--r--include/asm-powerpc/nvram.h3
-rw-r--r--include/asm-powerpc/oprofile_impl.h2
-rw-r--r--include/asm-powerpc/paca.h2
-rw-r--r--include/asm-powerpc/page.h3
-rw-r--r--include/asm-powerpc/page_32.h2
-rw-r--r--include/asm-powerpc/page_64.h2
-rw-r--r--include/asm-powerpc/pgalloc-32.h10
-rw-r--r--include/asm-powerpc/pgalloc-64.h10
-rw-r--r--include/asm-powerpc/processor.h3
-rw-r--r--include/asm-powerpc/ps3av.h43
-rw-r--r--include/asm-powerpc/ptrace.h2
-rw-r--r--include/asm-powerpc/reg.h4
-rw-r--r--include/asm-powerpc/reg_booke.h62
-rw-r--r--include/asm-powerpc/reg_fsl_emb.h72
-rw-r--r--include/asm-powerpc/systbl.h2
-rw-r--r--include/asm-powerpc/system.h20
-rw-r--r--include/asm-powerpc/user.h6
-rw-r--r--include/asm-powerpc/vio.h2
-rw-r--r--include/asm-ppc/pgalloc.h10
-rw-r--r--include/asm-ppc/system.h51
-rw-r--r--include/asm-s390/bitops.h558
-rw-r--r--include/asm-s390/cacheflush.h4
-rw-r--r--include/asm-s390/ccwgroup.h2
-rw-r--r--include/asm-s390/cputime.h1
-rw-r--r--include/asm-s390/elf.h2
-rw-r--r--include/asm-s390/kexec.h2
-rw-r--r--include/asm-s390/page.h3
-rw-r--r--include/asm-s390/pgalloc.h14
-rw-r--r--include/asm-s390/pgtable.h12
-rw-r--r--include/asm-s390/processor.h3
-rw-r--r--include/asm-s390/system.h44
-rw-r--r--include/asm-s390/tlb.h8
-rw-r--r--include/asm-s390/user.h3
-rw-r--r--include/asm-sh/delay.h2
-rw-r--r--include/asm-sh/page.h3
-rw-r--r--include/asm-sh/pgalloc.h8
-rw-r--r--include/asm-sh/unistd_32.h2
-rw-r--r--include/asm-sh/unistd_64.h2
-rw-r--r--include/asm-sh/user.h2
-rw-r--r--include/asm-sparc/atomic.h36
-rw-r--r--include/asm-sparc/elf.h41
-rw-r--r--include/asm-sparc/page.h4
-rw-r--r--include/asm-sparc/pgalloc.h12
-rw-r--r--include/asm-sparc/ptrace.h7
-rw-r--r--include/asm-sparc/system.h48
-rw-r--r--include/asm-sparc/unistd.h6
-rw-r--r--include/asm-sparc64/elf.h34
-rw-r--r--include/asm-sparc64/io.h2
-rw-r--r--include/asm-sparc64/page.h3
-rw-r--r--include/asm-sparc64/pgalloc.h8
-rw-r--r--include/asm-sparc64/ptrace.h18
-rw-r--r--include/asm-sparc64/system.h29
-rw-r--r--include/asm-sparc64/timex.h6
-rw-r--r--include/asm-sparc64/tlb.h4
-rw-r--r--include/asm-sparc64/unistd.h6
-rw-r--r--include/asm-um/a.out.h4
-rw-r--r--include/asm-um/current.h23
-rw-r--r--include/asm-um/elf-i386.h28
-rw-r--r--include/asm-um/elf-x86_64.h8
-rw-r--r--include/asm-um/fixmap.h6
-rw-r--r--include/asm-um/ldt.h4
-rw-r--r--include/asm-um/linkage.h6
-rw-r--r--include/asm-um/mmu_context.h7
-rw-r--r--include/asm-um/page.h6
-rw-r--r--include/asm-um/param.h2
-rw-r--r--include/asm-um/pgalloc.h8
-rw-r--r--include/asm-um/pgtable-2level.h3
-rw-r--r--include/asm-um/pgtable-3level.h35
-rw-r--r--include/asm-um/pgtable.h100
-rw-r--r--include/asm-um/processor-generic.h13
-rw-r--r--include/asm-um/processor-i386.h1
-rw-r--r--include/asm-um/thread_info.h11
-rw-r--r--include/asm-um/tlb.h122
-rw-r--r--include/asm-um/uaccess.h10
-rw-r--r--include/asm-v850/elf.h2
-rw-r--r--include/asm-v850/io.h2
-rw-r--r--include/asm-v850/page.h4
-rw-r--r--include/asm-v850/system.h15
-rw-r--r--include/asm-v850/user.h6
-rw-r--r--include/asm-x86/Kbuild2
-rw-r--r--include/asm-x86/bitops_64.h16
-rw-r--r--include/asm-x86/cmpxchg_64.h22
-rw-r--r--include/asm-x86/delay.h2
-rw-r--r--include/asm-x86/elf.h3
-rw-r--r--include/asm-x86/highmem.h6
-rw-r--r--include/asm-x86/mmzone_32.h4
-rw-r--r--include/asm-x86/pgalloc_32.h8
-rw-r--r--include/asm-x86/pgalloc_64.h20
-rw-r--r--include/asm-x86/pgtable_32.h8
-rw-r--r--include/asm-x86/termios.h74
-rw-r--r--include/asm-x86/timex.h3
-rw-r--r--include/asm-x86/unistd_32.h4
-rw-r--r--include/asm-x86/unistd_64.h9
-rw-r--r--include/asm-x86/user.h14
-rw-r--r--include/asm-x86/user_32.h2
-rw-r--r--include/asm-x86/user_64.h2
-rw-r--r--include/asm-xtensa/elf.h3
-rw-r--r--include/asm-xtensa/page.h3
-rw-r--r--include/asm-xtensa/pgalloc.h6
-rw-r--r--include/asm-xtensa/system.h22
-rw-r--r--include/asm-xtensa/tlb.h2
-rw-r--r--include/linux/Kbuild5
-rw-r--r--include/linux/a.out.h8
-rw-r--r--include/linux/ac97_codec.h7
-rw-r--r--include/linux/acct.h4
-rw-r--r--include/linux/acpi.h62
-rw-r--r--include/linux/agp_backend.h1
-rw-r--r--include/linux/agpgart.h1
-rw-r--r--include/linux/async_tx.h13
-rw-r--r--include/linux/ata_platform.h (renamed from include/linux/pata_platform.h)13
-rw-r--r--include/linux/bootmem.h17
-rw-r--r--include/linux/capability.h251
-rw-r--r--include/linux/cgroup.h16
-rw-r--r--include/linux/cgroup_subsys.h5
-rw-r--r--include/linux/compat.h15
-rw-r--r--include/linux/cpufreq.h4
-rw-r--r--include/linux/cpuidle.h15
-rw-r--r--include/linux/device.h11
-rw-r--r--include/linux/dma-mapping.h30
-rw-r--r--include/linux/dmaengine.h29
-rw-r--r--include/linux/dmi.h2
-rw-r--r--include/linux/ds1wm.h1
-rw-r--r--include/linux/efs_fs.h2
-rw-r--r--include/linux/elf.h3
-rw-r--r--include/linux/elfcore.h4
-rw-r--r--include/linux/enclosure.h129
-rw-r--r--include/linux/err.h13
-rw-r--r--include/linux/ext3_fs.h2
-rw-r--r--include/linux/ext4_fs.h2
-rw-r--r--include/linux/fs.h31
-rw-r--r--include/linux/fsnotify.h22
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/hash.h42
-rw-r--r--include/linux/hayesesp.h4
-rw-r--r--include/linux/hdlc.h25
-rw-r--r--include/linux/highmem.h52
-rw-r--r--include/linux/hrtimer.h18
-rw-r--r--include/linux/hw_random.h10
-rw-r--r--include/linux/i2c/pca953x.h18
-rw-r--r--include/linux/i2c/pcf857x.h45
-rw-r--r--include/linux/ide.h42
-rw-r--r--include/linux/if_vlan.h7
-rw-r--r--include/linux/init.h1
-rw-r--r--include/linux/init_task.h13
-rw-r--r--include/linux/input.h7
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/iommu-helper.h7
-rw-r--r--include/linux/isdn.h1
-rw-r--r--include/linux/isicom.h2
-rw-r--r--include/linux/istallion.h1
-rw-r--r--include/linux/jbd.h3
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/kexec.h12
-rw-r--r--include/linux/kprobes.h3
-rw-r--r--include/linux/ktime.h3
-rw-r--r--include/linux/latency.h25
-rw-r--r--include/linux/leds.h15
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/log2.h16
-rw-r--r--include/linux/loop.h1
-rw-r--r--include/linux/lp.h4
-rw-r--r--include/linux/memcontrol.h190
-rw-r--r--include/linux/mfd/asic3.h497
-rw-r--r--include/linux/miscdevice.h10
-rw-r--r--include/linux/mm.h38
-rw-r--r--include/linux/mm_types.h11
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/mod_devicetable.h3
-rw-r--r--include/linux/mtd/cfi.h12
-rw-r--r--include/linux/mtd/mtd.h9
-rw-r--r--include/linux/mtd/mtdram.h8
-rw-r--r--include/linux/mtd/onenand_regs.h1
-rw-r--r--include/linux/mtd/partitions.h9
-rw-r--r--include/linux/mtd/ubi.h18
-rw-r--r--include/linux/nubus.h4
-rw-r--r--include/linux/of.h1
-rw-r--r--include/linux/page-flags.h42
-rw-r--r--include/linux/pci.h16
-rw-r--r--include/linux/pci_ids.h18
-rw-r--r--include/linux/percpu.h2
-rw-r--r--include/linux/pkt_cls.h4
-rw-r--r--include/linux/pm_qos_params.h25
-rw-r--r--include/linux/pnp.h1
-rw-r--r--include/linux/power_supply.h1
-rw-r--r--include/linux/prctl.h4
-rw-r--r--include/linux/proc_fs.h3
-rw-r--r--include/linux/ptrace.h35
-rw-r--r--include/linux/qnx4_fs.h1
-rw-r--r--include/linux/raid/bitmap.h3
-rw-r--r--include/linux/raid/md_k.h23
-rw-r--r--include/linux/rcupdate.h11
-rw-r--r--include/linux/reboot.h2
-rw-r--r--include/linux/res_counter.h127
-rw-r--r--include/linux/rmap.h5
-rw-r--r--include/linux/rtnetlink.h12
-rw-r--r--include/linux/sched.h18
-rw-r--r--include/linux/security.h26
-rw-r--r--include/linux/serial167.h14
-rw-r--r--include/linux/serial_8250.h1
-rw-r--r--include/linux/shm.h8
-rw-r--r--include/linux/signal.h2
-rw-r--r--include/linux/slub_def.h23
-rw-r--r--include/linux/sm501.h2
-rw-r--r--include/linux/sonypi.h2
-rw-r--r--include/linux/spi/mcp23s08.h24
-rw-r--r--include/linux/ssb/ssb.h9
-rw-r--r--include/linux/stallion.h1
-rw-r--r--include/linux/suspend.h1
-rw-r--r--include/linux/swap.h38
-rw-r--r--include/linux/swapops.h6
-rw-r--r--include/linux/syscalls.h7
-rw-r--r--include/linux/sysctl.h3
-rw-r--r--include/linux/tc_ematch/tc_em_meta.h1
-rw-r--r--include/linux/thermal.h94
-rw-r--r--include/linux/timex.h2
-rw-r--r--include/linux/tty.h8
-rw-r--r--include/linux/vmalloc.h6
-rw-r--r--include/linux/vt_kern.h1
-rw-r--r--include/linux/w1-gpio.h23
-rw-r--r--include/linux/wait.h16
-rw-r--r--include/linux/writeback.h2
-rw-r--r--include/linux/xattr.h1
-rw-r--r--include/mtd/mtd-abi.h2
-rw-r--r--include/mtd/ubi-header.h47
-rw-r--r--include/mtd/ubi-user.h127
-rw-r--r--include/net/9p/9p.h1
-rw-r--r--include/net/9p/client.h5
-rw-r--r--include/net/9p/conn.h57
-rw-r--r--include/net/9p/transport.h11
-rw-r--r--include/net/inet_hashtables.h2
-rw-r--r--include/net/ip_fib.h8
-rw-r--r--include/net/ipv6.h6
-rw-r--r--include/net/netlabel.h47
-rw-r--r--include/pcmcia/cs.h8
-rw-r--r--include/pcmcia/cs_types.h1
-rw-r--r--include/pcmcia/ss.h6
-rw-r--r--include/scsi/iscsi_proto.h4
-rw-r--r--include/scsi/libiscsi.h30
-rw-r--r--include/scsi/scsi.h14
-rw-r--r--include/scsi/scsi_host.h44
-rw-r--r--include/scsi/scsi_transport_iscsi.h43
-rw-r--r--include/video/atmel_lcdc.h25
-rw-r--r--init/Kconfig45
-rw-r--r--init/calibrate.c9
-rw-r--r--init/do_mounts.c3
-rw-r--r--init/initramfs.c13
-rw-r--r--init/main.c9
-rw-r--r--ipc/msg.c17
-rw-r--r--ipc/sem.c17
-rw-r--r--ipc/shm.c20
-rw-r--r--ipc/util.c4
-rw-r--r--kernel/Makefile5
-rw-r--r--kernel/capability.c113
-rw-r--r--kernel/cgroup.c318
-rw-r--r--kernel/cpuset.c394
-rw-r--r--kernel/exit.c7
-rw-r--r--kernel/fork.c35
-rw-r--r--kernel/hrtimer.c9
-rw-r--r--kernel/kallsyms.c11
-rw-r--r--kernel/kexec.c18
-rw-r--r--kernel/kprobes.c9
-rw-r--r--kernel/latency.c280
-rw-r--r--kernel/notifier.c1
-rw-r--r--kernel/panic.c5
-rw-r--r--kernel/params.c2
-rw-r--r--kernel/pid.c1
-rw-r--r--kernel/pm_qos_params.c425
-rw-r--r--kernel/posix-timers.c9
-rw-r--r--kernel/power/Kconfig9
-rw-r--r--kernel/power/disk.c4
-rw-r--r--kernel/power/snapshot.c4
-rw-r--r--kernel/printk.c36
-rw-r--r--kernel/ptrace.c11
-rw-r--r--kernel/relay.c24
-rw-r--r--kernel/res_counter.c134
-rw-r--r--kernel/signal.c110
-rw-r--r--kernel/srcu.c3
-rw-r--r--kernel/stop_machine.c6
-rw-r--r--kernel/sys.c37
-rw-r--r--kernel/sys_ni.c7
-rw-r--r--kernel/sysctl.c70
-rw-r--r--kernel/sysctl_check.c7
-rw-r--r--kernel/test_kprobes.c16
-rw-r--r--kernel/time.c13
-rw-r--r--kernel/time/clocksource.c19
-rw-r--r--kernel/timer.c10
-rw-r--r--lib/Kconfig.debug13
-rw-r--r--lib/Makefile1
-rw-r--r--lib/extable.c6
-rw-r--r--lib/iommu-helper.c80
-rw-r--r--lib/radix-tree.c15
-rw-r--r--lib/smp_processor_id.c4
-rw-r--r--lib/swiotlb.c41
-rw-r--r--mm/Makefile3
-rw-r--r--mm/allocpercpu.c2
-rw-r--r--mm/bootmem.c27
-rw-r--r--mm/dmapool.c500
-rw-r--r--mm/fadvise.c16
-rw-r--r--mm/filemap.c27
-rw-r--r--mm/filemap_xip.c2
-rw-r--r--mm/fremap.c5
-rw-r--r--mm/highmem.c4
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/internal.h4
-rw-r--r--mm/memcontrol.c1192
-rw-r--r--mm/memory.c256
-rw-r--r--mm/memory_hotplug.c6
-rw-r--r--mm/migrate.c54
-rw-r--r--mm/mmap.c8
-rw-r--r--mm/nommu.c53
-rw-r--r--mm/oom_kill.c90
-rw-r--r--mm/page-writeback.c24
-rw-r--r--mm/page_alloc.c162
-rw-r--r--mm/page_io.c2
-rw-r--r--mm/pagewalk.c131
-rw-r--r--mm/rmap.c53
-rw-r--r--mm/shmem.c517
-rw-r--r--mm/slob.c51
-rw-r--r--mm/slub.c326
-rw-r--r--mm/sparse.c12
-rw-r--r--mm/swap.c12
-rw-r--r--mm/swap_state.c153
-rw-r--r--mm/swapfile.c150
-rw-r--r--mm/tiny-shmem.c12
-rw-r--r--mm/truncate.c8
-rw-r--r--mm/vmalloc.c74
-rw-r--r--mm/vmscan.c495
-rw-r--r--mm/vmstat.c61
-rw-r--r--net/9p/Makefile1
-rw-r--r--net/9p/client.c161
-rw-r--r--net/9p/fcprint.c4
-rw-r--r--net/9p/mod.c9
-rw-r--r--net/9p/mux.c1060
-rw-r--r--net/9p/trans_fd.c1103
-rw-r--r--net/9p/trans_virtio.c355
-rw-r--r--net/9p/util.c20
-rw-r--r--net/bluetooth/hidp/core.c49
-rw-r--r--net/bluetooth/rfcomm/tty.c3
-rw-r--r--net/core/rtnetlink.c44
-rw-r--r--net/ipv4/cipso_ipv4.c4
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/icmp.c3
-rw-r--r--net/ipv4/inet_hashtables.c6
-rw-r--r--net/ipv4/ipvs/ip_vs_wrr.c3
-rw-r--r--net/ipv4/xfrm4_mode_beet.c2
-rw-r--r--net/ipv6/icmp.c3
-rw-r--r--net/ipv6/inet6_hashtables.c2
-rw-r--r--net/mac80211/Kconfig1
-rw-r--r--net/netlabel/netlabel_cipso_v4.c2
-rw-r--r--net/netlabel/netlabel_cipso_v4.h3
-rw-r--r--net/netlabel/netlabel_domainhash.h1
-rw-r--r--net/netlabel/netlabel_kapi.c177
-rw-r--r--net/rxrpc/af_rxrpc.c6
-rw-r--r--net/sched/cls_flow.c21
-rw-r--r--net/sched/em_meta.c17
-rw-r--r--net/sctp/auth.c6
-rw-r--r--net/sctp/sm_statefuns.c4
-rwxr-xr-xscripts/checkstack.pl2
-rw-r--r--scripts/kallsyms.c61
-rwxr-xr-xscripts/kernel-doc12
-rw-r--r--security/Kconfig19
-rw-r--r--security/Makefile2
-rw-r--r--security/commoncap.c134
-rw-r--r--security/dummy.c19
-rw-r--r--security/keys/key.c2
-rw-r--r--security/keys/process_keys.c2
-rw-r--r--security/keys/request_key.c2
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/security.c8
-rw-r--r--security/selinux/hooks.c43
-rw-r--r--security/selinux/include/security.h1
-rw-r--r--security/selinux/ss/services.c33
-rw-r--r--security/smack/Kconfig10
-rw-r--r--security/smack/Makefile7
-rw-r--r--security/smack/smack.h220
-rw-r--r--security/smack/smack_access.c356
-rw-r--r--security/smack/smack_lsm.c2518
-rw-r--r--security/smack/smackfs.c981
-rw-r--r--sound/core/pcm_native.c11
-rw-r--r--sound/oss/Makefile9
-rw-r--r--sound/oss/ac97_codec.c284
-rw-r--r--sound/oss/btaudio.c1139
-rw-r--r--sound/oss/cs4232.c526
-rw-r--r--sound/oss/dmasound/Kconfig2
-rw-r--r--sound/oss/dmasound/dmasound_paula.c4
-rw-r--r--sound/oss/i810_audio.c3656
-rw-r--r--sound/oss/pss.c30
-rw-r--r--sound/oss/sb_common.c3
-rw-r--r--sound/oss/trident.c4
-rw-r--r--sound/oss/via82cxxx_audio.c3616
1957 files changed, 88181 insertions, 50705 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 40ac7759c3bb..6e9c4050a41b 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -14,6 +14,7 @@ Following translations are available on the WWW:
- this file.
ABI/
- info on kernel <-> userspace ABI and relative interface stability.
+
BUG-HUNTING
- brute force method of doing binary search of patches to find bug.
Changes
@@ -66,6 +67,8 @@ VGA-softcursor.txt
- how to change your VGA cursor from a blinking underscore.
accounting/
- documentation on accounting and taskstats.
+acpi/
+ - info on ACPI-specific hooks in the kernel.
aoe/
- description of AoE (ATA over Ethernet) along with config examples.
applying-patches.txt
@@ -126,18 +129,16 @@ devices.txt
- plain ASCII listing of all the nodes in /dev/ with major minor #'s.
digiepca.txt
- info on Digi Intl. {PC,PCI,EISA}Xx and Xem series cards.
-dnotify.txt
- - info about directory notification in Linux.
dontdiff
- file containing a list of files that should never be diff'ed.
driver-model/
- directory with info about Linux driver model.
-drivers/
- - directory with driver documentation (currently only EDAC).
dvb/
- info on Linux Digital Video Broadcast (DVB) subsystem.
early-userspace/
- info about initramfs, klibc, and userspace early during boot.
+edac.txt
+ - information on EDAC - Error Detection And Correction
eisa.txt
- info on EISA bus support.
exception.txt
@@ -334,20 +335,8 @@ rtc.txt
- notes on how to use the Real Time Clock (aka CMOS clock) driver.
s390/
- directory with info on using Linux on the IBM S390.
-sched-arch.txt
- - CPU Scheduler implementation hints for architecture specific code.
-sched-coding.txt
- - reference for various scheduler-related methods in the O(1) scheduler.
-sched-design.txt
- - goals, design and implementation of the Linux O(1) scheduler.
-sched-design-CFS.txt
- - goals, design and implementation of the Complete Fair Scheduler.
-sched-domains.txt
- - information on scheduling domains.
-sched-nice-design.txt
- - How and why the scheduler's nice levels are implemented.
-sched-stats.txt
- - information on schedstats (Linux Scheduler Statistics).
+scheduler/
+ - directory with info on the scheduler.
scsi/
- directory with info on Linux scsi support.
serial/
@@ -360,8 +349,6 @@ sgi-visws.txt
- short blurb on the SGI Visual Workstations.
sh/
- directory with info on porting Linux to a new architecture.
-sharedsubtree.txt
- - a description of shared subtrees for namespaces.
smart-config.txt
- description of the Smart Config makefile feature.
sony-laptop.txt
diff --git a/Documentation/ABI/testing/sysfs-firmware-acpi b/Documentation/ABI/testing/sysfs-firmware-acpi
new file mode 100644
index 000000000000..9470ed9afcc0
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-firmware-acpi
@@ -0,0 +1,99 @@
+What: /sys/firmware/acpi/interrupts/
+Date: February 2008
+Contact: Len Brown <lenb@kernel.org>
+Description:
+ All ACPI interrupts are handled via a single IRQ,
+ the System Control Interrupt (SCI), which appears
+ as "acpi" in /proc/interrupts.
+
+ However, one of the main functions of ACPI is to make
+ the platform understand random hardware without
+ special driver support. So while the SCI handles a few
+ well known (fixed feature) interrupt sources, such
+ as the power button, it can also handle a variable
+ number of "General Purpose Events" (GPEs).
+
+ A GPE vectors to a specified handler in AML, which
+ can do anything the BIOS writer wants from
+ OS context. GPE 0x12, for example, would vector
+ to a level or edge handler called _L12 or _E12.
+ The handler may do its business and return.
+ Or the handler may send a Notify event
+ to a Linux device driver registered on an ACPI device,
+ such as a battery, or a processor.
+
+ To figure out where all the SCIs are coming from,
+ /sys/firmware/acpi/interrupts contains a file listing
+ every possible source, and the count of how many
+ times it has triggered.
+
+ $ cd /sys/firmware/acpi/interrupts
+ $ grep . *
+ error:0
+ ff_gbl_lock:0
+ ff_pmtimer:0
+ ff_pwr_btn:0
+ ff_rt_clk:0
+ ff_slp_btn:0
+ gpe00:0
+ gpe01:0
+ gpe02:0
+ gpe03:0
+ gpe04:0
+ gpe05:0
+ gpe06:0
+ gpe07:0
+ gpe08:0
+ gpe09:174
+ gpe0A:0
+ gpe0B:0
+ gpe0C:0
+ gpe0D:0
+ gpe0E:0
+ gpe0F:0
+ gpe10:0
+ gpe11:60
+ gpe12:0
+ gpe13:0
+ gpe14:0
+ gpe15:0
+ gpe16:0
+ gpe17:0
+ gpe18:0
+ gpe19:7
+ gpe1A:0
+ gpe1B:0
+ gpe1C:0
+ gpe1D:0
+ gpe1E:0
+ gpe1F:0
+ gpe_all:241
+ sci:241
+
+ sci - The total number of times the ACPI SCI
+ has claimed an interrupt.
+
+ gpe_all - count of SCIs caused by GPEs.
+
+ gpeXX - count for individual GPE source
+
+ ff_gbl_lock - Global Lock
+
+ ff_pmtimer - PM Timer
+
+ ff_pwr_btn - Power Button
+
+ ff_rt_clk - Real Time Clock
+
+ ff_slp_btn - Sleep Button
+
+ error - an interrupt that can't be accounted for above.
+
+ Root has permission to clear any of these counters. Eg.
+ # echo 0 > gpe11
+
+ All counters can be cleared by clearing the total "sci":
+ # echo 0 > sci
+
+ None of these counters has an effect on the function
+ of the system; they are simply statistics.
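
A minimal userspace sketch of the interface documented above: it reads the
sci counter from the sysfs path given in the description and, when run as
root with a "clear" argument, writes 0 to it, just like the "echo 0 > sci"
example. The file layout comes from the text; the helper itself is
illustrative only and is not part of the patch.

    /*
     * Illustrative only.  Print the ACPI SCI count documented above and,
     * with "clear" as the first argument, reset it (root required),
     * which is the same operation as "echo 0 > sci".
     */
    #include <stdio.h>
    #include <string.h>

    #define SCI_PATH "/sys/firmware/acpi/interrupts/sci"

    int main(int argc, char **argv)
    {
            char buf[32];
            FILE *f = fopen(SCI_PATH, "r");

            if (!f) {
                    perror(SCI_PATH);
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("sci: %s", buf);
            fclose(f);

            if (argc > 1 && strcmp(argv[1], "clear") == 0) {
                    f = fopen(SCI_PATH, "w");
                    if (!f) {
                            perror("clear (root only)");
                            return 1;
                    }
                    fputs("0\n", f);        /* echo 0 > sci */
                    fclose(f);
            }
            return 0;
    }

The same pattern applies to any of the other counter files (gpeXX,
ff_pwr_btn, ...) listed above.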
diff --git a/Documentation/ABI/testing/sysfs-kernel-uids b/Documentation/ABI/testing/sysfs-kernel-uids
index 648d65dbc0e7..28f14695a852 100644
--- a/Documentation/ABI/testing/sysfs-kernel-uids
+++ b/Documentation/ABI/testing/sysfs-kernel-uids
@@ -11,4 +11,4 @@ Description:
example would be, if User A has shares = 1024 and user
B has shares = 2048, User B will get twice the CPU
bandwidth user A will. For more details refer
- Documentation/sched-design-CFS.txt
+ Documentation/scheduler/sched-design-CFS.txt
diff --git a/Documentation/BUG-HUNTING b/Documentation/BUG-HUNTING
index 6c816751b868..65022a87bf17 100644
--- a/Documentation/BUG-HUNTING
+++ b/Documentation/BUG-HUNTING
@@ -214,6 +214,23 @@ And recompile the kernel with CONFIG_DEBUG_INFO enabled:
gdb vmlinux
(gdb) p vt_ioctl
(gdb) l *(0x<address of vt_ioctl> + 0xda8)
+or, as one command
+ (gdb) l *(vt_ioctl + 0xda8)
+
+If you have a call trace, such as :-
+>Call Trace:
+> [<ffffffff8802c8e9>] :jbd:log_wait_commit+0xa3/0xf5
+> [<ffffffff810482d9>] autoremove_wake_function+0x0/0x2e
+> [<ffffffff8802770b>] :jbd:journal_stop+0x1be/0x1ee
+> ...
+this shows the problem in the :jbd: module. You can load that module in gdb
+and list the relevant code.
+ gdb fs/jbd/jbd.ko
+ (gdb) p log_wait_commit
+ (gdb) l *(0x<address> + 0xa3)
+or
+ (gdb) l *(log_wait_commit + 0xa3)
+
Another very useful option of the Kernel Hacking section in menuconfig is
Debug memory allocations. This will help you see whether data has been
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl
index 4215f69ce7e6..3a882d9a90a9 100644
--- a/Documentation/DocBook/genericirq.tmpl
+++ b/Documentation/DocBook/genericirq.tmpl
@@ -172,7 +172,7 @@
<listitem><para>Chiplevel hardware encapsulation</para></listitem>
</orderedlist>
</para>
- <sect1>
+ <sect1 id="Interrupt_control_flow">
<title>Interrupt control flow</title>
<para>
Each interrupt is described by an interrupt descriptor structure
@@ -190,7 +190,7 @@
referenced by the assigned chip descriptor structure.
</para>
</sect1>
- <sect1>
+ <sect1 id="Highlevel_Driver_API">
<title>Highlevel Driver API</title>
<para>
The highlevel Driver API consists of following functions:
@@ -210,7 +210,7 @@
See the autogenerated function documentation for details.
</para>
</sect1>
- <sect1>
+ <sect1 id="Highlevel_IRQ_flow_handlers">
<title>Highlevel IRQ flow handlers</title>
<para>
The generic layer provides a set of pre-defined irq-flow methods:
@@ -224,9 +224,9 @@
specific) are assigned to specific interrupts by the architecture
either during bootup or during device initialization.
</para>
- <sect2>
+ <sect2 id="Default_flow_implementations">
<title>Default flow implementations</title>
- <sect3>
+ <sect3 id="Helper_functions">
<title>Helper functions</title>
<para>
The helper functions call the chip primitives and
@@ -267,9 +267,9 @@ noop(irq)
</para>
</sect3>
</sect2>
- <sect2>
+ <sect2 id="Default_flow_handler_implementations">
<title>Default flow handler implementations</title>
- <sect3>
+ <sect3 id="Default_Level_IRQ_flow_handler">
<title>Default Level IRQ flow handler</title>
<para>
handle_level_irq provides a generic implementation
@@ -284,7 +284,7 @@ desc->chip->end();
</programlisting>
</para>
</sect3>
- <sect3>
+ <sect3 id="Default_Edge_IRQ_flow_handler">
<title>Default Edge IRQ flow handler</title>
<para>
handle_edge_irq provides a generic implementation
@@ -311,7 +311,7 @@ desc->chip->end();
</programlisting>
</para>
</sect3>
- <sect3>
+ <sect3 id="Default_simple_IRQ_flow_handler">
<title>Default simple IRQ flow handler</title>
<para>
handle_simple_irq provides a generic implementation
@@ -328,7 +328,7 @@ handle_IRQ_event(desc->action);
</programlisting>
</para>
</sect3>
- <sect3>
+ <sect3 id="Default_per_CPU_flow_handler">
<title>Default per CPU flow handler</title>
<para>
handle_percpu_irq provides a generic implementation
@@ -349,7 +349,7 @@ desc->chip->end();
</para>
</sect3>
</sect2>
- <sect2>
+ <sect2 id="Quirks_and_optimizations">
<title>Quirks and optimizations</title>
<para>
The generic functions are intended for 'clean' architectures and chips,
@@ -358,7 +358,7 @@ desc->chip->end();
overriding the highlevel irq-flow handler.
</para>
</sect2>
- <sect2>
+ <sect2 id="Delayed_interrupt_disable">
<title>Delayed interrupt disable</title>
<para>
This per interrupt selectable feature, which was introduced by Russell
@@ -380,7 +380,7 @@ desc->chip->end();
</para>
</sect2>
</sect1>
- <sect1>
+ <sect1 id="Chiplevel_hardware_encapsulation">
<title>Chiplevel hardware encapsulation</title>
<para>
The chip level hardware descriptor structure irq_chip
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index 77436d735013..059aaf20951a 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -165,6 +165,7 @@ X!Ilib/string.c
!Emm/vmalloc.c
!Imm/page_alloc.c
!Emm/mempool.c
+!Emm/dmapool.c
!Emm/page-writeback.c
!Emm/truncate.c
</sect1>
@@ -371,7 +372,6 @@ X!Iinclude/linux/device.h
!Edrivers/base/class.c
!Edrivers/base/firmware_class.c
!Edrivers/base/transport_class.c
-!Edrivers/base/dmapool.c
<!-- Cannot be included, because
attribute_container_add_class_device_adapter
and attribute_container_classdev_to_container
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 01825ee7db64..2e9d6b41f034 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -717,7 +717,7 @@ used, and when it gets full, throws out the least used one.
<para>
For our first example, we assume that all operations are in user
context (ie. from system calls), so we can sleep. This means we can
-use a semaphore to protect the cache and all the objects within
+use a mutex to protect the cache and all the objects within
it. Here's the code:
</para>
@@ -725,7 +725,7 @@ it. Here's the code:
#include &lt;linux/list.h&gt;
#include &lt;linux/slab.h&gt;
#include &lt;linux/string.h&gt;
-#include &lt;asm/semaphore.h&gt;
+#include &lt;linux/mutex.h&gt;
#include &lt;asm/errno.h&gt;
struct object
@@ -737,7 +737,7 @@ struct object
};
/* Protects the cache, cache_num, and the objects within it */
-static DECLARE_MUTEX(cache_lock);
+static DEFINE_MUTEX(cache_lock);
static LIST_HEAD(cache);
static unsigned int cache_num = 0;
#define MAX_CACHE_SIZE 10
@@ -789,17 +789,17 @@ int cache_add(int id, const char *name)
obj-&gt;id = id;
obj-&gt;popularity = 0;
- down(&amp;cache_lock);
+ mutex_lock(&amp;cache_lock);
__cache_add(obj);
- up(&amp;cache_lock);
+ mutex_unlock(&amp;cache_lock);
return 0;
}
void cache_delete(int id)
{
- down(&amp;cache_lock);
+ mutex_lock(&amp;cache_lock);
__cache_delete(__cache_find(id));
- up(&amp;cache_lock);
+ mutex_unlock(&amp;cache_lock);
}
int cache_find(int id, char *name)
@@ -807,13 +807,13 @@ int cache_find(int id, char *name)
struct object *obj;
int ret = -ENOENT;
- down(&amp;cache_lock);
+ mutex_lock(&amp;cache_lock);
obj = __cache_find(id);
if (obj) {
ret = 0;
strcpy(name, obj-&gt;name);
}
- up(&amp;cache_lock);
+ mutex_unlock(&amp;cache_lock);
return ret;
}
</programlisting>
@@ -853,7 +853,7 @@ The change is shown below, in standard patch format: the
int popularity;
};
--static DECLARE_MUTEX(cache_lock);
+-static DEFINE_MUTEX(cache_lock);
+static spinlock_t cache_lock = SPIN_LOCK_UNLOCKED;
static LIST_HEAD(cache);
static unsigned int cache_num = 0;
@@ -870,22 +870,22 @@ The change is shown below, in standard patch format: the
obj-&gt;id = id;
obj-&gt;popularity = 0;
-- down(&amp;cache_lock);
+- mutex_lock(&amp;cache_lock);
+ spin_lock_irqsave(&amp;cache_lock, flags);
__cache_add(obj);
-- up(&amp;cache_lock);
+- mutex_unlock(&amp;cache_lock);
+ spin_unlock_irqrestore(&amp;cache_lock, flags);
return 0;
}
void cache_delete(int id)
{
-- down(&amp;cache_lock);
+- mutex_lock(&amp;cache_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&amp;cache_lock, flags);
__cache_delete(__cache_find(id));
-- up(&amp;cache_lock);
+- mutex_unlock(&amp;cache_lock);
+ spin_unlock_irqrestore(&amp;cache_lock, flags);
}
@@ -895,14 +895,14 @@ The change is shown below, in standard patch format: the
int ret = -ENOENT;
+ unsigned long flags;
-- down(&amp;cache_lock);
+- mutex_lock(&amp;cache_lock);
+ spin_lock_irqsave(&amp;cache_lock, flags);
obj = __cache_find(id);
if (obj) {
ret = 0;
strcpy(name, obj-&gt;name);
}
-- up(&amp;cache_lock);
+- mutex_unlock(&amp;cache_lock);
+ spin_unlock_irqrestore(&amp;cache_lock, flags);
return ret;
}
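
For readers who want to try out the locking discipline the updated example
teaches -- one lock guarding both the cache list and every object reachable
from it -- here is a rough userspace analogue using POSIX threads. It is a
sketch only: pthread_mutex_lock() stands in for the kernel's mutex_lock(),
the names merely echo the DocBook example, and none of this code comes from
the patch itself.

    /* Userspace sketch of the "one lock protects the cache" pattern. */
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct object {
            struct object *next;
            int id;
            char name[32];
    };

    static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct object *cache;            /* protected by cache_lock */

    /* Caller must hold cache_lock. */
    static struct object *__cache_find(int id)
    {
            struct object *obj;

            for (obj = cache; obj; obj = obj->next)
                    if (obj->id == id)
                            return obj;
            return NULL;
    }

    int cache_find(int id, char *name)
    {
            struct object *obj;
            int ret = -1;

            pthread_mutex_lock(&cache_lock);        /* mutex_lock() in the doc */
            obj = __cache_find(id);
            if (obj) {
                    ret = 0;
                    strcpy(name, obj->name);
            }
            pthread_mutex_unlock(&cache_lock);      /* mutex_unlock() in the doc */
            return ret;
    }

    int main(void)
    {
            char name[32];

            if (cache_find(1, name) != 0)
                    printf("id 1 not cached (expected: cache starts empty)\n");
            return 0;
    }

The further spinlock conversion shown in the patch follows the same shape;
only the lock primitives and the irq-saving flags change.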
diff --git a/Documentation/DocBook/lsm.tmpl b/Documentation/DocBook/lsm.tmpl
index f63822195871..fe7664ce9667 100644
--- a/Documentation/DocBook/lsm.tmpl
+++ b/Documentation/DocBook/lsm.tmpl
@@ -33,7 +33,7 @@
</authorgroup>
</articleinfo>
-<sect1><title>Introduction</title>
+<sect1 id="Introduction"><title>Introduction</title>
<para>
In March 2001, the National Security Agency (NSA) gave a presentation
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 957cf5c26831..8e145857fc9d 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -80,7 +80,7 @@
struct member has a short description which is marked with an [XXX] identifier.
The following chapters explain the meaning of those identifiers.
</para>
- <sect1>
+ <sect1 id="Function_identifiers_XXX">
<title>Function identifiers [XXX]</title>
<para>
The functions are marked with [XXX] identifiers in the short
@@ -115,7 +115,7 @@
</para></listitem>
</itemizedlist>
</sect1>
- <sect1>
+ <sect1 id="Struct_member_identifiers_XXX">
<title>Struct member identifiers [XXX]</title>
<para>
The struct members are marked with [XXX] identifiers in the
@@ -159,7 +159,7 @@
basic functions and fill out some really board dependent
members in the nand chip description structure.
</para>
- <sect1>
+ <sect1 id="Basic_defines">
<title>Basic defines</title>
<para>
At least you have to provide a mtd structure and
@@ -185,7 +185,7 @@ static struct nand_chip board_chip;
static unsigned long baseaddr;
</programlisting>
</sect1>
- <sect1>
+ <sect1 id="Partition_defines">
<title>Partition defines</title>
<para>
If you want to divide your device into partitions, then
@@ -204,7 +204,7 @@ static struct mtd_partition partition_info[] = {
};
</programlisting>
</sect1>
- <sect1>
+ <sect1 id="Hardware_control_functions">
<title>Hardware control function</title>
<para>
The hardware control function provides access to the
@@ -246,7 +246,7 @@ static void board_hwcontrol(struct mtd_info *mtd, int cmd)
}
</programlisting>
</sect1>
- <sect1>
+ <sect1 id="Device_ready_function">
<title>Device ready function</title>
<para>
If the hardware interface has the ready busy pin of the NAND chip connected to a
@@ -257,7 +257,7 @@ static void board_hwcontrol(struct mtd_info *mtd, int cmd)
the function must not be defined and the function pointer this->dev_ready is set to NULL.
</para>
</sect1>
- <sect1>
+ <sect1 id="Init_function">
<title>Init function</title>
<para>
The init function allocates memory and sets up all the board
@@ -325,7 +325,7 @@ out:
module_init(board_init);
</programlisting>
</sect1>
- <sect1>
+ <sect1 id="Exit_function">
<title>Exit function</title>
<para>
The exit function is only neccecary if the driver is
@@ -359,7 +359,7 @@ module_exit(board_cleanup);
driver. For a list of functions which can be overridden by the board
driver see the documentation of the nand_chip structure.
</para>
- <sect1>
+ <sect1 id="Multiple_chip_control">
<title>Multiple chip control</title>
<para>
The nand driver can control chip arrays. Therefor the
@@ -419,9 +419,9 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
}
</programlisting>
</sect1>
- <sect1>
+ <sect1 id="Hardware_ECC_support">
<title>Hardware ECC support</title>
- <sect2>
+ <sect2 id="Functions_and_constants">
<title>Functions and constants</title>
<para>
The nand driver supports three different types of
@@ -475,7 +475,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
</itemizedlist>
</para>
</sect2>
- <sect2>
+ <sect2 id="Hardware_ECC_with_syndrome_calculation">
<title>Hardware ECC with syndrome calculation</title>
<para>
Many hardware ECC implementations provide Reed-Solomon
@@ -500,7 +500,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
</para>
</sect2>
</sect1>
- <sect1>
+ <sect1 id="Bad_Block_table_support">
<title>Bad block table support</title>
<para>
Most NAND chips mark the bad blocks at a defined
@@ -552,7 +552,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
allows faster access than always checking the
bad block information on the flash chip itself.
</para>
- <sect2>
+ <sect2 id="Flash_based_tables">
<title>Flash based tables</title>
<para>
It may be desired or neccecary to keep a bad block table in FLASH.
@@ -587,7 +587,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
</itemizedlist>
</para>
</sect2>
- <sect2>
+ <sect2 id="User_defined_tables">
<title>User defined tables</title>
<para>
User defined tables are created by filling out a
@@ -676,7 +676,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
</para>
</sect2>
</sect1>
- <sect1>
+ <sect1 id="Spare_area_placement">
<title>Spare area (auto)placement</title>
<para>
The nand driver implements different possibilities for
@@ -730,7 +730,7 @@ struct nand_oobinfo {
</para></listitem>
</itemizedlist>
</para>
- <sect2>
+ <sect2 id="Placement_defined_by_fs_driver">
<title>Placement defined by fs driver</title>
<para>
The calling function provides a pointer to a nand_oobinfo
@@ -760,7 +760,7 @@ struct nand_oobinfo {
done according to the given scheme in the nand_oobinfo structure.
</para>
</sect2>
- <sect2>
+ <sect2 id="Automatic_placement">
<title>Automatic placement</title>
<para>
Automatic placement uses the built in defaults to place the
@@ -774,7 +774,7 @@ struct nand_oobinfo {
done according to the default builtin scheme.
</para>
</sect2>
- <sect2>
+ <sect2 id="User_space_placement_selection">
<title>User space placement selection</title>
<para>
All non ecc functions like mtd->read and mtd->write use an internal
@@ -789,9 +789,9 @@ struct nand_oobinfo {
</para>
</sect2>
</sect1>
- <sect1>
+ <sect1 id="Spare_area_autoplacement_default">
<title>Spare area autoplacement default schemes</title>
- <sect2>
+ <sect2 id="pagesize_256">
<title>256 byte pagesize</title>
<informaltable><tgroup cols="3"><tbody>
<row>
@@ -843,7 +843,7 @@ pages this byte is reserved</entry>
</row>
</tbody></tgroup></informaltable>
</sect2>
- <sect2>
+ <sect2 id="pagesize_512">
<title>512 byte pagesize</title>
<informaltable><tgroup cols="3"><tbody>
<row>
@@ -906,7 +906,7 @@ in this page</entry>
</row>
</tbody></tgroup></informaltable>
</sect2>
- <sect2>
+ <sect2 id="pagesize_2048">
<title>2048 byte pagesize</title>
<informaltable><tgroup cols="3"><tbody>
<row>
@@ -1126,9 +1126,9 @@ in this page</entry>
<para>
This chapter describes the constants which might be relevant for a driver developer.
</para>
- <sect1>
+ <sect1 id="Chip_option_constants">
<title>Chip option constants</title>
- <sect2>
+ <sect2 id="Constants_for_chip_id_table">
<title>Constants for chip id table</title>
<para>
These constants are defined in nand.h. They are ored together to describe
@@ -1153,7 +1153,7 @@ in this page</entry>
</programlisting>
</para>
</sect2>
- <sect2>
+ <sect2 id="Constants_for_runtime_options">
<title>Constants for runtime options</title>
<para>
These constants are defined in nand.h. They are ored together to describe
@@ -1171,7 +1171,7 @@ in this page</entry>
</sect2>
</sect1>
- <sect1>
+ <sect1 id="EEC_selection_constants">
<title>ECC selection constants</title>
<para>
Use these constants to select the ECC algorithm.
@@ -1192,7 +1192,7 @@ in this page</entry>
</para>
</sect1>
- <sect1>
+ <sect1 id="Hardware_control_related_constants">
<title>Hardware control related constants</title>
<para>
These constants describe the requested hardware access function when
@@ -1218,7 +1218,7 @@ in this page</entry>
</para>
</sect1>
- <sect1>
+ <sect1 id="Bad_block_table_constants">
<title>Bad block table related constants</title>
<para>
These constants describe the options used for bad block
diff --git a/Documentation/DocBook/procfs-guide.tmpl b/Documentation/DocBook/procfs-guide.tmpl
index 2de84dc195a8..1fd6a1ec7591 100644
--- a/Documentation/DocBook/procfs-guide.tmpl
+++ b/Documentation/DocBook/procfs-guide.tmpl
@@ -85,7 +85,7 @@
- <preface>
+ <preface id="Preface">
<title>Preface</title>
<para>
@@ -230,7 +230,7 @@
- <sect1>
+ <sect1 id="Creating_a_symlink">
<title>Creating a symlink</title>
<funcsynopsis>
@@ -254,7 +254,7 @@
</para>
</sect1>
- <sect1>
+ <sect1 id="Creating_a_directory">
<title>Creating a directory</title>
<funcsynopsis>
@@ -274,7 +274,7 @@
- <sect1>
+ <sect1 id="Removing_an_entry">
<title>Removing an entry</title>
<funcsynopsis>
@@ -340,7 +340,7 @@ entry->write_proc = write_proc_foo;
- <sect1>
+ <sect1 id="Reading_data">
<title>Reading data</title>
<para>
@@ -448,7 +448,7 @@ entry->write_proc = write_proc_foo;
- <sect1>
+ <sect1 id="Writing_data">
<title>Writing data</title>
<para>
@@ -579,7 +579,7 @@ int foo_read_func(char *page, char **start, off_t off,
- <sect1>
+ <sect1 id="Modules">
<title>Modules</title>
<para>
@@ -599,7 +599,7 @@ entry->owner = THIS_MODULE;
- <sect1>
+ <sect1 id="Mode_and_ownership">
<title>Mode and ownership</title>
<para>
diff --git a/Documentation/DocBook/rapidio.tmpl b/Documentation/DocBook/rapidio.tmpl
index a8b88c47e809..b9e143e28c64 100644
--- a/Documentation/DocBook/rapidio.tmpl
+++ b/Documentation/DocBook/rapidio.tmpl
@@ -77,11 +77,11 @@
<chapter id="bugs">
<title>Known Bugs and Limitations</title>
- <sect1>
+ <sect1 id="known_bugs">
<title>Bugs</title>
<para>None. ;)</para>
</sect1>
- <sect1>
+ <sect1 id="Limitations">
<title>Limitations</title>
<para>
<orderedlist>
@@ -100,7 +100,7 @@
on devices, request/map memory region resources,
and manage mailboxes/doorbells.
</para>
- <sect1>
+ <sect1 id="Functions">
<title>Functions</title>
!Iinclude/linux/rio_drv.h
!Edrivers/rapidio/rio-driver.c
@@ -116,23 +116,23 @@
subsystem.
</para>
- <sect1><title>Structures</title>
+ <sect1 id="Structures"><title>Structures</title>
!Iinclude/linux/rio.h
</sect1>
- <sect1><title>Enumeration and Discovery</title>
+ <sect1 id="Enumeration_and_Discovery"><title>Enumeration and Discovery</title>
!Idrivers/rapidio/rio-scan.c
</sect1>
- <sect1><title>Driver functionality</title>
+ <sect1 id="Driver_functionality"><title>Driver functionality</title>
!Idrivers/rapidio/rio.c
!Idrivers/rapidio/rio-access.c
</sect1>
- <sect1><title>Device model support</title>
+ <sect1 id="Device_model_support"><title>Device model support</title>
!Idrivers/rapidio/rio-driver.c
</sect1>
- <sect1><title>Sysfs support</title>
+ <sect1 id="Sysfs_support"><title>Sysfs support</title>
!Idrivers/rapidio/rio-sysfs.c
</sect1>
- <sect1><title>PPC32 support</title>
+ <sect1 id="PPC32_support"><title>PPC32 support</title>
!Iarch/powerpc/kernel/rio.c
!Earch/powerpc/sysdev/fsl_rio.c
!Iarch/powerpc/sysdev/fsl_rio.c
diff --git a/Documentation/DocBook/s390-drivers.tmpl b/Documentation/DocBook/s390-drivers.tmpl
index 3d2f31b99dd9..4acc73240a6d 100644
--- a/Documentation/DocBook/s390-drivers.tmpl
+++ b/Documentation/DocBook/s390-drivers.tmpl
@@ -59,7 +59,7 @@
<title>Introduction</title>
<para>
This document describes the interfaces available for device drivers that
- drive s390 based channel attached devices. This includes interfaces for
+ drive s390 based channel attached I/O devices. This includes interfaces for
interaction with the hardware and interfaces for interacting with the
common driver core. Those interfaces are provided by the s390 common I/O
layer.
@@ -86,9 +86,10 @@
The ccw bus typically contains the majority of devices available to
a s390 system. Named after the channel command word (ccw), the basic
command structure used to address its devices, the ccw bus contains
- so-called channel attached devices. They are addressed via subchannels,
- visible on the css bus. A device driver, however, will never interact
- with the subchannel directly, but only via the device on the ccw bus,
+ so-called channel attached devices. They are addressed via I/O
+ subchannels, visible on the css bus. A device driver for
+ channel-attached devices, however, will never interact with the
+ subchannel directly, but only via the I/O device on the ccw bus,
the ccw device.
</para>
<sect1 id="channelIO">
@@ -116,7 +117,6 @@
!Iinclude/asm-s390/ccwdev.h
!Edrivers/s390/cio/device.c
!Edrivers/s390/cio/device_ops.c
-!Edrivers/s390/cio/airq.c
</sect1>
<sect1 id="cmf">
<title>The channel-measurement facility</title>
@@ -147,4 +147,15 @@
</sect1>
</chapter>
+ <chapter id="genericinterfaces">
+ <title>Generic interfaces</title>
+ <para>
+ Some interfaces are available to other drivers that do not necessarily
+ have anything to do with the busses described above, but still
+ indirectly use basic infrastructure in the common I/O layer.
+ One example is the support for adapter interrupts.
+ </para>
+!Edrivers/s390/cio/airq.c
+ </chapter>
+
</book>
diff --git a/Documentation/DocBook/scsi.tmpl b/Documentation/DocBook/scsi.tmpl
index f299ab182bbe..10a150ae2a7e 100644
--- a/Documentation/DocBook/scsi.tmpl
+++ b/Documentation/DocBook/scsi.tmpl
@@ -12,7 +12,7 @@
<surname>Bottomley</surname>
<affiliation>
<address>
- <email>James.Bottomley@steeleye.com</email>
+ <email>James.Bottomley@hansenpartnership.com</email>
</address>
</affiliation>
</author>
diff --git a/Documentation/DocBook/videobook.tmpl b/Documentation/DocBook/videobook.tmpl
index b3d93ee27693..89817795e668 100644
--- a/Documentation/DocBook/videobook.tmpl
+++ b/Documentation/DocBook/videobook.tmpl
@@ -170,7 +170,7 @@ int __init myradio_init(struct video_init *v)
<para>
The types available are
</para>
- <table frame="all"><title>Device Types</title>
+ <table frame="all" id="Device_Types"><title>Device Types</title>
<tgroup cols="3" align="left">
<tbody>
<row>
@@ -291,7 +291,7 @@ static int radio_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
allows the applications to find out what sort of a card they have found and
to figure out what they want to do about it. The fields in the structure are
</para>
- <table frame="all"><title>struct video_capability fields</title>
+ <table frame="all" id="video_capability_fields"><title>struct video_capability fields</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -365,7 +365,7 @@ static int radio_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
<para>
The video_tuner structure has the following fields
</para>
- <table frame="all"><title>struct video_tuner fields</title>
+ <table frame="all" id="video_tuner_fields"><title>struct video_tuner fields</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -398,7 +398,7 @@ static int radio_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
</tgroup>
</table>
- <table frame="all"><title>struct video_tuner flags</title>
+ <table frame="all" id="video_tuner_flags"><title>struct video_tuner flags</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -421,7 +421,7 @@ static int radio_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
</tgroup>
</table>
- <table frame="all"><title>struct video_tuner modes</title>
+ <table frame="all" id="video_tuner_modes"><title>struct video_tuner modes</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -572,7 +572,7 @@ static int current_volume=0;
<para>
Then we fill in the video_audio structure. This has the following format
</para>
- <table frame="all"><title>struct video_audio fields</title>
+ <table frame="all" id="video_audio_fields"><title>struct video_audio fields</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -607,7 +607,7 @@ static int current_volume=0;
</tgroup>
</table>
- <table frame="all"><title>struct video_audio flags</title>
+ <table frame="all" id="video_audio_flags"><title>struct video_audio flags</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -625,7 +625,7 @@ static int current_volume=0;
</tgroup>
</table>
- <table frame="all"><title>struct video_audio modes</title>
+ <table frame="all" id="video_audio_modes"><title>struct video_audio modes</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -775,7 +775,7 @@ module_exit(cleanup);
</para>
</sect1>
</chapter>
- <chapter>
+ <chapter id="Video_Capture_Devices">
<title>Video Capture Devices</title>
<sect1 id="introvid">
<title>Video Capture Device Types</title>
@@ -855,7 +855,7 @@ static struct video_device my_camera
We use the extra video capability flags that did not apply to the
radio interface. The video related flags are
</para>
- <table frame="all"><title>Capture Capabilities</title>
+ <table frame="all" id="Capture_Capabilities"><title>Capture Capabilities</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -1195,7 +1195,7 @@ static int camera_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
inputs to the video card). Our example card has a single camera input. The
fields in the structure are
</para>
- <table frame="all"><title>struct video_channel fields</title>
+ <table frame="all" id="video_channel_fields"><title>struct video_channel fields</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -1218,7 +1218,7 @@ static int camera_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
</tbody>
</tgroup>
</table>
- <table frame="all"><title>struct video_channel flags</title>
+ <table frame="all" id="video_channel_flags"><title>struct video_channel flags</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -1229,7 +1229,7 @@ static int camera_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
</tbody>
</tgroup>
</table>
- <table frame="all"><title>struct video_channel types</title>
+ <table frame="all" id="video_channel_types"><title>struct video_channel types</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -1242,7 +1242,7 @@ static int camera_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
</tbody>
</tgroup>
</table>
- <table frame="all"><title>struct video_channel norms</title>
+ <table frame="all" id="video_channel_norms"><title>struct video_channel norms</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -1328,7 +1328,7 @@ static int camera_ioctl(struct video_device *dev, unsigned int cmd, void *arg)
for every other pixel in the image. The other common formats the interface
defines are
</para>
- <table frame="all"><title>Framebuffer Encodings</title>
+ <table frame="all" id="Framebuffer_Encodings"><title>Framebuffer Encodings</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -1466,7 +1466,7 @@ static struct video_buffer capture_fb;
display. The video_window structure is used to describe the way the image
should be displayed.
</para>
- <table frame="all"><title>struct video_window fields</title>
+ <table frame="all" id="video_window_fields"><title>struct video_window fields</title>
<tgroup cols="2" align="left">
<tbody>
<row>
@@ -1503,7 +1503,7 @@ static struct video_buffer capture_fb;
<para>
Each clip is a struct video_clip which has the following fields
</para>
- <table frame="all"><title>video_clip fields</title>
+ <table frame="all" id="video_clip_fields"><title>video_clip fields</title>
<tgroup cols="2" align="left">
<tbody>
<row>
diff --git a/Documentation/DocBook/z8530book.tmpl b/Documentation/DocBook/z8530book.tmpl
index a507876447aa..42c75ba71ba2 100644
--- a/Documentation/DocBook/z8530book.tmpl
+++ b/Documentation/DocBook/z8530book.tmpl
@@ -77,7 +77,7 @@
</para>
</chapter>
- <chapter>
+ <chapter id="Driver_Modes">
<title>Driver Modes</title>
<para>
The Z85230 driver layer can drive Z8530, Z85C30 and Z85230 devices
@@ -108,7 +108,7 @@
</para>
</chapter>
- <chapter>
+ <chapter id="Using_the_Z85230_driver">
<title>Using the Z85230 driver</title>
<para>
The Z85230 driver provides the back end interface to your board. To
@@ -174,7 +174,7 @@
</para>
</chapter>
- <chapter>
+ <chapter id="Attaching_Network_Interfaces">
<title>Attaching Network Interfaces</title>
<para>
If you wish to use the network interface facilities of the driver,
@@ -216,7 +216,7 @@
</para>
</chapter>
- <chapter>
+ <chapter id="Configuring_And_Activating_The_Port">
<title>Configuring And Activating The Port</title>
<para>
The Z85230 driver provides helper functions and tables to load the
@@ -300,7 +300,7 @@
</para>
</chapter>
- <chapter>
+ <chapter id="Network_Layer_Functions">
<title>Network Layer Functions</title>
<para>
The Z8530 layer provides functions to queue packets for
@@ -327,7 +327,7 @@
</para>
</chapter>
- <chapter>
+ <chapter id="Porting_The_Z8530_Driver">
<title>Porting The Z8530 Driver</title>
<para>
The Z8530 driver is written to be portable. In DMA mode it makes
diff --git a/Documentation/Smack.txt b/Documentation/Smack.txt
new file mode 100644
index 000000000000..989c2fcd8111
--- /dev/null
+++ b/Documentation/Smack.txt
@@ -0,0 +1,493 @@
+
+
+ "Good for you, you've decided to clean the elevator!"
+ - The Elevator, from Dark Star
+
+Smack is the Simplified Mandatory Access Control Kernel.
+Smack is a kernel based implementation of mandatory access
+control that includes simplicity in its primary design goals.
+
+Smack is not the only Mandatory Access Control scheme
+available for Linux. Those new to Mandatory Access Control
+are encouraged to compare Smack with the other mechanisms
+available to determine which is best suited to the problem
+at hand.
+
+Smack consists of three major components:
+ - The kernel
+ - A start-up script and a few modified applications
+ - Configuration data
+
+The kernel component of Smack is implemented as a Linux
+Security Modules (LSM) module. It requires netlabel and
+works best with file systems that support extended attributes,
+although xattr support is not strictly required.
+It is safe to run a Smack kernel under a "vanilla" distribution.
+Smack kernels use the CIPSO IP option. Some network
+configurations are intolerant of IP options and can impede
+access to systems that use them as Smack does.
+
+The startup script etc-init.d-smack should be installed
+in /etc/init.d/smack and should be invoked early in the
+start-up process. On Fedora rc5.d/S02smack is recommended.
+This script ensures that certain devices have the correct
+Smack attributes and loads the Smack configuration if
+any is defined. This script invokes two programs that
+ensure configuration data is properly formatted. These
+programs are /usr/sbin/smackload and /usr/sbin/smackcipso.
+The system will run just fine without these programs,
+but it will be difficult to set access rules properly.
+
+A version of "ls" that provides a "-M" option to display
+Smack labels on long listing is available.
+
+A hacked version of sshd that allows network logins by users
+with specific Smack labels is available. This version does
+not work for scp. You must set the /etc/ssh/sshd_config
+line:
+ UsePrivilegeSeparation no
+
+The format of /etc/smack/usr is:
+
+ username smack
+
+In keeping with the intent of Smack, configuration data is
+minimal and not strictly required. The most important
+configuration step is mounting the smackfs pseudo filesystem.
+
+Add this line to /etc/fstab:
+
+ smackfs /smack smackfs smackfsdef=* 0 0
+
+and create the /smack directory for mounting.
+
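
If you would rather mount smackfs from code than from fstab, the sketch
below restates the fstab line above as a mount(2) call. The mount point,
filesystem type and smackfsdef=* option are taken from the document; the
program itself is only an illustration, not part of the patch.

    /* Sketch only: programmatic equivalent of the fstab line above. */
    #include <stdio.h>
    #include <errno.h>
    #include <sys/mount.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    int main(void)
    {
            /* Create the mount point; EEXIST means it is already there. */
            if (mkdir("/smack", 0755) && errno != EEXIST) {
                    perror("mkdir /smack");
                    return 1;
            }
            /* smackfs /smack smackfs smackfsdef=* 0 0 */
            if (mount("smackfs", "/smack", "smackfs", 0, "smackfsdef=*")) {
                    perror("mount smackfs");
                    return 1;
            }
            return 0;
    }
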
+Smack uses extended attributes (xattrs) to store file labels.
+The command to set a Smack label on a file is:
+
+ # attr -S -s SMACK64 -V "value" path
+
+NOTE: Smack labels are limited to 23 characters. The attr command
+ does not enforce this restriction and can be used to set
+ invalid Smack labels on files.
+
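Under the hood the attr command above sets the security.SMACK64 extended
attribute (the -S flag selects the security namespace). A small C sketch
using setxattr(2) is shown below for illustration; the 23-character limit
follows from the NOTE above, while the file path and label in main() are
made-up examples and not taken from the patch.

    /* Sketch only: label a file the way "attr -S -s SMACK64" does. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    static int smack_label_file(const char *path, const char *label)
    {
            /* Smack labels are limited to 23 characters (see above). */
            if (strlen(label) > 23) {
                    fprintf(stderr, "label too long: %s\n", label);
                    return -1;
            }
            /* attr -S maps to the "security." xattr namespace. */
            if (setxattr(path, "security.SMACK64", label, strlen(label), 0)) {
                    perror("setxattr");
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            /* Hypothetical path and label, for illustration only. */
            return smack_label_file("/tmp/example", "MyLabel") ? 1 : 0;
    }
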
+If you don't do anything special all users will get the floor ("_")
+label when they log in. If you do want to log in via the hacked ssh
+at other labels, use the attr command to set the smack value on the
+home directory and its contents.
+
+You can add access rules in /etc/smack/accesses. They take the form:
+
+ subjectlabel objectlabel access
+
+access is a combination of the letters rwxa which specify the
+kind of access permitted to a subject with subjectlabel on an
+object with objectlabel. If there is no rule, no access is allowed.
+
+A process can see the smack label it is running with by
+reading /proc/self/attr/current. A privileged process can
+set the process smack by writing there.
+
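A process can also check its own label programmatically; the sketch below
simply reads the /proc/self/attr/current file mentioned above. Writing a
new label there is the privileged operation described in the same
paragraph and is not attempted here; the code is illustrative only.

    /* Sketch only: print the Smack label of the current process. */
    #include <stdio.h>

    int main(void)
    {
            char label[64];
            FILE *f = fopen("/proc/self/attr/current", "r");

            if (!f) {
                    perror("/proc/self/attr/current");
                    return 1;
            }
            if (fgets(label, sizeof(label), f))
                    printf("current Smack label: %s\n", label);
            fclose(f);
            return 0;
    }
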
+Look for additional programs on http://schaufler-ca.com
+
+From the Smack Whitepaper:
+
+The Simplified Mandatory Access Control Kernel
+
+Casey Schaufler
+casey@schaufler-ca.com
+
+Mandatory Access Control
+
+Computer systems employ a variety of schemes to constrain how information is
+shared among the people and services using the machine. Some of these schemes
+allow the program or user to decide what other programs or users are allowed
+access to pieces of data. These schemes are called discretionary access
+control mechanisms because the access control is specified at the discretion
+of the user. Other schemes do not leave the decision regarding what a user or
+program can access up to users or programs. These schemes are called mandatory
+access control mechanisms because you don't have a choice regarding the users
+or programs that have access to pieces of data.
+
+Bell & LaPadula
+
+From the middle of the 1980's until the turn of the century Mandatory Access
+Control (MAC) was very closely associated with the Bell & LaPadula security
+model, a mathematical description of the United States Department of Defense
+policy for marking paper documents. MAC in this form enjoyed a following
+within the Capital Beltway and Scandinavian supercomputer centers but was
+often cited as failing to address general needs.
+
+Domain Type Enforcement
+
+Around the turn of the century Domain Type Enforcement (DTE) became popular.
+This scheme organizes users, programs, and data into domains that are
+protected from each other. This scheme has been widely deployed as a component
+of popular Linux distributions. The administrative overhead required to
+maintain this scheme and the detailed understanding of the whole system
+necessary to provide a secure domain mapping leads to the scheme being
+disabled or used in limited ways in the majority of cases.
+
+Smack
+
+Smack is a Mandatory Access Control mechanism designed to provide useful MAC
+while avoiding the pitfalls of its predecessors. The limitations of Bell &
+LaPadula are addressed by providing a scheme whereby access can be controlled
+according to the requirements of the system and its purpose rather than those
+imposed by an arcane government policy. The complexity of Domain Type
+Enforcement is avoided by defining access controls in terms of the access
+modes already in use.
+
+Smack Terminology
+
+The jargon used to talk about Smack will be familiar to those who have dealt
+with other MAC systems and shouldn't be too difficult for the uninitiated to
+pick up. There are four terms that are used in a specific way and that are
+especially important:
+
+ Subject: A subject is an active entity on the computer system.
+ On Smack a subject is a task, which is in turn the basic unit
+ of execution.
+
+ Object: An object is a passive entity on the computer system.
+ On Smack files of all types, IPC, and tasks can be objects.
+
+ Access: Any attempt by a subject to put information into or get
+ information from an object is an access.
+
+ Label: Data that identifies the Mandatory Access Control
+ characteristics of a subject or an object.
+
+These definitions are consistent with the traditional use in the security
+community. There are also some terms from Linux that are likely to crop up:
+
+ Capability: A task that possesses a capability has permission to
+ violate an aspect of the system security policy, as identified by
+ the specific capability. A task that possesses one or more
+ capabilities is a privileged task, whereas a task with no
+ capabilities is an unprivileged task.
+
+ Privilege: A task that is allowed to violate the system security
+ policy is said to have privilege. As of this writing a task can
+ have privilege either by possessing capabilities or by having an
+ effective user of root.
+
+Smack Basics
+
+Smack is an extension to a Linux system. It enforces additional restrictions
+on what subjects can access which objects, based on the labels attached to
+each of the subject and the object.
+
+Labels
+
+Smack labels are ASCII character strings, one to twenty-three characters in
+length. Single character labels using special characters (that is, anything
+other than a letter or digit) are reserved for use by the Smack development
+team. Smack labels are unstructured, case sensitive, and the only operation
+ever performed on them is comparison for equality. Smack labels cannot
+contain unprintable characters or the "/" (slash) character.
+
+There are some predefined labels:
+
+ _ Pronounced "floor", a single underscore character.
+ ^ Pronounced "hat", a single circumflex character.
+ * Pronounced "star", a single asterisk character.
+ ? Pronounced "huh", a single question mark character.
+
+Every task on a Smack system is assigned a label. System tasks, such as
+init(8) and systems daemons, are run with the floor ("_") label. User tasks
+are assigned labels according to the specification found in the
+/etc/smack/user configuration file.
+
+Access Rules
+
+Smack uses the traditional access modes of Linux. These modes are read,
+execute, write, and occasionally append. There are a few cases where the
+access mode may not be obvious. These include:
+
+ Signals: A signal is a write operation from the subject task to
+ the object task.
+ Internet Domain IPC: Transmission of a packet is considered a
+ write operation from the source task to the destination task.
+
+Smack restricts access based on the label attached to a subject and the label
+attached to the object it is trying to access. The rules enforced are, in
+order:
+
+ 1. Any access requested by a task labeled "*" is denied.
+ 2. A read or execute access requested by a task labeled "^"
+ is permitted.
+ 3. A read or execute access requested on an object labeled "_"
+ is permitted.
+ 4. Any access requested on an object labeled "*" is permitted.
+ 5. Any access requested by a task on an object with the same
+ label is permitted.
+ 6. Any access requested that is explicitly defined in the loaded
+ rule set is permitted.
+ 7. Any other access is denied.
+
+Smack Access Rules
+
+With the isolation provided by Smack, access separation is simple. There are
+many interesting cases where limited access by subjects to objects with
+different labels is desired. One example is the familiar spy model of
+sensitivity, where a scientist working on a highly classified project would be
+able to read documents of lower classifications, and anything she writes would
+be "born" highly classified. To accommodate such schemes Smack includes a
+mechanism for specifying rules allowing access between labels.
+
+Access Rule Format
+
+The format of an access rule is:
+
+ subject-label object-label access
+
+Where subject-label is the Smack label of the task, object-label is the Smack
+label of the thing being accessed, and access is a string specifying the sort
+of access allowed. The Smack labels are limited to 23 characters. The access
+specification is searched for letters that describe access modes:
+
+ a: indicates that append access should be granted.
+ r: indicates that read access should be granted.
+ w: indicates that write access should be granted.
+ x: indicates that execute access should be granted.
+
+Uppercase values for the specification letters are allowed as well.
+Access mode specifications can be in any order. Examples of acceptable rules
+are:
+
+ TopSecret Secret rx
+ Secret Unclass R
+ Manager Game x
+ User HR w
+ New Old rRrRr
+ Closed Off -
+
+Examples of unacceptable rules are:
+
+ Top Secret Secret rx
+ Ace Ace r
+ Odd spells waxbeans
+
+Spaces are not allowed in labels. Since a subject always has access to files
+with the same label, specifying a rule for that case is pointless. Only
+valid letters (rwxaRWXA) and the dash ('-') character are allowed in
+access specifications. The dash is a placeholder, so "a-r" is the same
+as "ar". A lone dash is used to specify that no access should be allowed.
+
+Applying Access Rules
+
+The developers of Linux rarely define new sorts of things, usually importing
+schemes and concepts from other systems. Most often, the other systems are
+variants of Unix. Unix has many endearing properties, but consistency of
+access control models is not one of them. Smack strives to treat accesses as
+uniformly as is sensible while keeping with the spirit of the underlying
+mechanism.
+
+File system objects including files, directories, named pipes, symbolic links,
+and devices require access permissions that closely match those used by mode
+bit access. To open a file for reading, read access is required on the file. To
+search a directory requires execute access. Creating a file with write access
+requires both read and write access on the containing directory. Deleting a
+file requires read and write access to the file and to the containing
+directory. It is possible that a user may be able to see that a file exists
+but not any of its attributes by the circumstance of having read access to the
+containing directory but not to the differently labeled file. This is an
+artifact of the file name being data in the directory, not a part of the file.
+
+IPC objects, message queues, semaphore sets, and memory segments exist in flat
+namespaces and access requests are only required to match the object in
+question.
+
+Process objects reflect tasks on the system and the Smack label used to access
+them is the same Smack label that the task would use for its own access
+attempts. Sending a signal via the kill() system call is a write operation
+from the signaler to the recipient. Debugging a process requires both reading
+and writing. Creating a new task is an internal operation that results in two
+tasks with identical Smack labels and requires no access checks.
+
+Sockets are data structures attached to processes and sending a packet from
+one process to another requires that the sender have write access to the
+receiver. The receiver is not required to have read access to the sender.
+
+Setting Access Rules
+
+The configuration file /etc/smack/accesses contains the rules to be set at
+system startup. The contents are written to the special file /smack/load.
+Rules can be written to /smack/load at any time and take effect immediately.
+For any pair of subject and object labels there can be only one rule, with the
+most recently specified overriding any earlier specification.
+
+The program smackload is provided to ensure data is formatted
+properly when written to /smack/load. This program reads lines
+of the form
+
+ subjectlabel objectlabel mode.
+
+Task Attribute
+
+The Smack label of a process can be read from /proc/<pid>/attr/current. A
+process can read its own Smack label from /proc/self/attr/current. A
+privileged process can change its own Smack label by writing to
+/proc/self/attr/current but not the label of another process.
+
+File Attribute
+
+The Smack label of a filesystem object is stored as an extended attribute
+named SMACK64 on the file. This attribute is in the security namespace. It can
+only be changed by a process with privilege.
+
+Privilege
+
+A process with CAP_MAC_OVERRIDE is privileged.
+
+Smack Networking
+
+As mentioned before, Smack enforces access control on network protocol
+transmissions. Every packet sent by a Smack process is tagged with its Smack
+label. This is done by adding a CIPSO tag to the header of the IP packet. Each
+packet received is expected to have a CIPSO tag that identifies the label and
+if it lacks such a tag the network ambient label is assumed. Before the packet
+is delivered a check is made to determine that a subject with the label on the
+packet has write access to the receiving process and if that is not the case
+the packet is dropped.
+
+CIPSO Configuration
+
+It is normally unnecessary to specify the CIPSO configuration. The default
+values used by the system handle all internal cases. Smack will compose CIPSO
+label values to match the Smack labels being used without administrative
+intervention. Unlabeled packets that come into the system will be given the
+ambient label.
+
+Smack requires configuration in the case where packets from a system that is
+not Smack but speaks CIPSO may be encountered. Usually this will be a Trusted
+Solaris system, but there are other, less widely deployed systems out there.
+CIPSO provides 3 important values, a Domain Of Interpretation (DOI), a level,
+and a category set with each packet. The DOI is intended to identify a group
+of systems that use compatible labeling schemes, and the DOI specified on the
+smack system must match that of the remote system or packets will be
+discarded. The DOI is 3 by default. The value can be read from /smack/doi and
+can be changed by writing to /smack/doi.
+
+The label and category set are mapped to a Smack label as defined in
+/etc/smack/cipso.
+
+A Smack/CIPSO mapping has the form:
+
+ smack level [category [category]*]
+
+Smack does not expect the level or category sets to be related in any
+particular way and does not assume or assign accesses based on them. Some
+examples of mappings:
+
+ TopSecret 7
+ TS:A,B 7 1 2
+ SecBDE 5 2 4 6
+ RAFTERS 7 12 26
+
+The ":" and "," characters are permitted in a Smack label but have no special
+meaning.
+
+The mapping of Smack labels to CIPSO values is defined by writing to
+/smack/cipso. Again, the format of data written to this special file
+is highly restrictive, so the program smackcipso is provided to
+ensure the writes are done properly. This program takes mappings
+on the standard input and sends them to /smack/cipso properly.
+
+In addition to explicit mappings Smack supports direct CIPSO mappings. One
+CIPSO level is used to indicate that the category set passed in the packet is
+in fact an encoding of the Smack label. The level used is 250 by default. The
+value can be read from /smack/direct and changed by writing to /smack/direct.
+
+Socket Attributes
+
+There are two attributes that are associated with sockets. These attributes
+can only be set by privileged tasks, but any task can read them for its own
+sockets.
+
+ SMACK64IPIN: The Smack label of the task object. A privileged
+ program that will enforce policy may set this to the star label.
+
+ SMACK64IPOUT: The Smack label transmitted with outgoing packets.
+ A privileged program may set this to match the label of another
+ task with which it hopes to communicate.
+
+Writing Applications for Smack
+
+There are three sorts of applications that will run on a Smack system. How an
+application interacts with Smack will determine what it will have to do to
+work properly under Smack.
+
+Smack Ignorant Applications
+
+By far the majority of applications have no reason whatever to care about the
+unique properties of Smack. Since invoking a program has no impact on the
+Smack label associated with the process the only concern likely to arise is
+whether the process has execute access to the program.
+
+Smack Relevant Applications
+
+Some programs can be improved by teaching them about Smack, but do not make
+any security decisions themselves. The utility ls(1) is one example of such a
+program.
+
+Smack Enforcing Applications
+
+These are special programs that not only know about Smack, but participate in
+the enforcement of system policy. In most cases these are the programs that
+set up user sessions. There are also network services that provide information
+to processes running with various labels.
+
+File System Interfaces
+
+Smack maintains labels on file system objects using extended attributes. The
+Smack label of a file, directory, or other file system object can be obtained
+using getxattr(2).
+
+ len = getxattr("/", "security.SMACK64", value, sizeof (value));
+
+will put the Smack label of the root directory into value. A privileged
+process can set the Smack label of a file system object with setxattr(2).
+
+ len = strlen("Rubble");
+ rc = setxattr("/foo", "security.SMACK64", "Rubble", len, 0);
+
+will set the Smack label of /foo to "Rubble" if the program has appropriate
+privilege.
+
+Socket Interfaces
+
+The socket attributes can be read using fgetxattr(2).
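+
+For instance, an illustrative sketch (not taken from any Smack utility) that
+reads the incoming-packet label of a socket the caller owns could look like:
+
+	#include <stdio.h>
+	#include <sys/xattr.h>
+
+	static void show_ipin_label(int sock_fd)
+	{
+		char label[64];
+		ssize_t len;
+
+		len = fgetxattr(sock_fd, "security.SMACK64IPIN",
+				label, sizeof(label) - 1);
+		if (len < 0) {
+			perror("fgetxattr");
+			return;
+		}
+		label[len] = '\0';	/* the attribute value is not NUL terminated */
+		printf("SMACK64IPIN is \"%s\"\n", label);
+	}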
+
+A privileged process can set the Smack label of outgoing packets with
+fsetxattr(2).
+
+ len = strlen("Rubble");
+ rc = fsetxattr(fd, "security.SMACK64IPOUT", "Rubble", len, 0);
+
+will set the Smack label "Rubble" on packets going out from the socket if the
+program has appropriate privilege.
+
+ rc = fsetxattr(fd, "security.SMACK64IPIN", "*", strlen("*"), 0);
+
+will set the Smack label "*" as the object label against which incoming
+packets will be checked if the program has appropriate privilege.
+
+Administration
+
+Smack supports some mount options:
+
+ smackfsdef=label: specifies the label to give files that lack
+ the Smack label extended attribute.
+
+ smackfsroot=label: specifies the label to assign the root of the
+ file system if it lacks the Smack extended attribute.
+
+ smackfshat=label: specifies a label that must have read access to
+ all labels set on the filesystem. Not yet enforced.
+
+ smackfsfloor=label: specifies a label to which all labels set on the
+ filesystem must have read access. Not yet enforced.
+
+These mount options apply to all file system types.
+
diff --git a/Documentation/acpi/dsdt-override.txt b/Documentation/acpi/dsdt-override.txt
new file mode 100644
index 000000000000..5008f256a2db
--- /dev/null
+++ b/Documentation/acpi/dsdt-override.txt
@@ -0,0 +1,15 @@
+Linux supports two methods of overriding the BIOS DSDT:
+
+CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel.
+
+CONFIG_ACPI_CUSTOM_DSDT_INITRD adds the image to the initrd.
+
+When to use these methods is described in detail on the
+Linux/ACPI home page:
+http://www.lesswatts.org/projects/acpi/overridingDSDT.php
+
+Note that if both options are used, the DSDT supplied
+by the INITRD method takes precedence.
+
+Documentation/initramfs-add-dsdt.sh is provided for convenience
+for use with the CONFIG_ACPI_CUSTOM_DSDT_INITRD method.
diff --git a/Documentation/acpi/initramfs-add-dsdt.sh b/Documentation/acpi/initramfs-add-dsdt.sh
new file mode 100755
index 000000000000..17ef6e838e14
--- /dev/null
+++ b/Documentation/acpi/initramfs-add-dsdt.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Adds a DSDT file to the initrd (if it's an initramfs)
+# first argument is the name of archive
+# second argument is the name of the file to add
+# The file will be copied as /DSDT.aml
+
+# 20060126: fix "Premature end of file" with some old cpio (Roland Robic)
+# 20060205: this time it should really work
+
+# check the arguments
+if [ $# -ne 2 ]; then
+ program_name=$(basename "$0")
+ echo "\
+$program_name: wrong number of arguments
+Usage: $program_name initrd-name.img DSDT-to-add.aml
+Adds a DSDT file to an initrd (in initramfs format)
+
+ initrd-name.img: filename of the initrd in initramfs format
+ DSDT-to-add.aml: filename of the DSDT file to add
+ " 1>&2
+ exit 1
+fi
+
+# we should check it's an initramfs
+
+tempcpio=$(mktemp -d)
+# cleanup on exit, hangup, interrupt, quit, termination
+trap 'rm -rf $tempcpio' 0 1 2 3 15
+
+# extract the archive
+gunzip -c "$1" > "$tempcpio"/initramfs.cpio || exit 1
+
+# copy the DSDT file at the root of the directory so that we can call it "/DSDT.aml"
+cp -f "$2" "$tempcpio"/DSDT.aml
+
+# add the file
+cd "$tempcpio"
+(echo DSDT.aml | cpio --quiet -H newc -o -A -O "$tempcpio"/initramfs.cpio) || exit 1
+cd "$OLDPWD"
+
+# re-compress the archive
+gzip -c "$tempcpio"/initramfs.cpio > "$1"
+
diff --git a/Documentation/acpi/method-tracing.txt b/Documentation/acpi/method-tracing.txt
new file mode 100644
index 000000000000..f6efb1ea559a
--- /dev/null
+++ b/Documentation/acpi/method-tracing.txt
@@ -0,0 +1,26 @@
+/sys/module/acpi/parameters/:
+
+trace_method_name
+ The AML method name that the user wants to trace
+
+trace_debug_layer
+ The temporary debug_layer used when tracing the method.
+ 0xffffffff is used by default if this is 0.
+
+trace_debug_level
+ The temporary debug_level used when tracing the method.
+ 0x00ffffff is used by default if this is 0.
+
+trace_state
+ The status of the tracing feature.
+
+ "enabled" means this feature is enabled
+ and the AML method is traced every time it's executed.
+
+ "1" means this feature is enabled and the AML method
+ will only be traced during the next execution.
+
+ "disabled" means this feature is disabled.
+ Users can enable/disable this debug tracing feature by
+ "echo string > /sys/module/acpi/parameters/trace_state".
+ "string" should be one of "enable", "disable" and "1".
diff --git a/Documentation/cgroups.txt b/Documentation/cgroups.txt
index 98a26f81fa75..42d7c4cb39cd 100644
--- a/Documentation/cgroups.txt
+++ b/Documentation/cgroups.txt
@@ -456,7 +456,7 @@ methods are create/destroy. Any others that are null are presumed to
be successful no-ops.
struct cgroup_subsys_state *create(struct cgroup *cont)
-LL=cgroup_mutex
+(cgroup_mutex held by caller)
Called to create a subsystem state object for a cgroup. The
subsystem should allocate its subsystem state object for the passed
@@ -471,14 +471,19 @@ it's the root of the hierarchy) and may be an appropriate place for
initialization code.
void destroy(struct cgroup *cont)
-LL=cgroup_mutex
+(cgroup_mutex held by caller)
-The cgroup system is about to destroy the passed cgroup; the
-subsystem should do any necessary cleanup
+The cgroup system is about to destroy the passed cgroup; the subsystem
+should do any necessary cleanup and free its subsystem state
+object. By the time this method is called, the cgroup has already been
+unlinked from the file system and from the child list of its parent;
+cgroup->parent is still valid. (Note - can also be called for a
+newly-created cgroup if an error occurs after this subsystem's
+create() method has been called for the new cgroup).
int can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
struct task_struct *task)
-LL=cgroup_mutex
+(cgroup_mutex held by caller)
Called prior to moving a task into a cgroup; if the subsystem
returns an error, this will abort the attach operation. If a NULL
@@ -489,25 +494,20 @@ remain valid while the caller holds cgroup_mutex.
void attach(struct cgroup_subsys *ss, struct cgroup *cont,
struct cgroup *old_cont, struct task_struct *task)
-LL=cgroup_mutex
-
Called after the task has been attached to the cgroup, to allow any
post-attachment activity that requires memory allocations or blocking.
void fork(struct cgroup_subsy *ss, struct task_struct *task)
-LL=callback_mutex, maybe read_lock(tasklist_lock)
Called when a task is forked into a cgroup. Also called during
registration for all existing tasks.
void exit(struct cgroup_subsys *ss, struct task_struct *task)
-LL=callback_mutex
Called during task exit
int populate(struct cgroup_subsys *ss, struct cgroup *cont)
-LL=none
Called after creation of a cgroup to allow a subsystem to populate
the cgroup directory with file entries. The subsystem should make
@@ -524,7 +524,7 @@ example in cpusets, no task may attach before 'cpus' and 'mems' are set
up.
void bind(struct cgroup_subsys *ss, struct cgroup *root)
-LL=callback_mutex
+(cgroup_mutex held by caller)
Called when a cgroup subsystem is rebound to a different hierarchy
and root cgroup. Currently this will only involve movement between
diff --git a/Documentation/controllers/memory.txt b/Documentation/controllers/memory.txt
new file mode 100644
index 000000000000..b5bbea92a61a
--- /dev/null
+++ b/Documentation/controllers/memory.txt
@@ -0,0 +1,279 @@
+Memory Controller
+
+Salient features
+
+a. Enable control of both RSS (mapped) and Page Cache (unmapped) pages
+b. The infrastructure allows easy addition of other types of memory to control
+c. Provides *zero overhead* for non memory controller users
+d. Provides a double LRU: global memory pressure causes reclaim from the
+ global LRU; a cgroup, on hitting a limit, reclaims from the per-cgroup
+ LRU
+
+NOTE: Swap Cache (unmapped) is not accounted now.
+
+Benefits and Purpose of the memory controller
+
+The memory controller isolates the memory behaviour of a group of tasks
+from the rest of the system. The article on LWN [12] mentions some probable
+uses of the memory controller. The memory controller can be used to
+
+a. Isolate an application or a group of applications
+ Memory hungry applications can be isolated and limited to a smaller
+ amount of memory.
+b. Create a cgroup with a limited amount of memory; this can be used
+ as a good alternative to booting with mem=XXXX.
+c. Virtualization solutions can control the amount of memory they want
+ to assign to a virtual machine instance.
+d. A CD/DVD burner could control the amount of memory used by the
+ rest of the system to ensure that burning does not fail due to lack
+ of available memory.
+e. There are several other use cases, find one or use the controller just
+ for fun (to learn and hack on the VM subsystem).
+
+1. History
+
+The memory controller has a long history. A request for comments for the memory
+controller was posted by Balbir Singh [1]. At the time the RFC was posted
+there were several implementations for memory control. The goal of the
+RFC was to build consensus and agreement for the minimal features required
+for memory control. The first RSS controller was posted by Balbir Singh[2]
+in Feb 2007. Pavel Emelianov [3][4][5] has since posted three versions of the
+RSS controller. At OLS, at the resource management BoF, everyone suggested
+that we handle both page cache and RSS together. Another request was raised
+to allow user space handling of OOM. The current memory controller is
+at version 6; it combines both mapped (RSS) and unmapped Page
+Cache Control [11].
+
+2. Memory Control
+
+Memory is a unique resource in the sense that it is present in a limited
+amount. If a task requires a lot of CPU processing, the task can spread
+its processing over a period of hours, days, months or years, but with
+memory, the same physical memory needs to be reused to accomplish the task.
+
+The memory controller implementation has been divided into phases. These
+are:
+
+1. Memory controller
+2. mlock(2) controller
+3. Kernel user memory accounting and slab control
+4. user mappings length controller
+
+The memory controller is the first controller developed.
+
+2.1. Design
+
+The core of the design is a counter called the res_counter. The res_counter
+tracks the current memory usage and limit of the group of processes associated
+with the controller. Each cgroup has a memory controller specific data
+structure (mem_cgroup) associated with it.
+
+2.2. Accounting
+
+ +--------------------+
+ | mem_cgroup |
+ | (res_counter) |
+ +--------------------+
+ / ^ \
+ / | \
+ +---------------+ | +---------------+
+ | mm_struct | |.... | mm_struct |
+ | | | | |
+ +---------------+ | +---------------+
+ |
+ + --------------+
+ |
+ +---------------+ +------+--------+
+ | page +----------> page_cgroup|
+ | | | |
+ +---------------+ +---------------+
+
+ (Figure 1: Hierarchy of Accounting)
+
+
+Figure 1 shows the important aspects of the controller
+
+1. Accounting happens per cgroup
+2. Each mm_struct knows about which cgroup it belongs to
+3. Each page has a pointer to the page_cgroup, which in turn knows the
+ cgroup it belongs to
+
+The accounting is done as follows: mem_cgroup_charge() is invoked to setup
+the necessary data structures and check if the cgroup that is being charged
+is over its limit. If it is then reclaim is invoked on the cgroup.
+More details can be found in the reclaim section of this document.
+If everything goes well, a page meta-data-structure called page_cgroup is
+allocated and associated with the page. This routine also adds the page to
+the per cgroup LRU.
+
+2.2.1 Accounting details
+
+All mapped pages (RSS) and unmapped user pages (Page Cache) are accounted.
+RSS pages are accounted at the time of page_add_*_rmap() unless they've already
+been accounted for earlier. A file page will be accounted for as Page Cache;
+when it is mapped into the page tables of a process, duplicate accounting is
+carefully avoided. Page Cache pages are accounted for at the time of
+add_to_page_cache(). The corresponding routines that remove a page from the
+page tables or from the Page Cache are used to decrement the accounting
+counters of the cgroup.
+
+2.3 Shared Page Accounting
+
+Shared pages are accounted on the basis of the first touch approach. The
+cgroup that first touches a page is accounted for the page. The principle
+behind this approach is that a cgroup that aggressively uses a shared
+page will eventually get charged for it (once it is uncharged from
+the cgroup that brought it in -- this will happen on memory pressure).
+
+2.4 Reclaim
+
+Each cgroup maintains a per cgroup LRU that consists of an active
+and inactive list. When a cgroup goes over its limit, we first try
+to reclaim memory from the cgroup so as to make space for the new
+pages that the cgroup has touched. If the reclaim is unsuccessful,
+an OOM routine is invoked to select and kill the bulkiest task in the
+cgroup.
+
+The reclaim algorithm has not been modified for cgroups, except that
+pages that are selected for reclaiming come from the per cgroup LRU
+list.
+
+2.5 Locking
+
+The memory controller uses the following hierarchy
+
+1. zone->lru_lock is used for selecting pages to be isolated
+2. mem->per_zone->lru_lock protects the per cgroup LRU (per zone)
+3. lock_page_cgroup() is used to protect page->page_cgroup
+
+3. User Interface
+
+0. Configuration
+
+a. Enable CONFIG_CGROUPS
+b. Enable CONFIG_RESOURCE_COUNTERS
+c. Enable CONFIG_CGROUP_MEM_CONT
+
+1. Prepare the cgroups
+# mkdir -p /cgroups
+# mount -t cgroup none /cgroups -o memory
+
+2. Make the new group and move bash into it
+# mkdir /cgroups/0
+# echo $$ > /cgroups/0/tasks
+
+Since we are now in the 0 cgroup, we can alter the memory limit:
+# echo -n 4M > /cgroups/0/memory.limit_in_bytes
+
+NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
+mega or gigabytes.
+
+# cat /cgroups/0/memory.limit_in_bytes
+4194304 Bytes
+
+NOTE: The interface has now changed to display the usage in bytes
+instead of pages
+
+We can check the usage:
+# cat /cgroups/0/memory.usage_in_bytes
+1216512 Bytes
+
+A successful write to this file does not guarantee a successful set of
+this limit to the value written into the file. This can be due to a
+number of factors, such as rounding up to page boundaries or the total
+availability of memory on the system. The user is required to re-read
+this file after a write to guarantee the value committed by the kernel.
+
+# echo -n 1 > memory.limit_in_bytes
+# cat memory.limit_in_bytes
+4096 Bytes
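+
+The same write-then-read-back pattern applies when a program manages the
+limit directly. This is a minimal sketch; the path and the 4M value are
+simply the example used above, nothing mandated by the controller:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		const char *path = "/cgroups/0/memory.limit_in_bytes";
+		char committed[64];
+		FILE *f;
+
+		/* request a limit ... */
+		f = fopen(path, "w");
+		if (f == NULL)
+			return 1;
+		fprintf(f, "4M");
+		fclose(f);
+
+		/* ... then re-read to see what the kernel actually committed */
+		f = fopen(path, "r");
+		if (f == NULL || fgets(committed, sizeof(committed), f) == NULL)
+			return 1;
+		fclose(f);
+		printf("limit committed by the kernel: %s", committed);
+		return 0;
+	}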
+
+The memory.failcnt field gives the number of times that the cgroup limit was
+exceeded.
+
+The memory.stat file gives accounting information. Now, the number of
+caches, RSS and Active pages/Inactive pages are shown.
+
+The memory.force_empty gives an interface to drop *all* charges by force.
+
+# echo -n 1 > memory.force_empty
+
+will drop all charges in the cgroup. Currently, this is maintained for testing.
+
+4. Testing
+
+Balbir posted lmbench, AIM9, LTP and vmmstress results [10] and [11].
+Apart from that v6 has been tested with several applications and regular
+daily use. The controller has also been tested on the PPC64, x86_64 and
+UML platforms.
+
+4.1 Troubleshooting
+
+Sometimes a user might find that the application under a cgroup is
+terminated. There are several causes for this:
+
+1. The cgroup limit is too low (just too low to do anything useful)
+2. The user is using anonymous memory and swap is turned off or too low
+
+A sync followed by echo 1 > /proc/sys/vm/drop_caches will help get rid of
+some of the pages cached in the cgroup (page cache pages).
+
+4.2 Task migration
+
+When a task migrates from one cgroup to another, its charge is not
+carried forward. The pages allocated from the original cgroup still
+remain charged to it; the charge is dropped when the page is freed or
+reclaimed.
+
+4.3 Removing a cgroup
+
+A cgroup can be removed by rmdir, but as discussed in sections 4.1 and 4.2, a
+cgroup might have some charge associated with it, even though all
+tasks have migrated away from it. Such charges are automatically dropped at
+rmdir() if there are no tasks.
+
+4.4 Choosing what to account -- Page Cache (unmapped) vs RSS (mapped)?
+
+The type of memory accounted by the cgroup can be limited to just
+mapped pages by writing "1" to the memory.control_type field:
+
+echo -n 1 > memory.control_type
+
+5. TODO
+
+1. Add support for accounting huge pages (as a separate controller)
+2. Make per-cgroup scanner reclaim not-shared pages first
+3. Teach controller to account for shared-pages
+4. Start reclamation when the limit is lowered
+5. Start reclamation in the background when the limit is
+ not yet hit but the usage is getting closer
+
+Summary
+
+Overall, the memory controller has been a stable controller and has been
+commented and discussed quite extensively in the community.
+
+References
+
+1. Singh, Balbir. RFC: Memory Controller, http://lwn.net/Articles/206697/
+2. Singh, Balbir. Memory Controller (RSS Control),
+ http://lwn.net/Articles/222762/
+3. Emelianov, Pavel. Resource controllers based on process cgroups
+ http://lkml.org/lkml/2007/3/6/198
+4. Emelianov, Pavel. RSS controller based on process cgroups (v2)
+ http://lkml.org/lkml/2007/4/9/74
+5. Emelianov, Pavel. RSS controller based on process cgroups (v3)
+ http://lkml.org/lkml/2007/5/30/244
+6. Menage, Paul. Control Groups v10, http://lwn.net/Articles/236032/
+7. Vaidyanathan, Srinivasan, Control Groups: Pagecache accounting and control
+ subsystem (v3), http://lwn.net/Articles/235534/
+8. Singh, Balbir. RSS controller V2 test results (lmbench),
+ http://lkml.org/lkml/2007/5/17/232
+9. Singh, Balbir. RSS controller V2 AIM9 results
+ http://lkml.org/lkml/2007/5/18/1
+10. Singh, Balbir. Memory controller v6 results,
+ http://lkml.org/lkml/2007/8/19/36
+11. Singh, Balbir. Memory controller v6, http://lkml.org/lkml/2007/8/17/69
+12. Corbet, Jonathan, Controlling memory use in cgroups,
+ http://lwn.net/Articles/243795/
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt
index 141bef1c8599..43db6fe12814 100644
--- a/Documentation/cpusets.txt
+++ b/Documentation/cpusets.txt
@@ -523,21 +523,14 @@ from one cpuset to another, then the kernel will adjust the tasks
memory placement, as above, the next time that the kernel attempts
to allocate a page of memory for that task.
-If a cpuset has its CPUs modified, then each task using that
-cpuset does _not_ change its behavior automatically. In order to
-minimize the impact on the critical scheduling code in the kernel,
-tasks will continue to use their prior CPU placement until they
-are rebound to their cpuset, by rewriting their pid to the 'tasks'
-file of their cpuset. If a task had been bound to some subset of its
-cpuset using the sched_setaffinity() call, and if any of that subset
-is still allowed in its new cpuset settings, then the task will be
-restricted to the intersection of the CPUs it was allowed on before,
-and its new cpuset CPU placement. If, on the other hand, there is
-no overlap between a tasks prior placement and its new cpuset CPU
-placement, then the task will be allowed to run on any CPU allowed
-in its new cpuset. If a task is moved from one cpuset to another,
-its CPU placement is updated in the same way as if the tasks pid is
-rewritten to the 'tasks' file of its current cpuset.
+If a cpuset has its 'cpus' modified, then each task in that cpuset
+will have its allowed CPU placement changed immediately. Similarly,
+if a task's pid is written to a cpuset's 'tasks' file, in either its
+current cpuset or another cpuset, then its allowed CPU placement is
+changed immediately. If such a task had been bound to some subset
+of its cpuset using the sched_setaffinity() call, the task will be
+allowed to run on any CPU allowed in its new cpuset, negating the
+effect of the prior sched_setaffinity() call.
In summary, the memory placement of a task whose cpuset is changed is
updated by the kernel, on the next allocation of a page for that task,
diff --git a/Documentation/drivers/edac/edac.txt b/Documentation/edac.txt
index a5c36842ecef..a5c36842ecef 100644
--- a/Documentation/drivers/edac/edac.txt
+++ b/Documentation/edac.txt
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index 113165b48305..2ebb94d6ed8e 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -170,7 +170,6 @@ Sylpheed (GUI)
- Works well for inlining text (or using attachments).
- Allows use of an external editor.
-- Not good for IMAP.
- Is slow on large folders.
- Won't do TLS SMTP auth over a non-SSL connection.
- Has a helpful ruler bar in the compose window.
diff --git a/Documentation/fb/deferred_io.txt b/Documentation/fb/deferred_io.txt
index 63883a892120..748328370250 100644
--- a/Documentation/fb/deferred_io.txt
+++ b/Documentation/fb/deferred_io.txt
@@ -7,10 +7,10 @@ IO. The following example may be a useful explanation of how one such setup
works:
- userspace app like Xfbdev mmaps framebuffer
-- deferred IO and driver sets up nopage and page_mkwrite handlers
+- deferred IO and driver sets up fault and page_mkwrite handlers
- userspace app tries to write to mmaped vaddress
-- we get pagefault and reach nopage handler
-- nopage handler finds and returns physical page
+- we get pagefault and reach fault handler
+- fault handler finds and returns physical page
- we get page_mkwrite where we add this page to a list
- schedule a workqueue task to be run after a delay
- app continues writing to that page with no additional cost. this is
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index a7d9d179131a..17b1659bd3f8 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -6,14 +6,6 @@ be removed from this file.
---------------------------
-What: MXSER
-When: December 2007
-Why: Old mxser driver is obsoleted by the mxser_new. Give it some time yet
- and remove it.
-Who: Jiri Slaby <jirislaby@gmail.com>
-
----------------------------
-
What: dev->power.power_state
When: July 2007
Why: Broken design for runtime control over driver power states, confusing
@@ -208,13 +200,6 @@ Who: Randy Dunlap <randy.dunlap@oracle.com>
---------------------------
-What: drivers depending on OSS_OBSOLETE
-When: options in 2.6.23, code in 2.6.25
-Why: obsolete OSS drivers
-Who: Adrian Bunk <bunk@stusta.de>
-
----------------------------
-
What: libata spindown skipping and warning
When: Dec 2008
Why: Some halt(8) implementations synchronize caches for and spin
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 1de155e2dc36..e68021c08fbd 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -32,6 +32,8 @@ directory-locking
- info about the locking scheme used for directory operations.
dlmfs.txt
- info on the userspace interface to the OCFS2 DLM.
+dnotify.txt
+ - info about directory notification in Linux.
ecryptfs.txt
- docs on eCryptfs: stacked cryptographic filesystem for Linux.
ext2.txt
@@ -80,6 +82,8 @@ relay.txt
- info on relay, for efficient streaming from kernel to user space.
romfs.txt
- description of the ROMFS filesystem.
+sharedsubtree.txt
+ - a description of shared subtrees for namespaces.
smbfs.txt
- info on using filesystems with the SMB protocol (Win 3.11 and NT).
spufs.txt
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 37c10cba7177..42d4b30b1045 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -90,7 +90,6 @@ of the locking scheme for directory operations.
prototypes:
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
- void (*read_inode) (struct inode *);
void (*dirty_inode) (struct inode *);
int (*write_inode) (struct inode *, int);
void (*put_inode) (struct inode *);
@@ -114,7 +113,6 @@ locking rules:
BKL s_lock s_umount
alloc_inode: no no no
destroy_inode: no
-read_inode: no (see below)
dirty_inode: no (must not sleep)
write_inode: no
put_inode: no
@@ -133,7 +131,6 @@ show_options: no (vfsmount->sem)
quota_read: no no no (see below)
quota_write: no no no (see below)
-->read_inode() is not a method - it's a callback used in iget().
->remount_fs() will have the s_umount lock if it's already mounted.
When called from get_sb_single, it does NOT have the s_umount lock.
->quota_read() and ->quota_write() functions are both guaranteed to
diff --git a/Documentation/dnotify.txt b/Documentation/filesystems/dnotify.txt
index 6984fca6002a..9f5d338ddbb8 100644
--- a/Documentation/dnotify.txt
+++ b/Documentation/filesystems/dnotify.txt
@@ -69,24 +69,24 @@ Example
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
-
+
static volatile int event_fd;
-
+
static void handler(int sig, siginfo_t *si, void *data)
{
event_fd = si->si_fd;
}
-
+
int main(void)
{
struct sigaction act;
int fd;
-
+
act.sa_sigaction = handler;
sigemptyset(&act.sa_mask);
act.sa_flags = SA_SIGINFO;
sigaction(SIGRTMIN + 1, &act, NULL);
-
+
fd = open(".", O_RDONLY);
fcntl(fd, F_SETSIG, SIGRTMIN + 1);
fcntl(fd, F_NOTIFY, DN_MODIFY|DN_CREATE|DN_MULTISHOT);
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 0f33c77bc14b..92b888d540a6 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -34,8 +34,8 @@ FOO_I(inode) (see in-tree filesystems for examples).
Make them ->alloc_inode and ->destroy_inode in your super_operations.
-Keep in mind that now you need explicit initialization of private data -
-typically in ->read_inode() and after getting an inode from new_inode().
+Keep in mind that now you need explicit initialization of private data
+typically between calling iget_locked() and unlocking the inode.
At some point that will become mandatory.
@@ -173,10 +173,10 @@ should be a non-blocking function that initializes those parts of a
newly created inode to allow the test function to succeed. 'data' is
passed as an opaque value to both test and set functions.
-When the inode has been created by iget5_locked(), it will be returned with
-the I_NEW flag set and will still be locked. read_inode has not been
-called so the file system still has to finalize the initialization. Once
-the inode is initialized it must be unlocked by calling unlock_new_inode().
+When the inode has been created by iget5_locked(), it will be returned with the
+I_NEW flag set and will still be locked. The filesystem then needs to finalize
+the initialization. Once the inode is initialized it must be unlocked by
+calling unlock_new_inode().
The filesystem is responsible for setting (and possibly testing) i_ino
when appropriate. There is also a simpler iget_locked function that
@@ -184,11 +184,19 @@ just takes the superblock and inode number as arguments and does the
test and set for you.
e.g.
- inode = iget_locked(sb, ino);
- if (inode->i_state & I_NEW) {
- read_inode_from_disk(inode);
- unlock_new_inode(inode);
- }
+ inode = iget_locked(sb, ino);
+ if (inode->i_state & I_NEW) {
+ err = read_inode_from_disk(inode);
+ if (err < 0) {
+ iget_failed(inode);
+ return err;
+ }
+ unlock_new_inode(inode);
+ }
+
+Note that if the process of setting up a new inode fails, then iget_failed()
+should be called on the inode to render it dead, and an appropriate error
+should be passed back to the caller.
---
[recommended]
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 0b1b0c008613..5681e2fa1496 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1029,6 +1029,14 @@ nr_inodes
Denotes the number of inodes the system has allocated. This number will
grow and shrink dynamically.
+nr_open
+-------
+
+Denotes the maximum number of file-handles a process can
+allocate. Default value is 1024*1024 (1048576) which should be
+enough for most machines. Actual limit depends on RLIMIT_NOFILE
+resource limit.
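+
+As an illustration, a process can compare its own RLIMIT_NOFILE setting
+against this ceiling (a sketch only):
+
+	#include <stdio.h>
+	#include <sys/resource.h>
+
+	int main(void)
+	{
+		struct rlimit rl;
+
+		if (getrlimit(RLIMIT_NOFILE, &rl))
+			return 1;
+		/* the hard limit may be raised with setrlimit(), but not
+		   beyond the nr_open ceiling */
+		printf("soft fd limit: %llu, hard fd limit: %llu\n",
+		       (unsigned long long)rl.rlim_cur,
+		       (unsigned long long)rl.rlim_max);
+		return 0;
+	}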
+
nr_free_inodes
--------------
@@ -1315,13 +1323,28 @@ for writeout by the pdflush daemons. It is expressed in 100'ths of a second.
Data which has been dirty in-memory for longer than this interval will be
written out next time a pdflush daemon wakes up.
+highmem_is_dirtyable
+--------------------
+
+Only present if CONFIG_HIGHMEM is set.
+
+This defaults to 0 (false), meaning that the ratios set above are calculated
+as a percentage of lowmem only. This protects against excessive scanning
+in page reclaim, swapping and general VM distress.
+
+Setting this to 1 can be useful on 32 bit machines where you want to make
+random changes within an MMAPed file that is larger than your available
+lowmem without causing large quantities of random IO. It is safe if the
+behavior of all programs running on the machine is known and memory will
+not be otherwise stressed.
+
legacy_va_layout
----------------
If non-zero, this sysctl disables the new 32-bit mmap mmap layout - the kernel
will use the legacy (2.4) layout for all processes.
-lower_zone_protection
+lowmem_reserve_ratio
---------------------
For some specialised workloads on highmem machines it is dangerous for
@@ -1341,25 +1364,71 @@ captured into pinned user memory.
mechanism will also defend that region from allocations which could use
highmem or lowmem).
-The `lower_zone_protection' tunable determines how aggressive the kernel is
-in defending these lower zones. The default value is zero - no
-protection at all.
+The `lowmem_reserve_ratio' tunable determines how aggressive the kernel is
+in defending these lower zones.
If you have a machine which uses highmem or ISA DMA and your
applications are using mlock(), or if you are running with no swap then
-you probably should increase the lower_zone_protection setting.
-
-The units of this tunable are fairly vague. It is approximately equal
-to "megabytes," so setting lower_zone_protection=100 will protect around 100
-megabytes of the lowmem zone from user allocations. It will also make
-those 100 megabytes unavailable for use by applications and by
-pagecache, so there is a cost.
-
-The effects of this tunable may be observed by monitoring
-/proc/meminfo:LowFree. Write a single huge file and observe the point
-at which LowFree ceases to fall.
-
-A reasonable value for lower_zone_protection is 100.
+you probably should change the lowmem_reserve_ratio setting.
+
+The lowmem_reserve_ratio is an array. You can see it by reading this file:
+-
+% cat /proc/sys/vm/lowmem_reserve_ratio
+256 256 32
+-
+Note: the number of elements is one fewer than the number of zones, because
+ the highest zone's value is not needed for the following calculation.
+
+These values are not used directly. The kernel calculates the number of
+protection pages for each zone from them. These are shown as an array of
+protection pages in /proc/zoneinfo, as in the following example from an
+x86-64 box. Each zone has an array of protection pages like this:
+
+-
+Node 0, zone DMA
+ pages free 1355
+ min 3
+ low 3
+ high 4
+ :
+ :
+ numa_other 0
+ protection: (0, 2004, 2004, 2004)
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ pagesets
+ cpu: 0 pcp: 0
+ :
+-
+These protection values are added to the zone's watermark when judging whether
+the zone should be used for page allocation or should be reclaimed.
+
+In this example, if normal pages (index=2) are requested from this DMA zone
+and pages_high is used as the watermark, the kernel judges that this zone
+should not be used because pages_free (1355) is smaller than watermark +
+protection[2] (4 + 2004 = 2008). If this protection value were 0, this zone
+would be used to satisfy a normal page request. If the request is for the
+DMA zone (index=0), protection[0] (=0) is used.
+
+zone[i]'s protection[j] is calculated by the following expression:
+
+(i < j):
+  zone[i]->protection[j]
+    = (total sum of present_pages from zone[i+1] to zone[j] on the node)
+      / lowmem_reserve_ratio[i];
+(i = j):
+  (the zone need not be protected from itself, so protection[j] = 0)
+(i > j):
+  (not necessary, but reads as 0)
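+
+As a plain-arithmetic illustration (not kernel code), the protection value
+from the zoneinfo example above can be recomputed, assuming the higher zones
+together hold 513024 present pages:
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned long present_above = 513024;	/* zones i+1 .. j */
+		unsigned long ratio = 256;		/* lowmem_reserve_ratio[i] */
+
+		/* 513024 / 256 = 2004, matching protection[2] shown above */
+		printf("protection = %lu pages\n", present_above / ratio);
+		return 0;
+	}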
+
+The default values of lowmem_reserve_ratio[i] are
+ 256 (if zone[i] means DMA or DMA32 zone)
+ 32 (others).
+As in the expression above, these values are the reciprocal of a ratio:
+256 means 1/256, so the number of protection pages becomes about 0.39% of the
+total present pages of the higher zones on the node.
+
+If you would like to protect more pages, smaller values are effective.
+The minimum value is 1 (1/1 -> 100%).
page-cluster
------------
diff --git a/Documentation/sharedsubtree.txt b/Documentation/filesystems/sharedsubtree.txt
index 736540045dc7..736540045dc7 100644
--- a/Documentation/sharedsubtree.txt
+++ b/Documentation/filesystems/sharedsubtree.txt
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 9d019d35728f..bd55038b56f5 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -203,8 +203,6 @@ struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
- void (*read_inode) (struct inode *);
-
void (*dirty_inode) (struct inode *);
int (*write_inode) (struct inode *, int);
void (*put_inode) (struct inode *);
@@ -242,15 +240,6 @@ or bottom half).
->alloc_inode was defined and simply undoes anything done by
->alloc_inode.
- read_inode: this method is called to read a specific inode from the
- mounted filesystem. The i_ino member in the struct inode is
- initialized by the VFS to indicate which inode to read. Other
- members are filled in by this method.
-
- You can set this to NULL and use iget5_locked() instead of iget()
- to read inodes. This is necessary for filesystems for which the
- inode number is not sufficient to identify an inode.
-
dirty_inode: this method is called by the VFS to mark an inode dirty.
write_inode: this method is called when the VFS needs to write an
@@ -308,9 +297,9 @@ or bottom half).
quota_write: called by the VFS to write to filesystem quota file.
-The read_inode() method is responsible for filling in the "i_op"
-field. This is a pointer to a "struct inode_operations" which
-describes the methods that can be performed on individual inodes.
+Whoever sets up the inode is responsible for filling in the "i_op" field. This
+is a pointer to a "struct inode_operations" which describes the methods that
+can be performed on individual inodes.
The Inode Object
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 6bc2ba215df9..8da724e2a0ff 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -32,7 +32,7 @@ The exact capabilities of GPIOs vary between systems. Common options:
- Input values are likewise readable (1, 0). Some chips support readback
of pins configured as "output", which is very useful in such "wire-OR"
cases (to support bidirectional signaling). GPIO controllers may have
- input de-glitch logic, sometimes with software controls.
+ input de-glitch/debounce logic, sometimes with software controls.
- Inputs can often be used as IRQ signals, often edge triggered but
sometimes level triggered. Such IRQs may be configurable as system
@@ -60,10 +60,13 @@ used on a board that's wired differently. Only least-common-denominator
functionality can be very portable. Other features are platform-specific,
and that can be critical for glue logic.
-Plus, this doesn't define an implementation framework, just an interface.
+Plus, this doesn't require any implementation framework, just an interface.
One platform might implement it as simple inline functions accessing chip
registers; another might implement it by delegating through abstractions
-used for several very different kinds of GPIO controller.
+used for several very different kinds of GPIO controller. (There is some
+optional code supporting such an implementation strategy, described later
+in this document, but drivers acting as clients to the GPIO interface must
+not care how it's implemented.)
That said, if the convention is supported on their platform, drivers should
use it when possible. Platforms should declare GENERIC_GPIO support in
@@ -121,6 +124,11 @@ before tasking is enabled, as part of early board setup.
For output GPIOs, the value provided becomes the initial output value.
This helps avoid signal glitching during system startup.
+For compatibility with legacy interfaces to GPIOs, setting the direction
+of a GPIO implicitly requests that GPIO (see below) if it has not been
+requested already. That compatibility may be removed in the future;
+explicitly requesting GPIOs is strongly preferred.
+
Setting the direction can fail if the GPIO number is invalid, or when
that particular GPIO can't be used in that mode. It's generally a bad
idea to rely on boot firmware to have set the direction correctly, since
@@ -133,6 +141,7 @@ Spinlock-Safe GPIO access
-------------------------
Most GPIO controllers can be accessed with memory read/write instructions.
That doesn't need to sleep, and can safely be done from inside IRQ handlers.
+(That includes hardirq contexts on RT kernels.)
Use these calls to access such GPIOs:
@@ -145,7 +154,7 @@ Use these calls to access such GPIOs:
The values are boolean, zero for low, nonzero for high. When reading the
value of an output pin, the value returned should be what's seen on the
pin ... that won't always match the specified output value, because of
-issues including wire-OR and output latencies.
+issues including open-drain signaling and output latencies.
The get/set calls have no error returns because "invalid GPIO" should have
been reported earlier from gpio_direction_*(). However, note that not all
@@ -170,7 +179,8 @@ get to the head of a queue to transmit a command and get its response.
This requires sleeping, which can't be done from inside IRQ handlers.
Platforms that support this type of GPIO distinguish them from other GPIOs
-by returning nonzero from this call:
+by returning nonzero from this call (which requires a valid GPIO number,
+either explicitly or implicitly requested):
int gpio_cansleep(unsigned gpio);
@@ -209,8 +219,11 @@ before tasking is enabled, as part of early board setup.
These calls serve two basic purposes. One is marking the signals which
are actually in use as GPIOs, for better diagnostics; systems may have
several hundred potential GPIOs, but often only a dozen are used on any
-given board. Another is to catch conflicts between drivers, reporting
-errors when drivers wrongly think they have exclusive use of that signal.
+given board. Another is to catch conflicts, identifying errors when
+(a) two or more drivers wrongly think they have exclusive use of that
+signal, or (b) something wrongly believes it's safe to remove drivers
+needed to manage a signal that's in active use. That is, requesting a
+GPIO can serve as a kind of lock.
These two calls are optional because not all current Linux platforms
offer such functionality in their GPIO support; a valid implementation
@@ -223,6 +236,9 @@ Note that requesting a GPIO does NOT cause it to be configured in any
way; it just marks that GPIO as in use. Separate code must handle any
pin setup (e.g. controlling which pin the GPIO uses, pullup/pulldown).
+Also note that it's your responsibility to have stopped using a GPIO
+before you free it.
+
GPIOs mapped to IRQs
--------------------
@@ -238,7 +254,7 @@ map between them using calls like:
Those return either the corresponding number in the other namespace, or
else a negative errno code if the mapping can't be done. (For example,
-some GPIOs can't used as IRQs.) It is an unchecked error to use a GPIO
+some GPIOs can't be used as IRQs.) It is an unchecked error to use a GPIO
number that wasn't set up as an input using gpio_direction_input(), or
to use an IRQ number that didn't originally come from gpio_to_irq().
@@ -299,17 +315,110 @@ Related to multiplexing is configuration and enabling of the pullups or
pulldowns integrated on some platforms. Not all platforms support them,
or support them in the same way; and any given board might use external
pullups (or pulldowns) so that the on-chip ones should not be used.
+(When a circuit needs 5 kOhm, on-chip 100 kOhm resistors won't do.)
There are other system-specific mechanisms that are not specified here,
like the aforementioned options for input de-glitching and wire-OR output.
Hardware may support reading or writing GPIOs in gangs, but that's usually
configuration dependent: for GPIOs sharing the same bank. (GPIOs are
commonly grouped in banks of 16 or 32, with a given SOC having several such
-banks.) Some systems can trigger IRQs from output GPIOs. Code relying on
-such mechanisms will necessarily be nonportable.
+banks.) Some systems can trigger IRQs from output GPIOs, or read values
+from pins not managed as GPIOs. Code relying on such mechanisms will
+necessarily be nonportable.
-Dynamic definition of GPIOs is not currently supported; for example, as
+Dynamic definition of GPIOs is not currently standard; for example, as
a side effect of configuring an add-on board with some GPIO expanders.
These calls are purely for kernel space, but a userspace API could be built
-on top of it.
+on top of them.
+
+
+GPIO implementor's framework (OPTIONAL)
+=======================================
+As noted earlier, there is an optional implementation framework making it
+easier for platforms to support different kinds of GPIO controller using
+the same programming interface.
+
+As a debugging aid, if debugfs is available a /sys/kernel/debug/gpio file
+will be found there. That will list all the controllers registered through
+this framework, and the state of the GPIOs currently in use.
+
+
+Controller Drivers: gpio_chip
+-----------------------------
+In this framework each GPIO controller is packaged as a "struct gpio_chip"
+with information common to each controller of that type:
+
+ - methods to establish GPIO direction
+ - methods used to access GPIO values
+ - flag saying whether calls to its methods may sleep
+ - optional debugfs dump method (showing extra state like pullup config)
+ - label for diagnostics
+
+There is also per-instance data, which may come from device.platform_data:
+the number of its first GPIO, and how many GPIOs it exposes.
+
+The code implementing a gpio_chip should support multiple instances of the
+controller, possibly using the driver model. That code will configure each
+gpio_chip and issue gpiochip_add(). Removing a GPIO controller should be
+rare; use gpiochip_remove() when it is unavoidable.
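+
+In outline, registering a bank of GPIOs looks something like the sketch
+below. The mychip_* functions are made-up stand-ins for real register
+accessors; the gpio_chip fields shown follow the description above (see
+<asm-generic/gpio.h> for the authoritative definition).
+
+	static int mychip_dir_in(struct gpio_chip *chip, unsigned offset)
+	{
+		return 0;	/* configure the pin as an input here */
+	}
+
+	static int mychip_get(struct gpio_chip *chip, unsigned offset)
+	{
+		return 0;	/* read and return the pin's value here */
+	}
+
+	static int mychip_dir_out(struct gpio_chip *chip, unsigned offset,
+				  int value)
+	{
+		return 0;	/* make the pin an output driving "value" */
+	}
+
+	static void mychip_set(struct gpio_chip *chip, unsigned offset,
+			       int value)
+	{
+		/* write the pin's value here */
+	}
+
+	static struct gpio_chip mychip = {
+		.label			= "mychip",
+		.direction_input	= mychip_dir_in,
+		.get			= mychip_get,
+		.direction_output	= mychip_dir_out,
+		.set			= mychip_set,
+		.base			= 32,	/* number of its first GPIO */
+		.ngpio			= 16,	/* how many GPIOs it exposes */
+	};
+
+	/* ... then, from the controller's setup code: */
+	int status = gpiochip_add(&mychip);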
+
+Most often a gpio_chip is part of an instance-specific structure with state
+not exposed by the GPIO interfaces, such as addressing, power management,
+and more. Chips such as codecs will have complex non-GPIO state.
+
+Any debugfs dump method should normally ignore signals which haven't been
+requested as GPIOs. They can use gpiochip_is_requested(), which returns
+either NULL or the label associated with that GPIO when it was requested.
+
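+As an illustration only, a driver for a hypothetical memory-mapped bank
+(the "mychip" names and register offsets below are invented, not a real
+device) might wrap its registers roughly like this:
+
+	#include <linux/kernel.h>
+	#include <linux/io.h>
+	#include <asm/gpio.h>
+
+	#define MYCHIP_DATA_IN	0x00	/* hypothetical register layout */
+	#define MYCHIP_DATA_OUT	0x04
+	#define MYCHIP_DIR	0x08	/* 1 = output */
+
+	struct mychip {
+		void __iomem		*regs;
+		struct gpio_chip	chip;
+	};
+
+	static int mychip_get(struct gpio_chip *chip, unsigned offset)
+	{
+		struct mychip *mc = container_of(chip, struct mychip, chip);
+
+		return (readl(mc->regs + MYCHIP_DATA_IN) >> offset) & 1;
+	}
+
+	static void mychip_set(struct gpio_chip *chip, unsigned offset, int value)
+	{
+		struct mychip *mc = container_of(chip, struct mychip, chip);
+		u32 reg = readl(mc->regs + MYCHIP_DATA_OUT);
+
+		if (value)
+			reg |= 1 << offset;
+		else
+			reg &= ~(1 << offset);
+		writel(reg, mc->regs + MYCHIP_DATA_OUT);
+	}
+
+	static int mychip_dir_in(struct gpio_chip *chip, unsigned offset)
+	{
+		struct mychip *mc = container_of(chip, struct mychip, chip);
+
+		writel(readl(mc->regs + MYCHIP_DIR) & ~(1 << offset),
+				mc->regs + MYCHIP_DIR);
+		return 0;
+	}
+
+	static int mychip_dir_out(struct gpio_chip *chip, unsigned offset,
+			int value)
+	{
+		struct mychip *mc = container_of(chip, struct mychip, chip);
+
+		mychip_set(chip, offset, value);
+		writel(readl(mc->regs + MYCHIP_DIR) | (1 << offset),
+				mc->regs + MYCHIP_DIR);
+		return 0;
+	}
+
+	/* called with mc->regs already ioremapped */
+	static int mychip_register(struct mychip *mc, int first_gpio)
+	{
+		mc->chip.label = "mychip";
+		mc->chip.direction_input = mychip_dir_in;
+		mc->chip.get = mychip_get;
+		mc->chip.direction_output = mychip_dir_out;
+		mc->chip.set = mychip_set;
+		mc->chip.base = first_gpio;	/* number of its first GPIO */
+		mc->chip.ngpio = 16;		/* how many GPIOs it exposes */
+		return gpiochip_add(&mc->chip);
+	}
+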
+
+Platform Support
+----------------
+To support this framework, a platform's Kconfig will "select HAVE_GPIO_LIB"
+and arrange that its <asm/gpio.h> includes <asm-generic/gpio.h> and defines
+three functions: gpio_get_value(), gpio_set_value(), and gpio_cansleep().
+They may also want to provide a custom value for ARCH_NR_GPIOS.
+
+Trivial implementations of those functions can directly use framework
+code, which always dispatches through the gpio_chip:
+
+ #define gpio_get_value __gpio_get_value
+ #define gpio_set_value __gpio_set_value
+ #define gpio_cansleep __gpio_cansleep
+
+Fancier implementations could instead define those as inline functions with
+logic optimizing access to specific SOC-based GPIOs. For example, if the
+referenced GPIO is the constant "12", getting or setting its value could
+cost as little as two or three instructions, never sleeping. When such an
+optimization is not possible those calls must delegate to the framework
+code, costing at least a few dozen instructions. For bitbanged I/O, such
+instruction savings can be significant.
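+
+As a sketch of that fast path (the MYSOC_* macros are hypothetical), such
+an inline might look like:
+
+	static inline int gpio_get_value(unsigned gpio)
+	{
+		if (__builtin_constant_p(gpio) && gpio < MYSOC_NR_BUILTIN_GPIO)
+			/* constant-folded register read, a few instructions */
+			return (__raw_readl(MYSOC_GPIO_IN_REG(gpio / 32))
+					>> (gpio % 32)) & 1;
+		else
+			/* off-chip expanders etc go through the framework */
+			return __gpio_get_value(gpio);
+	}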
+
+For SOCs, platform-specific code defines and registers gpio_chip instances
+for each bank of on-chip GPIOs. Those GPIOs should be numbered/labeled to
+match chip vendor documentation, and directly match board schematics. They
+may well start at zero and go up to a platform-specific limit. Such GPIOs
+are normally integrated into platform initialization to make them always be
+available, from arch_initcall() or earlier; they can often serve as IRQs.
+
+
+Board Support
+-------------
+For external GPIO controllers -- such as I2C or SPI expanders, ASICs, multi
+function devices, FPGAs or CPLDs -- most often board-specific code handles
+registering controller devices and ensures that their drivers know what GPIO
+numbers to use with gpiochip_add(). Their numbers often start right after
+platform-specific GPIOs.
+
+For example, board setup code could create structures identifying the range
+of GPIOs that the chip will expose, and pass them to each GPIO expander chip
+using platform_data.  Then the chip driver's probe() routine could pass that
+data to gpiochip_add(), as sketched below.
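+
+A sketch of the driver-side half of that handoff, assuming a hypothetical
+"myexpander" driver and platform_data layout (neither name comes from an
+actual driver):
+
+	/* board code put the first GPIO number in pdata->gpio_base */
+	static int myexpander_setup(struct myexpander *dev,
+			struct myexpander_platform_data *pdata)
+	{
+		dev->chip.label = "myexpander";
+		dev->chip.base = pdata->gpio_base;
+		dev->chip.ngpio = 16;
+		dev->chip.can_sleep = 1;	/* I2C/SPI accesses sleep */
+		/* the direction/get/set methods are filled in elsewhere */
+		return gpiochip_add(&dev->chip);
+	}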
+
+Initialization order can be important. For example, when a device relies on
+an I2C-based GPIO, its probe() routine should only be called after that GPIO
+becomes available. That may mean the device should not be registered until
+calls for that GPIO can work. One way to address such dependencies is for
+such gpio_chip controllers to provide setup() and teardown() callbacks to
+board-specific code; those board-specific callbacks would register devices
+once all the necessary resources are available.
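+
+A hypothetical platform_data layout carrying such callbacks, with a board
+callback that registers a device only once the expander's GPIOs work
+("board_backlight_device" is just a placeholder):
+
+	struct myexpander_platform_data {
+		int	gpio_base;
+		int	(*setup)(unsigned gpio, unsigned ngpio, void *context);
+		int	(*teardown)(unsigned gpio, unsigned ngpio, void *context);
+		void	*context;
+	};
+
+	static int board_expander_setup(unsigned gpio, unsigned ngpio,
+			void *context)
+	{
+		/* the expander's GPIOs are usable now */
+		return platform_device_register(&board_backlight_device);
+	}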
diff --git a/Documentation/i2c/chips/pca9539 b/Documentation/i2c/chips/pca9539
index c4fce6a13537..1d81c530c4a5 100644
--- a/Documentation/i2c/chips/pca9539
+++ b/Documentation/i2c/chips/pca9539
@@ -1,6 +1,9 @@
Kernel driver pca9539
=====================
+NOTE: this driver is deprecated and will be dropped soon, use
+drivers/gpio/pca9539.c instead.
+
Supported chips:
* Philips PCA9539
Prefix: 'pca9539'
diff --git a/Documentation/ia64/aliasing-test.c b/Documentation/ia64/aliasing-test.c
index 773a814d4093..d23610fb2ff9 100644
--- a/Documentation/ia64/aliasing-test.c
+++ b/Documentation/ia64/aliasing-test.c
@@ -16,6 +16,7 @@
#include <fcntl.h>
#include <fnmatch.h>
#include <string.h>
+#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -65,7 +66,7 @@ int scan_tree(char *path, char *file, off_t offset, size_t length, int touch)
{
struct dirent **namelist;
char *name, *path2;
- int i, n, r, rc, result = 0;
+ int i, n, r, rc = 0, result = 0;
struct stat buf;
n = scandir(path, &namelist, 0, alphasort);
@@ -113,7 +114,7 @@ skip:
free(namelist[i]);
}
free(namelist);
- return rc;
+ return result;
}
char buf[1024];
@@ -149,7 +150,7 @@ int scan_rom(char *path, char *file)
{
struct dirent **namelist;
char *name, *path2;
- int i, n, r, rc, result = 0;
+ int i, n, r, rc = 0, result = 0;
struct stat buf;
n = scandir(path, &namelist, 0, alphasort);
@@ -180,7 +181,7 @@ int scan_rom(char *path, char *file)
* important thing is that no MCA happened.
*/
if (rc > 0)
- fprintf(stderr, "PASS: %s read %ld bytes\n", path2, rc);
+ fprintf(stderr, "PASS: %s read %d bytes\n", path2, rc);
else {
fprintf(stderr, "PASS: %s not readable\n", path2);
return rc;
@@ -201,10 +202,10 @@ skip:
free(namelist[i]);
}
free(namelist);
- return rc;
+ return result;
}
-int main()
+int main(void)
{
int rc;
@@ -256,4 +257,6 @@ int main()
scan_tree("/proc/bus/pci", "??.?", 0xA0000, 0x20000, 0);
scan_tree("/proc/bus/pci", "??.?", 0xC0000, 0x40000, 1);
scan_tree("/proc/bus/pci", "??.?", 0, 1024*1024, 0);
+
+ return rc;
}
diff --git a/Documentation/input/input-programming.txt b/Documentation/input/input-programming.txt
index 47fc86830cd7..81905e81585e 100644
--- a/Documentation/input/input-programming.txt
+++ b/Documentation/input/input-programming.txt
@@ -22,7 +22,7 @@ static struct input_dev *button_dev;
static void button_interrupt(int irq, void *dummy, struct pt_regs *fp)
{
- input_report_key(button_dev, BTN_1, inb(BUTTON_PORT) & 1);
+ input_report_key(button_dev, BTN_0, inb(BUTTON_PORT) & 1);
input_sync(button_dev);
}
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9ad4e6fc56fd..0dcbd266b442 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -147,8 +147,10 @@ and is between 256 and 4096 characters. It is defined in the file
default: 0
acpi_sleep= [HW,ACPI] Sleep options
- Format: { s3_bios, s3_mode }
- See Documentation/power/video.txt
+ Format: { s3_bios, s3_mode, s3_beep }
+ See Documentation/power/video.txt for s3_bios and s3_mode.
+ s3_beep is for debugging; it makes the PC's speaker beep
+ as soon as the kernel's real-mode entry point is called.
acpi_sci= [HW,ACPI] ACPI System Control Interrupt trigger mode
Format: { level | edge | high | low }
@@ -175,6 +177,9 @@ and is between 256 and 4096 characters. It is defined in the file
acpi_no_auto_ssdt [HW,ACPI] Disable automatic loading of SSDT
+ acpi_no_initrd_override [KNL,ACPI]
+ Disable loading custom ACPI tables from the initramfs
+
acpi_os_name= [HW,ACPI] Tell ACPI BIOS the name of the OS
Format: To spoof as Windows 98: ="Microsoft Windows"
@@ -780,6 +785,9 @@ and is between 256 and 4096 characters. It is defined in the file
loop use the MONITOR/MWAIT idle loop anyways. Performance should be the same
as idle=poll.
+ ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem
+ Claim all unknown PCI IDE storage controllers.
+
ignore_loglevel [KNL]
Ignore loglevel setting - this will print /all/
kernel messages to the console. Useful for debugging.
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 53a63890aea4..30c101761d0d 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -96,7 +96,9 @@ or in registers (e.g., for x86_64 or for an i386 fastcall function).
The jprobe will work in either case, so long as the handler's
prototype matches that of the probed function.
-1.3 How Does a Return Probe Work?
+1.3 Return Probes
+
+1.3.1 How Does a Return Probe Work?
When you call register_kretprobe(), Kprobes establishes a kprobe at
the entry to the function. When the probed function is called and this
@@ -107,9 +109,9 @@ At boot time, Kprobes registers a kprobe at the trampoline.
When the probed function executes its return instruction, control
passes to the trampoline and that probe is hit. Kprobes' trampoline
-handler calls the user-specified handler associated with the kretprobe,
-then sets the saved instruction pointer to the saved return address,
-and that's where execution resumes upon return from the trap.
+handler calls the user-specified return handler associated with the
+kretprobe, then sets the saved instruction pointer to the saved return
+address, and that's where execution resumes upon return from the trap.
While the probed function is executing, its return address is
stored in an object of type kretprobe_instance. Before calling
@@ -131,6 +133,30 @@ zero when the return probe is registered, and is incremented every
time the probed function is entered but there is no kretprobe_instance
object available for establishing the return probe.
+1.3.2 Kretprobe entry-handler
+
+Kretprobes also provides an optional user-specified handler which runs
+on function entry. This handler is specified by setting the entry_handler
+field of the kretprobe struct. Whenever the kprobe placed by kretprobe at the
+function entry is hit, the user-defined entry_handler, if any, is invoked.
+If the entry_handler returns 0 (success) then a corresponding return handler
+is guaranteed to be called upon function return. If the entry_handler
+returns a non-zero error then Kprobes leaves the return address as is, and
+the kretprobe has no further effect for that particular function instance.
+
+Multiple entry and return handler invocations are matched using the unique
+kretprobe_instance object associated with them. Additionally, a user
+may also specify per return-instance private data to be part of each
+kretprobe_instance object. This is especially useful when sharing private
+data between corresponding user entry and return handlers. The size of each
+private data object can be specified at kretprobe registration time by
+setting the data_size field of the kretprobe struct. This data can be
+accessed through the data field of each kretprobe_instance object.
+
+If the probed function is entered but no kretprobe_instance object is
+available, then in addition to incrementing the nmissed count, the
+user entry_handler invocation is also skipped.
+
2. Architectures Supported
Kprobes, jprobes, and return probes are implemented on the following
@@ -274,6 +300,8 @@ of interest:
- ret_addr: the return address
- rp: points to the corresponding kretprobe object
- task: points to the corresponding task struct
+- data: points to per return-instance private data; see "Kretprobe
+ entry-handler" for details.
The regs_return_value(regs) macro provides a simple abstraction to
extract the return value from the appropriate register as defined by
@@ -556,23 +584,52 @@ report failed calls to sys_open().
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>
+#include <linux/ktime.h>
+
+/* per-instance private data */
+struct my_data {
+ ktime_t entry_stamp;
+};
static const char *probed_func = "sys_open";
-/* Return-probe handler: If the probed function fails, log the return value. */
-static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+/* Timestamp function entry. */
+static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ struct my_data *data;
+
+ if(!current->mm)
+ return 1; /* skip kernel threads */
+
+ data = (struct my_data *)ri->data;
+ data->entry_stamp = ktime_get();
+ return 0;
+}
+
+/* If the probed function failed, log the return value and duration.
+ * Duration may turn out to be zero consistently, depending upon the
+ * granularity of time accounting on the platform. */
+static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
int retval = regs_return_value(regs);
+ struct my_data *data = (struct my_data *)ri->data;
+ s64 delta;
+ ktime_t now;
+
if (retval < 0) {
- printk("%s returns %d\n", probed_func, retval);
+ now = ktime_get();
+ delta = ktime_to_ns(ktime_sub(now, data->entry_stamp));
+ printk("%s: return val = %d (duration = %lld ns)\n",
+ probed_func, retval, delta);
}
return 0;
}
static struct kretprobe my_kretprobe = {
- .handler = ret_handler,
- /* Probe up to 20 instances concurrently. */
- .maxactive = 20
+ .handler = return_handler,
+ .entry_handler = entry_handler,
+ .data_size = sizeof(struct my_data),
+ .maxactive = 20, /* probe up to 20 instances concurrently */
};
static int __init kretprobe_init(void)
@@ -584,7 +641,7 @@ static int __init kretprobe_init(void)
printk("register_kretprobe failed, returned %d\n", ret);
return -1;
}
- printk("Planted return probe at %p\n", my_kretprobe.kp.addr);
+ printk("Kretprobe active on %s\n", my_kretprobe.kp.symbol_name);
return 0;
}
@@ -594,7 +651,7 @@ static void __exit kretprobe_exit(void)
printk("kretprobe unregistered\n");
/* nmissed > 0 suggests that maxactive was set too low. */
printk("Missed probing %d instances of %s\n",
- my_kretprobe.nmissed, probed_func);
+ my_kretprobe.nmissed, probed_func);
}
module_init(kretprobe_init)
diff --git a/Documentation/kref.txt b/Documentation/kref.txt
index f38b59d00c63..130b6e87aa7e 100644
--- a/Documentation/kref.txt
+++ b/Documentation/kref.txt
@@ -141,10 +141,10 @@ The last rule (rule 3) is the nastiest one to handle. Say, for
instance, you have a list of items that are each kref-ed, and you wish
to get the first one. You can't just pull the first item off the list
and kref_get() it. That violates rule 3 because you are not already
-holding a valid pointer. You must add locks or semaphores. For
-instance:
+holding a valid pointer. You must add a mutex (or some other lock).
+For instance:
-static DECLARE_MUTEX(sem);
+static DEFINE_MUTEX(mutex);
static LIST_HEAD(q);
struct my_data
{
@@ -155,12 +155,12 @@ struct my_data
static struct my_data *get_entry()
{
struct my_data *entry = NULL;
- down(&sem);
+ mutex_lock(&mutex);
if (!list_empty(&q)) {
entry = container_of(q.next, struct my_q_entry, link);
kref_get(&entry->refcount);
}
- up(&sem);
+ mutex_unlock(&mutex);
return entry;
}
@@ -174,9 +174,9 @@ static void release_entry(struct kref *ref)
static void put_entry(struct my_data *entry)
{
- down(&sem);
+ mutex_lock(&mutex);
kref_put(&entry->refcount, release_entry);
- up(&sem);
+ mutex_unlock(&mutex);
}
The kref_put() return value is useful if you do not want to hold the
@@ -191,13 +191,13 @@ static void release_entry(struct kref *ref)
static void put_entry(struct my_data *entry)
{
- down(&sem);
+ mutex_lock(&mutex);
if (kref_put(&entry->refcount, release_entry)) {
list_del(&entry->link);
- up(&sem);
+ mutex_unlock(&mutex);
kfree(entry);
} else
- up(&sem);
+ mutex_unlock(&mutex);
}
This is really more useful if you have to call other routines as part
diff --git a/Documentation/leds-class.txt b/Documentation/leds-class.txt
index 8c35c0426110..56757c751d6f 100644
--- a/Documentation/leds-class.txt
+++ b/Documentation/leds-class.txt
@@ -39,12 +39,33 @@ LED Device Naming
Is currently of the form:
-"devicename:colour"
+"devicename:colour:function"
There have been calls for LED properties such as colour to be exported as
individual led class attributes. As a solution which doesn't incur as much
overhead, I suggest these become part of the device name. The naming scheme
-above leaves scope for further attributes should they be needed.
+above leaves scope for further attributes should they be needed. If sections
+of the name don't apply, just leave that section blank.
+
+
+Hardware accelerated blink of LEDs
+==================================
+
+Some LEDs can be programmed to blink without any CPU interaction. To
+support this feature, a LED driver can optionally implement the
+blink_set() function (see <linux/leds.h>). If implemented, triggers can
+attempt to use it before falling back to software timers. The blink_set()
+function should return 0 if the blink setting is supported, or -EINVAL
+otherwise, which means that LED blinking will be handled by software.
+
+The blink_set() function should choose a user-friendly blinking value
+if it is called with *delay_on==0 && *delay_off==0 parameters.  In this
+case the driver should report the chosen value back to the LED subsystem
+through the delay_on and delay_off parameters.
+
+Any call to the brightness_set() callback function should cancel any
+previously programmed hardware blinking, so that setting the brightness
+to 0 can also be used to cancel blinking of the LED.
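+
+As a sketch, a hypothetical "myled" driver whose hardware can only blink
+with equal on and off times (the myled structure and its hardware helper
+are invented) might implement the callback as:
+
+	static int myled_blink_set(struct led_classdev *led_cdev,
+			unsigned long *delay_on, unsigned long *delay_off)
+	{
+		struct myled *led = container_of(led_cdev, struct myled, cdev);
+
+		if (*delay_on == 0 && *delay_off == 0) {
+			/* pick a sane default and report it back */
+			*delay_on = 500;
+			*delay_off = 500;
+		}
+
+		if (*delay_on != *delay_off)
+			return -EINVAL;	/* let software timers handle it */
+
+		myled_hw_program_blink(led, *delay_on);
+		return 0;
+	}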
Known Issues
@@ -55,10 +76,6 @@ would cause nightmare dependency issues. I see this as a minor issue
compared to the benefits the simple trigger functionality brings. The
rest of the LED subsystem can be modular.
-Some leds can be programmed to flash in hardware. As this isn't a generic
-LED device property, this should be exported as a device specific sysfs
-attribute rather than part of the class if this functionality is required.
-
Future Development
==================
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 5818628207b5..396cdd982c26 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -416,6 +416,16 @@ also have
sectors in total that could need to be processed. The two
numbers are separated by a '/' thus effectively showing one
value, a fraction of the process that is complete.
+ A 'select' on this attribute will return when resync completes,
+ when it reaches the current sync_max (below) and possibly at
+ other times.
+
+ sync_max
+ This is a number of sectors at which point a resync/recovery
+ process will pause. When a resync is active, the value can
+ only ever be increased, never decreased. The value of 'max'
+ effectively disables the limit.
+
sync_speed
This shows the current actual speed, in K/sec, of the current
diff --git a/Documentation/pcmcia/driver-changes.txt b/Documentation/pcmcia/driver-changes.txt
index 4739c5c3face..96f155e68750 100644
--- a/Documentation/pcmcia/driver-changes.txt
+++ b/Documentation/pcmcia/driver-changes.txt
@@ -33,8 +33,8 @@ This file details changes in 2.6 which affect PCMCIA card driver authors:
and can be used (e.g. for SET_NETDEV_DEV) by using
handle_to_dev(client_handle_t * handle).
-* Convert internal I/O port addresses to unsigned long (as of 2.6.11)
- ioaddr_t should be replaced by kio_addr_t in PCMCIA card drivers.
+* Convert internal I/O port addresses to unsigned int (as of 2.6.11)
+ ioaddr_t should be replaced by unsigned int in PCMCIA card drivers.
* irq_mask and irq_list parameters (as of 2.6.11)
The irq_mask and irq_list parameters should no longer be used in
diff --git a/Documentation/pm_qos_interface.txt b/Documentation/pm_qos_interface.txt
new file mode 100644
index 000000000000..49adb1a33514
--- /dev/null
+++ b/Documentation/pm_qos_interface.txt
@@ -0,0 +1,59 @@
+PM Quality of Service interface.
+
+This provides a kernel and user mode interface through which drivers,
+subsystems and user space applications can register performance
+expectations on one of the PM QoS parameters.
+
+Currently we have {cpu_dma_latency, network_latency, network_throughput} as the
+initial set of pm_qos parameters.
+
+The infrastructure exposes multiple misc device nodes, one per implemented
+parameter.  The set of parameters implemented is defined by pm_qos_power_init()
+and pm_qos_params.h.  This is done because making the available parameters
+runtime configurable or changeable from a driver was seen as too easy to
+abuse.
+
+For each parameter a list of performance requirements is maintained along with
+an aggregated target value. The aggregated target value is updated with
+changes to the requirement list or elements of the list. Typically the
+aggregated target value is simply the max or min of the requirement values held
+in the parameter list elements.
+
+From kernel mode the use of this interface is simple:
+pm_qos_add_requirement(param_id, name, target_value):
+Will insert a named element in the list for that identified PM_QOS parameter
+with the target value. Upon change to this list the new target is recomputed
+and any registered notifiers are called only if the target value is now
+different.
+
+pm_qos_update_requirement(param_id, name, new_target_value):
+Will search the list identified by the param_id for the named list element and
+then update its target value, calling the notification tree if the aggregated
+target is changed.  It assumes that a requirement with that name has already
+been registered.
+
+pm_qos_remove_requirement(param_id, name):
+Will search the identified list for the named element and remove it; after
+removal it will update the aggregate target and call the notification tree if
+the target was changed as a result of removing the named requirement.
+
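+As a sketch, a hypothetical driver that needs low DMA wakeup latency only
+while it is streaming might use the calls above like this (assuming the
+cpu_dma_latency parameter is identified by PM_QOS_CPU_DMA_LATENCY in
+pm_qos_params.h):
+
+	#include <linux/pm_qos_params.h>
+
+	static int mydev_start_streaming(struct mydev *dev)
+	{
+		/* ask for no more than 50 usec of wakeup latency */
+		pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "mydev", 50);
+		/* ... start the DMA engine ... */
+		return 0;
+	}
+
+	static void mydev_stop_streaming(struct mydev *dev)
+	{
+		/* ... stop the DMA engine ... */
+		pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mydev");
+	}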
+
+From user mode:
+Only processes can register a pm_qos requirement.  To provide for automatic
+cleanup when a process exits, the interface requires the process to register
+its parameter requirements in the following way:
+
+To register the default pm_qos target for the specific parameter, the process
+must open one of /dev/[cpu_dma_latency, network_latency, network_throughput]
+
+As long as the device node is held open that process has a registered
+requirement on the parameter. The name of the requirement is "process_<PID>"
+derived from the current->pid from within the open system call.
+
+To change the requested target value the process needs to write an s32 value to
+the open device node. This translates to a pm_qos_update_requirement call.
+
+To remove the user mode request for a target value simply close the device
+node.
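+
+A minimal user space sketch of the same sequence (error handling trimmed,
+helper name invented):
+
+	#include <fcntl.h>
+	#include <stdint.h>
+	#include <unistd.h>
+
+	/* returns an fd; keep it open to hold the requirement, close to drop */
+	int request_low_dma_latency(void)
+	{
+		int32_t target = 50;	/* usec, written as a raw s32 */
+		int fd = open("/dev/cpu_dma_latency", O_RDWR);
+
+		if (fd >= 0 && write(fd, &target, sizeof(target)) != sizeof(target)) {
+			close(fd);
+			fd = -1;
+		}
+		return fd;
+	}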
+
+
+
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index aea7e9209667..9d60ab717a7b 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -386,6 +386,11 @@ before suspending; then remount them after resuming.
There is a work-around for this problem. For more information, see
Documentation/usb/persist.txt.
+Q: Can I suspend-to-disk using a swap partition under LVM?
+
+A: No. You can suspend successfully, but you'll not be able to
+resume. uswsusp should be able to work with LVM. See suspend.sf.net.
+
Q: I upgraded the kernel from 2.6.15 to 2.6.16. Both kernels were
compiled with the similar configuration files. Anyway I found that
suspend to disk (and resume) is much slower on 2.6.16 compared to
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index b5e46efeba84..7b4e8a70882c 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -57,6 +57,7 @@ Table of Contents
n) 4xx/Axon EMAC ethernet nodes
o) Xilinx IP cores
p) Freescale Synchronous Serial Interface
+ q) USB EHCI controllers
VII - Specifying interrupt information for devices
1) interrupts property
@@ -2577,6 +2578,20 @@ platforms are moved over to use the flattened-device-tree model.
Requred properties:
- current-speed : Baud rate of uartlite
+ v) Xilinx hwicap
+
+ Xilinx hwicap devices provide access to the configuration logic
+ of the FPGA through the Internal Configuration Access Port
+ (ICAP). The ICAP enables partial reconfiguration of the FPGA,
+ readback of the configuration information, and some control over
+ 'warm boots' of the FPGA fabric.
+
+ Required properties:
+ - xlnx,family : The family of the FPGA, necessary since the
+ capabilities of the underlying ICAP hardware
+ differ between different families. May be
+ 'virtex2p', 'virtex4', or 'virtex5'.
+
p) Freescale Synchronous Serial Interface
The SSI is a serial device that communicates with audio codecs. It can
@@ -2775,6 +2790,33 @@ platforms are moved over to use the flattened-device-tree model.
interrupt-parent = < &ipic >;
};
+ q) USB EHCI controllers
+
+ Required properties:
+ - compatible : should be "usb-ehci".
+ - reg : should contain at least address and length of the standard EHCI
+ register set for the device. Optional platform-dependent registers
+ (debug-port or other) can be also specified here, but only after
+ definition of standard EHCI registers.
+ - interrupts : one EHCI interrupt should be described here.
+ If device registers are implemented in big endian mode, the device
+ node should have "big-endian-regs" property.
+ If controller implementation operates with big endian descriptors,
+ "big-endian-desc" property should be specified.
+ If both big endian registers and descriptors are used by the controller
+ implementation, "big-endian" property can be specified instead of having
+ both "big-endian-regs" and "big-endian-desc".
+
+ Example (Sequoia 440EPx):
+ ehci@e0000300 {
+ compatible = "ibm,usb-ehci-440epx", "usb-ehci";
+ interrupt-parent = <&UIC0>;
+ interrupts = <1a 4>;
+ reg = <0 e0000300 90 0 e0000390 70>;
+ big-endian;
+ };
+
+
More devices will be defined as this spec matures.
VII - Specifying interrupt information for devices
diff --git a/Documentation/rtc.txt b/Documentation/rtc.txt
index e20b19c1b60d..8deffcd68cb8 100644
--- a/Documentation/rtc.txt
+++ b/Documentation/rtc.txt
@@ -182,8 +182,8 @@ driver returns ENOIOCTLCMD. Some common examples:
since the frequency is stored in the irq_freq member of the rtc_device
structure. Your driver needs to initialize the irq_freq member during
init. Make sure you check the requested frequency is in range of your
- hardware in the irq_set_freq function. If you cannot actually change
- the frequency, just return -ENOTTY.
+ hardware in the irq_set_freq function. If it isn't, return -EINVAL. If
+ you cannot actually change the frequency, do not define irq_set_freq.
If all else fails, check out the rtc-test.c driver!
@@ -268,8 +268,8 @@ int main(int argc, char **argv)
/* This read will block */
retval = read(fd, &data, sizeof(unsigned long));
if (retval == -1) {
- perror("read");
- exit(errno);
+ perror("read");
+ exit(errno);
}
fprintf(stderr, " %d",i);
fflush(stderr);
@@ -326,11 +326,11 @@ test_READ:
rtc_tm.tm_sec %= 60;
rtc_tm.tm_min++;
}
- if (rtc_tm.tm_min == 60) {
+ if (rtc_tm.tm_min == 60) {
rtc_tm.tm_min = 0;
rtc_tm.tm_hour++;
}
- if (rtc_tm.tm_hour == 24)
+ if (rtc_tm.tm_hour == 24)
rtc_tm.tm_hour = 0;
retval = ioctl(fd, RTC_ALM_SET, &rtc_tm);
@@ -407,8 +407,8 @@ test_PIE:
"\n...Periodic IRQ rate is fixed\n");
goto done;
}
- perror("RTC_IRQP_SET ioctl");
- exit(errno);
+ perror("RTC_IRQP_SET ioctl");
+ exit(errno);
}
fprintf(stderr, "\n%ldHz:\t", tmp);
@@ -417,27 +417,27 @@ test_PIE:
/* Enable periodic interrupts */
retval = ioctl(fd, RTC_PIE_ON, 0);
if (retval == -1) {
- perror("RTC_PIE_ON ioctl");
- exit(errno);
+ perror("RTC_PIE_ON ioctl");
+ exit(errno);
}
for (i=1; i<21; i++) {
- /* This blocks */
- retval = read(fd, &data, sizeof(unsigned long));
- if (retval == -1) {
- perror("read");
- exit(errno);
- }
- fprintf(stderr, " %d",i);
- fflush(stderr);
- irqcount++;
+ /* This blocks */
+ retval = read(fd, &data, sizeof(unsigned long));
+ if (retval == -1) {
+ perror("read");
+ exit(errno);
+ }
+ fprintf(stderr, " %d",i);
+ fflush(stderr);
+ irqcount++;
}
/* Disable periodic interrupts */
retval = ioctl(fd, RTC_PIE_OFF, 0);
if (retval == -1) {
- perror("RTC_PIE_OFF ioctl");
- exit(errno);
+ perror("RTC_PIE_OFF ioctl");
+ exit(errno);
}
}
diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX
new file mode 100644
index 000000000000..b5f5ca069b2d
--- /dev/null
+++ b/Documentation/scheduler/00-INDEX
@@ -0,0 +1,16 @@
+00-INDEX
+ - this file.
+sched-arch.txt
+ - CPU Scheduler implementation hints for architecture specific code.
+sched-coding.txt
+ - reference for various scheduler-related methods in the O(1) scheduler.
+sched-design.txt
+ - goals, design and implementation of the Linux O(1) scheduler.
+sched-design-CFS.txt
+ - goals, design and implementation of the Complete Fair Scheduler.
+sched-domains.txt
+ - information on scheduling domains.
+sched-nice-design.txt
+ - How and why the scheduler's nice levels are implemented.
+sched-stats.txt
+ - information on schedstats (Linux Scheduler Statistics).
diff --git a/Documentation/sched-arch.txt b/Documentation/scheduler/sched-arch.txt
index 941615a9769b..941615a9769b 100644
--- a/Documentation/sched-arch.txt
+++ b/Documentation/scheduler/sched-arch.txt
diff --git a/Documentation/sched-coding.txt b/Documentation/scheduler/sched-coding.txt
index cbd8db752acf..cbd8db752acf 100644
--- a/Documentation/sched-coding.txt
+++ b/Documentation/scheduler/sched-coding.txt
diff --git a/Documentation/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 88bcb8767335..88bcb8767335 100644
--- a/Documentation/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
diff --git a/Documentation/sched-design.txt b/Documentation/scheduler/sched-design.txt
index 1605bf0cba8b..1605bf0cba8b 100644
--- a/Documentation/sched-design.txt
+++ b/Documentation/scheduler/sched-design.txt
diff --git a/Documentation/sched-domains.txt b/Documentation/scheduler/sched-domains.txt
index a9e990ab980f..a9e990ab980f 100644
--- a/Documentation/sched-domains.txt
+++ b/Documentation/scheduler/sched-domains.txt
diff --git a/Documentation/sched-nice-design.txt b/Documentation/scheduler/sched-nice-design.txt
index e2bae5a577e3..e2bae5a577e3 100644
--- a/Documentation/sched-nice-design.txt
+++ b/Documentation/scheduler/sched-nice-design.txt
diff --git a/Documentation/sched-stats.txt b/Documentation/scheduler/sched-stats.txt
index 442e14d35dea..442e14d35dea 100644
--- a/Documentation/sched-stats.txt
+++ b/Documentation/scheduler/sched-stats.txt
diff --git a/Documentation/scsi/ChangeLog.arcmsr b/Documentation/scsi/ChangeLog.arcmsr
index cd8403a33ee6..de2bcacfa870 100644
--- a/Documentation/scsi/ChangeLog.arcmsr
+++ b/Documentation/scsi/ChangeLog.arcmsr
@@ -68,4 +68,45 @@
** 2. modify the arcmsr_pci_slot_reset function
** 3. modify the arcmsr_pci_ers_disconnect_forepart function
** 4. modify the arcmsr_pci_ers_need_reset_forepart function
+** 1.20.00.15 09/27/2007 Erich Chen & Nick Cheng
+** 1. add arcmsr_enable_eoi_mode() on adapter Type B
+** 2. add readl(reg->iop2drv_doorbell_reg) in arcmsr_handle_hbb_isr()
+** in case of the doorbell interrupt clearance is cached
+** 1.20.00.15 10/01/2007 Erich Chen & Nick Cheng
+** 1. modify acb->devstate[i][j]
+** as ARECA_RAID_GOOD instead of
+** ARECA_RAID_GONE in arcmsr_alloc_ccb_pool
+** 1.20.00.15 11/06/2007 Erich Chen & Nick Cheng
+** 1. add conditional declaration for
+** arcmsr_pci_error_detected() and
+** arcmsr_pci_slot_reset
+** 1.20.00.15 11/23/2007 Erich Chen & Nick Cheng
+** 1.check if the sg list member number
+** exceeds arcmsr default limit in arcmsr_build_ccb()
+** 2.change the returned value type of arcmsr_build_ccb()
+** from "void" to "int"
+** 3.add the conditional check if arcmsr_build_ccb()
+** returns FAILED
+** 1.20.00.15 12/04/2007 Erich Chen & Nick Cheng
+** 1. modify arcmsr_drain_donequeue() to ignore unknown
+** command and let kernel process command timeout.
+** This could handle IO request violating max. segments
+** while Linux XFS over DM-CRYPT.
+** Thanks to Milan Broz's comments <mbroz@redhat.com>
+** 1.20.00.15 12/24/2007 Erich Chen & Nick Cheng
+** 1.fix the portability problems
+** 2.fix type B where we should _not_ iounmap() acb->pmu;
+** it's not ioremapped.
+** 3.add return -ENOMEM if ioremap() fails
+** 4.transfer IS_SG64_ADDR w/ cpu_to_le32()
+** in arcmsr_build_ccb
+** 5. modify acb->devstate[i][j] as ARECA_RAID_GONE instead of
+** ARECA_RAID_GOOD in arcmsr_alloc_ccb_pool()
+** 6.fix arcmsr_cdb->Context as (unsigned long)arcmsr_cdb
+** 7.add the checking state of
+** (outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT) == 0
+** in arcmsr_handle_hba_isr
+** 8.replace pci_alloc_consistent()/pci_free_consistent() with kmalloc()/kfree() in arcmsr_iop_message_xfer()
+** 9. fix the release of dma memory for type B in arcmsr_free_ccb_pool()
+** 10.fix the arcmsr_polling_hbb_ccbdone()
**************************************************************************
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index 6f70f2b9327e..a6d5354639b2 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -1407,7 +1407,7 @@ Credits
=======
The following people have contributed to this document:
Mike Anderson <andmike at us dot ibm dot com>
- James Bottomley <James dot Bottomley at steeleye dot com>
+ James Bottomley <James dot Bottomley at hansenpartnership dot com>
Patrick Mansfield <patmans at us dot ibm dot com>
Christoph Hellwig <hch at infradead dot org>
Doug Ledford <dledford at redhat dot com>
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index aa986a35e994..f99254327ae5 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -23,6 +23,7 @@ Currently, these files are in /proc/sys/fs:
- inode-max
- inode-nr
- inode-state
+- nr_open
- overflowuid
- overflowgid
- suid_dumpable
@@ -91,6 +92,15 @@ usage of file handles and you don't need to increase the maximum.
==============================================================
+nr_open:
+
+This denotes the maximum number of file-handles a process can
+allocate.  The default value is 1024*1024 (1048576), which should be
+enough for most machines.  The actual limit depends on the RLIMIT_NOFILE
+resource limit.
+
+==============================================================
+
inode-max, inode-nr & inode-state:
As with file handles, the kernel allocates the inode structures
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 6f31f0a247d0..8a4863c4edd4 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -22,6 +22,7 @@ Currently, these files are in /proc/sys/vm:
- dirty_background_ratio
- dirty_expire_centisecs
- dirty_writeback_centisecs
+- highmem_is_dirtyable (only if CONFIG_HIGHMEM set)
- max_map_count
- min_free_kbytes
- laptop_mode
@@ -31,6 +32,7 @@ Currently, these files are in /proc/sys/vm:
- min_unmapped_ratio
- min_slab_ratio
- panic_on_oom
+- oom_dump_tasks
- oom_kill_allocating_task
- mmap_min_address
- numa_zonelist_order
@@ -40,9 +42,9 @@ Currently, these files are in /proc/sys/vm:
==============================================================
dirty_ratio, dirty_background_ratio, dirty_expire_centisecs,
-dirty_writeback_centisecs, vfs_cache_pressure, laptop_mode,
-block_dump, swap_token_timeout, drop-caches,
-hugepages_treat_as_movable:
+dirty_writeback_centisecs, highmem_is_dirtyable,
+vfs_cache_pressure, laptop_mode, block_dump, swap_token_timeout,
+drop-caches, hugepages_treat_as_movable:
See Documentation/filesystems/proc.txt
@@ -231,6 +233,27 @@ according to your policy of failover.
=============================================================
+oom_dump_tasks
+
+Enables a system-wide task dump (excluding kernel threads) to be
+produced when the kernel performs an OOM-killing.  The dump includes
+such information as pid, uid, tgid, vm size, rss, cpu, oom_adj score,
+and name.  This is helpful to determine why the OOM killer was invoked
+and to identify the rogue task that caused it.
+
+If this is set to zero, this information is suppressed. On very
+large systems with thousands of tasks it may not be feasible to dump
+the memory state information for each one. Such systems should not
+be forced to incur a performance penalty in OOM conditions when the
+information may not be desired.
+
+If this is set to non-zero, this information is shown whenever the
+OOM killer actually kills a memory-hogging task.
+
+The default value is 0.
+
+=============================================================
+
oom_kill_allocating_task
This enables or disables killing the OOM-triggering task in
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
new file mode 100644
index 000000000000..5776e090359d
--- /dev/null
+++ b/Documentation/thermal/sysfs-api.txt
@@ -0,0 +1,246 @@
+Generic Thermal Sysfs driver How To
+===================================
+
+Written by Sujith Thomas <sujith.thomas@intel.com>, Zhang Rui <rui.zhang@intel.com>
+
+Updated: 2 January 2008
+
+Copyright (c) 2008 Intel Corporation
+
+
+0. Introduction
+
+The generic thermal sysfs provides a set of interfaces for thermal zone devices (sensors)
+and thermal cooling devices (fan, processor...) to register with the thermal management
+solution and to be a part of it.
+
+This how-to focuses on enabling new thermal zone and cooling devices to participate
+in thermal management.
+This solution is platform independent and any type of thermal zone devices and
+cooling devices should be able to make use of the infrastructure.
+
+The main task of the thermal sysfs driver is to expose thermal zone attributes as well
+as cooling device attributes to the user space.
+An intelligent thermal management application can make decisions based on inputs
+from thermal zone attributes (the current temperature and trip point temperature)
+and throttle appropriate devices.
+
+[0-*] denotes any positive number starting from 0
+[1-*] denotes any positive number starting from 1
+
+1. thermal sysfs driver interface functions
+
+1.1 thermal zone device interface
+1.1.1 struct thermal_zone_device *thermal_zone_device_register(char *name, int trips,
+ void *devdata, struct thermal_zone_device_ops *ops)
+
+ This interface function adds a new thermal zone device (sensor) to
+ /sys/class/thermal folder as thermal_zone[0-*].
+ It tries to bind all the thermal cooling devices registered at the same time.
+
+ name: the thermal zone name.
+ trips: the total number of trip points this thermal zone supports.
+ devdata: device private data
+ ops: thermal zone device callbacks.
+ .bind: bind the thermal zone device with a thermal cooling device.
+    .unbind: unbind the thermal zone device from a thermal cooling device.
+ .get_temp: get the current temperature of the thermal zone.
+ .get_mode: get the current mode (user/kernel) of the thermal zone.
+ "kernel" means thermal management is done in kernel.
+ "user" will prevent kernel thermal driver actions upon trip points
+ so that user applications can take charge of thermal management.
+ .set_mode: set the mode (user/kernel) of the thermal zone.
+    .get_trip_type: get the type of a certain trip point.
+    .get_trip_temp: get the temperature above which a certain trip point
+                    will be fired.
+
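+    A sketch of a registration call, assuming a hypothetical "my_sensor"
+    driver whose my_sensor_ops is filled in with the callbacks above, and
+    assuming registration failures come back as ERR_PTR() values:
+
+	#include <linux/err.h>
+	#include <linux/thermal.h>
+
+	static struct thermal_zone_device *my_tz;
+
+	static int my_sensor_register(struct my_sensor *sensor)
+	{
+		my_tz = thermal_zone_device_register("my sensor", 2, sensor,
+						     &my_sensor_ops);
+		if (IS_ERR(my_tz))
+			return PTR_ERR(my_tz);
+		return 0;
+	}
+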
+1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz)
+
+ This interface function removes the thermal zone device.
+    It deletes the corresponding entry from the /sys/class/thermal folder and
+    unbinds all the thermal cooling devices it uses.
+
+1.2 thermal cooling device interface
+1.2.1 struct thermal_cooling_device *thermal_cooling_device_register(char *name,
+ void *devdata, struct thermal_cooling_device_ops *)
+
+ This interface function adds a new thermal cooling device (fan/processor/...) to
+ /sys/class/thermal/ folder as cooling_device[0-*].
+    It tries to bind itself to all the thermal zone devices registered at the same time.
+ name: the cooling device name.
+ devdata: device private data.
+ ops: thermal cooling devices callbacks.
+ .get_max_state: get the Maximum throttle state of the cooling device.
+ .get_cur_state: get the Current throttle state of the cooling device.
+ .set_cur_state: set the Current throttle state of the cooling device.
+
+1.2.2 void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
+
+    This interface function removes the thermal cooling device.
+    It deletes the corresponding entry from the /sys/class/thermal folder and
+    unbinds itself from all the thermal zone devices using it.
+
+1.3 interface for binding a thermal zone device with a thermal cooling device
+1.3.1 int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ int trip, struct thermal_cooling_device *cdev);
+
+    This interface function binds a thermal cooling device to a certain trip point
+ of a thermal zone device.
+ This function is usually called in the thermal zone device .bind callback.
+ tz: the thermal zone device
+ cdev: thermal cooling device
+    trip: indicates which trip point the cooling device is associated with
+ in this thermal zone.
+
+1.3.2 int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
+ int trip, struct thermal_cooling_device *cdev);
+
+    This interface function unbinds a thermal cooling device from a certain trip point
+ of a thermal zone device.
+ This function is usually called in the thermal zone device .unbind callback.
+ tz: the thermal zone device
+ cdev: thermal cooling device
+    trip: indicates which trip point the cooling device is associated with
+ in this thermal zone.
+
+2. sysfs attributes structure
+
+RO read only value
+RW read/write value
+
+All thermal sysfs attributes will be represented under /sys/class/thermal
+/sys/class/thermal/
+
+Thermal zone device sys I/F, created once it's registered:
+|thermal_zone[0-*]:
+ |-----type: Type of the thermal zone
+ |-----temp: Current temperature
+ |-----mode: Working mode of the thermal zone
+ |-----trip_point_[0-*]_temp: Trip point temperature
+ |-----trip_point_[0-*]_type: Trip point type
+
+Thermal cooling device sys I/F, created once it's registered:
+|cooling_device[0-*]:
+ |-----type : Type of the cooling device(processor/fan/...)
+ |-----max_state: Maximum cooling state of the cooling device
+ |-----cur_state: Current cooling state of the cooling device
+
+
+These two dynamic attributes are created/removed in pairs.
+They represent the relationship between a thermal zone and its associated cooling device.
+They are created/removed for each successful execution of
+thermal_zone_bind_cooling_device/thermal_zone_unbind_cooling_device.
+
+|thermal_zone[0-*]
+ |-----cdev[0-*]: The [0-*]th cooling device in the current thermal zone
+ |-----cdev[0-*]_trip_point: Trip point that cdev[0-*] is associated with
+
+
+***************************
+* Thermal zone attributes *
+***************************
+
+type Strings which represent the thermal zone type.
+ This is given by thermal zone driver as part of registration.
+	Eg: "ACPI thermal zone" indicates it's an ACPI thermal device
+ RO
+ Optional
+
+temp Current temperature as reported by thermal zone (sensor)
+ Unit: degree celsius
+ RO
+ Required
+
+mode	One of the predefined values in [kernel, user]
+ This file gives information about the algorithm
+ that is currently managing the thermal zone.
+ It can be either default kernel based algorithm
+ or user space application.
+ RW
+ Optional
+ kernel = Thermal management in kernel thermal zone driver.
+ user = Preventing kernel thermal zone driver actions upon
+ trip points so that user application can take full
+ charge of the thermal management.
+
+trip_point_[0-*]_temp The temperature above which trip point will be fired
+ Unit: degree celsius
+ RO
+ Optional
+
+trip_point_[0-*]_type Strings which indicate the type of the trip point
+ Eg. it can be one of critical, hot, passive,
+ active[0-*] for ACPI thermal zone.
+ RO
+ Optional
+
+cdev[0-*]	Sysfs link to the thermal cooling device node that exposes the
+		sys I/F for cooling device throttling control.
+ RO
+ Optional
+
+cdev[0-*]_trip_point	The trip point with which cdev[0-*] is associated in this thermal zone
+ -1 means the cooling device is not associated with any trip point.
+ RO
+ Optional
+
+******************************
+* Cooling device attributes *
+******************************
+
+type String which represents the type of device
+ eg: For generic ACPI: this should be "Fan",
+ "Processor" or "LCD"
+ eg. For memory controller device on intel_menlow platform:
+ this should be "Memory controller"
+ RO
+ Optional
+
+max_state The maximum permissible cooling state of this cooling device.
+ RO
+ Required
+
+cur_state The current cooling state of this cooling device.
+		The value can be any integer between 0 and max_state:
+		cur_state == 0 means no cooling,
+		cur_state == max_state means the maximum cooling.
+ RW
+ Required
+
+3. A simple implementation
+
+An ACPI thermal zone may support multiple trip points like critical/hot/passive/active.
+If an ACPI thermal zone supports critical, passive, active[0] and active[1] at the same time,
+it may register itself as a thermal_zone_device (thermal_zone1) with 4 trip points in all.
+It has one processor and one fan, which are both registered as thermal_cooling_device.
+If the processor is listed in the _PSL method, and the fan is listed in the _AL0 method,
+the sys I/F structure will be built like this:
+
+/sys/class/thermal:
+
+|thermal_zone1:
+ |-----type: ACPI thermal zone
+ |-----temp: 37
+ |-----mode: kernel
+ |-----trip_point_0_temp: 100
+ |-----trip_point_0_type: critical
+ |-----trip_point_1_temp: 80
+ |-----trip_point_1_type: passive
+ |-----trip_point_2_temp: 70
+ |-----trip_point_2_type: active[0]
+ |-----trip_point_3_temp: 60
+ |-----trip_point_3_type: active[1]
+ |-----cdev0: --->/sys/class/thermal/cooling_device0
+ |-----cdev0_trip_point: 1 /* cdev0 can be used for passive */
+ |-----cdev1: --->/sys/class/thermal/cooling_device3
+ |-----cdev1_trip_point: 2 /* cdev1 can be used for active[0]*/
+
+|cooling_device0:
+ |-----type: Processor
+ |-----max_state: 8
+ |-----cur_state: 0
+
+|cooling_device3:
+ |-----type: Fan
+ |-----max_state: 2
+ |-----cur_state: 0
diff --git a/Documentation/thinkpad-acpi.txt b/Documentation/thinkpad-acpi.txt
index 10c041ca13c7..6c2477754a2a 100644
--- a/Documentation/thinkpad-acpi.txt
+++ b/Documentation/thinkpad-acpi.txt
@@ -1,7 +1,7 @@
ThinkPad ACPI Extras Driver
- Version 0.17
- October 04th, 2007
+ Version 0.19
+ January 06th, 2008
Borislav Deianov <borislav@users.sf.net>
Henrique de Moraes Holschuh <hmh@hmh.eng.br>
@@ -215,6 +215,11 @@ The following commands can be written to the /proc/acpi/ibm/hotkey file:
... any other 8-hex-digit mask ...
echo reset > /proc/acpi/ibm/hotkey -- restore the original mask
+The procfs interface does not support NVRAM polling control. So as to
+maintain maximum bug-to-bug compatibility, it does not report any masks,
+nor does it allow one to manipulate the hot key mask when the firmware
+does not support masks at all, even if NVRAM polling is in use.
+
sysfs notes:
hotkey_bios_enabled:
@@ -231,17 +236,26 @@ sysfs notes:
to this value.
hotkey_enable:
- Enables/disables the hot keys feature, and reports
- current status of the hot keys feature.
+ Enables/disables the hot keys feature in the ACPI
+ firmware, and reports current status of the hot keys
+ feature. Has no effect on the NVRAM hot key polling
+ functionality.
0: disables the hot keys feature / feature disabled
1: enables the hot keys feature / feature enabled
hotkey_mask:
- bit mask to enable driver-handling and ACPI event
- generation for each hot key (see above). Returns the
- current status of the hot keys mask, and allows one to
- modify it.
+ bit mask to enable driver-handling (and depending on
+ the firmware, ACPI event generation) for each hot key
+ (see above). Returns the current status of the hot keys
+ mask, and allows one to modify it.
+
+ Note: when NVRAM polling is active, the firmware mask
+ will be different from the value returned by
+ hotkey_mask. The driver will retain enabled bits for
+ hotkeys that are under NVRAM polling even if the
+ firmware refuses them, and will not set these bits on
+ the firmware hot key mask.
hotkey_all_mask:
bit mask that should enable event reporting for all
@@ -257,12 +271,48 @@ sysfs notes:
handled by the firmware anyway. Echo it to
hotkey_mask above, to use.
+ hotkey_source_mask:
+		bit mask that selects which hot keys the driver will
+		poll the NVRAM for.  This is auto-detected by the driver
+ based on the capabilities reported by the ACPI firmware,
+ but it can be overridden at runtime.
+
+ Hot keys whose bits are set in both hotkey_source_mask
+ and also on hotkey_mask are polled for in NVRAM. Only a
+ few hot keys are available through CMOS NVRAM polling.
+
+ Warning: when in NVRAM mode, the volume up/down/mute
+ keys are synthesized according to changes in the mixer,
+ so you have to use volume up or volume down to unmute,
+ as per the ThinkPad volume mixer user interface. When
+ in ACPI event mode, volume up/down/mute are reported as
+ separate events, but this behaviour may be corrected in
+ future releases of this driver, in which case the
+		ThinkPad volume mixer user interface semantics will be
+ enforced.
+
+ hotkey_poll_freq:
+ frequency in Hz for hot key polling. It must be between
+ 0 and 25 Hz. Polling is only carried out when strictly
+ needed.
+
+ Setting hotkey_poll_freq to zero disables polling, and
+ will cause hot key presses that require NVRAM polling
+ to never be reported.
+
+ Setting hotkey_poll_freq too low will cause repeated
+ pressings of the same hot key to be misreported as a
+ single key press, or to not even be detected at all.
+ The recommended polling frequency is 10Hz.
+
hotkey_radio_sw:
if the ThinkPad has a hardware radio switch, this
attribute will read 0 if the switch is in the "radios
disabled" postition, and 1 if the switch is in the
"radios enabled" position.
+ This attribute has poll()/select() support.
+
hotkey_report_mode:
Returns the state of the procfs ACPI event report mode
filter for hot keys. If it is set to 1 (the default),
@@ -277,6 +327,25 @@ sysfs notes:
May return -EPERM (write access locked out by module
parameter) or -EACCES (read-only).
+ wakeup_reason:
+ Set to 1 if the system is waking up because the user
+ requested a bay ejection. Set to 2 if the system is
+ waking up because the user requested the system to
+ undock. Set to zero for normal wake-ups or wake-ups
+ due to unknown reasons.
+
+ This attribute has poll()/select() support.
+
+ wakeup_hotunplug_complete:
+		Set to 1 if the system was woken up because of an
+ undock or bay ejection request, and that request
+		was successfully completed.  At this point, it might
+ be useful to send the system back to sleep, at the
+ user's choice. Refer to HKEY events 0x4003 and
+ 0x3003, below.
+
+ This attribute has poll()/select() support.
+
input layer notes:
A Hot key is mapped to a single input layer EV_KEY event, possibly
@@ -427,6 +496,23 @@ Non hot-key ACPI HKEY event map:
The above events are not propagated by the driver, except for legacy
compatibility purposes when hotkey_report_mode is set to 1.
+0x2304 System is waking up from suspend to undock
+0x2305 System is waking up from suspend to eject bay
+0x2404 System is waking up from hibernation to undock
+0x2405 System is waking up from hibernation to eject bay
+
+The above events are never propagated by the driver.
+
+0x3003 Bay ejection (see 0x2x05) complete, can sleep again
+0x4003 Undocked (see 0x2x04), can sleep again
+0x5009 Tablet swivel: switched to tablet mode
+0x500A Tablet swivel: switched to normal mode
+0x500B	Tablet pen inserted into its storage bay
+0x500C Tablet pen removed from its storage bay
+0x5010 Brightness level changed (newer Lenovo BIOSes)
+
+The above events are propagated by the driver.
+
Compatibility notes:
ibm-acpi and thinkpad-acpi 0.15 (mainline kernels before 2.6.23) never
@@ -1263,3 +1349,17 @@ Sysfs interface changelog:
and the hwmon class for libsensors4 (lm-sensors 3)
compatibility. Moved all hwmon attributes to this
new platform device.
+
+0x020100: Marker for thinkpad-acpi with hot key NVRAM polling
+ support. If you must, use it to know you should not
+		start a userspace NVRAM poller (this allows one to detect when
+ NVRAM is compiled out by the user because it is
+ unneeded/undesired in the first place).
+0x020101: Marker for thinkpad-acpi with hot key NVRAM polling
+		and proper hotkey_mask semantics (version 8 of the
+ NVRAM polling patch). Some development snapshots of
+ 0.18 had an earlier version that did strange things
+ to hotkey_mask.
+
+0x020200: Add poll()/select() support to the following attributes:
+ hotkey_radio_sw, wakeup_hotunplug_complete, wakeup_reason
diff --git a/Documentation/unaligned-memory-access.txt b/Documentation/unaligned-memory-access.txt
new file mode 100644
index 000000000000..6223eace3c09
--- /dev/null
+++ b/Documentation/unaligned-memory-access.txt
@@ -0,0 +1,226 @@
+UNALIGNED MEMORY ACCESSES
+=========================
+
+Linux runs on a wide variety of architectures which have varying behaviour
+when it comes to memory access. This document presents some details about
+unaligned accesses, why you need to write code that doesn't cause them,
+and how to write such code!
+
+
+The definition of an unaligned access
+=====================================
+
+Unaligned memory accesses occur when you try to read N bytes of data starting
+from an address that is not evenly divisible by N (i.e. addr % N != 0).
+For example, reading 4 bytes of data from address 0x10004 is fine, but
+reading 4 bytes of data from address 0x10005 would be an unaligned memory
+access.
+
+The above may seem a little vague, as memory access can happen in different
+ways. The context here is at the machine code level: certain instructions read
+or write a number of bytes to or from memory (e.g. movb, movw, movl in x86
+assembly). As will become clear, it is relatively easy to spot C statements
+which will compile to multiple-byte memory access instructions, namely when
+dealing with types such as u16, u32 and u64.
+
+
+Natural alignment
+=================
+
+The rule mentioned above forms what we refer to as natural alignment:
+When accessing N bytes of memory, the base memory address must be evenly
+divisible by N, i.e. addr % N == 0.
+
+When writing code, assume the target architecture has natural alignment
+requirements.
+
+In reality, only a few architectures require natural alignment on all sizes
+of memory access. However, we must consider ALL supported architectures;
+writing code that satisfies natural alignment requirements is the easiest way
+to achieve full portability.
+
+
+Why unaligned access is bad
+===========================
+
+The effects of performing an unaligned memory access vary from architecture
+to architecture. It would be easy to write a whole document on the differences
+here; a summary of the common scenarios is presented below:
+
+ - Some architectures are able to perform unaligned memory accesses
+ transparently, but there is usually a significant performance cost.
+ - Some architectures raise processor exceptions when unaligned accesses
+ happen. The exception handler is able to correct the unaligned access,
+ at significant cost to performance.
+ - Some architectures raise processor exceptions when unaligned accesses
+ happen, but the exceptions do not contain enough information for the
+ unaligned access to be corrected.
+ - Some architectures are not capable of unaligned memory access, but will
+   silently perform a different memory access from the one that was
+   requested, resulting in a subtle code bug that is hard to detect!
+
+It should be obvious from the above that if your code causes unaligned
+memory accesses to happen, your code will not work correctly on certain
+platforms and will cause performance problems on others.
+
+
+Code that does not cause unaligned access
+=========================================
+
+At first, the concepts above may seem a little hard to relate to actual
+coding practice. After all, you don't have a great deal of control over
+memory addresses of certain variables, etc.
+
+Fortunately things are not too complex, as in most cases, the compiler
+ensures that things will work for you. For example, take the following
+structure:
+
+ struct foo {
+ u16 field1;
+ u32 field2;
+ u8 field3;
+ };
+
+Let us assume that an instance of the above structure resides in memory
+starting at address 0x10000. With a basic level of understanding, it would
+not be unreasonable to expect that accessing field2 would cause an unaligned
+access. You'd be expecting field2 to be located at offset 2 bytes into the
+structure, i.e. address 0x10002, but that address is not evenly divisible
+by 4 (remember, we're reading a 4 byte value here).
+
+Fortunately, the compiler understands the alignment constraints, so in the
+above case it would insert 2 bytes of padding in between field1 and field2.
+Therefore, for standard structure types you can always rely on the compiler
+to pad structures so that accesses to fields are suitably aligned (assuming
+you do not cast the field to a type of different length).
+
+Similarly, you can also rely on the compiler to align variables and function
+parameters to a naturally aligned scheme, based on the size of the type of
+the variable.
+
+At this point, it should be clear that accessing a single byte (u8 or char)
+will never cause an unaligned access, because all memory addresses are evenly
+divisible by one.
+
+On a related topic, with the above considerations in mind you may observe
+that you could reorder the fields in the structure in order to place fields
+where padding would otherwise be inserted, and hence reduce the overall
+resident memory size of structure instances. The optimal layout of the
+above example is:
+
+ struct foo {
+ u32 field2;
+ u16 field1;
+ u8 field3;
+ };
+
+For a natural alignment scheme, the compiler would only have to add a single
+byte of padding at the end of the structure. This padding is added in order
+to satisfy alignment constraints for arrays of these structures.
+
+Another point worth mentioning is the use of __attribute__((packed)) on a
+structure type. This GCC-specific attribute tells the compiler never to
+insert any padding within structures, useful when you want to use a C struct
+to represent some data that comes in a fixed arrangement 'off the wire'.
+
+You might be inclined to believe that usage of this attribute can easily
+lead to unaligned accesses when accessing fields that do not satisfy
+architectural alignment requirements. However, again, the compiler is aware
+of the alignment constraints and will generate extra instructions to perform
+the memory access in a way that does not cause unaligned access. Of course,
+these extra instructions cause a loss in performance compared to the
+non-packed case, so the packed attribute should only be used when avoiding
+structure padding is of importance.
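+
+For instance, a fixed 'on the wire' layout might (hypothetically) be
+described like this:
+
+    struct wire_hdr {
+        u8  type;   /* offset 0 */
+        u32 addr;   /* offset 1 rather than 4 -- misaligned */
+        u16 len;    /* offset 5 rather than 8 -- misaligned */
+    } __attribute__((packed));
+
+Without the packed attribute the compiler would place addr at offset 4 and
+len at offset 8; with it, the compiler instead emits whatever extra
+instructions are needed to access the misaligned fields safely.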
+
+
+Code that causes unaligned access
+=================================
+
+With the above in mind, let's move on to a real-life example of a function
+that can cause an unaligned memory access. The following function, adapted
+from include/linux/etherdevice.h, is an optimized routine to compare two
+ethernet MAC addresses for equality.
+
+unsigned int compare_ether_addr(const u8 *addr1, const u8 *addr2)
+{
+ const u16 *a = (const u16 *) addr1;
+ const u16 *b = (const u16 *) addr2;
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+}
+
+In the above function, the reference to a[0] causes 2 bytes (16 bits) to
+be read from memory starting at address addr1. Think about what would happen
+if addr1 was an odd address such as 0x10003. (Hint: it'd be an unaligned
+access.)
+
+Despite the potential unaligned access problems with the above function, it
+is included in the kernel anyway but is understood to only work on
+16-bit-aligned addresses. It is up to the caller to ensure this alignment or
+not use this function at all. This alignment-unsafe function is still useful
+as it is a decent optimization for the cases when you can ensure alignment,
+which is almost always the case in an ethernet networking context.
+
+
+Here is another example of some code that could cause unaligned accesses:
+ void myfunc(u8 *data, u32 value)
+ {
+ [...]
+ *((u32 *) data) = cpu_to_le32(value);
+ [...]
+ }
+
+This code will cause unaligned accesses every time the data parameter points
+to an address that is not evenly divisible by 4.
+
+In summary, the two main scenarios where you may run into unaligned access
+problems involve:
+ 1. Casting variables to types of different lengths
+ 2. Pointer arithmetic followed by access to at least 2 bytes of data
+
+
+Avoiding unaligned accesses
+===========================
+
+The easiest way to avoid unaligned access is to use the get_unaligned() and
+put_unaligned() macros provided by the <asm/unaligned.h> header file.
+
+Going back to an earlier example of code that potentially causes unaligned
+access:
+
+ void myfunc(u8 *data, u32 value)
+ {
+ [...]
+ *((u32 *) data) = cpu_to_le32(value);
+ [...]
+ }
+
+To avoid the unaligned memory access, you would rewrite it as follows:
+
+ void myfunc(u8 *data, u32 value)
+ {
+ [...]
+ value = cpu_to_le32(value);
+ put_unaligned(value, (u32 *) data);
+ [...]
+ }
+
+The get_unaligned() macro works similarly. Assuming 'data' is a pointer to
+memory and you wish to avoid unaligned access, its usage is as follows:
+
+ u32 value = get_unaligned((u32 *) data);
+
+These macros work for memory accesses of any length (not just 32 bits as
+in the examples above). Be aware that when compared to standard access of
+aligned memory, using these macros to access unaligned memory can be costly in
+terms of performance.
+
+If use of such macros is not convenient, another option is to use memcpy(),
+where the source or destination (or both) are of type u8* or unsigned char*.
+Due to the byte-wise nature of this operation, unaligned accesses are avoided.
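+
+For instance, the earlier myfunc() example could equally be written as a
+rough sketch along these lines:
+
+    void myfunc(u8 *data, u32 value)
+    {
+        [...]
+        value = cpu_to_le32(value);
+        memcpy(data, &value, sizeof(value));
+        [...]
+    }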
+
+--
+Author: Daniel Drake <dsd@gentoo.org>
+With help from: Alan Cox, Avuton Olrich, Heikki Orsila, Jan Engelhardt,
+Johannes Berg, Kyle McMartin, Kyle Moffett, Randy Dunlap, Robert Hancock,
+Uli Kunitz, Vadim Lobanov
+
diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
index 488c1f31b992..7123fee708ca 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -32,6 +32,13 @@ struct slabinfo {
int sanity_checks, slab_size, store_user, trace;
int order, poison, reclaim_account, red_zone;
unsigned long partial, objects, slabs;
+ unsigned long alloc_fastpath, alloc_slowpath;
+ unsigned long free_fastpath, free_slowpath;
+ unsigned long free_frozen, free_add_partial, free_remove_partial;
+ unsigned long alloc_from_partial, alloc_slab, free_slab, alloc_refill;
+ unsigned long cpuslab_flush, deactivate_full, deactivate_empty;
+ unsigned long deactivate_to_head, deactivate_to_tail;
+ unsigned long deactivate_remote_frees;
int numa[MAX_NODES];
int numa_partial[MAX_NODES];
} slabinfo[MAX_SLABS];
@@ -64,8 +71,10 @@ int show_inverted = 0;
int show_single_ref = 0;
int show_totals = 0;
int sort_size = 0;
+int sort_active = 0;
int set_debug = 0;
int show_ops = 0;
+int show_activity = 0;
/* Debug options */
int sanity = 0;
@@ -93,8 +102,10 @@ void usage(void)
printf("slabinfo 5/7/2007. (c) 2007 sgi. clameter@sgi.com\n\n"
"slabinfo [-ahnpvtsz] [-d debugopts] [slab-regexp]\n"
"-a|--aliases Show aliases\n"
+ "-A|--activity Most active slabs first\n"
"-d<options>|--debug=<options> Set/Clear Debug options\n"
- "-e|--empty Show empty slabs\n"
+ "-D|--display-active Switch line format to activity\n"
+ "-e|--empty Show empty slabs\n"
"-f|--first-alias Show first alias\n"
"-h|--help Show usage information\n"
"-i|--inverted Inverted list\n"
@@ -281,8 +292,11 @@ int line = 0;
void first_line(void)
{
- printf("Name Objects Objsize Space "
- "Slabs/Part/Cpu O/S O %%Fr %%Ef Flg\n");
+ if (show_activity)
+ printf("Name Objects Alloc Free %%Fast\n");
+ else
+ printf("Name Objects Objsize Space "
+ "Slabs/Part/Cpu O/S O %%Fr %%Ef Flg\n");
}
/*
@@ -309,6 +323,12 @@ unsigned long slab_size(struct slabinfo *s)
return s->slabs * (page_size << s->order);
}
+unsigned long slab_activity(struct slabinfo *s)
+{
+ return s->alloc_fastpath + s->free_fastpath +
+ s->alloc_slowpath + s->free_slowpath;
+}
+
void slab_numa(struct slabinfo *s, int mode)
{
int node;
@@ -392,6 +412,71 @@ const char *onoff(int x)
return "Off";
}
+void slab_stats(struct slabinfo *s)
+{
+ unsigned long total_alloc;
+ unsigned long total_free;
+ unsigned long total;
+
+ if (!s->alloc_slab)
+ return;
+
+ total_alloc = s->alloc_fastpath + s->alloc_slowpath;
+ total_free = s->free_fastpath + s->free_slowpath;
+
+ if (!total_alloc || !total_free)
+ return;
+
+ printf("\n");
+ printf("Slab Perf Counter Alloc Free %%Al %%Fr\n");
+ printf("--------------------------------------------------\n");
+ printf("Fastpath %8lu %8lu %3lu %3lu\n",
+ s->alloc_fastpath, s->free_fastpath,
+ s->alloc_fastpath * 100 / total_alloc,
+ s->free_fastpath * 100 / total_free);
+ printf("Slowpath %8lu %8lu %3lu %3lu\n",
+ total_alloc - s->alloc_fastpath, s->free_slowpath,
+ (total_alloc - s->alloc_fastpath) * 100 / total_alloc,
+ s->free_slowpath * 100 / total_free);
+ printf("Page Alloc %8lu %8lu %3lu %3lu\n",
+ s->alloc_slab, s->free_slab,
+ s->alloc_slab * 100 / total_alloc,
+ s->free_slab * 100 / total_free);
+ printf("Add partial %8lu %8lu %3lu %3lu\n",
+ s->deactivate_to_head + s->deactivate_to_tail,
+ s->free_add_partial,
+ (s->deactivate_to_head + s->deactivate_to_tail) * 100 / total_alloc,
+ s->free_add_partial * 100 / total_free);
+ printf("Remove partial %8lu %8lu %3lu %3lu\n",
+ s->alloc_from_partial, s->free_remove_partial,
+ s->alloc_from_partial * 100 / total_alloc,
+ s->free_remove_partial * 100 / total_free);
+
+ printf("RemoteObj/SlabFrozen %8lu %8lu %3lu %3lu\n",
+ s->deactivate_remote_frees, s->free_frozen,
+ s->deactivate_remote_frees * 100 / total_alloc,
+ s->free_frozen * 100 / total_free);
+
+ printf("Total %8lu %8lu\n\n", total_alloc, total_free);
+
+ if (s->cpuslab_flush)
+ printf("Flushes %8lu\n", s->cpuslab_flush);
+
+ if (s->alloc_refill)
+ printf("Refill %8lu\n", s->alloc_refill);
+
+ total = s->deactivate_full + s->deactivate_empty +
+ s->deactivate_to_head + s->deactivate_to_tail;
+
+ if (total)
+ printf("Deactivate Full=%lu(%lu%%) Empty=%lu(%lu%%) "
+ "ToHead=%lu(%lu%%) ToTail=%lu(%lu%%)\n",
+ s->deactivate_full, (s->deactivate_full * 100) / total,
+ s->deactivate_empty, (s->deactivate_empty * 100) / total,
+ s->deactivate_to_head, (s->deactivate_to_head * 100) / total,
+ s->deactivate_to_tail, (s->deactivate_to_tail * 100) / total);
+}
+
void report(struct slabinfo *s)
{
if (strcmp(s->name, "*") == 0)
@@ -430,6 +515,7 @@ void report(struct slabinfo *s)
ops(s);
show_tracking(s);
slab_numa(s, 1);
+ slab_stats(s);
}
void slabcache(struct slabinfo *s)
@@ -479,13 +565,27 @@ void slabcache(struct slabinfo *s)
*p++ = 'T';
*p = 0;
- printf("%-21s %8ld %7d %8s %14s %4d %1d %3ld %3ld %s\n",
- s->name, s->objects, s->object_size, size_str, dist_str,
- s->objs_per_slab, s->order,
- s->slabs ? (s->partial * 100) / s->slabs : 100,
- s->slabs ? (s->objects * s->object_size * 100) /
- (s->slabs * (page_size << s->order)) : 100,
- flags);
+ if (show_activity) {
+ unsigned long total_alloc;
+ unsigned long total_free;
+
+ total_alloc = s->alloc_fastpath + s->alloc_slowpath;
+ total_free = s->free_fastpath + s->free_slowpath;
+
+ printf("%-21s %8ld %8ld %8ld %3ld %3ld \n",
+ s->name, s->objects,
+ total_alloc, total_free,
+ total_alloc ? (s->alloc_fastpath * 100 / total_alloc) : 0,
+ total_free ? (s->free_fastpath * 100 / total_free) : 0);
+ }
+ else
+ printf("%-21s %8ld %7d %8s %14s %4d %1d %3ld %3ld %s\n",
+ s->name, s->objects, s->object_size, size_str, dist_str,
+ s->objs_per_slab, s->order,
+ s->slabs ? (s->partial * 100) / s->slabs : 100,
+ s->slabs ? (s->objects * s->object_size * 100) /
+ (s->slabs * (page_size << s->order)) : 100,
+ flags);
}
/*
@@ -892,6 +992,8 @@ void sort_slabs(void)
if (sort_size)
result = slab_size(s1) < slab_size(s2);
+ else if (sort_active)
+ result = slab_activity(s1) < slab_activity(s2);
else
result = strcasecmp(s1->name, s2->name);
@@ -1074,6 +1176,23 @@ void read_slab_dir(void)
free(t);
slab->store_user = get_obj("store_user");
slab->trace = get_obj("trace");
+ slab->alloc_fastpath = get_obj("alloc_fastpath");
+ slab->alloc_slowpath = get_obj("alloc_slowpath");
+ slab->free_fastpath = get_obj("free_fastpath");
+ slab->free_slowpath = get_obj("free_slowpath");
+ slab->free_frozen= get_obj("free_frozen");
+ slab->free_add_partial = get_obj("free_add_partial");
+ slab->free_remove_partial = get_obj("free_remove_partial");
+ slab->alloc_from_partial = get_obj("alloc_from_partial");
+ slab->alloc_slab = get_obj("alloc_slab");
+ slab->alloc_refill = get_obj("alloc_refill");
+ slab->free_slab = get_obj("free_slab");
+ slab->cpuslab_flush = get_obj("cpuslab_flush");
+ slab->deactivate_full = get_obj("deactivate_full");
+ slab->deactivate_empty = get_obj("deactivate_empty");
+ slab->deactivate_to_head = get_obj("deactivate_to_head");
+ slab->deactivate_to_tail = get_obj("deactivate_to_tail");
+ slab->deactivate_remote_frees = get_obj("deactivate_remote_frees");
chdir("..");
if (slab->name[0] == ':')
alias_targets++;
@@ -1124,7 +1243,9 @@ void output_slabs(void)
struct option opts[] = {
{ "aliases", 0, NULL, 'a' },
+ { "activity", 0, NULL, 'A' },
{ "debug", 2, NULL, 'd' },
+ { "display-activity", 0, NULL, 'D' },
{ "empty", 0, NULL, 'e' },
{ "first-alias", 0, NULL, 'f' },
{ "help", 0, NULL, 'h' },
@@ -1149,7 +1270,7 @@ int main(int argc, char *argv[])
page_size = getpagesize();
- while ((c = getopt_long(argc, argv, "ad::efhil1noprstvzTS",
+ while ((c = getopt_long(argc, argv, "aAd::Defhil1noprstvzTS",
opts, NULL)) != -1)
switch (c) {
case '1':
@@ -1158,11 +1279,17 @@ int main(int argc, char *argv[])
case 'a':
show_alias = 1;
break;
+ case 'A':
+ sort_active = 1;
+ break;
case 'd':
set_debug = 1;
if (!debug_opt_scan(optarg))
fatal("Invalid debug option '%s'\n", optarg);
break;
+ case 'D':
+ show_activity = 1;
+ break;
case 'e':
show_empty = 1;
break;
diff --git a/Documentation/w1/masters/00-INDEX b/Documentation/w1/masters/00-INDEX
index 752613c4cea2..7b0ceaaad7af 100644
--- a/Documentation/w1/masters/00-INDEX
+++ b/Documentation/w1/masters/00-INDEX
@@ -4,3 +4,5 @@ ds2482
- The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses.
ds2490
- The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges.
+w1-gpio
+ - GPIO 1-wire bus master driver.
diff --git a/Documentation/w1/masters/w1-gpio b/Documentation/w1/masters/w1-gpio
new file mode 100644
index 000000000000..af5d3b4aa851
--- /dev/null
+++ b/Documentation/w1/masters/w1-gpio
@@ -0,0 +1,33 @@
+Kernel driver w1-gpio
+=====================
+
+Author: Ville Syrjala <syrjala@sci.fi>
+
+
+Description
+-----------
+
+GPIO 1-wire bus master driver. The driver uses the GPIO API to control the
+wire, and the GPIO pin can be specified using platform data.
+
+
+Example (mach-at91)
+-------------------
+
+#include <linux/w1-gpio.h>
+
+static struct w1_gpio_platform_data foo_w1_gpio_pdata = {
+ .pin = AT91_PIN_PB20,
+ .is_open_drain = 1,
+};
+
+static struct platform_device foo_w1_device = {
+ .name = "w1-gpio",
+ .id = -1,
+ .dev.platform_data = &foo_w1_gpio_pdata,
+};
+
+...
+ at91_set_GPIO_periph(foo_w1_gpio_pdata.pin, 1);
+ at91_set_multi_drive(foo_w1_gpio_pdata.pin, 1);
+ platform_device_register(&foo_w1_device);
diff --git a/MAINTAINERS b/MAINTAINERS
index da30a72a839c..2cdb591ac080 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -202,6 +202,13 @@ L: linux-scsi@vger.kernel.org
W: http://www.adaptec.com/
S: Supported
+ACER WMI LAPTOP EXTRAS
+P: Carlos Corbacho
+M: carlos@strangeworlds.co.uk
+L: aceracpi@googlegroups.com (subscribers-only)
+W: http://code.google.com/p/aceracpi
+S: Maintained
+
ACPI
P: Len Brown
M: len.brown@intel.com
@@ -252,6 +259,13 @@ L: linux-acpi@vger.kernel.org
W: http://acpi.sourceforge.net/
S: Supported
+ACPI WMI DRIVER
+P: Carlos Corbacho
+M: carlos@strangeworlds.co.uk
+L: linux-acpi@vger.kernel.org
+W: http://www.lesswatts.org/projects/acpi/
+S: Maintained
+
ADM1025 HARDWARE MONITOR DRIVER
P: Jean Delvare
M: khali@linux-fr.org
@@ -338,13 +352,12 @@ S: Maintained for 2.4; PCI support for 2.6.
AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER
P: Thomas Dahlmann
M: thomas.dahlmann@amd.com
-L: info-linux@geode.amd.com
+L: info-linux@geode.amd.com (subscribers-only)
S: Supported
AMD GEODE PROCESSOR/CHIPSET SUPPORT
P: Jordan Crouse
-M: info-linux@geode.amd.com
-L: info-linux@geode.amd.com
+L: info-linux@geode.amd.com (subscribers-only)
W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
S: Supported
@@ -841,6 +854,12 @@ L: linux-kernel@vger.kernel.org
T: git kernel.org:/pub/scm/linux/kernel/git/axboe/linux-2.6-block.git
S: Maintained
+BLOCK2MTD DRIVER
+P: Joern Engel
+M: joern@lazybastard.org
+L: linux-mtd@lists.infradead.org
+S: Maintained
+
BLUETOOTH SUBSYSTEM
P: Marcel Holtmann
M: marcel@holtmann.org
@@ -1366,6 +1385,11 @@ W: http://linuxtv.org/
T: git kernel.org:/pub/scm/linux/kernel/git/mchehab/v4l-dvb.git
S: Maintained
+DZ DECSTATION DZ11 SERIAL DRIVER
+P: Maciej W. Rozycki
+M: macro@linux-mips.org
+S: Maintained
+
EATA-DMA SCSI DRIVER
P: Michael Neuffer
L: linux-eata@i-connect.net, linux-scsi@vger.kernel.org
@@ -1780,6 +1804,11 @@ P: Jaroslav Kysela
M: perex@perex.cz
S: Maintained
+HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER
+P: Carlos Corbacho
+M: carlos@strangeworlds.co.uk
+S: Odd Fixes
+
HPET: High Precision Event Timers driver (hpet.c)
P: Clemens Ladisch
M: clemens@ladisch.de
@@ -2121,6 +2150,14 @@ M: acme@ghostprotocols.net
L: netdev@vger.kernel.org
S: Maintained
+IPWIRELESS DRIVER
+P: Jiri Kosina
+M: jkosina@suse.cz
+P: David Sterba
+M: dsterba@suse.cz
+S: Maintained
+T: git://git.kernel.org/pub/scm/linux/kernel/git/jikos/ipwireless_cs.git
+
IRDA SUBSYSTEM
P: Samuel Ortiz
M: samuel@sortiz.org
@@ -2249,6 +2286,15 @@ L: kvm-devel@lists.sourceforge.net
W: kvm.sourceforge.net
S: Supported
+KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64)
+P: Anthony Xu
+M: anthony.xu@intel.com
+P: Xiantao Zhang
+M: xiantao.zhang@intel.com
+L: kvm-ia64-devel@lists.sourceforge.net
+W: kvm.sourceforge.net
+S: Supported
+
KEXEC
P: Eric Biederman
M: ebiederm@xmission.com
@@ -2681,6 +2727,16 @@ M: James.Bottomley@HansenPartnership.com
L: linux-scsi@vger.kernel.org
S: Maintained
+NETEFFECT IWARP RNIC DRIVER (IW_NES)
+P: Faisal Latif
+M: flatif@neteffect.com
+P: Glenn Streiff
+M: gstreiff@neteffect.com
+L: general@lists.openfabrics.org
+W: http://www.neteffect.com
+S: Supported
+F: drivers/infiniband/hw/nes/
+
NETEM NETWORK EMULATOR
P: Stephen Hemminger
M: shemminger@linux-foundation.org
@@ -3012,8 +3068,8 @@ L: linux-abi-devel@lists.sourceforge.net
S: Maintained
PHRAM MTD DRIVER
-P: Jörn Engel
-M: joern@wh.fh-wedel.de
+P: Joern Engel
+M: joern@lazybastard.org
L: linux-mtd@lists.infradead.org
S: Maintained
@@ -3200,6 +3256,12 @@ M: mporter@kernel.crashing.org
L: linux-kernel@vger.kernel.org
S: Maintained
+RDC R-321X SoC
+P: Florian Fainelli
+M: florian.fainelli@telecomint.eu
+L: linux-kernel@vger.kernel.org
+S: Maintained
+
RDC R6040 FAST ETHERNET DRIVER
P: Florian Fainelli
M: florian.fainelli@telecomint.eu
@@ -3837,6 +3899,12 @@ M: oliver@neukum.name
L: linux-usb@vger.kernel.org
S: Maintained
+USB AUERSWALD DRIVER
+P: Wolfgang Muees
+M: wolfgang@iksw-muees.de
+L: linux-usb@vger.kernel.org
+S: Maintained
+
USB BLOCK DRIVER (UB ub)
P: Pete Zaitcev
M: zaitcev@redhat.com
@@ -3987,12 +4055,6 @@ S: Maintained
W: http://geocities.com/i0xox0i
W: http://firstlight.net/cvs
-USB AUERSWALD DRIVER
-P: Wolfgang Muees
-M: wolfgang@iksw-muees.de
-L: linux-usb@vger.kernel.org
-S: Maintained
-
USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER
P: Gary Brubaker
M: xavyer@ix.netcom.com
diff --git a/REPORTING-BUGS b/REPORTING-BUGS
index ac02e42a2627..ab0c56630a8c 100644
--- a/REPORTING-BUGS
+++ b/REPORTING-BUGS
@@ -10,11 +10,12 @@ bug report. This explains what you should do with the "Oops" information
to make it useful to the recipient.
Send the output to the maintainer of the kernel area that seems to
-be involved with the problem. Don't worry too much about getting the
-wrong person. If you are unsure send it to the person responsible for the
-code relevant to what you were doing. If it occurs repeatably try and
-describe how to recreate it. That is worth even more than the oops itself.
-The list of maintainers is in the MAINTAINERS file in this directory.
+be involved with the problem, and cc the relevant mailing list. Don't
+worry too much about getting the wrong person. If you are unsure, send it
+to the person responsible for the code relevant to what you were doing.
+If it occurs repeatably, try to describe how to recreate it. That is
+worth even more than the oops itself. The list of maintainers and
+mailing lists is in the MAINTAINERS file in this directory.
If it is a security bug, please copy the Security Contact listed
in the MAINTAINERS file. They can help coordinate bugfix and disclosure.
diff --git a/arch/alpha/Kconfig.debug b/arch/alpha/Kconfig.debug
index f45f28cc10da..3f6265f2d9d4 100644
--- a/arch/alpha/Kconfig.debug
+++ b/arch/alpha/Kconfig.debug
@@ -7,15 +7,6 @@ config EARLY_PRINTK
depends on ALPHA_GENERIC || ALPHA_SRM
default y
-config DEBUG_RWLOCK
- bool "Read-write spinlock debugging"
- depends on DEBUG_KERNEL
- help
- If you say Y here then read-write lock processing will count how many
- times it has tried to get the lock and issue an error message after
- too many attempts. If you suspect a rwlock problem or a kernel
- hacker asks for this option then say Y. Otherwise say N.
-
config ALPHA_LEGACY_START_ADDRESS
bool "Legacy kernel start address"
depends on ALPHA_GENERIC
diff --git a/arch/alpha/defconfig b/arch/alpha/defconfig
index 6da9c3dbde44..e43f68fd66b0 100644
--- a/arch/alpha/defconfig
+++ b/arch/alpha/defconfig
@@ -882,7 +882,6 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_DEBUG_SPINLOCK is not set
CONFIG_DEBUG_INFO=y
CONFIG_EARLY_PRINTK=y
-# CONFIG_DEBUG_RWLOCK is not set
# CONFIG_DEBUG_SEMAPHORE is not set
CONFIG_ALPHA_LEGACY_START_ADDRESS=y
CONFIG_MATHEMU=y
diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c
index e4a0bcf1d28b..a872078497be 100644
--- a/arch/alpha/kernel/core_irongate.c
+++ b/arch/alpha/kernel/core_irongate.c
@@ -241,7 +241,8 @@ albacore_init_arch(void)
size / 1024);
}
#endif
- reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop - pci_mem);
+ reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop -
+ pci_mem, BOOTMEM_DEFAULT);
printk("irongate_init_arch: temporarily reserving "
"region %08lx-%08lx for PCI\n", pci_mem, memtop - 1);
}
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 6413c5f23226..72f9a619a66d 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -430,7 +430,7 @@ sys_getpagesize(void)
asmlinkage unsigned long
sys_getdtablesize(void)
{
- return NR_OPEN;
+ return sysctl_nr_open;
}
/*
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
index 468b76ce66a1..8ac08311f5a5 100644
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -165,7 +165,7 @@ dma_alloc_coherent(struct device *dev, size_t size,
ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret) {
memset(ret, 0, size);
- *dma_handle = virt_to_bus(ret);
+ *dma_handle = virt_to_phys(ret);
}
return ret;
}
@@ -184,7 +184,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
BUG_ON(!sg_page(sg));
va = sg_virt(sg);
- sg_dma_address(sg) = (dma_addr_t)virt_to_bus(va);
+ sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
sg_dma_len(sg) = sg->length;
}
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index 2d00a08d3f08..26d3789dfdd0 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -9,6 +9,7 @@
#include <linux/bootmem.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
+#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/hwrpb.h>
@@ -470,22 +471,29 @@ EXPORT_SYMBOL(pci_free_consistent);
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
static void
-sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
+sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
+ int virt_ok)
{
unsigned long next_paddr;
struct scatterlist *leader;
long leader_flag, leader_length;
+ unsigned int max_seg_size;
leader = sg;
leader_flag = 0;
leader_length = leader->length;
next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;
+ /* we will not merge sgs without a device. */
+ max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
for (++sg; sg < end; ++sg) {
unsigned long addr, len;
addr = SG_ENT_PHYS_ADDRESS(sg);
len = sg->length;
+ if (leader_length + len > max_seg_size)
+ goto new_segment;
+
if (next_paddr == addr) {
sg->dma_address = -1;
leader_length += len;
@@ -494,6 +502,7 @@ sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
leader_flag = 1;
leader_length += len;
} else {
+new_segment:
leader->dma_address = leader_flag;
leader->dma_length = leader_length;
leader = sg;
@@ -512,7 +521,7 @@ sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
in the blanks. */
static int
-sg_fill(struct scatterlist *leader, struct scatterlist *end,
+sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
struct scatterlist *out, struct pci_iommu_arena *arena,
dma_addr_t max_dma, int dac_allowed)
{
@@ -562,8 +571,8 @@ sg_fill(struct scatterlist *leader, struct scatterlist *end,
/* Otherwise, break up the remaining virtually contiguous
hunks into individual direct maps and retry. */
- sg_classify(leader, end, 0);
- return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
+ sg_classify(dev, leader, end, 0);
+ return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
}
out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
@@ -619,12 +628,15 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
struct pci_iommu_arena *arena;
dma_addr_t max_dma;
int dac_allowed;
+ struct device *dev;
if (direction == PCI_DMA_NONE)
BUG();
dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
+ dev = pdev ? &pdev->dev : NULL;
+
/* Fast path single entry scatterlists. */
if (nents == 1) {
sg->dma_length = sg->length;
@@ -638,7 +650,7 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
end = sg + nents;
/* First, prepare information about the entries. */
- sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);
+ sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);
/* Second, figure out where we're going to map things. */
if (alpha_mv.mv_pci_tbi) {
@@ -658,7 +670,7 @@ pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
for (out = sg; sg < end; ++sg) {
if ((int) sg->dma_address < 0)
continue;
- if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
+ if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
goto error;
out++;
}
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index bd5e68cd61e8..74c346625658 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -58,7 +58,6 @@ static struct notifier_block alpha_panic_block = {
#include <asm/system.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
-#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/console.h>
@@ -429,7 +428,8 @@ setup_memory(void *kernel_end)
}
/* Reserve the bootmap memory. */
- reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size);
+ reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size,
+ BOOTMEM_DEFAULT);
printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
#ifdef CONFIG_BLK_DEV_INITRD
@@ -447,7 +447,7 @@ setup_memory(void *kernel_end)
phys_to_virt(PFN_PHYS(max_low_pfn)));
} else {
reserve_bootmem(virt_to_phys((void *)initrd_start),
- INITRD_SIZE);
+ INITRD_SIZE, BOOTMEM_DEFAULT);
}
}
#endif /* CONFIG_BLK_DEV_INITRD */
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index f4ab233201b2..63c2073401ee 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -77,10 +77,6 @@ int smp_num_probed; /* Internal processor count */
int smp_num_cpus = 1; /* Number that came online. */
EXPORT_SYMBOL(smp_num_cpus);
-extern void calibrate_delay(void);
-
-
-
/*
* Called by both boot and secondaries to move global data into
* per-processor storage.
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 79de99e32c35..ba914af18c4f 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -495,7 +495,7 @@ sys_call_table:
.quad sys_epoll_pwait
.quad sys_utimensat /* 475 */
.quad sys_signalfd
- .quad sys_timerfd
+ .quad sys_ni_syscall
.quad sys_eventfd
.size sys_call_table, . - sys_call_table
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index e3e3806a6f25..10ab7833e83c 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -242,7 +242,8 @@ setup_memory_node(int nid, void *kernel_end)
}
/* Reserve the bootmap memory. */
- reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start), bootmap_size);
+ reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start),
+ bootmap_size, BOOTMEM_DEFAULT);
printk(" reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
node_set_online(nid);
@@ -281,7 +282,7 @@ setup_memory(void *kernel_end)
nid = kvaddr_to_nid(initrd_start);
reserve_bootmem_node(NODE_DATA(nid),
virt_to_phys((void *)initrd_start),
- INITRD_SIZE);
+ INITRD_SIZE, BOOTMEM_DEFAULT);
}
}
#endif /* CONFIG_BLK_DEV_INITRD */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 64d19eff3faa..e19e7744e366 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -385,6 +385,7 @@ config ARCH_PXA
depends on MMU
select ARCH_MTD_XIP
select GENERIC_GPIO
+ select HAVE_GPIO_LIB
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
select TICK_ONESHOT
@@ -1122,6 +1123,8 @@ source "drivers/i2c/Kconfig"
source "drivers/spi/Kconfig"
+source "drivers/gpio/Kconfig"
+
source "drivers/w1/Kconfig"
source "drivers/power/Kconfig"
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index cecf658e3625..283e14fff993 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -359,7 +359,7 @@
CALL(sys_kexec_load)
CALL(sys_utimensat)
CALL(sys_signalfd)
-/* 350 */ CALL(sys_timerfd)
+/* 350 */ CALL(sys_ni_syscall)
CALL(sys_eventfd)
CALL(sys_fallocate)
#ifndef syscalls_counted
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e9dfbab46cb6..eefae1de334c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -150,7 +150,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
secondary_data.pgdir = 0;
*pmd_offset(pgd, PHYS_OFFSET) = __pmd(0);
- pgd_free(pgd);
+ pgd_free(&init_mm, pgd);
if (ret) {
printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index aa29ea58ca09..0ce38dfa6ebe 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -383,6 +383,7 @@ static void at91_lcdc_tft_power_control(int on)
}
static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+ .lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
.default_lcdcon2 = AT91SAM9261_DEFAULT_TFT_LCDCON2,
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index f09347a86e71..38313abef657 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -253,6 +253,7 @@ static void at91_lcdc_power_control(int on)
/* Driver datas */
static struct atmel_lcdfb_info __initdata ek_lcdc_data = {
+ .lcdcon_is_backlight = true,
.default_bpp = 16,
.default_dmacon = ATMEL_LCDC_DMAEN,
.default_lcdcon2 = AT91SAM9263_DEFAULT_LCDCON2,
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index 688659668bdf..8cb07437a807 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -71,11 +71,11 @@ static struct i2c_board_info __initdata dsmg600_i2c_board_info [] = {
static struct gpio_led dsmg600_led_pins[] = {
{
- .name = "power",
+ .name = "dsmg600:green:power",
.gpio = DSMG600_LED_PWR_GPIO,
},
{
- .name = "wlan",
+ .name = "dsmg600:green:wlan",
.gpio = DSMG600_LED_WLAN_GPIO,
.active_low = true,
},
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index 4cecae84837b..159e1c4f1eda 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -60,17 +60,17 @@ static struct i2c_board_info __initdata nas100d_i2c_board_info [] = {
static struct gpio_led nas100d_led_pins[] = {
{
- .name = "wlan", /* green led */
+ .name = "nas100d:green:wlan",
.gpio = NAS100D_LED_WLAN_GPIO,
.active_low = true,
},
{
- .name = "power", /* blue power led (off=flashing) */
+ .name = "nas100d:blue:power", /* (off=flashing) */
.gpio = NAS100D_LED_PWR_GPIO,
.active_low = true,
},
{
- .name = "disk", /* yellow led */
+ .name = "nas100d:yellow:disk",
.gpio = NAS100D_LED_DISK_GPIO,
.active_low = true,
},
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index acaebcbce53a..d9a182895a0f 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -63,20 +63,20 @@ static struct i2c_board_info __initdata nslu2_i2c_board_info [] = {
static struct gpio_led nslu2_led_pins[] = {
{
- .name = "ready", /* green led */
+ .name = "nslu2:green:ready",
.gpio = NSLU2_LED_GRN_GPIO,
},
{
- .name = "status", /* red led */
+ .name = "nslu2:red:status",
.gpio = NSLU2_LED_RED_GPIO,
},
{
- .name = "disk-1",
+ .name = "nslu2:green:disk-1",
.gpio = NSLU2_LED_DISK1_GPIO,
.active_low = true,
},
{
- .name = "disk-2",
+ .name = "nslu2:green:disk-2",
.gpio = NSLU2_LED_DISK2_GPIO,
.active_low = true,
},
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 8604938bd948..6e0c4f5b5ae6 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -3,7 +3,8 @@
#
# Common support (must be linked before board specific support)
-obj-y += clock.o devices.o generic.o irq.o dma.o time.o
+obj-y += clock.o devices.o generic.o irq.o dma.o \
+ time.o gpio.o
obj-$(CONFIG_PXA25x) += pxa25x.o
obj-$(CONFIG_PXA27x) += pxa27x.o
obj-$(CONFIG_PXA3xx) += pxa3xx.o mfp.o smemc.o
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index 76970598f550..80721c610d41 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -32,7 +32,6 @@
#include <asm/mach/map.h>
#include <asm/arch/pxa-regs.h>
-#include <asm/arch/gpio.h>
#include "generic.h"
@@ -67,97 +66,6 @@ unsigned int get_memclk_frequency_10khz(void)
EXPORT_SYMBOL(get_memclk_frequency_10khz);
/*
- * Handy function to set GPIO alternate functions
- */
-int pxa_last_gpio;
-
-int pxa_gpio_mode(int gpio_mode)
-{
- unsigned long flags;
- int gpio = gpio_mode & GPIO_MD_MASK_NR;
- int fn = (gpio_mode & GPIO_MD_MASK_FN) >> 8;
- int gafr;
-
- if (gpio > pxa_last_gpio)
- return -EINVAL;
-
- local_irq_save(flags);
- if (gpio_mode & GPIO_DFLT_LOW)
- GPCR(gpio) = GPIO_bit(gpio);
- else if (gpio_mode & GPIO_DFLT_HIGH)
- GPSR(gpio) = GPIO_bit(gpio);
- if (gpio_mode & GPIO_MD_MASK_DIR)
- GPDR(gpio) |= GPIO_bit(gpio);
- else
- GPDR(gpio) &= ~GPIO_bit(gpio);
- gafr = GAFR(gpio) & ~(0x3 << (((gpio) & 0xf)*2));
- GAFR(gpio) = gafr | (fn << (((gpio) & 0xf)*2));
- local_irq_restore(flags);
-
- return 0;
-}
-
-EXPORT_SYMBOL(pxa_gpio_mode);
-
-int gpio_direction_input(unsigned gpio)
-{
- unsigned long flags;
- u32 mask;
-
- if (gpio > pxa_last_gpio)
- return -EINVAL;
-
- mask = GPIO_bit(gpio);
- local_irq_save(flags);
- GPDR(gpio) &= ~mask;
- local_irq_restore(flags);
-
- return 0;
-}
-EXPORT_SYMBOL(gpio_direction_input);
-
-int gpio_direction_output(unsigned gpio, int value)
-{
- unsigned long flags;
- u32 mask;
-
- if (gpio > pxa_last_gpio)
- return -EINVAL;
-
- mask = GPIO_bit(gpio);
- local_irq_save(flags);
- if (value)
- GPSR(gpio) = mask;
- else
- GPCR(gpio) = mask;
- GPDR(gpio) |= mask;
- local_irq_restore(flags);
-
- return 0;
-}
-EXPORT_SYMBOL(gpio_direction_output);
-
-/*
- * Return GPIO level
- */
-int pxa_gpio_get_value(unsigned gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-EXPORT_SYMBOL(pxa_gpio_get_value);
-
-/*
- * Set output GPIO level
- */
-void pxa_gpio_set_value(unsigned gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-EXPORT_SYMBOL(pxa_gpio_set_value);
-
-/*
* Routine to safely enable or disable a clock in the CKEN
*/
void __pxa_set_cken(int clock, int enable)
@@ -172,7 +80,6 @@ void __pxa_set_cken(int clock, int enable)
local_irq_restore(flags);
}
-
EXPORT_SYMBOL(__pxa_set_cken);
/*
diff --git a/arch/arm/mach-pxa/generic.h b/arch/arm/mach-pxa/generic.h
index 1a16ad3ecee6..b3d10b0e52a0 100644
--- a/arch/arm/mach-pxa/generic.h
+++ b/arch/arm/mach-pxa/generic.h
@@ -16,6 +16,7 @@ extern void __init pxa_init_irq_low(void);
extern void __init pxa_init_irq_high(void);
extern void __init pxa_init_irq_gpio(int gpio_nr);
extern void __init pxa_init_irq_set_wake(int (*set_wake)(unsigned int, unsigned int));
+extern void __init pxa_init_gpio(int gpio_nr);
extern void __init pxa25x_init_irq(void);
extern void __init pxa27x_init_irq(void);
extern void __init pxa3xx_init_irq(void);
diff --git a/arch/arm/mach-pxa/gpio.c b/arch/arm/mach-pxa/gpio.c
new file mode 100644
index 000000000000..8638dd7dd076
--- /dev/null
+++ b/arch/arm/mach-pxa/gpio.c
@@ -0,0 +1,197 @@
+/*
+ * linux/arch/arm/mach-pxa/gpio.c
+ *
+ * Generic PXA GPIO handling
+ *
+ * Author: Nicolas Pitre
+ * Created: Jun 15, 2001
+ * Copyright: MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/gpio.h>
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/arch/pxa-regs.h>
+
+#include "generic.h"
+
+
+struct pxa_gpio_chip {
+ struct gpio_chip chip;
+ void __iomem *regbase;
+};
+
+int pxa_last_gpio;
+
+/*
+ * Configure pins for GPIO or other functions
+ */
+int pxa_gpio_mode(int gpio_mode)
+{
+ unsigned long flags;
+ int gpio = gpio_mode & GPIO_MD_MASK_NR;
+ int fn = (gpio_mode & GPIO_MD_MASK_FN) >> 8;
+ int gafr;
+
+ if (gpio > pxa_last_gpio)
+ return -EINVAL;
+
+ local_irq_save(flags);
+ if (gpio_mode & GPIO_DFLT_LOW)
+ GPCR(gpio) = GPIO_bit(gpio);
+ else if (gpio_mode & GPIO_DFLT_HIGH)
+ GPSR(gpio) = GPIO_bit(gpio);
+ if (gpio_mode & GPIO_MD_MASK_DIR)
+ GPDR(gpio) |= GPIO_bit(gpio);
+ else
+ GPDR(gpio) &= ~GPIO_bit(gpio);
+ gafr = GAFR(gpio) & ~(0x3 << (((gpio) & 0xf)*2));
+ GAFR(gpio) = gafr | (fn << (((gpio) & 0xf)*2));
+ local_irq_restore(flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(pxa_gpio_mode);
+
+static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned long flags;
+ u32 mask = 1 << offset;
+ u32 value;
+ struct pxa_gpio_chip *pxa;
+ void __iomem *gpdr;
+
+ pxa = container_of(chip, struct pxa_gpio_chip, chip);
+ gpdr = pxa->regbase + GPDR_OFFSET;
+ local_irq_save(flags);
+ value = __raw_readl(gpdr);
+ value &= ~mask;
+ __raw_writel(value, gpdr);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int pxa_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ unsigned long flags;
+ u32 mask = 1 << offset;
+ u32 tmp;
+ struct pxa_gpio_chip *pxa;
+ void __iomem *gpdr;
+
+ pxa = container_of(chip, struct pxa_gpio_chip, chip);
+ __raw_writel(mask,
+ pxa->regbase + (value ? GPSR_OFFSET : GPCR_OFFSET));
+ gpdr = pxa->regbase + GPDR_OFFSET;
+ local_irq_save(flags);
+ tmp = __raw_readl(gpdr);
+ tmp |= mask;
+ __raw_writel(tmp, gpdr);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/*
+ * Return GPIO level
+ */
+static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ u32 mask = 1 << offset;
+ struct pxa_gpio_chip *pxa;
+
+ pxa = container_of(chip, struct pxa_gpio_chip, chip);
+ return __raw_readl(pxa->regbase + GPLR_OFFSET) & mask;
+}
+
+/*
+ * Set output GPIO level
+ */
+static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ u32 mask = 1 << offset;
+ struct pxa_gpio_chip *pxa;
+
+ pxa = container_of(chip, struct pxa_gpio_chip, chip);
+
+ if (value)
+ __raw_writel(mask, pxa->regbase + GPSR_OFFSET);
+ else
+ __raw_writel(mask, pxa->regbase + GPCR_OFFSET);
+}
+
+static struct pxa_gpio_chip pxa_gpio_chip[] = {
+ [0] = {
+ .regbase = GPIO0_BASE,
+ .chip = {
+ .label = "gpio-0",
+ .direction_input = pxa_gpio_direction_input,
+ .direction_output = pxa_gpio_direction_output,
+ .get = pxa_gpio_get,
+ .set = pxa_gpio_set,
+ .base = 0,
+ .ngpio = 32,
+ },
+ },
+ [1] = {
+ .regbase = GPIO1_BASE,
+ .chip = {
+ .label = "gpio-1",
+ .direction_input = pxa_gpio_direction_input,
+ .direction_output = pxa_gpio_direction_output,
+ .get = pxa_gpio_get,
+ .set = pxa_gpio_set,
+ .base = 32,
+ .ngpio = 32,
+ },
+ },
+ [2] = {
+ .regbase = GPIO2_BASE,
+ .chip = {
+ .label = "gpio-2",
+ .direction_input = pxa_gpio_direction_input,
+ .direction_output = pxa_gpio_direction_output,
+ .get = pxa_gpio_get,
+ .set = pxa_gpio_set,
+ .base = 64,
+ .ngpio = 32, /* 21 for PXA25x */
+ },
+ },
+#if defined(CONFIG_PXA27x) || defined(CONFIG_PXA3xx)
+ [3] = {
+ .regbase = GPIO3_BASE,
+ .chip = {
+ .label = "gpio-3",
+ .direction_input = pxa_gpio_direction_input,
+ .direction_output = pxa_gpio_direction_output,
+ .get = pxa_gpio_get,
+ .set = pxa_gpio_set,
+ .base = 96,
+ .ngpio = 32,
+ },
+ },
+#endif
+};
+
+void __init pxa_init_gpio(int gpio_nr)
+{
+ int i;
+
+ /* add a GPIO chip for each register bank.
+ * the last PXA25x register only contains 21 GPIOs
+ */
+ for (i = 0; i < gpio_nr; i += 32) {
+ if (i+32 > gpio_nr)
+ pxa_gpio_chip[i/32].chip.ngpio = gpio_nr - i;
+ gpiochip_add(&pxa_gpio_chip[i/32].chip);
+ }
+}
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 5a1d5eef10a4..36c6a68beca2 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -311,6 +311,8 @@ void __init pxa_init_irq_gpio(int gpio_nr)
/* Install handler for GPIO>=2 edge detect interrupts */
set_irq_chip(IRQ_GPIO_2_x, &pxa_internal_chip_low);
set_irq_chained_handler(IRQ_GPIO_2_x, pxa_gpio_demux_handler);
+
+ pxa_init_gpio(gpio_nr);
}
void __init pxa_init_irq_set_wake(int (*set_wake)(unsigned int, unsigned int))
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 9b26fa5edad6..f99112d50b41 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -21,6 +21,8 @@
#include <linux/mmc/host.h>
#include <linux/pm.h>
#include <linux/delay.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
#include <asm/setup.h>
#include <asm/memory.h>
@@ -246,6 +248,46 @@ static struct platform_device tosakbd_device = {
.id = -1,
};
+static struct gpio_keys_button tosa_gpio_keys[] = {
+ {
+ .type = EV_PWR,
+ .code = KEY_SUSPEND,
+ .gpio = TOSA_GPIO_ON_KEY,
+ .desc = "On key",
+ .wakeup = 1,
+ .active_low = 1,
+ },
+ {
+ .type = EV_KEY,
+ .code = TOSA_KEY_RECORD,
+ .gpio = TOSA_GPIO_RECORD_BTN,
+ .desc = "Record Button",
+ .wakeup = 1,
+ .active_low = 1,
+ },
+ {
+ .type = EV_KEY,
+ .code = TOSA_KEY_SYNC,
+ .gpio = TOSA_GPIO_SYNC,
+ .desc = "Sync Button",
+ .wakeup = 1,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_keys_platform_data tosa_gpio_keys_platform_data = {
+ .buttons = tosa_gpio_keys,
+ .nbuttons = ARRAY_SIZE(tosa_gpio_keys),
+};
+
+static struct platform_device tosa_gpio_keys_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &tosa_gpio_keys_platform_data,
+ },
+};
+
/*
* Tosa LEDs
*/
@@ -258,6 +300,7 @@ static struct platform_device *devices[] __initdata = {
&tosascoop_device,
&tosascoop_jc_device,
&tosakbd_device,
+ &tosa_gpio_keys_device,
&tosaled_device,
};
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index a454451c97c3..eca558c6bf5d 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -17,7 +17,7 @@
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/serial_8250.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <asm/elf.h>
#include <asm/io.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c0ad7c0fbae0..ec00f26bffa4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -239,7 +239,7 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
* Reserve the bootmem bitmap for this node.
*/
reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
- boot_pages << PAGE_SHIFT);
+ boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
#ifdef CONFIG_BLK_DEV_INITRD
/*
@@ -247,7 +247,7 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
*/
if (node == initrd_node) {
reserve_bootmem_node(pgdat, phys_initrd_start,
- phys_initrd_size);
+ phys_initrd_size, BOOTMEM_DEFAULT);
initrd_start = __phys_to_virt(phys_initrd_start);
initrd_end = initrd_start + phys_initrd_size;
}
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 75952779ce19..303a7ff6bfd2 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -162,7 +162,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
* Free the page table, if there was one.
*/
if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
- pte_free_kernel(pmd_page_vaddr(pmd));
+ pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
}
addr += PGDIR_SIZE;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e5d61ee3d4a1..d41a75ed3dce 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -605,9 +605,11 @@ void __init reserve_node_zero(pg_data_t *pgdat)
* Note that this can only be in node 0.
*/
#ifdef CONFIG_XIP_KERNEL
- reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
+ reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start,
+ BOOTMEM_DEFAULT);
#else
- reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
+ reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext,
+ BOOTMEM_DEFAULT);
#endif
/*
@@ -615,7 +617,7 @@ void __init reserve_node_zero(pg_data_t *pgdat)
* and can only be in node 0.
*/
reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
- PTRS_PER_PGD * sizeof(pgd_t));
+ PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);
/*
* Hmm... This should go elsewhere, but we really really need to
@@ -638,8 +640,10 @@ void __init reserve_node_zero(pg_data_t *pgdat)
/* H1940 and RX3715 need to reserve this for suspend */
if (machine_is_h1940() || machine_is_rx3715()) {
- reserve_bootmem_node(pgdat, 0x30003000, 0x1000);
- reserve_bootmem_node(pgdat, 0x30081000, 0x1000);
+ reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
+ BOOTMEM_DEFAULT);
+ reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
+ BOOTMEM_DEFAULT);
}
#ifdef CONFIG_SA1111
@@ -650,7 +654,8 @@ void __init reserve_node_zero(pg_data_t *pgdat)
res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
if (res_size)
- reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
+ reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
+ BOOTMEM_DEFAULT);
}
/*
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 8cd3a60954f0..63c62fdea521 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -27,9 +27,11 @@ void __init reserve_node_zero(pg_data_t *pgdat)
* Note that this can only be in node 0.
*/
#ifdef CONFIG_XIP_KERNEL
- reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
+ reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start,
+ BOOTMEM_DEFAULT);
#else
- reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
+ reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext,
+ BOOTMEM_DEFAULT);
#endif
/*
@@ -37,7 +39,8 @@ void __init reserve_node_zero(pg_data_t *pgdat)
* some architectures which the DRAM is the exception vector to trap,
* alloc_page breaks with error, although it is not NULL, but "0."
*/
- reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE);
+ reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE,
+ BOOTMEM_DEFAULT);
}
/*
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 50b9aed6000d..500c9610ab30 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -65,14 +65,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
return new_pgd;
no_pte:
- pmd_free(new_pmd);
+ pmd_free(mm, new_pmd);
no_pmd:
free_pages((unsigned long)new_pgd, 2);
no_pgd:
return NULL;
}
-void free_pgd_slow(pgd_t *pgd)
+void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
pmd_t *pmd;
struct page *pte;
@@ -94,8 +94,8 @@ void free_pgd_slow(pgd_t *pgd)
pmd_clear(pmd);
dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
pte_lock_deinit(pte);
- pte_free(pte);
- pmd_free(pmd);
+ pte_free(mm, pte);
+ pmd_free(mm, pmd);
free:
free_pages((unsigned long) pgd, 2);
}
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
index ee40c1a0b83d..7854f19b77cf 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
@@ -207,7 +207,7 @@ void __init omapfb_reserve_sdram(void)
return;
}
if (rg.paddr)
- reserve_bootmem(rg.paddr, rg.size);
+ reserve_bootmem(rg.paddr, rg.size, BOOTMEM_DEFAULT);
reserved += rg.size;
omapfb_config.mem_desc.region[i] = rg;
configured_regions++;
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index c816f29154c9..28e0caf4156c 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -82,6 +82,7 @@ config PLATFORM_AT32AP
select SUBARCH_AVR32B
select MMU
select PERFORMANCE_COUNTERS
+ select HAVE_GPIO_LIB
#
# CPU types
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index 4b4c1884e1c5..e66a07a928cd 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -489,7 +489,8 @@ static void __init setup_bootmem(void)
/* Reserve space for the bootmem bitmap... */
reserve_bootmem_node(NODE_DATA(node),
PFN_PHYS(bootmap_pfn),
- bootmap_size);
+ bootmap_size,
+ BOOTMEM_DEFAULT);
/* ...and any other reserved regions. */
for (res = reserved; res; res = res->sibling) {
@@ -505,7 +506,8 @@ static void __init setup_bootmem(void)
&& res->end < PFN_PHYS(max_pfn))
reserve_bootmem_node(
NODE_DATA(node), res->start,
- res->end - res->start + 1);
+ res->end - res->start + 1,
+ BOOTMEM_DEFAULT);
}
node_set_online(node);
diff --git a/arch/avr32/kernel/syscall_table.S b/arch/avr32/kernel/syscall_table.S
index 75c81f2dd0b3..478bda4c4a09 100644
--- a/arch/avr32/kernel/syscall_table.S
+++ b/arch/avr32/kernel/syscall_table.S
@@ -293,6 +293,6 @@ sys_call_table:
.long sys_shmctl
.long sys_utimensat
.long sys_signalfd
- .long sys_timerfd /* 280 */
+ .long sys_ni_syscall /* 280, was sys_timerfd */
.long sys_eventfd
.long sys_ni_syscall /* r8 is saturated at nr_syscalls */
diff --git a/arch/avr32/lib/delay.c b/arch/avr32/lib/delay.c
index b3bc0b56e2c6..9aa8800830f3 100644
--- a/arch/avr32/lib/delay.c
+++ b/arch/avr32/lib/delay.c
@@ -12,13 +12,15 @@
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/timex.h>
#include <linux/param.h>
#include <linux/types.h>
+#include <linux/init.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
-int read_current_timer(unsigned long *timer_value)
+int __devinit read_current_timer(unsigned long *timer_value)
{
*timer_value = sysreg_read(COUNT);
return 0;
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index d61a02da898c..38a8fa31c0b5 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -24,11 +24,11 @@
#define MAX_NR_PIO_DEVICES 8
struct pio_device {
+ struct gpio_chip chip;
void __iomem *regs;
const struct platform_device *pdev;
struct clk *clk;
u32 pinmux_mask;
- u32 gpio_mask;
char name[8];
};
@@ -64,7 +64,8 @@ void __init at32_select_periph(unsigned int pin, unsigned int periph,
goto fail;
}
- if (unlikely(test_and_set_bit(pin_index, &pio->pinmux_mask))) {
+ if (unlikely(test_and_set_bit(pin_index, &pio->pinmux_mask)
+ || gpiochip_is_requested(&pio->chip, pin_index))) {
printk("%s: pin %u is busy\n", pio->name, pin_index);
goto fail;
}
@@ -79,9 +80,6 @@ void __init at32_select_periph(unsigned int pin, unsigned int periph,
if (!(flags & AT32_GPIOF_PULLUP))
pio_writel(pio, PUDR, mask);
- /* gpio_request NOT allowed */
- set_bit(pin_index, &pio->gpio_mask);
-
return;
fail:
@@ -130,9 +128,6 @@ void __init at32_select_gpio(unsigned int pin, unsigned long flags)
pio_writel(pio, PER, mask);
- /* gpio_request now allowed */
- clear_bit(pin_index, &pio->gpio_mask);
-
return;
fail:
@@ -166,96 +161,50 @@ fail:
/* GPIO API */
-int gpio_request(unsigned int gpio, const char *label)
+static int direction_input(struct gpio_chip *chip, unsigned offset)
{
- struct pio_device *pio;
- unsigned int pin;
-
- pio = gpio_to_pio(gpio);
- if (!pio)
- return -ENODEV;
+ struct pio_device *pio = container_of(chip, struct pio_device, chip);
+ u32 mask = 1 << offset;
- pin = gpio & 0x1f;
- if (test_and_set_bit(pin, &pio->gpio_mask))
- return -EBUSY;
+ if (!(pio_readl(pio, PSR) & mask))
+ return -EINVAL;
+ pio_writel(pio, ODR, mask);
return 0;
}
-EXPORT_SYMBOL(gpio_request);
-void gpio_free(unsigned int gpio)
+static int gpio_get(struct gpio_chip *chip, unsigned offset)
{
- struct pio_device *pio;
- unsigned int pin;
+ struct pio_device *pio = container_of(chip, struct pio_device, chip);
- pio = gpio_to_pio(gpio);
- if (!pio) {
- printk(KERN_ERR
- "gpio: attempted to free invalid pin %u\n", gpio);
- return;
- }
-
- pin = gpio & 0x1f;
- if (!test_and_clear_bit(pin, &pio->gpio_mask))
- printk(KERN_ERR "gpio: freeing free or non-gpio pin %s-%u\n",
- pio->name, pin);
+ return (pio_readl(pio, PDSR) >> offset) & 1;
}
-EXPORT_SYMBOL(gpio_free);
-int gpio_direction_input(unsigned int gpio)
-{
- struct pio_device *pio;
- unsigned int pin;
-
- pio = gpio_to_pio(gpio);
- if (!pio)
- return -ENODEV;
-
- pin = gpio & 0x1f;
- pio_writel(pio, ODR, 1 << pin);
-
- return 0;
-}
-EXPORT_SYMBOL(gpio_direction_input);
+static void gpio_set(struct gpio_chip *chip, unsigned offset, int value);
-int gpio_direction_output(unsigned int gpio, int value)
+static int direction_output(struct gpio_chip *chip, unsigned offset, int value)
{
- struct pio_device *pio;
- unsigned int pin;
-
- pio = gpio_to_pio(gpio);
- if (!pio)
- return -ENODEV;
+ struct pio_device *pio = container_of(chip, struct pio_device, chip);
+ u32 mask = 1 << offset;
- gpio_set_value(gpio, value);
-
- pin = gpio & 0x1f;
- pio_writel(pio, OER, 1 << pin);
+ if (!(pio_readl(pio, PSR) & mask))
+ return -EINVAL;
+ gpio_set(chip, offset, value);
+ pio_writel(pio, OER, mask);
return 0;
}
-EXPORT_SYMBOL(gpio_direction_output);
-int gpio_get_value(unsigned int gpio)
+static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct pio_device *pio = &pio_dev[gpio >> 5];
+ struct pio_device *pio = container_of(chip, struct pio_device, chip);
+ u32 mask = 1 << offset;
- return (pio_readl(pio, PDSR) >> (gpio & 0x1f)) & 1;
-}
-EXPORT_SYMBOL(gpio_get_value);
-
-void gpio_set_value(unsigned int gpio, int value)
-{
- struct pio_device *pio = &pio_dev[gpio >> 5];
- u32 mask;
-
- mask = 1 << (gpio & 0x1f);
if (value)
pio_writel(pio, SODR, mask);
else
pio_writel(pio, CODR, mask);
}
-EXPORT_SYMBOL(gpio_set_value);
/*--------------------------------------------------------------------------*/
@@ -339,6 +288,63 @@ gpio_irq_setup(struct pio_device *pio, int irq, int gpio_irq)
/*--------------------------------------------------------------------------*/
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+
+/*
+ * This shows more info than the generic gpio dump code:
+ * pullups, deglitching, open drain drive.
+ */
+static void pio_bank_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ struct pio_device *pio = container_of(chip, struct pio_device, chip);
+ u32 psr, osr, imr, pdsr, pusr, ifsr, mdsr;
+ unsigned i;
+ u32 mask;
+ char bank;
+
+ psr = pio_readl(pio, PSR);
+ osr = pio_readl(pio, OSR);
+ imr = pio_readl(pio, IMR);
+ pdsr = pio_readl(pio, PDSR);
+ pusr = pio_readl(pio, PUSR);
+ ifsr = pio_readl(pio, IFSR);
+ mdsr = pio_readl(pio, MDSR);
+
+ bank = 'A' + pio->pdev->id;
+
+ for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
+ const char *label;
+
+ label = gpiochip_is_requested(chip, i);
+ if (!label)
+ continue;
+
+ seq_printf(s, " gpio-%-3d P%c%-2d (%-12s) %s %s %s",
+ chip->base + i, bank, i,
+ label,
+ (osr & mask) ? "out" : "in ",
+ (mask & pdsr) ? "hi" : "lo",
+ (mask & pusr) ? " " : "up");
+ if (ifsr & mask)
+ seq_printf(s, " deglitch");
+ if ((osr & mdsr) & mask)
+ seq_printf(s, " open-drain");
+ if (imr & mask)
+ seq_printf(s, " irq-%d edge-both",
+ gpio_to_irq(chip->base + i));
+ seq_printf(s, "\n");
+ }
+}
+
+#else
+#define pio_bank_show NULL
+#endif
+
+
+/*--------------------------------------------------------------------------*/
+
static int __init pio_probe(struct platform_device *pdev)
{
struct pio_device *pio = NULL;
@@ -349,6 +355,18 @@ static int __init pio_probe(struct platform_device *pdev)
pio = &pio_dev[pdev->id];
BUG_ON(!pio->regs);
+ pio->chip.label = pio->name;
+ pio->chip.base = pdev->id * 32;
+ pio->chip.ngpio = 32;
+
+ pio->chip.direction_input = direction_input;
+ pio->chip.get = gpio_get;
+ pio->chip.direction_output = direction_output;
+ pio->chip.set = gpio_set;
+ pio->chip.dbg_show = pio_bank_show;
+
+ gpiochip_add(&pio->chip);
+
gpio_irq_setup(pio, irq, gpio_irq_base);
platform_set_drvdata(pdev, pio);
@@ -406,12 +424,6 @@ void __init at32_init_pio(struct platform_device *pdev)
pio->pdev = pdev;
pio->regs = ioremap(regs->start, regs->end - regs->start + 1);
- /*
- * request_gpio() is only valid for pins that have been
- * explicitly configured as GPIO and not previously requested
- */
- pio->gpio_mask = ~0UL;
-
/* start with irqs disabled and acked */
pio_writel(pio, IDR, ~0UL);
(void) pio_readl(pio, ISR);
diff --git a/arch/avr32/mach-at32ap/pio.h b/arch/avr32/mach-at32ap/pio.h
index 50fa3aca32c5..7795116a483a 100644
--- a/arch/avr32/mach-at32ap/pio.h
+++ b/arch/avr32/mach-at32ap/pio.h
@@ -19,7 +19,7 @@
#define PIO_OSR 0x0018
#define PIO_IFER 0x0020
#define PIO_IFDR 0x0024
-#define PIO_ISFR 0x0028
+#define PIO_IFSR 0x0028
#define PIO_SODR 0x0030
#define PIO_CODR 0x0034
#define PIO_ODSR 0x0038
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 462cae893757..6e106b3d7729 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -406,7 +406,7 @@ void __init setup_arch(char **cmdline_p)
*/
free_bootmem(memory_start, memory_end - memory_start);
- reserve_bootmem(memory_start, bootmap_size);
+ reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
/*
* get kmalloc into gear
*/
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index f8c411a24af7..1795aab79064 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -37,7 +37,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/sl811.h>
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index a72c7a620fa1..97378b0a9753 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -38,7 +38,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index 21df2f375497..886f260d9359 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -34,7 +34,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/usb/isp1362.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index c37dd45c8803..4026c2f3ab4e 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -35,7 +35,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/usb/isp1362.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index ac52b040b336..0185350feacc 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -38,7 +38,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537.c b/arch/blackfin/mach-bf537/boards/cm_bf537.c
index 8703b67d5ec6..f7c1f964f13b 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537.c
@@ -36,7 +36,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/usb/isp1362.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf537/boards/generic_board.c b/arch/blackfin/mach-bf537/boards/generic_board.c
index 3e52f3f5bd58..8a3397db1d21 100644
--- a/arch/blackfin/mach-bf537/boards/generic_board.c
+++ b/arch/blackfin/mach-bf537/boards/generic_board.c
@@ -38,7 +38,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/sl811.h>
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index b8bbba85af53..d71e0be33921 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -10,7 +10,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb_isp1362.h>
#endif
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb_sl811.h>
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 772541548b76..119e6ea83384 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -38,7 +38,7 @@
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/usb/sl811.h>
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 3a79a9061bdc..bf9e738a7c64 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -34,7 +34,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/usb/isp1362.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 7601c3be1b5c..ed863ce9a2d8 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -35,7 +35,7 @@
#include <linux/spi/spi.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 56ff51bc8c21..fdd9bf43361e 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1373,7 +1373,7 @@ ENTRY(_sys_call_table)
.long _sys_epoll_pwait
.long _sys_utimensat
.long _sys_signalfd
- .long _sys_timerfd
+ .long _sys_ni_syscall
.long _sys_eventfd /* 350 */
.long _sys_pread64
.long _sys_pwrite64
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 7f0be4cd5e9a..27b082ac7f11 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -150,6 +150,7 @@ config ETRAX_FLASH_BUSWIDTH
Width in bytes of the Flash bus (1, 2 or 4). Is usually 2.
source arch/cris/arch-v10/Kconfig
+source arch/cris/arch-v32/Kconfig
endmenu
@@ -157,8 +158,8 @@ source "net/Kconfig"
# bring in ETRAX built-in drivers
menu "Drivers for built-in interfaces"
-# arch/cris/arch is a symlink to correct arch (arch-v10 or arch-v32)
-source arch/cris/arch/drivers/Kconfig
+source arch/cris/arch-v10/drivers/Kconfig
+source arch/cris/arch-v32/drivers/Kconfig
endmenu
diff --git a/arch/cris/arch-v10/Kconfig b/arch/cris/arch-v10/Kconfig
index f1ce6f64401d..1d61faec77cd 100644
--- a/arch/cris/arch-v10/Kconfig
+++ b/arch/cris/arch-v10/Kconfig
@@ -1,3 +1,5 @@
+if ETRAX_ARCH_V10
+
# ETRAX 100LX v1 has a MMU "feature" requiring a low mapping
config CRIS_LOW_MAP
bool
@@ -451,3 +453,5 @@ config ETRAX_POWERBUTTON_BIT
default "25"
help
Configure where power button is connected.
+
+endif
diff --git a/arch/cris/arch-v10/drivers/Kconfig b/arch/cris/arch-v10/drivers/Kconfig
index e3c0f2928149..96740ef497d4 100644
--- a/arch/cris/arch-v10/drivers/Kconfig
+++ b/arch/cris/arch-v10/drivers/Kconfig
@@ -1,3 +1,5 @@
+if ETRAX_ARCH_V10
+
config ETRAX_ETHERNET
bool "Ethernet support"
depends on ETRAX_ARCH_V10
@@ -806,3 +808,5 @@ config ETRAX_DS1302_TRICKLE_CHARGE
1 = 2kohm, 2 = 4kohm, 3 = 4kohm
4 = 1 diode, 8 = 2 diodes
Allowed values are (increasing current): 0, 11, 10, 9, 7, 6, 5
+
+endif
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index ec62c951fa3c..d1361dc119e2 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -1167,7 +1167,7 @@ sys_call_table:
.long sys_epoll_pwait
.long sys_utimensat /* 320 */
.long sys_signalfd
- .long sys_timerfd
+ .long sys_ni_syscall
.long sys_eventfd
.long sys_fallocate
diff --git a/arch/cris/arch-v32/Kconfig b/arch/cris/arch-v32/Kconfig
index 4f79d8ed3e1c..d8acaa920e1c 100644
--- a/arch/cris/arch-v32/Kconfig
+++ b/arch/cris/arch-v32/Kconfig
@@ -1,3 +1,5 @@
+if ETRAX_ARCH_V32
+
config ETRAX_DRAM_VIRTUAL_BASE
hex
depends on ETRAX_ARCH_V32
@@ -294,3 +296,5 @@ config ETRAX_DEF_GIO_PE_OUT
help
Configures the initial data for the general port E bits. Most
products should use 00000 here.
+
+endif
diff --git a/arch/cris/arch-v32/drivers/Kconfig b/arch/cris/arch-v32/drivers/Kconfig
index 9bccb5e2a960..c329cce2a0c3 100644
--- a/arch/cris/arch-v32/drivers/Kconfig
+++ b/arch/cris/arch-v32/drivers/Kconfig
@@ -1,3 +1,5 @@
+if ETRAX_ARCH_V32
+
config ETRAX_ETHERNET
bool "Ethernet support"
depends on ETRAX_ARCH_V32
@@ -610,3 +612,5 @@ config ETRAX_STREAMCOPROC
help
This option enables a driver for the stream co-processor
for cryptographic operations.
+
+endif
diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c
index 66f9500fbc02..e0364654fc44 100644
--- a/arch/cris/arch-v32/drivers/pci/dma.c
+++ b/arch/cris/arch-v32/drivers/pci/dma.c
@@ -93,7 +93,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
if (!dev->dma_mem)
- goto out;
+ goto iounmap_out;
dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
if (!dev->dma_mem->bitmap)
goto free1_out;
@@ -110,6 +110,8 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
free1_out:
kfree(dev->dma_mem);
+ iounmap_out:
+ iounmap(mem_base);
out:
return 0;
}
diff --git a/arch/cris/kernel/setup.c b/arch/cris/kernel/setup.c
index 65466c49d7a9..4da042e100a0 100644
--- a/arch/cris/kernel/setup.c
+++ b/arch/cris/kernel/setup.c
@@ -137,7 +137,7 @@ setup_arch(char **cmdline_p)
* Arguments are start, size
*/
- reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size);
+ reserve_bootmem(PFN_PHYS(start_pfn), bootmap_size, BOOTMEM_DEFAULT);
/* paging_init() sets up the MMU and marks all pages as reserved */
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index bf0468cbe713..96f7d70f4473 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -138,6 +138,15 @@ config UCPAGE_OFFSET_C0000000
endchoice
+config PAGE_OFFSET
+ hex
+ default 0x20000000 if UCPAGE_OFFSET_20000000
+ default 0x40000000 if UCPAGE_OFFSET_40000000
+ default 0x60000000 if UCPAGE_OFFSET_60000000
+ default 0x80000000 if UCPAGE_OFFSET_80000000
+ default 0xA0000000 if UCPAGE_OFFSET_A0000000
+ default 0xC0000000
+
config PROTECT_KERNEL
bool "Protect core kernel against userspace"
depends on !MMU
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 99046b1f51c8..ca6a345b87e4 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -1494,7 +1494,7 @@ sys_call_table:
.long sys_epoll_pwait
.long sys_utimensat /* 320 */
.long sys_signalfd
- .long sys_timerfd
+ .long sys_ni_syscall
.long sys_eventfd
.long sys_fallocate
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index a74c08786b21..6c01464db699 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -708,7 +708,7 @@ static void __init reserve_dma_coherent(void)
/*
* calibrate the delay loop
*/
-void __init calibrate_delay(void)
+void __cpuinit calibrate_delay(void)
{
loops_per_jiffy = __delay_loops_MHz * (1000000 / HZ);
@@ -925,13 +925,15 @@ static void __init setup_linux_memory(void)
#endif
/* take back the memory occupied by the kernel image and the bootmem alloc map */
- reserve_bootmem(kstart, kend - kstart + bootmap_size);
+ reserve_bootmem(kstart, kend - kstart + bootmap_size,
+ BOOTMEM_DEFAULT);
/* reserve the memory occupied by the initial ramdisk */
#ifdef CONFIG_BLK_DEV_INITRD
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (low_top_pfn << PAGE_SHIFT)) {
- reserve_bootmem(INITRD_START, INITRD_SIZE);
+ reserve_bootmem(INITRD_START, INITRD_SIZE,
+ BOOTMEM_DEFAULT);
initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start + INITRD_SIZE;
}
@@ -986,9 +988,10 @@ static void __init setup_uclinux_memory(void)
/* now take back the bits the core kernel is occupying */
#ifndef CONFIG_PROTECT_KERNEL
- reserve_bootmem(kend, bootmap_size);
+ reserve_bootmem(kend, bootmap_size, BOOTMEM_DEFAULT);
reserve_bootmem((unsigned long) &__kernel_image_start,
- kend - (unsigned long) &__kernel_image_start);
+ kend - (unsigned long) &__kernel_image_start,
+ BOOTMEM_DEFAULT);
#else
dampr = __get_DAMPR(0);
@@ -996,14 +999,15 @@ static void __init setup_uclinux_memory(void)
dampr = (dampr >> 4) + 17;
dampr = 1 << dampr;
- reserve_bootmem(__get_DAMPR(0) & xAMPRx_PPFN, dampr);
+ reserve_bootmem(__get_DAMPR(0) & xAMPRx_PPFN, dampr, BOOTMEM_DEFAULT);
#endif
/* reserve some memory to do uncached DMA through if requested */
#ifdef CONFIG_RESERVE_DMA_COHERENT
if (dma_coherent_mem_start)
reserve_bootmem(dma_coherent_mem_start,
- dma_coherent_mem_end - dma_coherent_mem_start);
+ dma_coherent_mem_end - dma_coherent_mem_start,
+ BOOTMEM_DEFAULT);
#endif
} /* end setup_uclinux_memory() */
diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S
index f42b328b1dd0..ef7527b8b0c7 100644
--- a/arch/frv/kernel/vmlinux.lds.S
+++ b/arch/frv/kernel/vmlinux.lds.S
@@ -13,7 +13,7 @@ ENTRY(_start)
jiffies = jiffies_64 + 4;
-__page_offset = 0xc0000000; /* start of area covered by struct pages */
+__page_offset = CONFIG_PAGE_OFFSET; /* start of area covered by struct pages */
__kernel_image_start = __page_offset; /* address at which kernel image resides */
SECTIONS
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
index 1530a4111e6d..81757d55a5b5 100644
--- a/arch/frv/mm/mmu-context.c
+++ b/arch/frv/mm/mmu-context.c
@@ -181,7 +181,7 @@ int cxn_pin_by_pid(pid_t pid)
/* get a handle on the mm_struct */
read_lock(&tasklist_lock);
- tsk = find_task_by_pid(pid);
+ tsk = find_task_by_vpid(pid);
if (tsk) {
ret = -EINVAL;
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 7787c3cc52c6..1a2e5c8d03a9 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -140,7 +140,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
return pgd;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
/* in the non-PAE case, clear_page_tables() clears user pgd entries */
quicklist_free(0, pgd_dtor, pgd);
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index 8dec4dd57b4e..5a1b4cfea05b 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -14,6 +14,7 @@
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/traps.h>
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index b2e86d0255e6..cd3734614d9d 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -173,7 +173,7 @@ void __init setup_arch(char **cmdline_p)
* the bootmem bitmap so we then reserve it after freeing it :-)
*/
free_bootmem(memory_start, memory_end - memory_start);
- reserve_bootmem(memory_start, bootmap_size);
+ reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
/*
* get kmalloc into gear
*/
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 45bf04eb7d70..a94445422cc6 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1265,7 +1265,7 @@ sba_fill_pdir(
* the sglist do both.
*/
static SBA_INLINE int
-sba_coalesce_chunks( struct ioc *ioc,
+sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
struct scatterlist *startsg,
int nents)
{
@@ -1275,6 +1275,7 @@ sba_coalesce_chunks( struct ioc *ioc,
struct scatterlist *dma_sg; /* next DMA stream head */
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
int n_mappings = 0;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
while (nents > 0) {
unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1314,6 +1315,9 @@ sba_coalesce_chunks( struct ioc *ioc,
> DMA_CHUNK_SIZE)
break;
+ if (dma_len + startsg->length > max_seg_size)
+ break;
+
/*
** Then look for virtually contiguous blocks.
**
@@ -1441,7 +1445,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two pass algorithm.
*/
- coalesced = sba_coalesce_chunks(ioc, sglist, nents);
+ coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
/*
** Program the I/O Pdir
@@ -1871,7 +1875,7 @@ ioc_show(struct seq_file *s, void *v)
return 0;
}
-static struct seq_operations ioc_seq_ops = {
+static const struct seq_operations ioc_seq_ops = {
.start = ioc_start,
.next = ioc_next,
.stop = ioc_stop,
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
index d1d50cd1c38a..896b1ebbfb26 100644
--- a/arch/ia64/ia32/ia32_support.c
+++ b/arch/ia64/ia32/ia32_support.c
@@ -27,7 +27,7 @@
#include "ia32priv.h"
-extern void die_if_kernel (char *str, struct pt_regs *regs, long err);
+extern int die_if_kernel (char *str, struct pt_regs *regs, long err);
struct exec_domain ia32_exec_domain;
struct page *ia32_shared_page[NR_CPUS];
@@ -217,7 +217,8 @@ ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
{
siginfo_t siginfo;
- die_if_kernel("Bad IA-32 interrupt", regs, int_num);
+ if (die_if_kernel("Bad IA-32 interrupt", regs, int_num))
+ return;
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? */
diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c
index 5a216c019924..cbe6cee5a550 100644
--- a/arch/ia64/kernel/acpi-processor.c
+++ b/arch/ia64/kernel/acpi-processor.c
@@ -45,6 +45,12 @@ static void init_intel_pdc(struct acpi_processor *pr)
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
+ /*
+ * The default of PDC_SMP_T_SWCOORD bit is set for IA64 cpu so
+ * that OSPM is capable of native ACPI throttling software
+ * coordination using BIOS supplied _TSD info.
+ */
+ buf[2] |= ACPI_PDC_SMP_T_SWCOORD;
obj->type = ACPI_TYPE_BUFFER;
obj->buffer.length = 12;
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 00b5d08f6da8..78f28d825f30 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -69,6 +69,20 @@ unsigned int acpi_cpei_phys_cpuid;
unsigned long acpi_wakeup_address = 0;
+#ifdef CONFIG_IA64_GENERIC
+static unsigned long __init acpi_find_rsdp(void)
+{
+ unsigned long rsdp_phys = 0;
+
+ if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
+ rsdp_phys = efi.acpi20;
+ else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
+ printk(KERN_WARNING PREFIX
+ "v1.0/r0.71 tables no longer supported\n");
+ return rsdp_phys;
+}
+#endif
+
const char __init *
acpi_get_sysname(void)
{
@@ -152,7 +166,7 @@ int acpi_request_vector(u32 int_type)
return vector;
}
-char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
+char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
{
return __va(phys_addr);
}
@@ -601,8 +615,6 @@ int acpi_register_gsi(u32 gsi, int triggering, int polarity)
IOSAPIC_LEVEL);
}
-EXPORT_SYMBOL(acpi_register_gsi);
-
void acpi_unregister_gsi(u32 gsi)
{
if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
@@ -611,8 +623,6 @@ void acpi_unregister_gsi(u32 gsi)
iosapic_unregister_intr(gsi);
}
-EXPORT_SYMBOL(acpi_unregister_gsi);
-
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
struct acpi_table_header *fadt_header;
@@ -631,18 +641,6 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
return 0;
}
-unsigned long __init acpi_find_rsdp(void)
-{
- unsigned long rsdp_phys = 0;
-
- if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
- rsdp_phys = efi.acpi20;
- else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
- printk(KERN_WARNING PREFIX
- "v1.0/r0.71 tables no longer supported\n");
- return rsdp_phys;
-}
-
int __init acpi_boot_init(void)
{
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 242d79341120..919070a9aed7 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -1,7 +1,8 @@
/*
* Extensible Firmware Interface
*
- * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
+ * Based on Extensible Firmware Interface Specification version 0.9
+ * April 30, 1999
*
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
@@ -48,145 +49,157 @@ static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
#define efi_call_virt(f, args...) (*(f))(args)
-#define STUB_GET_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_time_cap_t *atc = NULL; \
- efi_status_t ret; \
- \
- if (tc) \
- atc = adjust_arg(tc); \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
+#define STUB_GET_TIME(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
+{ \
+ struct ia64_fpreg fr[6]; \
+ efi_time_cap_t *atc = NULL; \
+ efi_status_t ret; \
+ \
+ if (tc) \
+ atc = adjust_arg(tc); \
+ ia64_save_scratch_fpregs(fr); \
+ ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), \
+ adjust_arg(tm), atc); \
+ ia64_load_scratch_fpregs(fr); \
+ return ret; \
}
-#define STUB_SET_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_time (efi_time_t *tm) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
+#define STUB_SET_TIME(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_set_time (efi_time_t *tm) \
+{ \
+ struct ia64_fpreg fr[6]; \
+ efi_status_t ret; \
+ \
+ ia64_save_scratch_fpregs(fr); \
+ ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), \
+ adjust_arg(tm)); \
+ ia64_load_scratch_fpregs(fr); \
+ return ret; \
}
-#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
- adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
+#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, \
+ efi_time_t *tm) \
+{ \
+ struct ia64_fpreg fr[6]; \
+ efi_status_t ret; \
+ \
+ ia64_save_scratch_fpregs(fr); \
+ ret = efi_call_##prefix( \
+ (efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
+ adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
+ ia64_load_scratch_fpregs(fr); \
+ return ret; \
}
-#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_time_t *atm = NULL; \
- efi_status_t ret; \
- \
- if (tm) \
- atm = adjust_arg(tm); \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
- enabled, atm); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
+#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
+{ \
+ struct ia64_fpreg fr[6]; \
+ efi_time_t *atm = NULL; \
+ efi_status_t ret; \
+ \
+ if (tm) \
+ atm = adjust_arg(tm); \
+ ia64_save_scratch_fpregs(fr); \
+ ret = efi_call_##prefix( \
+ (efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
+ enabled, atm); \
+ ia64_load_scratch_fpregs(fr); \
+ return ret; \
}
-#define STUB_GET_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
- unsigned long *data_size, void *data) \
-{ \
- struct ia64_fpreg fr[6]; \
- u32 *aattr = NULL; \
- efi_status_t ret; \
- \
- if (attr) \
- aattr = adjust_arg(attr); \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
- adjust_arg(name), adjust_arg(vendor), aattr, \
- adjust_arg(data_size), adjust_arg(data)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
+#define STUB_GET_VARIABLE(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
+ unsigned long *data_size, void *data) \
+{ \
+ struct ia64_fpreg fr[6]; \
+ u32 *aattr = NULL; \
+ efi_status_t ret; \
+ \
+ if (attr) \
+ aattr = adjust_arg(attr); \
+ ia64_save_scratch_fpregs(fr); \
+ ret = efi_call_##prefix( \
+ (efi_get_variable_t *) __va(runtime->get_variable), \
+ adjust_arg(name), adjust_arg(vendor), aattr, \
+ adjust_arg(data_size), adjust_arg(data)); \
+ ia64_load_scratch_fpregs(fr); \
+ return ret; \
}
-#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
- adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
+#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, \
+ efi_guid_t *vendor) \
+{ \
+ struct ia64_fpreg fr[6]; \
+ efi_status_t ret; \
+ \
+ ia64_save_scratch_fpregs(fr); \
+ ret = efi_call_##prefix( \
+ (efi_get_next_variable_t *) __va(runtime->get_next_variable), \
+ adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
+ ia64_load_scratch_fpregs(fr); \
+ return ret; \
}
-#define STUB_SET_VARIABLE(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \
- unsigned long data_size, void *data) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
- adjust_arg(name), adjust_arg(vendor), attr, data_size, \
- adjust_arg(data)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
+#define STUB_SET_VARIABLE(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, \
+ unsigned long attr, unsigned long data_size, \
+ void *data) \
+{ \
+ struct ia64_fpreg fr[6]; \
+ efi_status_t ret; \
+ \
+ ia64_save_scratch_fpregs(fr); \
+ ret = efi_call_##prefix( \
+ (efi_set_variable_t *) __va(runtime->set_variable), \
+ adjust_arg(name), adjust_arg(vendor), attr, data_size, \
+ adjust_arg(data)); \
+ ia64_load_scratch_fpregs(fr); \
+ return ret; \
}
-#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
-static efi_status_t \
-prefix##_get_next_high_mono_count (u32 *count) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_status_t ret; \
- \
- ia64_save_scratch_fpregs(fr); \
- ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
- __va(runtime->get_next_high_mono_count), adjust_arg(count)); \
- ia64_load_scratch_fpregs(fr); \
- return ret; \
+#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
+static efi_status_t \
+prefix##_get_next_high_mono_count (u32 *count) \
+{ \
+ struct ia64_fpreg fr[6]; \
+ efi_status_t ret; \
+ \
+ ia64_save_scratch_fpregs(fr); \
+ ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
+ __va(runtime->get_next_high_mono_count), \
+ adjust_arg(count)); \
+ ia64_load_scratch_fpregs(fr); \
+ return ret; \
}
-#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
-static void \
-prefix##_reset_system (int reset_type, efi_status_t status, \
- unsigned long data_size, efi_char16_t *data) \
-{ \
- struct ia64_fpreg fr[6]; \
- efi_char16_t *adata = NULL; \
- \
- if (data) \
- adata = adjust_arg(data); \
- \
- ia64_save_scratch_fpregs(fr); \
- efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
- reset_type, status, data_size, adata); \
- /* should not return, but just in case... */ \
- ia64_load_scratch_fpregs(fr); \
+#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
+static void \
+prefix##_reset_system (int reset_type, efi_status_t status, \
+ unsigned long data_size, efi_char16_t *data) \
+{ \
+ struct ia64_fpreg fr[6]; \
+ efi_char16_t *adata = NULL; \
+ \
+ if (data) \
+ adata = adjust_arg(data); \
+ \
+ ia64_save_scratch_fpregs(fr); \
+ efi_call_##prefix( \
+ (efi_reset_system_t *) __va(runtime->reset_system), \
+ reset_type, status, data_size, adata); \
+ /* should not return, but just in case... */ \
+ ia64_load_scratch_fpregs(fr); \
}
#define phys_ptr(arg) ((__typeof__(arg)) ia64_tpa(arg))
@@ -223,7 +236,8 @@ efi_gettimeofday (struct timespec *ts)
return;
}
- ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
+ ts->tv_sec = mktime(tm.year, tm.month, tm.day,
+ tm.hour, tm.minute, tm.second);
ts->tv_nsec = tm.nanosecond;
}
@@ -297,8 +311,8 @@ walk (efi_freemem_callback_t callback, void *arg, u64 attr)
}
/*
- * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
- * has memory that is available for OS use.
+ * Walk the EFI memory map and call CALLBACK once for each EFI memory
+ * descriptor that has memory that is available for OS use.
*/
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
@@ -307,8 +321,8 @@ efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
}
/*
- * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
- * has memory that is available for uncached allocator.
+ * Walk the EFI memory map and call CALLBACK once for each EFI memory
+ * descriptor that has memory that is available for uncached allocator.
*/
void
efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
@@ -317,11 +331,10 @@ efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
}
/*
- * Look for the PAL_CODE region reported by EFI and maps it using an
+ * Look for the PAL_CODE region reported by EFI and map it using an
* ITR to enable safe PAL calls in virtual mode. See IA-64 Processor
* Abstraction Layer chapter 11 in ADAG
*/
-
void *
efi_get_pal_addr (void)
{
@@ -341,45 +354,47 @@ efi_get_pal_addr (void)
continue;
if (++pal_code_count > 1) {
- printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
- md->phys_addr);
+ printk(KERN_ERR "Too many EFI Pal Code memory ranges, "
+ "dropped @ %lx\n", md->phys_addr);
continue;
}
/*
- * The only ITLB entry in region 7 that is used is the one installed by
- * __start(). That entry covers a 64MB range.
+ * The only ITLB entry in region 7 that is used is the one
+ * installed by __start(). That entry covers a 64MB range.
*/
mask = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
vaddr = PAGE_OFFSET + md->phys_addr;
/*
- * We must check that the PAL mapping won't overlap with the kernel
- * mapping.
+ * We must check that the PAL mapping won't overlap with the
+ * kernel mapping.
*
- * PAL code is guaranteed to be aligned on a power of 2 between 4k and
- * 256KB and that only one ITR is needed to map it. This implies that the
- * PAL code is always aligned on its size, i.e., the closest matching page
- * size supported by the TLB. Therefore PAL code is guaranteed never to
- * cross a 64MB unless it is bigger than 64MB (very unlikely!). So for
- * now the following test is enough to determine whether or not we need a
- * dedicated ITR for the PAL code.
+ * PAL code is guaranteed to be aligned on a power of 2 between
+ * 4k and 256KB and that only one ITR is needed to map it. This
+ * implies that the PAL code is always aligned on its size,
+ * i.e., the closest matching page size supported by the TLB.
+ * Therefore PAL code is guaranteed never to cross a 64MB unless
+ * it is bigger than 64MB (very unlikely!). So for now the
+ * following test is enough to determine whether or not we need
+ * a dedicated ITR for the PAL code.
*/
if ((vaddr & mask) == (KERNEL_START & mask)) {
- printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
- __FUNCTION__);
+ printk(KERN_INFO "%s: no need to install ITR for "
+ "PAL code\n", __FUNCTION__);
continue;
}
if (efi_md_size(md) > IA64_GRANULE_SIZE)
- panic("Woah! PAL code size bigger than a granule!");
+ panic("Whoa! PAL code size bigger than a granule!");
#if EFI_DEBUG
mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
- printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
- smp_processor_id(), md->phys_addr,
- md->phys_addr + efi_md_size(md),
- vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
+ printk(KERN_INFO "CPU %d: mapping PAL code "
+ "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
+ smp_processor_id(), md->phys_addr,
+ md->phys_addr + efi_md_size(md),
+ vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
return __va(md->phys_addr);
}
@@ -401,11 +416,11 @@ efi_map_pal_code (void)
* Cannot write to CRx with PSR.ic=1
*/
psr = ia64_clear_ic();
- ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
+ ia64_itr(0x1, IA64_TR_PALCODE,
+ GRANULEROUNDDOWN((unsigned long) pal_vaddr),
pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
IA64_GRANULE_SHIFT);
ia64_set_psr(psr); /* restore psr */
- ia64_srlz_i();
}
void __init
@@ -418,7 +433,10 @@ efi_init (void)
char *cp, vendor[100] = "unknown";
int i;
- /* it's too early to be able to use the standard kernel command line support... */
+ /*
+ * It's too early to be able to use the standard kernel command line
+ * support...
+ */
for (cp = boot_command_line; *cp; ) {
if (memcmp(cp, "mem=", 4) == 0) {
mem_limit = memparse(cp + 4, &cp);
@@ -434,9 +452,11 @@ efi_init (void)
}
}
if (min_addr != 0UL)
- printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
+ printk(KERN_INFO "Ignoring memory below %luMB\n",
+ min_addr >> 20);
if (max_addr != ~0UL)
- printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);
+ printk(KERN_INFO "Ignoring memory above %luMB\n",
+ max_addr >> 20);
efi.systab = __va(ia64_boot_param->efi_systab);
@@ -444,9 +464,9 @@ efi_init (void)
* Verify the EFI Table
*/
if (efi.systab == NULL)
- panic("Woah! Can't find EFI system table.\n");
+ panic("Whoa! Can't find EFI system table.\n");
if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
- panic("Woah! EFI system table signature incorrect\n");
+ panic("Whoa! EFI system table signature incorrect\n");
if ((efi.systab->hdr.revision >> 16) == 0)
printk(KERN_WARNING "Warning: EFI system table version "
"%d.%02d, expected 1.00 or greater\n",
@@ -464,7 +484,8 @@ efi_init (void)
}
printk(KERN_INFO "EFI v%u.%.02u by %s:",
- efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff, vendor);
efi.mps = EFI_INVALID_TABLE_ADDR;
efi.acpi = EFI_INVALID_TABLE_ADDR;
@@ -519,9 +540,12 @@ efi_init (void)
efi_memory_desc_t *md;
void *p;
- for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
+ for (i = 0, p = efi_map_start; p < efi_map_end;
+ ++i, p += efi_desc_size)
+ {
md = p;
- printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
+ printk("mem%02u: type=%u, attr=0x%lx, "
+ "range=[0x%016lx-0x%016lx) (%luMB)\n",
i, md->type, md->attribute, md->phys_addr,
md->phys_addr + efi_md_size(md),
md->num_pages >> (20 - EFI_PAGE_SHIFT));
@@ -549,8 +573,8 @@ efi_enter_virtual_mode (void)
md = p;
if (md->attribute & EFI_MEMORY_RUNTIME) {
/*
- * Some descriptors have multiple bits set, so the order of
- * the tests is relevant.
+ * Some descriptors have multiple bits set, so the
+ * order of the tests is relevant.
*/
if (md->attribute & EFI_MEMORY_WB) {
md->virt_addr = (u64) __va(md->phys_addr);
@@ -558,21 +582,26 @@ efi_enter_virtual_mode (void)
md->virt_addr = (u64) ioremap(md->phys_addr, 0);
} else if (md->attribute & EFI_MEMORY_WC) {
#if 0
- md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
- | _PAGE_D
- | _PAGE_MA_WC
- | _PAGE_PL_0
- | _PAGE_AR_RW));
+ md->virt_addr = ia64_remap(md->phys_addr,
+ (_PAGE_A |
+ _PAGE_P |
+ _PAGE_D |
+ _PAGE_MA_WC |
+ _PAGE_PL_0 |
+ _PAGE_AR_RW));
#else
printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
} else if (md->attribute & EFI_MEMORY_WT) {
#if 0
- md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
- | _PAGE_D | _PAGE_MA_WT
- | _PAGE_PL_0
- | _PAGE_AR_RW));
+ md->virt_addr = ia64_remap(md->phys_addr,
+ (_PAGE_A |
+ _PAGE_P |
+ _PAGE_D |
+ _PAGE_MA_WT |
+ _PAGE_PL_0 |
+ _PAGE_AR_RW));
#else
printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
md->virt_addr = (u64) ioremap(md->phys_addr, 0);
@@ -583,16 +612,18 @@ efi_enter_virtual_mode (void)
status = efi_call_phys(__va(runtime->set_virtual_address_map),
ia64_boot_param->efi_memmap_size,
- efi_desc_size, ia64_boot_param->efi_memdesc_version,
+ efi_desc_size,
+ ia64_boot_param->efi_memdesc_version,
ia64_boot_param->efi_memmap);
if (status != EFI_SUCCESS) {
- printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
- "(status=%lu)\n", status);
+ printk(KERN_WARNING "warning: unable to switch EFI into "
+ "virtual mode (status=%lu)\n", status);
return;
}
/*
- * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
+ * Now that EFI is in virtual mode, we call the EFI functions more
+ * efficiently:
*/
efi.get_time = virt_get_time;
efi.set_time = virt_set_time;
@@ -606,8 +637,8 @@ efi_enter_virtual_mode (void)
}
/*
- * Walk the EFI memory map looking for the I/O port range. There can only be one entry of
- * this type, other I/O port ranges should be described via ACPI.
+ * Walk the EFI memory map looking for the I/O port range. There can only be
+ * one entry of this type, other I/O port ranges should be described via ACPI.
*/
u64
efi_get_iobase (void)
@@ -678,7 +709,6 @@ efi_memmap_intersects (unsigned long phys_addr, unsigned long size)
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
-
if (md->phys_addr < end && efi_md_end(md) > phys_addr)
return 1;
}
@@ -731,7 +761,7 @@ efi_mem_attribute (unsigned long phys_addr, unsigned long size)
if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
return 0;
} while (md);
- return 0;
+ return 0; /* never reached */
}
u64
@@ -767,7 +797,7 @@ kern_mem_attribute (unsigned long phys_addr, unsigned long size)
if (!md || md->attribute != attr)
return 0;
} while (md);
- return 0;
+ return 0; /* never reached */
}
EXPORT_SYMBOL(kern_mem_attribute);
@@ -883,7 +913,7 @@ efi_uart_console_only(void)
return 1;
uart = 0;
}
- hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
+ hdr = (struct efi_generic_dev_path *)((u8 *) hdr + hdr->length);
}
printk(KERN_ERR "Malformed %s value\n", name);
return 0;
@@ -921,10 +951,12 @@ find_memmap_space (void)
if (!efi_wb(md)) {
continue;
}
- if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
+ if (pmd == NULL || !efi_wb(pmd) ||
+ efi_md_end(pmd) != md->phys_addr) {
contig_low = GRANULEROUNDUP(md->phys_addr);
contig_high = efi_md_end(md);
- for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
+ for (q = p + efi_desc_size; q < efi_map_end;
+ q += efi_desc_size) {
check_md = q;
if (!efi_wb(check_md))
break;
@@ -988,8 +1020,9 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
md = p;
if (!efi_wb(md)) {
- if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
- md->type == EFI_BOOT_SERVICES_DATA)) {
+ if (efi_uc(md) &&
+ (md->type == EFI_CONVENTIONAL_MEMORY ||
+ md->type == EFI_BOOT_SERVICES_DATA)) {
k->attribute = EFI_MEMORY_UC;
k->start = md->phys_addr;
k->num_pages = md->num_pages;
@@ -997,10 +1030,12 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
}
continue;
}
- if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
+ if (pmd == NULL || !efi_wb(pmd) ||
+ efi_md_end(pmd) != md->phys_addr) {
contig_low = GRANULEROUNDUP(md->phys_addr);
contig_high = efi_md_end(md);
- for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
+ for (q = p + efi_desc_size; q < efi_map_end;
+ q += efi_desc_size) {
check_md = q;
if (!efi_wb(check_md))
break;
@@ -1025,13 +1060,17 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
if (md->phys_addr < contig_low) {
lim = min(efi_md_end(md), contig_low);
if (efi_uc(md)) {
- if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
+ if (k > kern_memmap &&
+ (k-1)->attribute == EFI_MEMORY_UC &&
kmd_end(k-1) == md->phys_addr) {
- (k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
+ (k-1)->num_pages +=
+ (lim - md->phys_addr)
+ >> EFI_PAGE_SHIFT;
} else {
k->attribute = EFI_MEMORY_UC;
k->start = md->phys_addr;
- k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
+ k->num_pages = (lim - md->phys_addr)
+ >> EFI_PAGE_SHIFT;
k++;
}
}
@@ -1049,7 +1088,8 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
} else {
k->attribute = EFI_MEMORY_UC;
k->start = lim;
- k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
+ k->num_pages = (efi_md_end(md) - lim)
+ >> EFI_PAGE_SHIFT;
k++;
}
}
@@ -1151,8 +1191,10 @@ efi_initialize_iomem_resources(struct resource *code_resource,
break;
}
- if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "failed to alocate resource for iomem\n");
+ if ((res = kzalloc(sizeof(struct resource),
+ GFP_KERNEL)) == NULL) {
+ printk(KERN_ERR
+ "failed to allocate resource for iomem\n");
return;
}
@@ -1187,44 +1229,44 @@ efi_initialize_iomem_resources(struct resource *code_resource,
rsvd_regions are sorted
*/
unsigned long __init
-kdump_find_rsvd_region (unsigned long size,
- struct rsvd_region *r, int n)
+kdump_find_rsvd_region (unsigned long size, struct rsvd_region *r, int n)
{
- int i;
- u64 start, end;
- u64 alignment = 1UL << _PAGE_SIZE_64M;
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
- if (!efi_wb(md))
- continue;
- start = ALIGN(md->phys_addr, alignment);
- end = efi_md_end(md);
- for (i = 0; i < n; i++) {
- if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
- if (__pa(r[i].start) > start + size)
- return start;
- start = ALIGN(__pa(r[i].end), alignment);
- if (i < n-1 && __pa(r[i+1].start) < start + size)
- continue;
- else
- break;
+ int i;
+ u64 start, end;
+ u64 alignment = 1UL << _PAGE_SIZE_64M;
+ void *efi_map_start, *efi_map_end, *p;
+ efi_memory_desc_t *md;
+ u64 efi_desc_size;
+
+ efi_map_start = __va(ia64_boot_param->efi_memmap);
+ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
+ efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+ for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
+ md = p;
+ if (!efi_wb(md))
+ continue;
+ start = ALIGN(md->phys_addr, alignment);
+ end = efi_md_end(md);
+ for (i = 0; i < n; i++) {
+ if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
+ if (__pa(r[i].start) > start + size)
+ return start;
+ start = ALIGN(__pa(r[i].end), alignment);
+ if (i < n-1 &&
+ __pa(r[i+1].start) < start + size)
+ continue;
+ else
+ break;
+ }
}
- }
- if (end > start + size)
- return start;
- }
-
- printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n",
- size);
- return ~0UL;
+ if (end > start + size)
+ return start;
+ }
+
+ printk(KERN_WARNING
+ "Cannot reserve 0x%lx byte of memory for crashdump\n", size);
+ return ~0UL;
}
#endif
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index c36f43c94600..f5d3efbfbeda 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1586,7 +1586,7 @@ sys_call_table:
data8 sys_epoll_pwait // 1305
data8 sys_utimensat
data8 sys_signalfd
- data8 sys_timerfd
+ data8 sys_ni_syscall
data8 sys_eventfd
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
index 490dab55fba3..57d2ee6c83e1 100644
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -14,10 +14,10 @@ struct fsyscall_gtod_data_t {
u32 clk_shift;
void *clk_fsys_mmio;
cycle_t clk_cycle_last;
-} __attribute__ ((aligned (L1_CACHE_BYTES)));
+} ____cacheline_aligned;
struct itc_jitter_data_t {
int itc_jitter;
cycle_t itc_lastcycle;
-} __attribute__ ((aligned (L1_CACHE_BYTES)));
+} ____cacheline_aligned;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index c3b4412ccc67..8e7193d55528 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -12,6 +12,9 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(strlen);
+#include<asm/pgtable.h>
+EXPORT_SYMBOL_GPL(empty_zero_page);
+
#include <asm/checksum.h>
EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
EXPORT_SYMBOL(csum_ipv6_magic);
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index fc4d2676264f..b618487cdc85 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -381,9 +381,10 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
unsigned int i;
- i = atomic_sub_return(1, &kcb->prev_kprobe_index);
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe[i].kp;
- kcb->kprobe_status = kcb->prev_kprobe[i].status;
+ i = atomic_read(&kcb->prev_kprobe_index);
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe[i-1].kp;
+ kcb->kprobe_status = kcb->prev_kprobe[i-1].status;
+ atomic_sub(1, &kcb->prev_kprobe_index);
}
static void __kprobes set_current_kprobe(struct kprobe *p,
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index d6cd45f4c6c7..0823de1f6ebe 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -129,13 +129,14 @@ void machine_kexec(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
-#if defined(CONFIG_ARCH_DISCONTIGMEM_ENABLE) && defined(CONFIG_NUMA)
+#if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_SPARSEMEM)
VMCOREINFO_SYMBOL(pgdat_list);
VMCOREINFO_LENGTH(pgdat_list, MAX_NUMNODES);
-
+#endif
+#ifdef CONFIG_NUMA
VMCOREINFO_SYMBOL(node_memblk);
VMCOREINFO_LENGTH(node_memblk, NR_NODE_MEMBLKS);
- VMCOREINFO_SIZE(node_memblk_s);
+ VMCOREINFO_STRUCT_SIZE(node_memblk_s);
VMCOREINFO_OFFSET(node_memblk_s, start_paddr);
VMCOREINFO_OFFSET(node_memblk_s, size);
#endif
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6dbf5919d2d0..846e7e036b13 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -2,61 +2,69 @@
* File: mca.c
* Purpose: Generic MCA handling layer
*
- * Updated for latest kernel
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* Copyright (C) 2002 Dell Inc.
- * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
+ * Copyright (C) Matt Domsch <Matt_Domsch@dell.com>
*
* Copyright (C) 2002 Intel
- * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
+ * Copyright (C) Jenna Hall <jenna.s.hall@intel.com>
*
* Copyright (C) 2001 Intel
- * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
+ * Copyright (C) Fred Lewis <frederick.v.lewis@intel.com>
*
* Copyright (C) 2000 Intel
- * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
+ * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
*
* Copyright (C) 1999, 2004 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander(vijay@engr.sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
*
- * 03/04/15 D. Mosberger Added INIT backtrace support.
- * 02/03/25 M. Domsch GUID cleanups
+ * Copyright (C) 2006 FUJITSU LIMITED
+ * Copyright (C) Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
*
- * 02/01/04 J. Hall Aligned MCA stack to 16 bytes, added platform vs. CPU
- * error flag, set SAL default return values, changed
- * error record structure to linked list, added init call
- * to sal_get_state_info_size().
+ * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
+ * Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
+ * added min save state dump, added INIT handler.
*
- * 01/01/03 F. Lewis Added setup of CMCI and CPEI IRQs, logging of corrected
- * platform errors, completed code for logging of
- * corrected & uncorrected machine check errors, and
- * updated for conformance with Nov. 2000 revision of the
- * SAL 3.0 spec.
- * 00/03/29 C. Fleckenstein Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
- * added min save state dump, added INIT handler.
+ * 2001-01-03 Fred Lewis <frederick.v.lewis@intel.com>
+ * Added setup of CMCI and CPEI IRQs, logging of corrected platform
+ * errors, completed code for logging of corrected & uncorrected
+ * machine check errors, and updated for conformance with Nov. 2000
+ * revision of the SAL 3.0 spec.
+ *
+ * 2002-01-04 Jenna Hall <jenna.s.hall@intel.com>
+ * Aligned MCA stack to 16 bytes, added platform vs. CPU error flag,
+ * set SAL default return values, changed error record structure to
+ * linked list, added init call to sal_get_state_info_size().
+ *
+ * 2002-03-25 Matt Domsch <Matt_Domsch@dell.com>
+ * GUID cleanups.
+ *
+ * 2003-04-15 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Added INIT backtrace support.
*
* 2003-12-08 Keith Owens <kaos@sgi.com>
- * smp_call_function() must not be called from interrupt context (can
- * deadlock on tasklist_lock). Use keventd to call smp_call_function().
+ * smp_call_function() must not be called from interrupt context
+ * (can deadlock on tasklist_lock).
+ * Use keventd to call smp_call_function().
*
* 2004-02-01 Keith Owens <kaos@sgi.com>
- * Avoid deadlock when using printk() for MCA and INIT records.
- * Delete all record printing code, moved to salinfo_decode in user space.
- * Mark variables and functions static where possible.
- * Delete dead variables and functions.
- * Reorder to remove the need for forward declarations and to consolidate
- * related code.
+ * Avoid deadlock when using printk() for MCA and INIT records.
+ * Delete all record printing code, moved to salinfo_decode in user
+ * space. Mark variables and functions static where possible.
+ * Delete dead variables and functions. Reorder to remove the need
+ * for forward declarations and to consolidate related code.
*
* 2005-08-12 Keith Owens <kaos@sgi.com>
- * Convert MCA/INIT handlers to use per event stacks and SAL/OS state.
+ * Convert MCA/INIT handlers to use per event stacks and SAL/OS
+ * state.
*
* 2005-10-07 Keith Owens <kaos@sgi.com>
* Add notify_die() hooks.
*
* 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
- * Add printing support for MCA/INIT.
+ * Add printing support for MCA/INIT.
*
* 2007-04-27 Russ Anderson <rja@sgi.com>
* Support multiple cpus going through OS_MCA in the same event.
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 0f5965fcdf85..8bc7d259e0c6 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -1,24 +1,28 @@
-//
-// assembly portion of the IA64 MCA handling
-//
-// Mods by cfleck to integrate into kernel build
-// 00/03/15 davidm Added various stop bits to get a clean compile
-//
-// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
-// kstack, switch modes, jump to C INIT handler
-//
-// 02/01/04 J.Hall <jenna.s.hall@intel.com>
-// Before entering virtual mode code:
-// 1. Check for TLB CPU error
-// 2. Restore current thread pointer to kr6
-// 3. Move stack ptr 16 bytes to conform to C calling convention
-//
-// 04/11/12 Russ Anderson <rja@sgi.com>
-// Added per cpu MCA/INIT stack save areas.
-//
-// 12/08/05 Keith Owens <kaos@sgi.com>
-// Use per cpu MCA/INIT stacks for all data.
-//
+/*
+ * File: mca_asm.S
+ * Purpose: assembly portion of the IA64 MCA handling
+ *
+ * Mods by cfleck to integrate into kernel build
+ *
+ * 2000-03-15 David Mosberger-Tang <davidm@hpl.hp.com>
+ * Added various stop bits to get a clean compile
+ *
+ * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
+ * Added code to save INIT handoff state in pt_regs format,
+ * switch to temp kstack, switch modes, jump to C INIT handler
+ *
+ * 2002-01-04 J.Hall <jenna.s.hall@intel.com>
+ * Before entering virtual mode code:
+ * 1. Check for TLB CPU error
+ * 2. Restore current thread pointer to kr6
+ * 3. Move stack ptr 16 bytes to conform to C calling convention
+ *
+ * 2004-11-12 Russ Anderson <rja@sgi.com>
+ * Added per cpu MCA/INIT stack save areas.
+ *
+ * 2005-12-08 Keith Owens <kaos@sgi.com>
+ * Use per cpu MCA/INIT stacks for all data.
+ */
#include <linux/threads.h>
#include <asm/asmmacro.h>
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index aba813c2c150..fab1d21a4f2c 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -3,7 +3,7 @@
* Purpose: Generic MCA handling layer
*
* Copyright (C) 2004 FUJITSU LIMITED
- * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
+ * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
* Copyright (C) 2005 Silicon Graphics, Inc
* Copyright (C) 2005 Keith Owens <kaos@sgi.com>
* Copyright (C) 2006 Russ Anderson <rja@sgi.com>
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
index 485e34d0b199..53b8ecb5b4b9 100644
--- a/arch/ia64/kernel/mca_drv.h
+++ b/arch/ia64/kernel/mca_drv.h
@@ -3,7 +3,7 @@
* Purpose: Define helpers for Generic MCA handling
*
* Copyright (C) 2004 FUJITSU LIMITED
- * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
+ * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
*/
/*
* Processor error section:
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index 3bccb06c8d21..767ac2c20d16 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -3,7 +3,7 @@
* Purpose: Assembly portion of Generic MCA handling
*
* Copyright (C) 2004 FUJITSU LIMITED
- * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
+ * Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
*/
#include <linux/threads.h>
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 5ae177f557d8..78acd9fe97e9 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2654,11 +2654,11 @@ pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
/* XXX: need to add more checks here */
if (pid < 2) return -EPERM;
- if (pid != current->pid) {
+ if (pid != task_pid_vnr(current)) {
read_lock(&tasklist_lock);
- p = find_task_by_pid(pid);
+ p = find_task_by_vpid(pid);
/* make sure task cannot go away while we operate on it */
if (p) get_task_struct(p);
@@ -5795,7 +5795,7 @@ pfm_proc_show(struct seq_file *m, void *v)
return 0;
}
-struct seq_operations pfm_seq_ops = {
+const struct seq_operations pfm_seq_ops = {
.start = pfm_proc_start,
.next = pfm_proc_next,
.stop = pfm_proc_stop,
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 27c2ef445a56..f44fe8412162 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -284,6 +284,7 @@ ia64_sal_cache_flush (u64 cache_type)
SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
return isrv.status;
}
+EXPORT_SYMBOL_GPL(ia64_sal_cache_flush);
void __init
ia64_sal_init (struct ia64_sal_systab *systab)
@@ -372,3 +373,16 @@ ia64_sal_oemcall_reentrant(struct ia64_sal_retval *isrvp, u64 oemfunc,
return 0;
}
EXPORT_SYMBOL(ia64_sal_oemcall_reentrant);
+
+long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+ unsigned long *drift_info)
+{
+ struct ia64_sal_retval isrv;
+
+ SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
+ *ticks_per_second = isrv.v0;
+ *drift_info = isrv.v1;
+ return isrv.status;
+}
+EXPORT_SYMBOL_GPL(ia64_sal_freq_base);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 86028c69861e..ebd1a09f3201 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -654,7 +654,7 @@ c_stop (struct seq_file *m, void *v)
{
}
-struct seq_operations cpuinfo_op = {
+const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index f0fc4d8465ad..32ee5979a042 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -120,7 +120,6 @@ static volatile unsigned long go[SLAVE + 1];
#define DEBUG_ITC_SYNC 0
-extern void __devinit calibrate_delay (void);
extern void start_ap (void);
extern unsigned long ia64_iobase;
@@ -477,7 +476,7 @@ start_secondary (void *unused)
return 0;
}
-struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
return NULL;
}
@@ -767,17 +766,6 @@ void __cpu_die(unsigned int cpu)
}
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
-#else /* !CONFIG_HOTPLUG_CPU */
-int __cpu_disable(void)
-{
- return -ENOSYS;
-}
-
-void __cpu_die(unsigned int cpu)
-{
- /* We said "no" in __cpu_disable */
- BUG();
-}
#endif /* CONFIG_HOTPLUG_CPU */
void
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 78d65cb947d2..f0cda765e681 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -35,7 +35,7 @@ trap_init (void)
fpswa_interface = __va(ia64_boot_param->fpswa);
}
-void
+int
die (const char *str, struct pt_regs *regs, long err)
{
static struct {
@@ -62,8 +62,11 @@ die (const char *str, struct pt_regs *regs, long err)
if (++die.lock_owner_depth < 3) {
printk("%s[%d]: %s %ld [%d]\n",
current->comm, task_pid_nr(current), str, err, ++die_counter);
- (void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
- show_regs(regs);
+ if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV)
+ != NOTIFY_STOP)
+ show_regs(regs);
+ else
+ regs = NULL;
} else
printk(KERN_ERR "Recursive die() failure, output suppressed\n");
@@ -72,17 +75,22 @@ die (const char *str, struct pt_regs *regs, long err)
add_taint(TAINT_DIE);
spin_unlock_irq(&die.lock);
+ if (!regs)
+ return 1;
+
if (panic_on_oops)
panic("Fatal exception");
do_exit(SIGSEGV);
+ return 0;
}
-void
+int
die_if_kernel (char *str, struct pt_regs *regs, long err)
{
if (!user_mode(regs))
- die(str, regs, err);
+ return die(str, regs, err);
+ return 0;
}
void
@@ -102,7 +110,8 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
== NOTIFY_STOP)
return;
- die_if_kernel("bugcheck!", regs, break_num);
+ if (die_if_kernel("bugcheck!", regs, break_num))
+ return;
sig = SIGILL; code = ILL_ILLOPC;
break;
@@ -155,8 +164,9 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
break;
default:
- if (break_num < 0x40000 || break_num > 0x100000)
- die_if_kernel("Bad break", regs, break_num);
+ if ((break_num < 0x40000 || break_num > 0x100000)
+ && die_if_kernel("Bad break", regs, break_num))
+ return;
if (break_num < 0x80000) {
sig = SIGILL; code = __ILL_BREAK;
@@ -402,14 +412,15 @@ ia64_illegal_op_fault (unsigned long ec, long arg1, long arg2, long arg3,
#endif
sprintf(buf, "IA-64 Illegal operation fault");
- die_if_kernel(buf, &regs, 0);
+ rv.fkt = 0;
+ if (die_if_kernel(buf, &regs, 0))
+ return rv;
memset(&si, 0, sizeof(si));
si.si_signo = SIGILL;
si.si_code = ILL_ILLOPC;
si.si_addr = (void __user *) (regs.cr_iip + ia64_psr(&regs)->ri);
force_sig_info(SIGILL, &si, current);
- rv.fkt = 0;
return rv;
}
@@ -644,6 +655,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
sprintf(buf, "Fault %lu", vector);
break;
}
- die_if_kernel(buf, &regs, error);
- force_sig(SIGILL, current);
+ if (!die_if_kernel(buf, &regs, error))
+ force_sig(SIGILL, current);
}
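
The die()/die_if_kernel() conversion above lets callers learn whether a DIE_OOPS notifier answered NOTIFY_STOP (the fault was consumed, for example by a debugger) and bail out instead of raising a signal or exiting. A minimal sketch of the resulting caller pattern, with a hypothetical handler name:

#include <linux/sched.h>
#include <asm/ptrace.h>

extern int die(const char *str, struct pt_regs *regs, long err);

static void example_trap_handler(struct pt_regs *regs, long err)
{
	/* non-zero return: a notifier swallowed the fault, nothing more to do */
	if (!user_mode(regs) && die("example trap", regs, err))
		return;

	/* otherwise fall back to signalling the current task */
	force_sig(SIGILL, current);
}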
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index f6a1aeb742b3..52f70bbc192a 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -23,7 +23,7 @@
#include <asm/uaccess.h>
#include <asm/unaligned.h>
-extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
+extern int die_if_kernel(char *str, struct pt_regs *regs, long err);
#undef DEBUG_UNALIGNED_TRAP
@@ -675,8 +675,9 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
*/
if (ld.x6_op == 1 || ld.x6_op == 3) {
printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__);
- die_if_kernel("unaligned reference on speculative load with register update\n",
- regs, 30);
+ if (die_if_kernel("unaligned reference on speculative load with register update\n",
+ regs, 30))
+ return;
}
@@ -1317,7 +1318,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (ia64_psr(regs)->be) {
/* we don't support big-endian accesses */
- die_if_kernel("big-endian unaligned accesses are not supported", regs, 0);
+ if (die_if_kernel("big-endian unaligned accesses are not supported", regs, 0))
+ return;
goto force_sigbus;
}
@@ -1534,7 +1536,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
ia64_handle_exception(regs, eh);
goto done;
}
- die_if_kernel("error during unaligned kernel access\n", regs, ret);
+ if (die_if_kernel("error during unaligned kernel access\n", regs, ret))
+ return;
/* NOT_REACHED */
}
force_sigbus:
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 7e9c275ea148..344f64eca7a9 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -218,7 +218,7 @@ find_memory (void)
/* Free all available memory, then mark bootmem-map as being in use. */
efi_memmap_walk(filter_rsvd_memory, free_bootmem);
- reserve_bootmem(bootmap_start, bootmap_size);
+ reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);
find_initrd();
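
The extra BOOTMEM_DEFAULT argument here and below reflects reserve_bootmem()/reserve_bootmem_node() growing a flags parameter in this kernel generation; a minimal sketch, assuming that era's <linux/bootmem.h> where BOOTMEM_DEFAULT tolerates overlapping reservations while BOOTMEM_EXCLUSIVE refuses them:

#include <linux/bootmem.h>
#include <linux/init.h>

static void __init example_reserve_bitmap(unsigned long start,
					  unsigned long bootmap_size)
{
	/* same behaviour as the old two-argument call */
	reserve_bootmem(start, bootmap_size, BOOTMEM_DEFAULT);
}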
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 0b567398f38e..ee5e68b2af94 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -299,12 +299,12 @@ static void __init reserve_pernode_space(void)
pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
base = __pa(bdp->node_bootmem_map);
- reserve_bootmem_node(pdp, base, size);
+ reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
/* Now the per-node space */
size = mem_data[node].pernode_size;
base = __pa(mem_data[node].pernode_addr);
- reserve_bootmem_node(pdp, base, size);
+ reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
}
}
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 7571076a16a1..3e69881648a3 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -16,7 +16,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
-extern void die (char *, struct pt_regs *, long);
+extern int die(char *, struct pt_regs *, long);
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, int trap)
@@ -267,9 +267,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
else
printk(KERN_ALERT "Unable to handle kernel paging request at "
"virtual address %016lx\n", address);
- die("Oops", regs, isr);
+ if (die("Oops", regs, isr))
+ regs = NULL;
bust_spinlocks(0);
- do_exit(SIGKILL);
+ if (regs)
+ do_exit(SIGKILL);
return;
out_of_memory:
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index f3c69329e145..dfc6bf1c7b41 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -523,7 +523,7 @@ static ssize_t sn2_ptc_proc_write(struct file *file, const char __user *user, si
return count;
}
-static struct seq_operations sn2_ptc_seq_ops = {
+static const struct seq_operations sn2_ptc_seq_ops = {
.start = sn2_ptc_seq_start,
.next = sn2_ptc_seq_next,
.stop = sn2_ptc_seq_stop,
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 1a8e49607f11..4b0d1538e7e5 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -33,6 +33,7 @@
#include <linux/smp_lock.h>
#include <linux/nodemask.h>
#include <linux/smp.h>
+#include <linux/mutex.h>
#include <asm/processor.h>
#include <asm/topology.h>
@@ -50,7 +51,7 @@ static void *sn_hwperf_salheap = NULL;
static int sn_hwperf_obj_cnt = 0;
static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
static int sn_hwperf_init(void);
-static DECLARE_MUTEX(sn_hwperf_init_mutex);
+static DEFINE_MUTEX(sn_hwperf_init_mutex);
#define cnode_possible(n) ((n) < num_cnodes)
@@ -577,7 +578,7 @@ static void sn_topology_stop(struct seq_file *m, void *v)
/*
* /proc/sgi_sn/sn_topology, read-only using seq_file
*/
-static struct seq_operations sn_topology_seq_ops = {
+static const struct seq_operations sn_topology_seq_ops = {
.start = sn_topology_start,
.next = sn_topology_next,
.stop = sn_topology_stop,
@@ -884,10 +885,10 @@ static int sn_hwperf_init(void)
int e = 0;
/* single threaded, once-only initialization */
- down(&sn_hwperf_init_mutex);
+ mutex_lock(&sn_hwperf_init_mutex);
if (sn_hwperf_salheap) {
- up(&sn_hwperf_init_mutex);
+ mutex_unlock(&sn_hwperf_init_mutex);
return e;
}
@@ -936,7 +937,7 @@ out:
sn_hwperf_salheap = NULL;
sn_hwperf_obj_cnt = 0;
}
- up(&sn_hwperf_init_mutex);
+ mutex_unlock(&sn_hwperf_init_mutex);
return e;
}
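
The sn_hwperf change is the standard conversion from a DECLARE_MUTEX semaphore with down()/up() to a real mutex with DEFINE_MUTEX and mutex_lock()/mutex_unlock(). A minimal sketch of the once-only initialization pattern, with hypothetical names:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_init_mutex);
static int example_initialized;

static int example_init_once(void)
{
	mutex_lock(&example_init_mutex);	/* formerly down(&sem) */
	if (!example_initialized) {
		/* one-time setup would go here */
		example_initialized = 1;
	}
	mutex_unlock(&example_init_mutex);	/* formerly up(&sem) */
	return 0;
}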
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index ab3eaf85fe4d..2c676cc05418 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -100,11 +100,11 @@ u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
static irqreturn_t
pcibr_error_intr_handler(int irq, void *arg)
{
- struct pcibus_info *soft = (struct pcibus_info *)arg;
+ struct pcibus_info *soft = arg;
- if (sal_pcibr_error_interrupt(soft) < 0) {
+ if (sal_pcibr_error_interrupt(soft) < 0)
panic("pcibr_error_intr_handler(): Fatal Bridge Error");
- }
+
return IRQ_HANDLED;
}
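
Dropping the cast in pcibr_error_intr_handler() is valid because void * converts implicitly to any object pointer type in C. A minimal sketch with a hypothetical device structure:

#include <linux/interrupt.h>

struct example_dev {
	int id;
};

static irqreturn_t example_intr_handler(int irq, void *arg)
{
	struct example_dev *dev = arg;	/* implicit conversion, no cast needed */

	(void)dev->id;
	return IRQ_HANDLED;
}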
diff --git a/arch/m32r/boot/compressed/m32r_sio.c b/arch/m32r/boot/compressed/m32r_sio.c
index ee3c8be12fa0..01d877c6868f 100644
--- a/arch/m32r/boot/compressed/m32r_sio.c
+++ b/arch/m32r/boot/compressed/m32r_sio.c
@@ -17,7 +17,7 @@ static int puts(const char *s)
return 0;
}
-#if defined(CONFIG_PLAT_M32700UT_Alpha) || defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_OPSPUT)
+#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_OPSPUT)
#include <asm/m32r.h>
#include <asm/io.h>
@@ -52,7 +52,7 @@ static void putc(char c)
}
*BOOT_SIO0TXB = c;
}
-#else /* !(CONFIG_PLAT_M32700UT_Alpha) && !(CONFIG_PLAT_M32700UT) */
+#else /* !(CONFIG_PLAT_M32700UT) */
#if defined(CONFIG_PLAT_MAPPI2)
#define SIO0STS (volatile unsigned short *)(0xa0efd000 + 14)
#define SIO0TXB (volatile unsigned short *)(0xa0efd000 + 30)
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index d64814385d70..f1f5db0c4084 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -177,25 +177,28 @@ static unsigned long __init setup_memory(void)
*/
reserve_bootmem(CONFIG_MEMORY_START + PAGE_SIZE,
(PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE - 1)
- - CONFIG_MEMORY_START);
+ - CONFIG_MEMORY_START,
+ BOOTMEM_DEFAULT);
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
* enabling clean reboots, SMP operation, laptop functions.
*/
- reserve_bootmem(CONFIG_MEMORY_START, PAGE_SIZE);
+ reserve_bootmem(CONFIG_MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT);
/*
* reserve memory hole
*/
#ifdef CONFIG_MEMHOLE
- reserve_bootmem(CONFIG_MEMHOLE_START, CONFIG_MEMHOLE_SIZE);
+ reserve_bootmem(CONFIG_MEMHOLE_START, CONFIG_MEMHOLE_SIZE,
+ BOOTMEM_DEFAULT);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
- reserve_bootmem(INITRD_START, INITRD_SIZE);
+ reserve_bootmem(INITRD_START, INITRD_SIZE,
+ BOOTMEM_DEFAULT);
initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start + INITRD_SIZE;
printk("initrd:start[%08lx],size[%08lx]\n",
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 0e383da158e9..2c03ac1d005f 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -43,6 +43,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/sched.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
index 95aa79874847..aa3bf4cfab37 100644
--- a/arch/m32r/kernel/syscall_table.S
+++ b/arch/m32r/kernel/syscall_table.S
@@ -321,6 +321,6 @@ ENTRY(sys_call_table)
.long sys_epoll_pwait
.long sys_utimensat /* 320 */
.long sys_signalfd
- .long sys_timerfd
+ .long sys_ni_syscall
.long sys_eventfd
.long sys_fallocate
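
Pointing the withdrawn timerfd slot at sys_ni_syscall keeps the syscall number reserved while reporting "not implemented" to userspace; the stock definition in kernel/sys_ni.c is essentially:

#include <linux/errno.h>
#include <linux/linkage.h>

asmlinkage long sys_ni_syscall(void)
{
	return -ENOSYS;
}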
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index c7efdb0aefc5..07c1af7dc0e2 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -91,7 +91,8 @@ unsigned long __init setup_memory(void)
PFN_PHYS(mp->pages));
reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
- PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size);
+ PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size,
+ BOOTMEM_DEFAULT);
if (max_low_pfn < max_pfn)
max_low_pfn = max_pfn;
@@ -104,7 +105,7 @@ unsigned long __init setup_memory(void)
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
reserve_bootmem_node(NODE_DATA(0), INITRD_START,
- INITRD_SIZE);
+ INITRD_SIZE, BOOTMEM_DEFAULT);
initrd_start = INITRD_START + PAGE_OFFSET;
initrd_end = initrd_start + INITRD_SIZE;
printk("initrd:start[%08lx],size[%08lx]\n",
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 8236e42ef711..ffabd01c45eb 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -577,20 +577,6 @@ config MAC_HID
depends on INPUT_ADBHID
default y
-config MAC_ADBKEYCODES
- bool "Support for ADB raw keycodes"
- depends on INPUT_ADBHID
- help
- This provides support for sending raw ADB keycodes to console
- devices. This is the default up to 2.4.0, but in future this may be
- phased out in favor of generic Linux keycodes. If you say Y here,
- you can dynamically switch via the
- /proc/sys/dev/mac_hid/keyboard_sends_linux_keycodes
- sysctl and with the "keyboard_sends_linux_keycodes=" kernel
- argument.
-
- If unsure, say Y here.
-
config ADB_KEYBOARD
bool "Support for ADB keyboard (old driver)"
depends on MAC && !INPUT_ADBHID
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 4a1bd44ff162..2cba605cb59d 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -13,16 +13,15 @@
# Copyright (C) 1994 by Hamish Macdonald
#
-# test for cross compiling
-COMPILE_ARCH = $(shell uname -m)
-
# override top level makefile
AS += -m68020
LDFLAGS := -m m68kelf
LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds
-ifneq ($(COMPILE_ARCH),$(ARCH))
- # prefix for cross-compiling binaries
- CROSS_COMPILE = m68k-linux-gnu-
+ifneq ($(SUBARCH),$(ARCH))
+ ifeq ($(CROSS_COMPILE),)
+ CROSS_COMPILE := $(call cc-cross-prefix, \
+ m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
+ endif
endif
ifdef CONFIG_SUN3
diff --git a/arch/m68k/amiga/Makefile b/arch/m68k/amiga/Makefile
index 8b415651edee..6a0d7650f980 100644
--- a/arch/m68k/amiga/Makefile
+++ b/arch/m68k/amiga/Makefile
@@ -2,6 +2,6 @@
# Makefile for Linux arch/m68k/amiga source directory
#
-obj-y := config.o amiints.o cia.o chipram.o amisound.o amiga_ksyms.o
+obj-y := config.o amiints.o cia.o chipram.o amisound.o
obj-$(CONFIG_AMIGA_PCMCIA) += pcmcia.o
diff --git a/arch/m68k/amiga/amiga_ksyms.c b/arch/m68k/amiga/amiga_ksyms.c
deleted file mode 100644
index 7fdcf6bf3ada..000000000000
--- a/arch/m68k/amiga/amiga_ksyms.c
+++ /dev/null
@@ -1,33 +0,0 @@
-#include <linux/module.h>
-#include <linux/types.h>
-#include <asm/ptrace.h>
-#include <asm/amigahw.h>
-#include <asm/amigaints.h>
-#include <asm/amipcmcia.h>
-
-extern volatile u_short amiga_audio_min_period;
-extern u_short amiga_audio_period;
-
-/*
- * Add things here when you find the need for it.
- */
-EXPORT_SYMBOL(amiga_model);
-EXPORT_SYMBOL(amiga_chipset);
-EXPORT_SYMBOL(amiga_hw_present);
-EXPORT_SYMBOL(amiga_eclock);
-EXPORT_SYMBOL(amiga_colorclock);
-EXPORT_SYMBOL(amiga_chip_alloc);
-EXPORT_SYMBOL(amiga_chip_free);
-EXPORT_SYMBOL(amiga_chip_avail);
-EXPORT_SYMBOL(amiga_chip_size);
-EXPORT_SYMBOL(amiga_audio_period);
-EXPORT_SYMBOL(amiga_audio_min_period);
-
-#ifdef CONFIG_AMIGA_PCMCIA
- EXPORT_SYMBOL(pcmcia_reset);
- EXPORT_SYMBOL(pcmcia_copy_tuple);
- EXPORT_SYMBOL(pcmcia_program_voltage);
- EXPORT_SYMBOL(pcmcia_access_speed);
- EXPORT_SYMBOL(pcmcia_write_enable);
- EXPORT_SYMBOL(pcmcia_write_disable);
-#endif
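
The deleted *_ksyms.c catch-all files are replaced by the diffs that follow, which place each EXPORT_SYMBOL() directly after the symbol it exports. The pattern, shown with hypothetical names rather than the real Amiga symbols, looks like:

#include <linux/module.h>

unsigned long example_chip_size;
EXPORT_SYMBOL(example_chip_size);

void example_chip_free(void *ptr)
{
	/* body omitted for illustration */
}
EXPORT_SYMBOL(example_chip_free);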
diff --git a/arch/m68k/amiga/amisound.c b/arch/m68k/amiga/amisound.c
index 1f5bfb584297..61e5c54625ae 100644
--- a/arch/m68k/amiga/amisound.c
+++ b/arch/m68k/amiga/amisound.c
@@ -12,6 +12,7 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/string.h>
+#include <linux/module.h>
#include <asm/system.h>
#include <asm/amigahw.h>
@@ -21,7 +22,7 @@ static const signed char sine_data[] = {
0, 39, 75, 103, 121, 127, 121, 103, 75, 39,
0, -39, -75, -103, -121, -127, -121, -103, -75, -39
};
-#define DATA_SIZE (sizeof(sine_data)/sizeof(sine_data[0]))
+#define DATA_SIZE ARRAY_SIZE(sine_data)
#define custom amiga_custom
@@ -31,6 +32,7 @@ static const signed char sine_data[] = {
*/
volatile unsigned short amiga_audio_min_period = 124; /* Default for pre-OCS */
+EXPORT_SYMBOL(amiga_audio_min_period);
#define MAX_PERIOD (65535)
@@ -40,6 +42,7 @@ volatile unsigned short amiga_audio_min_period = 124; /* Default for pre-OCS */
*/
unsigned short amiga_audio_period = MAX_PERIOD;
+EXPORT_SYMBOL(amiga_audio_period);
static unsigned long clock_constant;
diff --git a/arch/m68k/amiga/chipram.c b/arch/m68k/amiga/chipram.c
index fa015d801617..cbe36538af47 100644
--- a/arch/m68k/amiga/chipram.c
+++ b/arch/m68k/amiga/chipram.c
@@ -13,10 +13,13 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/module.h>
+
#include <asm/page.h>
#include <asm/amigahw.h>
unsigned long amiga_chip_size;
+EXPORT_SYMBOL(amiga_chip_size);
static struct resource chipram_res = {
.name = "Chip RAM", .start = CHIP_PHYSADDR
@@ -29,12 +32,10 @@ void __init amiga_chip_init(void)
if (!AMIGAHW_PRESENT(CHIP_RAM))
return;
-#ifndef CONFIG_APUS_FAST_EXCEPT
/*
* Remove the first 4 pages where PPC exception handlers will be located
*/
amiga_chip_size -= 0x4000;
-#endif
chipram_res.end = amiga_chip_size-1;
request_resource(&iomem_resource, &chipram_res);
@@ -67,6 +68,7 @@ void *amiga_chip_alloc(unsigned long size, const char *name)
#endif
return (void *)ZTWO_VADDR(res->start);
}
+EXPORT_SYMBOL(amiga_chip_alloc);
/*
@@ -120,6 +122,7 @@ void amiga_chip_free(void *ptr)
}
printk("amiga_chip_free: trying to free nonexistent region at %p\n", ptr);
}
+EXPORT_SYMBOL(amiga_chip_free);
unsigned long amiga_chip_avail(void)
@@ -129,3 +132,5 @@ unsigned long amiga_chip_avail(void)
#endif
return chipavail;
}
+EXPORT_SYMBOL(amiga_chip_avail);
+
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
index c4a4ffd45bc0..343fab49bd9a 100644
--- a/arch/m68k/amiga/cia.c
+++ b/arch/m68k/amiga/cia.c
@@ -84,7 +84,7 @@ unsigned char cia_able_irq(struct ciabase *base, unsigned char mask)
static irqreturn_t cia_handler(int irq, void *dev_id)
{
- struct ciabase *base = (struct ciabase *)dev_id;
+ struct ciabase *base = dev_id;
int mach_irq;
unsigned char ints;
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index 35748531327d..50f5daab46b7 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/zorro.h>
+#include <linux/module.h>
#include <asm/bootinfo.h>
#include <asm/setup.h>
@@ -36,13 +37,24 @@
#include <asm/io.h>
unsigned long amiga_model;
+EXPORT_SYMBOL(amiga_model);
+
unsigned long amiga_eclock;
+EXPORT_SYMBOL(amiga_eclock);
+
unsigned long amiga_masterclock;
+
unsigned long amiga_colorclock;
+EXPORT_SYMBOL(amiga_colorclock);
+
unsigned long amiga_chipset;
+EXPORT_SYMBOL(amiga_chipset);
+
unsigned char amiga_vblank;
unsigned char amiga_psfreq;
+
struct amiga_hw_present amiga_hw_present;
+EXPORT_SYMBOL(amiga_hw_present);
static char s_a500[] __initdata = "A500";
static char s_a500p[] __initdata = "A500+";
diff --git a/arch/m68k/amiga/pcmcia.c b/arch/m68k/amiga/pcmcia.c
index 186662ca1a89..7106f0c3639b 100644
--- a/arch/m68k/amiga/pcmcia.c
+++ b/arch/m68k/amiga/pcmcia.c
@@ -15,6 +15,8 @@
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
+#include <linux/module.h>
+
#include <asm/amigayle.h>
#include <asm/amipcmcia.h>
@@ -30,6 +32,7 @@ void pcmcia_reset(void)
while (time_before(jiffies, reset_start_time + 1*HZ/100));
b = gayle_reset;
}
+EXPORT_SYMBOL(pcmcia_reset);
/* copy a tuple, including tuple header. return nb bytes copied */
@@ -61,6 +64,7 @@ int pcmcia_copy_tuple(unsigned char tuple_id, void *tuple, int max_len)
return 0;
}
+EXPORT_SYMBOL(pcmcia_copy_tuple);
void pcmcia_program_voltage(int voltage)
{
@@ -84,6 +88,7 @@ void pcmcia_program_voltage(int voltage)
gayle.config = cfg_byte;
}
+EXPORT_SYMBOL(pcmcia_program_voltage);
void pcmcia_access_speed(int speed)
{
@@ -101,13 +106,17 @@ void pcmcia_access_speed(int speed)
cfg_byte = (cfg_byte & 0xf3) | s;
gayle.config = cfg_byte;
}
+EXPORT_SYMBOL(pcmcia_access_speed);
void pcmcia_write_enable(void)
{
gayle.cardstatus = GAYLE_CS_WR|GAYLE_CS_DA;
}
+EXPORT_SYMBOL(pcmcia_write_enable);
void pcmcia_write_disable(void)
{
gayle.cardstatus = 0;
}
+EXPORT_SYMBOL(pcmcia_write_disable);
+
diff --git a/arch/m68k/atari/Makefile b/arch/m68k/atari/Makefile
index 2cb86191f0aa..2cd905efe63a 100644
--- a/arch/m68k/atari/Makefile
+++ b/arch/m68k/atari/Makefile
@@ -3,7 +3,7 @@
#
obj-y := config.o time.o debug.o ataints.o stdma.o \
- atasound.o stram.o atari_ksyms.o
+ atasound.o stram.o
ifeq ($(CONFIG_PCI),y)
obj-$(CONFIG_HADES) += hades-pci.o
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index b85ca22024c1..b45593a60bdd 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -40,6 +40,7 @@
#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/seq_file.h>
+#include <linux/module.h>
#include <asm/system.h>
#include <asm/traps.h>
@@ -446,6 +447,7 @@ unsigned long atari_register_vme_int(void)
free_vme_vec_bitmap |= 1 << i;
return VME_SOURCE_BASE + i;
}
+EXPORT_SYMBOL(atari_register_vme_int);
void atari_unregister_vme_int(unsigned long irq)
@@ -455,5 +457,6 @@ void atari_unregister_vme_int(unsigned long irq)
free_vme_vec_bitmap &= ~(1 << irq);
}
}
+EXPORT_SYMBOL(atari_unregister_vme_int);
diff --git a/arch/m68k/atari/atari_ksyms.c b/arch/m68k/atari/atari_ksyms.c
deleted file mode 100644
index a04757151538..000000000000
--- a/arch/m68k/atari/atari_ksyms.c
+++ /dev/null
@@ -1,35 +0,0 @@
-#include <linux/module.h>
-
-#include <asm/ptrace.h>
-#include <asm/traps.h>
-#include <asm/atarihw.h>
-#include <asm/atariints.h>
-#include <asm/atarikb.h>
-#include <asm/atari_joystick.h>
-#include <asm/atari_stdma.h>
-#include <asm/atari_stram.h>
-
-extern void atari_microwire_cmd( int cmd );
-extern int atari_MFP_init_done;
-extern int atari_SCC_init_done;
-extern int atari_SCC_reset_done;
-
-EXPORT_SYMBOL(atari_mch_cookie);
-EXPORT_SYMBOL(atari_mch_type);
-EXPORT_SYMBOL(atari_hw_present);
-EXPORT_SYMBOL(atari_switches);
-EXPORT_SYMBOL(atari_dont_touch_floppy_select);
-EXPORT_SYMBOL(atari_register_vme_int);
-EXPORT_SYMBOL(atari_unregister_vme_int);
-EXPORT_SYMBOL(stdma_lock);
-EXPORT_SYMBOL(stdma_release);
-EXPORT_SYMBOL(stdma_others_waiting);
-EXPORT_SYMBOL(stdma_islocked);
-EXPORT_SYMBOL(atari_stram_alloc);
-EXPORT_SYMBOL(atari_stram_free);
-
-EXPORT_SYMBOL(atari_MFP_init_done);
-EXPORT_SYMBOL(atari_SCC_init_done);
-EXPORT_SYMBOL(atari_SCC_reset_done);
-
-EXPORT_SYMBOL(atari_microwire_cmd);
diff --git a/arch/m68k/atari/atasound.c b/arch/m68k/atari/atasound.c
index ee04250eb56b..d266fe89c125 100644
--- a/arch/m68k/atari/atasound.c
+++ b/arch/m68k/atari/atasound.c
@@ -22,6 +22,7 @@
#include <linux/fcntl.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <asm/atarihw.h>
#include <asm/system.h>
@@ -43,6 +44,7 @@ void atari_microwire_cmd (int cmd)
while( tt_microwire.mask != 0x7ff)
;
}
+EXPORT_SYMBOL(atari_microwire_cmd);
/* PSG base frequency */
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index e40e5dcaa347..5945e1505558 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -31,6 +31,7 @@
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/vt_kern.h>
+#include <linux/module.h>
#include <asm/bootinfo.h>
#include <asm/setup.h>
@@ -43,10 +44,20 @@
#include <asm/io.h>
u_long atari_mch_cookie;
+EXPORT_SYMBOL(atari_mch_cookie);
+
u_long atari_mch_type;
+EXPORT_SYMBOL(atari_mch_type);
+
struct atari_hw_present atari_hw_present;
+EXPORT_SYMBOL(atari_hw_present);
+
u_long atari_switches;
+EXPORT_SYMBOL(atari_switches);
+
int atari_dont_touch_floppy_select;
+EXPORT_SYMBOL(atari_dont_touch_floppy_select);
+
int atari_rtc_year_offset;
/* local function prototypes */
diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c
index fbeed8c8ecbc..043ddbc61c7b 100644
--- a/arch/m68k/atari/debug.c
+++ b/arch/m68k/atari/debug.c
@@ -15,17 +15,23 @@
#include <linux/console.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <asm/atarihw.h>
#include <asm/atariints.h>
/* Flag that Modem1 port is already initialized and used */
int atari_MFP_init_done;
+EXPORT_SYMBOL(atari_MFP_init_done);
+
/* Flag that Modem1 port is already initialized and used */
int atari_SCC_init_done;
+EXPORT_SYMBOL(atari_SCC_init_done);
+
/* Can be set somewhere, if a SCC master reset has already been done and should
 * not be repeated; used by kgdb */
int atari_SCC_reset_done;
+EXPORT_SYMBOL(atari_SCC_reset_done);
static struct console atari_console_driver = {
.name = "debug",
diff --git a/arch/m68k/atari/hades-pci.c b/arch/m68k/atari/hades-pci.c
index bee2b1443e36..2bbabc008708 100644
--- a/arch/m68k/atari/hades-pci.c
+++ b/arch/m68k/atari/hades-pci.c
@@ -376,8 +376,8 @@ struct pci_bus_info * __init init_hades_pci(void)
*/
bus = kzalloc(sizeof(struct pci_bus_info), GFP_KERNEL);
- if (!bus)
- return NULL;
+ if (unlikely(!bus))
+ goto iounmap_base_virt;
/*
* Claim resources. The m68k has no separate I/O space, both
@@ -385,43 +385,25 @@ struct pci_bus_info * __init init_hades_pci(void)
* the I/O resources are requested in memory space as well.
*/
- if (request_resource(&iomem_resource, &config_space) != 0)
- {
- kfree(bus);
- return NULL;
- }
+ if (unlikely(request_resource(&iomem_resource, &config_space) != 0))
+ goto free_bus;
- if (request_resource(&iomem_resource, &io_space) != 0)
- {
- release_resource(&config_space);
- kfree(bus);
- return NULL;
- }
+ if (unlikely(request_resource(&iomem_resource, &io_space) != 0))
+ goto release_config_space;
bus->mem_space.start = HADES_MEM_BASE;
bus->mem_space.end = HADES_MEM_BASE + HADES_MEM_SIZE - 1;
bus->mem_space.name = pci_mem_name;
#if 1
- if (request_resource(&iomem_resource, &bus->mem_space) != 0)
- {
- release_resource(&io_space);
- release_resource(&config_space);
- kfree(bus);
- return NULL;
- }
+ if (unlikely(request_resource(&iomem_resource, &bus->mem_space) != 0))
+ goto release_io_space;
#endif
bus->io_space.start = pci_io_base_virt;
bus->io_space.end = pci_io_base_virt + HADES_VIRT_IO_SIZE - 1;
bus->io_space.name = pci_io_name;
#if 1
- if (request_resource(&ioport_resource, &bus->io_space) != 0)
- {
- release_resource(&bus->mem_space);
- release_resource(&io_space);
- release_resource(&config_space);
- kfree(bus);
- return NULL;
- }
+ if (unlikely(request_resource(&ioport_resource, &bus->io_space) != 0))
+ goto release_bus_mem_space;
#endif
/*
* Set hardware dependent functions.
@@ -438,5 +420,21 @@ struct pci_bus_info * __init init_hades_pci(void)
tt_mfp.active_edge &= ~0x27;
return bus;
+
+release_bus_mem_space:
+ release_resource(&bus->mem_space);
+release_io_space:
+ release_resource(&io_space);
+release_config_space:
+ release_resource(&config_space);
+free_bus:
+ kfree(bus);
+iounmap_base_virt:
+ iounmap((void *)pci_io_base_virt);
+
+ for (i = 0; i < N_SLOTS; i++)
+ iounmap((void *)pci_conf_base_virt[i]);
+
+ return NULL;
}
#endif
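
The init_hades_pci() rework above follows the common kernel error-handling idiom of forward gotos to reverse-order unwind labels, so every failure path releases exactly what was acquired before it. A minimal sketch with hypothetical allocations:

#include <linux/slab.h>

static int example_setup(void)
{
	void *a, *b;
	int err = -ENOMEM;

	a = kzalloc(16, GFP_KERNEL);
	if (!a)
		goto out;

	b = kzalloc(16, GFP_KERNEL);
	if (!b)
		goto free_a;

	return 0;		/* success: both resources kept */

free_a:
	kfree(a);
out:
	return err;
}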
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c
index ab3fd5202b24..d1bd029a34ac 100644
--- a/arch/m68k/atari/stdma.c
+++ b/arch/m68k/atari/stdma.c
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
+#include <linux/module.h>
#include <asm/atari_stdma.h>
#include <asm/atariints.h>
@@ -91,6 +92,7 @@ void stdma_lock(irq_handler_t handler, void *data)
stdma_isr_data = data;
local_irq_restore(flags);
}
+EXPORT_SYMBOL(stdma_lock);
/*
@@ -117,6 +119,7 @@ void stdma_release(void)
local_irq_restore(flags);
}
+EXPORT_SYMBOL(stdma_release);
/*
@@ -134,6 +137,7 @@ int stdma_others_waiting(void)
{
return waitqueue_active(&stdma_wait);
}
+EXPORT_SYMBOL(stdma_others_waiting);
/*
@@ -155,6 +159,7 @@ int stdma_islocked(void)
{
return stdma_locked;
}
+EXPORT_SYMBOL(stdma_islocked);
/*
diff --git a/arch/m68k/atari/stram.c b/arch/m68k/atari/stram.c
index bf4588cbe371..0055a6c06f75 100644
--- a/arch/m68k/atari/stram.c
+++ b/arch/m68k/atari/stram.c
@@ -20,6 +20,7 @@
#include <linux/bootmem.h>
#include <linux/mount.h>
#include <linux/blkdev.h>
+#include <linux/module.h>
#include <asm/setup.h>
#include <asm/machdep.h>
@@ -153,7 +154,7 @@ void __init atari_stram_reserve_pages(void *start_mem)
/* always reserve first page of ST-RAM, the first 2 kB are
* supervisor-only! */
if (!kernel_in_stram)
- reserve_bootmem (0, PAGE_SIZE);
+ reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT);
}
@@ -208,6 +209,7 @@ void *atari_stram_alloc(long size, const char *owner)
}
return( addr );
}
+EXPORT_SYMBOL(atari_stram_alloc);
void atari_stram_free( void *addr )
@@ -237,6 +239,7 @@ void atari_stram_free( void *addr )
printk( KERN_ERR "atari_stram_free: cannot free block at %p "
"(called from %p)\n", addr, __builtin_return_address(0) );
}
+EXPORT_SYMBOL(atari_stram_free);
/* ------------------------------------------------------------------------ */
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 15b80abfe94a..ff9dffa5b860 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -678,7 +678,6 @@ CONFIG_LOGO_MAC_CLUT224=y
#
CONFIG_MAC_SCC=y
CONFIG_MAC_HID=y
-CONFIG_MAC_ADBKEYCODES=y
CONFIG_SERIAL_CONSOLE=y
#
diff --git a/arch/m68k/hp300/Makefile b/arch/m68k/hp300/Makefile
index 288b9c67c9bf..96d4244c82fd 100644
--- a/arch/m68k/hp300/Makefile
+++ b/arch/m68k/hp300/Makefile
@@ -2,4 +2,4 @@
# Makefile for Linux arch/m68k/hp300 source directory
#
-obj-y := ksyms.o config.o time.o reboot.o
+obj-y := config.o time.o reboot.o
diff --git a/arch/m68k/hp300/ksyms.c b/arch/m68k/hp300/ksyms.c
deleted file mode 100644
index 8202830763d1..000000000000
--- a/arch/m68k/hp300/ksyms.c
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * linux/arch/m68k/hp300/ksyms.c
- *
- * Copyright (C) 1998 Philip Blundell <philb@gnu.org>
- *
- * This file contains the HP300-specific kernel symbols. None yet. :-)
- */
-
-#include <linux/module.h>
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 918f5dbeaef6..6dfa3b3c0e2a 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -742,7 +742,7 @@ sys_call_table:
.long sys_epoll_pwait /* 315 */
.long sys_utimensat
.long sys_signalfd
- .long sys_timerfd
+ .long sys_ni_syscall
.long sys_eventfd
.long sys_fallocate /* 320 */
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index 3ee918695215..f85b928ffac4 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -335,7 +335,7 @@ void dump_thread(struct pt_regs * regs, struct user * dump)
if (dump->start_stack < TASK_SIZE)
dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
- dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump);
+ dump->u_ar0 = offsetof(struct user, regs);
sw = ((struct switch_stack *)regs) - 1;
dump->regs.d1 = regs->d1;
dump->regs.d2 = regs->d2;
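
The dump_thread() change replaces hand-rolled cast-and-subtract pointer arithmetic with offsetof(); a minimal illustration with a hypothetical layout:

#include <linux/stddef.h>

struct example_user {
	long magic;
	struct {
		long d1, d2;
	} regs;
};

/* byte offset of ->regs inside the dump image, computed portably */
static const unsigned long example_regs_offset =
	offsetof(struct example_user, regs);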
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index ed3a4caec620..9a06c48edcb3 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -323,7 +323,8 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_BLK_DEV_INITRD
if (m68k_ramdisk.size) {
reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
- m68k_ramdisk.addr, m68k_ramdisk.size);
+ m68k_ramdisk.addr, m68k_ramdisk.size,
+ BOOTMEM_DEFAULT);
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
diff --git a/arch/m68k/mac/Makefile b/arch/m68k/mac/Makefile
index 995a09d912f5..1d265ba365ad 100644
--- a/arch/m68k/mac/Makefile
+++ b/arch/m68k/mac/Makefile
@@ -3,4 +3,4 @@
#
obj-y := config.o bootparse.o macints.o iop.o via.o oss.o psc.o \
- baboon.o macboing.o debug.o misc.o mac_ksyms.o
+ baboon.o macboing.o debug.o misc.o
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 01b468b9392e..735a49b4b936 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -58,8 +58,6 @@ extern struct mem_info m68k_memory[NUM_MEMINFO];
extern struct mem_info m68k_ramdisk;
-extern char m68k_command_line[CL_SIZE];
-
void *mac_env; /* Loaded by the boot asm */
/* The phys. video addr. - might be bogus on some machines */
diff --git a/arch/m68k/mac/mac_ksyms.c b/arch/m68k/mac/mac_ksyms.c
deleted file mode 100644
index 6e37ceb0f3b5..000000000000
--- a/arch/m68k/mac/mac_ksyms.c
+++ /dev/null
@@ -1,8 +0,0 @@
-#include <linux/module.h>
-#include <asm/ptrace.h>
-#include <asm/traps.h>
-
-/* Says whether we're using A/UX interrupts or not */
-extern int via_alt_mapping;
-
-EXPORT_SYMBOL(via_alt_mapping);
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index 8df270e950fa..fa485df4160e 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -28,6 +28,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ide.h>
+#include <linux/module.h>
#include <asm/bootinfo.h>
#include <asm/macintosh.h>
@@ -41,7 +42,9 @@ volatile __u8 *via1, *via2;
/* See note in mac_via.h about how this is possibly not useful */
volatile long *via_memory_bogon=(long *)&via_memory_bogon;
#endif
-int rbv_present, via_alt_mapping;
+int rbv_present;
+int via_alt_mapping;
+EXPORT_SYMBOL(via_alt_mapping);
__u8 rbv_clear;
/*
diff --git a/arch/m68k/mvme16x/Makefile b/arch/m68k/mvme16x/Makefile
index 950e82f21640..edb3f6e6ee6a 100644
--- a/arch/m68k/mvme16x/Makefile
+++ b/arch/m68k/mvme16x/Makefile
@@ -2,4 +2,4 @@
# Makefile for Linux arch/m68k/mvme16x source directory
#
-obj-y := config.o rtc.o mvme16x_ksyms.o
+obj-y := config.o rtc.o
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index daa785161401..24cbc3030454 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -25,6 +25,7 @@
#include <linux/genhd.h>
#include <linux/rtc.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <asm/bootinfo.h>
#include <asm/system.h>
@@ -58,6 +59,7 @@ static irq_handler_t tick_handler;
unsigned short mvme16x_config;
+EXPORT_SYMBOL(mvme16x_config);
int mvme16x_parse_bootinfo(const struct bi_record *bi)
diff --git a/arch/m68k/mvme16x/mvme16x_ksyms.c b/arch/m68k/mvme16x/mvme16x_ksyms.c
deleted file mode 100644
index 4a8a3634bb47..000000000000
--- a/arch/m68k/mvme16x/mvme16x_ksyms.c
+++ /dev/null
@@ -1,6 +0,0 @@
-#include <linux/module.h>
-#include <linux/types.h>
-#include <asm/ptrace.h>
-#include <asm/mvme16xhw.h>
-
-EXPORT_SYMBOL(mvme16x_config);
diff --git a/arch/m68knommu/Kconfig.debug b/arch/m68knommu/Kconfig.debug
index 9ff47bd09aee..ed6d9a83bfdb 100644
--- a/arch/m68knommu/Kconfig.debug
+++ b/arch/m68knommu/Kconfig.debug
@@ -21,13 +21,6 @@ config BOOTPARAM_STRING
default 'console=ttyS0,19200'
depends on BOOTPARAM
-config DUMPTOFLASH
- bool "Panic/Dump to FLASH"
- depends on COLDFIRE
- help
- Dump any panic of trap output into a flash memory segment
- for later analysis.
-
config NO_KERNEL_MSG
bool "Suppress Kernel BUG Messages"
help
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig
index 5a0ecaaee3b0..648113075f97 100644
--- a/arch/m68knommu/defconfig
+++ b/arch/m68knommu/defconfig
@@ -597,7 +597,6 @@ CONFIG_MSDOS_PARTITION=y
# CONFIG_FULLDEBUG is not set
# CONFIG_HIGHPROFILE is not set
# CONFIG_BOOTPARAM is not set
-# CONFIG_DUMPTOFLASH is not set
# CONFIG_NO_KERNEL_MSG is not set
# CONFIG_BDM_DISABLE is not set
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index f795062aba1e..53fad1490282 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -24,14 +24,6 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(dump_fpu);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(ip_fast_csum);
@@ -46,9 +38,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
it's OK to leave it out of version control. */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__down_failed);
EXPORT_SYMBOL(__down_failed_interruptible);
diff --git a/arch/m68knommu/kernel/setup.c b/arch/m68knommu/kernel/setup.c
index 332345d7675d..156c6c662c7e 100644
--- a/arch/m68knommu/kernel/setup.c
+++ b/arch/m68knommu/kernel/setup.c
@@ -64,9 +64,6 @@ void (*mach_power_off)(void);
#ifdef CONFIG_M68VZ328
#define CPU "MC68VZ328"
#endif
-#ifdef CONFIG_M68332
- #define CPU "MC68332"
-#endif
#ifdef CONFIG_M68360
#define CPU "MC68360"
#endif
@@ -206,7 +203,7 @@ void __init setup_arch(char **cmdline_p)
* the bootmem bitmap so we then reserve it after freeing it :-)
*/
free_bootmem(memory_start, memory_end - memory_start);
- reserve_bootmem(memory_start, bootmap_size);
+ reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
/*
* Get kmalloc into gear.
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index 9620093514bc..1b02b8820068 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -336,7 +336,7 @@ ENTRY(sys_call_table)
.long sys_epoll_pwait /* 315 */
.long sys_utimensat
.long sys_signalfd
- .long sys_timerfd
+ .long sys_ni_syscall
.long sys_eventfd
.long sys_fallocate /* 320 */
diff --git a/arch/m68knommu/lib/memcpy.c b/arch/m68knommu/lib/memcpy.c
index 0d5577569e4c..b50dbcad4746 100644
--- a/arch/m68knommu/lib/memcpy.c
+++ b/arch/m68knommu/lib/memcpy.c
@@ -1,6 +1,5 @@
#include <linux/types.h>
-#include <linux/autoconf.h>
void * memcpy(void * to, const void * from, size_t n)
{
diff --git a/arch/mips/au1000/common/gpio.c b/arch/mips/au1000/common/gpio.c
index 8527856aec45..0b658f1db4ce 100644
--- a/arch/mips/au1000/common/gpio.c
+++ b/arch/mips/au1000/common/gpio.c
@@ -27,7 +27,6 @@
* others have a second one : GPIO2
*/
-#include <linux/autoconf.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/types.h>
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 82480a1717d8..f798139e888e 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -660,7 +660,7 @@ einval: li v0, -EINVAL
sys sys_ioprio_get 2 /* 4315 */
sys sys_utimensat 4
sys sys_signalfd 3
- sys sys_timerfd 4
+ sys sys_ni_syscall 0
sys sys_eventfd 1
sys sys_fallocate 6 /* 4320 */
.endm
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index c2c10876da2e..a626be6baea3 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -475,7 +475,7 @@ sys_call_table:
PTR sys_ioprio_get
PTR sys_utimensat /* 5275 */
PTR sys_signalfd
- PTR sys_timerfd
+ PTR sys_ni_syscall
PTR sys_eventfd
PTR sys_fallocate
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 01993ec3368b..9d5bcaf1b389 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -401,7 +401,7 @@ EXPORT(sysn32_call_table)
PTR sys_ioprio_get
PTR compat_sys_utimensat
PTR compat_sys_signalfd /* 5280 */
- PTR compat_sys_timerfd
+ PTR sys_ni_syscall
PTR sys_eventfd
PTR sys_fallocate
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index dd68afce7da5..fd2019c1ec2d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -523,7 +523,7 @@ sys_call_table:
PTR sys_ioprio_get /* 4315 */
PTR compat_sys_utimensat
PTR compat_sys_signalfd
- PTR compat_sys_timerfd
+ PTR sys_ni_syscall
PTR sys_eventfd
PTR sys32_fallocate /* 4320 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c032409cba9b..39f3dfe134fb 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -232,7 +232,7 @@ static void __init finalize_initrd(void)
goto disable;
}
- reserve_bootmem(__pa(initrd_start), size);
+ reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
initrd_below_start_ok = 1;
printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
@@ -413,7 +413,7 @@ static void __init bootmem_init(void)
/*
* Reserve the bootmap memory.
*/
- reserve_bootmem(PFN_PHYS(mapstart), bootmap_size);
+ reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);
/*
* Reserve initrd memory if needed.
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 1e5dfc28294a..9d41dab90a80 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -52,7 +52,6 @@ int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);
-extern void __init calibrate_delay(void);
extern void cpu_idle(void);
/* Number of TCs (or siblings in Intel speak) per CPU core */
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 4c477c7ff74a..22fd41e946b2 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -356,7 +356,7 @@ asmlinkage int irix_syssgi(struct pt_regs *regs)
retval = NGROUPS_MAX;
goto out;
case 5:
- retval = NR_OPEN;
+ retval = sysctl_nr_open;
goto out;
case 6:
retval = 1;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index e5e023f50a07..bf438d02366e 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -465,7 +465,8 @@ static void __init node_mem_init(cnodeid_t node)
free_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
(slot_lastpfn - slot_firstpfn) << PAGE_SHIFT);
reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
- ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size);
+ ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size,
+ BOOTMEM_DEFAULT);
}
/*
diff --git a/arch/parisc/Kconfig.debug b/arch/parisc/Kconfig.debug
index 9166bd117267..bc989e522a04 100644
--- a/arch/parisc/Kconfig.debug
+++ b/arch/parisc/Kconfig.debug
@@ -2,15 +2,6 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-config DEBUG_RWLOCK
- bool "Read-write spinlock debugging"
- depends on DEBUG_KERNEL && SMP
- help
- If you say Y here then read-write lock processing will count how many
- times it has tried to get the lock and issue an error message after
- too many attempts. If you suspect a rwlock problem or a kernel
- hacker asks for this option then say Y. Otherwise say N.
-
config DEBUG_RODATA
bool "Write protect kernel read-only data structures"
depends on DEBUG_KERNEL
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index ea071218a3ed..ddacc72e38fb 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -1050,7 +1050,6 @@ CONFIG_SCHED_DEBUG=y
CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_FAULT_INJECTION is not set
-# CONFIG_DEBUG_RWLOCK is not set
# CONFIG_DEBUG_RODATA is not set
#
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index aa875fa43488..eb80f5e33d7d 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -315,11 +315,13 @@ static void __init setup_bootmem(void)
#define PDC_CONSOLE_IO_IODC_SIZE 32768
reserve_bootmem_node(NODE_DATA(0), 0UL,
- (unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
+ (unsigned long)(PAGE0->mem_free +
+ PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text),
- (unsigned long)(_end - _text));
+ (unsigned long)(_end - _text), BOOTMEM_DEFAULT);
reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
- ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
+ ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
+ BOOTMEM_DEFAULT);
#ifndef CONFIG_DISCONTIGMEM
@@ -328,7 +330,8 @@ static void __init setup_bootmem(void)
for (i = 0; i < npmem_holes; i++) {
reserve_bootmem_node(NODE_DATA(0),
(pmem_holes[i].start_pfn << PAGE_SHIFT),
- (pmem_holes[i].pages << PAGE_SHIFT));
+ (pmem_holes[i].pages << PAGE_SHIFT),
+ BOOTMEM_DEFAULT);
}
#endif
@@ -346,7 +349,8 @@ static void __init setup_bootmem(void)
initrd_below_start_ok = 1;
printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
- reserve_bootmem_node(NODE_DATA(0),__pa(initrd_start), initrd_reserve);
+ reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
+ initrd_reserve, BOOTMEM_DEFAULT);
}
}
#endif
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b94d4502a477..8dcac0b22d68 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -97,6 +97,7 @@ config EARLY_PRINTK
config COMPAT
bool
default y if PPC64
+ select COMPAT_BINFMT_ELF
config SYSVIPC_COMPAT
bool
@@ -256,6 +257,9 @@ config IOMMU_VMERGE
Most drivers don't have this problem; it is safe to say Y here.
+config IOMMU_HELPER
+ def_bool PPC64
+
config HOTPLUG_CPU
bool "Support for enabling/disabling CPUs"
depends on SMP && HOTPLUG && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
@@ -435,25 +439,6 @@ config WANT_DEVICE_TREE
bool
default n
-config DEVICE_TREE
- string "Static device tree source file"
- depends on WANT_DEVICE_TREE
- help
- This specifies the device tree source (.dts) file to be
- compiled and included when building the bootwrapper. If a
- relative filename is given, then it will be relative to
- arch/powerpc/boot/dts. If you are not using the bootwrapper,
- or do not need to build a dts into the bootwrapper, this
- field is ignored.
-
- For example, this is required when building a cuImage target
- for an older U-Boot, which cannot pass a device tree itself.
- Such a kernel will not work with a newer U-Boot that tries to
- pass a device tree (unless you tell it not to). If your U-Boot
- does not mention a device tree in "help bootm", then use the
- cuImage target and specify a device tree here. Otherwise, use
- the uImage target and leave this field blank.
-
endmenu
config ISA_DMA_API
@@ -509,7 +494,7 @@ config PCI
bool "PCI support" if 40x || CPM2 || PPC_83xx || PPC_85xx || PPC_86xx \
|| PPC_MPC52xx || (EMBEDDED && (PPC_PSERIES || PPC_ISERIES)) \
|| PPC_PS3 || 44x
- default y if !40x && !CPM2 && !8xx && !PPC_83xx \
+ default y if !40x && !CPM2 && !8xx && !PPC_MPC512x && !PPC_83xx \
&& !PPC_85xx && !PPC_86xx
default PCI_PERMEDIA if !4xx && !CPM2 && !8xx
default PCI_QSPAN if !4xx && !CPM2 && 8xx
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index f70df9b64f8f..6845482f0093 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -151,14 +151,11 @@ core-$(CONFIG_XMON) += arch/powerpc/xmon/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
# Default to zImage, override when needed
-defaultimage-y := zImage
-defaultimage-$(CONFIG_DEFAULT_UIMAGE) := uImage
-KBUILD_IMAGE := $(defaultimage-y)
-all: $(KBUILD_IMAGE)
+all: zImage
CPPFLAGS_vmlinux.lds := -Upowerpc
-BOOT_TARGETS = zImage zImage.initrd uImage
+BOOT_TARGETS = zImage zImage.initrd uImage treeImage.% cuImage.%
PHONY += $(BOOT_TARGETS)
@@ -180,7 +177,7 @@ define archhelp
endef
install: vdso_install
- $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
+ $(Q)$(MAKE) $(build)=$(boot) install
vdso_install:
ifeq ($(CONFIG_PPC64),y)
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 122a27078998..49797a45416c 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -60,8 +60,9 @@ src-wlib := string.S crt0.S stdio.c main.c \
src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c \
cuboot-ebony.c treeboot-ebony.c prpmc2800.c \
ps3-head.S ps3-hvcall.S ps3.c treeboot-bamboo.c cuboot-8xx.c \
- cuboot-pq2.c cuboot-sequoia.c treeboot-walnut.c cuboot-bamboo.c \
- fixed-head.S ep88xc.c cuboot-hpc2.c ep405.c cuboot-taishan.c \
+ cuboot-pq2.c cuboot-sequoia.c treeboot-walnut.c \
+ cuboot-bamboo.c cuboot-mpc7448hpc2.c cuboot-taishan.c \
+ fixed-head.S ep88xc.c ep405.c \
cuboot-katmai.c cuboot-rainier.c redboot-8xx.c ep8248e.c \
cuboot-warp.c cuboot-85xx-cpm2.c
src-boot := $(src-wlib) $(src-plat) empty.c
@@ -123,6 +124,8 @@ targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a)
extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
$(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds
+dtstree := $(srctree)/$(src)/dts
+
wrapper :=$(srctree)/$(src)/wrapper
wrapperbits := $(extra-y) $(addprefix $(obj)/,addnote hack-coff mktree dtc) \
$(wrapper) FORCE
@@ -181,7 +184,7 @@ quiet_cmd_wrap = WRAP $@
image-$(CONFIG_PPC_PSERIES) += zImage.pseries
image-$(CONFIG_PPC_MAPLE) += zImage.pseries
image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
-image-$(CONFIG_PPC_PS3) += zImage.ps3
+image-$(CONFIG_PPC_PS3) += zImage-dtb.ps3
image-$(CONFIG_PPC_CELLEB) += zImage.pseries
image-$(CONFIG_PPC_CHRP) += zImage.chrp
image-$(CONFIG_PPC_EFIKA) += zImage.chrp
@@ -191,33 +194,69 @@ image-$(CONFIG_PPC_PRPMC2800) += zImage.prpmc2800
image-$(CONFIG_PPC_ISERIES) += zImage.iseries
image-$(CONFIG_DEFAULT_UIMAGE) += uImage
-ifneq ($(CONFIG_DEVICE_TREE),"")
-image-$(CONFIG_PPC_8xx) += cuImage.8xx
-image-$(CONFIG_PPC_EP88XC) += zImage.ep88xc
-image-$(CONFIG_EP405) += zImage.ep405
-image-$(CONFIG_8260) += cuImage.pq2
-image-$(CONFIG_EP8248E) += zImage.ep8248e
-image-$(CONFIG_PPC_MPC52xx) += cuImage.52xx
-image-$(CONFIG_STORCENTER) += cuImage.824x
-image-$(CONFIG_PPC_83xx) += cuImage.83xx
-image-$(CONFIG_PPC_85xx) += cuImage.85xx
-ifeq ($(CONFIG_CPM2),y)
-image-$(CONFIG_PPC_85xx) += cuImage.85xx-cpm2
-endif
-image-$(CONFIG_MPC7448HPC2) += cuImage.hpc2
+#
+# Targets which embed a device tree blob
+#
+# These are the default targets to build images which embed device tree blobs.
+# They are only required on boards which do not have FDT support in firmware.
+# Boards with newish u-boot firmware can use the uImage target above
+#
+
+# Board ports in arch/powerpc/platform/40x/Kconfig
+image-$(CONFIG_EP405) += zImage-dtb.ep405
+image-$(CONFIG_WALNUT) += treeImage.walnut
+
+# Board ports in arch/powerpc/platform/44x/Kconfig
image-$(CONFIG_EBONY) += treeImage.ebony cuImage.ebony
image-$(CONFIG_BAMBOO) += treeImage.bamboo cuImage.bamboo
image-$(CONFIG_SEQUOIA) += cuImage.sequoia
image-$(CONFIG_RAINIER) += cuImage.rainier
-image-$(CONFIG_WALNUT) += treeImage.walnut
image-$(CONFIG_TAISHAN) += cuImage.taishan
image-$(CONFIG_KATMAI) += cuImage.katmai
image-$(CONFIG_WARP) += cuImage.warp
-endif
-ifneq ($(CONFIG_REDBOOT),"")
-image-$(CONFIG_PPC_8xx) += zImage.redboot-8xx
-endif
+# Board ports in arch/powerpc/platform/8xx/Kconfig
+image-$(CONFIG_PPC_MPC86XADS) += cuImage.mpc866ads
+image-$(CONFIG_PPC_MPC885ADS) += cuImage.mpc885ads
+image-$(CONFIG_PPC_EP88XC) += zImage-dtb.ep88xc
+image-$(CONFIG_PPC_ADDER875) += cuImage.adder875-uboot \
+ zImage-dtb.adder875-redboot
+
+# Board ports in arch/powerpc/platform/52xx/Kconfig
+image-$(CONFIG_PPC_LITE5200) += cuImage.lite5200 cuImage.lite5200b
+
+# Board ports in arch/powerpc/platform/82xx/Kconfig
+image-$(CONFIG_MPC8272_ADS) += cuImage.mpc8272ads
+image-$(CONFIG_PQ2FADS) += cuImage.pq2fads
+image-$(CONFIG_EP8248E) += zImage-dtb.ep8248e
+
+# Board ports in arch/powerpc/platform/83xx/Kconfig
+image-$(CONFIG_MPC832x_MDS) += cuImage.mpc832x_mds
+image-$(CONFIG_MPC832x_RDB) += cuImage.mpc832x_rdb
+image-$(CONFIG_MPC834x_ITX) += cuImage.mpc8349emitx \
+ cuImage.mpc8349emitxgp
+image-$(CONFIG_MPC834x_MDS) += cuImage.mpc834x_mds
+image-$(CONFIG_MPC836x_MDS) += cuImage.mpc836x_mds
+
+# Board ports in arch/powerpc/platform/85xx/Kconfig
+image-$(CONFIG_MPC8540_ADS) += cuImage.mpc8540ads
+image-$(CONFIG_MPC8560_ADS) += cuImage.mpc8560ads
+image-$(CONFIG_MPC85xx_CDS) += cuImage.mpc8541cds \
+ cuImage.mpc8548cds \
+ cuImage.mpc8555cds
+image-$(CONFIG_MPC85xx_MDS) += cuImage.mpc8568mds
+image-$(CONFIG_MPC85xx_DS) += cuImage.mpc8544ds \
+ cuImage.mpc8572ds
+image-$(CONFIG_TQM8540) += cuImage.tqm8540
+image-$(CONFIG_TQM8541) += cuImage.tqm8541
+image-$(CONFIG_TQM8555) += cuImage.tqm8555
+image-$(CONFIG_TQM8560) += cuImage.tqm8560
+image-$(CONFIG_SBC8548) += cuImage.tqm8548
+image-$(CONFIG_SBC8560) += cuImage.tqm8560
+
+# Board ports in arch/powerpc/platform/embedded6xx/Kconfig
+image-$(CONFIG_STORCENTER) += cuImage.storcenter
+image-$(CONFIG_MPC7448HPC2) += cuImage.mpc7448hpc2
# For 32-bit powermacs, build the COFF and miboot images
# as well as the ELF images.
@@ -233,24 +272,20 @@ targets += $(image-y) $(initrd-y)
$(addprefix $(obj)/, $(initrd-y)): $(obj)/ramdisk.image.gz
-# If CONFIG_WANT_DEVICE_TREE is set and CONFIG_DEVICE_TREE isn't an
-# empty string, define 'dts' to be path to the dts
-# CONFIG_DEVICE_TREE will have "" around it, make sure to strip them
-ifeq ($(CONFIG_WANT_DEVICE_TREE),y)
-ifneq ($(CONFIG_DEVICE_TREE),"")
-dts = $(if $(shell echo $(CONFIG_DEVICE_TREE) | grep '^/'),\
- ,$(srctree)/$(src)/dts/)$(CONFIG_DEVICE_TREE:"%"=%)
-endif
-endif
-
# Don't put the ramdisk on the pattern rule; when its missing make will try
# the pattern rule with less dependencies that also matches (even with the
# hard dependency listed).
-$(obj)/zImage.initrd.%: vmlinux $(wrapperbits) $(dts)
- $(call if_changed,wrap,$*,$(dts),,$(obj)/ramdisk.image.gz)
+$(obj)/zImage.initrd.%: vmlinux $(wrapperbits)
+ $(call if_changed,wrap,$*,,,$(obj)/ramdisk.image.gz)
-$(obj)/zImage.%: vmlinux $(wrapperbits) $(dts)
- $(call if_changed,wrap,$*,$(dts))
+$(obj)/zImage.%: vmlinux $(wrapperbits)
+ $(call if_changed,wrap,$*)
+
+$(obj)/zImage-dtb.initrd.%: vmlinux $(wrapperbits) $(dtstree)/%.dts
+ $(call if_changed,wrap,$*,$(dtstree)/$*.dts,,$(obj)/ramdisk.image.gz)
+
+$(obj)/zImage-dtb.%: vmlinux $(wrapperbits) $(dtstree)/%.dts
+ $(call if_changed,wrap,$*,$(dtstree)/$*.dts)
# This cannot be in the root of $(src) as the zImage rule always adds a $(obj)
# prefix
@@ -260,24 +295,17 @@ $(obj)/vmlinux.strip: vmlinux
$(obj)/zImage.iseries: vmlinux
$(STRIP) -s -R .comment $< -o $@
-$(obj)/zImage.ps3: vmlinux $(wrapper) $(wrapperbits) $(srctree)/$(src)/dts/ps3.dts
- $(STRIP) -s -R .comment $< -o vmlinux.strip
- $(call cmd,wrap,ps3,$(srctree)/$(src)/dts/ps3.dts,,)
-
-$(obj)/zImage.initrd.ps3: vmlinux $(wrapper) $(wrapperbits) $(srctree)/$(src)/dts/ps3.dts $(obj)/ramdisk.image.gz
- $(call cmd,wrap,ps3,$(srctree)/$(src)/dts/ps3.dts,,$(obj)/ramdisk.image.gz)
-
$(obj)/uImage: vmlinux $(wrapperbits)
$(call if_changed,wrap,uboot)
-$(obj)/cuImage.%: vmlinux $(dts) $(wrapperbits)
- $(call if_changed,wrap,cuboot-$*,$(dts))
+$(obj)/cuImage.%: vmlinux $(dtstree)/%.dts $(wrapperbits)
+ $(call if_changed,wrap,cuboot-$*,$(dtstree)/$*.dts)
-$(obj)/treeImage.initrd.%: vmlinux $(dts) $(wrapperbits)
- $(call if_changed,wrap,treeboot-$*,$(dts),,$(obj)/ramdisk.image.gz)
+$(obj)/treeImage.initrd.%: vmlinux $(dtstree)/%.dts $(wrapperbits)
+ $(call if_changed,wrap,treeboot-$*,$(dtstree)/$*.dts,,$(obj)/ramdisk.image.gz)
-$(obj)/treeImage.%: vmlinux $(dts) $(wrapperbits)
- $(call if_changed,wrap,treeboot-$*,$(dts))
+$(obj)/treeImage.%: vmlinux $(dtstree)/%.dts $(wrapperbits)
+ $(call if_changed,wrap,treeboot-$*,$(dtstree)/$*.dts)
# If there isn't a platform selected then just strip the vmlinux.
ifeq (,$(image-y))
diff --git a/arch/powerpc/boot/cuboot-hpc2.c b/arch/powerpc/boot/cuboot-mpc7448hpc2.c
index 1b8953259d75..1b8953259d75 100644
--- a/arch/powerpc/boot/cuboot-hpc2.c
+++ b/arch/powerpc/boot/cuboot-mpc7448hpc2.c
diff --git a/arch/powerpc/boot/dts/adder875-redboot.dts b/arch/powerpc/boot/dts/adder875-redboot.dts
index 930bfb3894eb..28e9cd3d7a21 100644
--- a/arch/powerpc/boot/dts/adder875-redboot.dts
+++ b/arch/powerpc/boot/dts/adder875-redboot.dts
@@ -151,6 +151,7 @@
compatible = "fsl,mpc875-brg",
"fsl,cpm1-brg",
"fsl,cpm-brg";
+ clock-frequency = <50000000>;
reg = <0x9f0 0x10>;
};
diff --git a/arch/powerpc/boot/dts/adder875-uboot.dts b/arch/powerpc/boot/dts/adder875-uboot.dts
index 0197242dacfb..54fb60ec03e5 100644
--- a/arch/powerpc/boot/dts/adder875-uboot.dts
+++ b/arch/powerpc/boot/dts/adder875-uboot.dts
@@ -150,6 +150,7 @@
compatible = "fsl,mpc875-brg",
"fsl,cpm1-brg",
"fsl,cpm-brg";
+ clock-frequency = <50000000>;
reg = <0x9f0 0x10>;
};
diff --git a/arch/powerpc/boot/dts/mpc5121ads.dts b/arch/powerpc/boot/dts/mpc5121ads.dts
new file mode 100644
index 000000000000..94ad7b2b241e
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc5121ads.dts
@@ -0,0 +1,122 @@
+/*
+ * MPC5121E MDS Device Tree Source
+ *
+ * Copyright 2007 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+
+/ {
+ model = "mpc5121ads";
+ compatible = "fsl,mpc5121ads";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ PowerPC,5121@0 {
+ device_type = "cpu";
+ reg = <0>;
+ d-cache-line-size = <0x20>; // 32 bytes
+ i-cache-line-size = <0x20>; // 32 bytes
+ d-cache-size = <0x8000>; // L1, 32K
+ i-cache-size = <0x8000>; // L1, 32K
+ timebase-frequency = <49500000>;// 49.5 MHz (csb/4)
+ bus-frequency = <198000000>; // 198 MHz csb bus
+ clock-frequency = <396000000>; // 396 MHz ppc core
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>; // 256MB at 0
+ };
+
+ localbus@80000020 {
+ compatible = "fsl,mpc5121ads-localbus";
+ #address-cells = <2>;
+ #size-cells = <1>;
+ reg = <0x80000020 0x40>;
+
+ ranges = <0x0 0x0 0xfc000000 0x04000000
+ 0x2 0x0 0x82000000 0x00008000>;
+
+ flash@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0x0 0x4000000>;
+ bank-width = <4>;
+ device-width = <1>;
+ };
+
+ board-control@2,0 {
+ compatible = "fsl,mpc5121ads-cpld";
+ reg = <0x2 0x0 0x8000>;
+ };
+ };
+
+ soc@80000000 {
+ compatible = "fsl,mpc5121-immr";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #interrupt-cells = <2>;
+ ranges = <0x0 0x80000000 0x400000>;
+ reg = <0x80000000 0x400000>;
+ bus-frequency = <66000000>; // 66 MHz ips bus
+
+
+ // IPIC
+ // interrupts cell = <intr #, sense>
+ // sense values match linux IORESOURCE_IRQ_* defines:
+ // sense == 8: Level, low assertion
+ // sense == 2: Edge, high-to-low change
+ //
+ ipic: interrupt-controller@c00 {
+ compatible = "fsl,mpc5121-ipic", "fsl,ipic";
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <2>;
+ reg = <0xc00 0x100>;
+ };
+
+ // 512x PSCs are not compatible with 52xx PSCs
+ // PSC3 serial port A aka ttyPSC0
+ serial@11300 {
+ device_type = "serial";
+ compatible = "fsl,mpc5121-psc-uart";
+ // Logical port assignment needed until driver
+ // learns to use aliases
+ port-number = <0>;
+ cell-index = <3>;
+ reg = <0x11300 0x100>;
+ interrupts = <0x28 0x8>; // actually the fifo irq
+ interrupt-parent = < &ipic >;
+ };
+
+ // PSC4 serial port B aka ttyPSC1
+ serial@11400 {
+ device_type = "serial";
+ compatible = "fsl,mpc5121-psc-uart";
+ // Logical port assignment needed until driver
+ // learns to use aliases
+ port-number = <1>;
+ cell-index = <4>;
+ reg = <0x11400 0x100>;
+ interrupts = <0x28 0x8>; // actually the fifo irq
+ interrupt-parent = < &ipic >;
+ };
+
+ pscsfifo@11f00 {
+ compatible = "fsl,mpc5121-psc-fifo";
+ reg = <0x11f00 0x100>;
+ interrupts = <0x28 0x8>;
+ interrupt-parent = < &ipic >;
+ };
+ };
+};
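
The new mpc5121ads tree is written in /dts-v1/ syntax, so bare numbers are decimal and the cache sizes carry explicit 0x prefixes; in the flattened blob every numeric property still ends up as big-endian 32-bit cells. A quick way to read such a cell back on a running board is to pull it out of /proc/device-tree, as in the stand-alone sketch below; the node path is only an example taken from this file, and the helper is not part of the patch.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>          /* ntohl(): device-tree cells are big-endian */

int main(void)
{
        /* example property path; adjust to the node being inspected */
        const char *path =
                "/proc/device-tree/soc@80000000/serial@11300/cell-index";
        FILE *f = fopen(path, "rb");
        uint32_t cell;

        if (!f) {
                perror(path);
                return 1;
        }
        if (fread(&cell, sizeof(cell), 1, f) != 1) {
                fprintf(stderr, "short read on %s\n", path);
                fclose(f);
                return 1;
        }
        printf("%s = %u (0x%x)\n", path, ntohl(cell), ntohl(cell));
        fclose(f);
        return 0;
}

On the big-endian board itself ntohl() is a no-op; it only matters when the blob is copied to and inspected on a little-endian host.
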
diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts
index 2d6653fe72ff..e1f0dca8ac39 100644
--- a/arch/powerpc/boot/dts/mpc8313erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8313erdb.dts
@@ -118,6 +118,10 @@
interrupts = <14 0x8>;
interrupt-parent = <&ipic>;
dfsrr;
+ rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
};
i2c@3100 {
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts
index b582032ba3d6..d7a1ececa30f 100644
--- a/arch/powerpc/boot/dts/mpc8315erdb.dts
+++ b/arch/powerpc/boot/dts/mpc8315erdb.dts
@@ -96,7 +96,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
- compatible = "simple-bus";
+ compatible = "fsl,mpc8315-immr", "simple-bus";
ranges = <0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/mpc834x_mds.dts b/arch/powerpc/boot/dts/mpc834x_mds.dts
index 7480edae85ed..0199c5c548d8 100644
--- a/arch/powerpc/boot/dts/mpc834x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc834x_mds.dts
@@ -332,7 +332,7 @@
0xc000 0x0 0x0 0x3 &ipic 23 0x8
0xc000 0x0 0x0 0x4 &ipic 20 0x8>;
interrupt-parent = <&ipic>;
- interrupts = <66 0x8>;
+ interrupts = <67 0x8>;
bus-range = <0 0>;
ranges = <0x02000000 0x0 0xb0000000 0xb0000000 0x0 0x10000000
0x42000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts
index 813c259abbe5..db37214aee37 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds.dts
@@ -42,6 +42,18 @@
bus-frequency = <0>;
clock-frequency = <0>;
};
+
+ PowerPC,8572@1 {
+ device_type = "cpu";
+ reg = <1>;
+ d-cache-line-size = <20>; // 32 bytes
+ i-cache-line-size = <20>; // 32 bytes
+ d-cache-size = <8000>; // L1, 32K
+ i-cache-size = <8000>; // L1, 32K
+ timebase-frequency = <0>;
+ bus-frequency = <0>;
+ clock-frequency = <0>;
+ };
};
memory {
diff --git a/arch/powerpc/boot/dts/mpc885ads.dts b/arch/powerpc/boot/dts/mpc885ads.dts
index 8848e637293e..d84a012c2aaf 100644
--- a/arch/powerpc/boot/dts/mpc885ads.dts
+++ b/arch/powerpc/boot/dts/mpc885ads.dts
@@ -166,6 +166,7 @@
compatible = "fsl,mpc885-brg",
"fsl,cpm1-brg",
"fsl,cpm-brg";
+ clock-frequency = <0>;
reg = <9f0 10>;
};
diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts
index d9046c1adcbe..5c13d46f441d 100644
--- a/arch/powerpc/boot/dts/sequoia.dts
+++ b/arch/powerpc/boot/dts/sequoia.dts
@@ -138,6 +138,14 @@
interrupts = <15 8>;
};
+ USB0: ehci@e0000300 {
+ compatible = "ibm,usb-ehci-440epx", "usb-ehci";
+ interrupt-parent = <&UIC0>;
+ interrupts = <1a 4>;
+ reg = <0 e0000300 90 0 e0000390 70>;
+ big-endian;
+ };
+
POB0: opb {
compatible = "ibm,opb-440epx", "ibm,opb";
#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/storcenter.dts b/arch/powerpc/boot/dts/storcenter.dts
index 2204874ac5f3..5893816c0bce 100644
--- a/arch/powerpc/boot/dts/storcenter.dts
+++ b/arch/powerpc/boot/dts/storcenter.dts
@@ -15,7 +15,7 @@
/ {
model = "StorCenter";
- compatible = "storcenter";
+ compatible = "iomega,storcenter";
#address-cells = <1>;
#size-cells = <1>;
@@ -62,12 +62,12 @@
#size-cells = <0>;
compatible = "fsl-i2c";
reg = <0x3000 0x100>;
- interrupts = <5 2>;
+ interrupts = <17 2>;
interrupt-parent = <&mpic>;
rtc@68 {
compatible = "dallas,ds1337";
- reg = <68>;
+ reg = <0x68>;
};
};
@@ -78,7 +78,7 @@
reg = <0x4500 0x20>;
clock-frequency = <97553800>; /* Hz */
current-speed = <115200>;
- interrupts = <9 2>;
+ interrupts = <25 2>;
interrupt-parent = <&mpic>;
};
@@ -89,7 +89,7 @@
reg = <0x4600 0x20>;
clock-frequency = <97553800>; /* Hz */
current-speed = <9600>;
- interrupts = <10 2>;
+ interrupts = <26 2>;
interrupt-parent = <&mpic>;
};
@@ -136,6 +136,6 @@
};
chosen {
- linux,stdout-path = "/soc/serial@4500";
+ linux,stdout-path = &serial0;
};
};
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 763a0c46f441..c3178155311b 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -158,6 +158,29 @@ miboot|uboot)
cuboot*)
binary=y
gzip=
+ case "$platform" in
+ *-mpc885ads|*-adder875*|*-ep88xc)
+ platformo=$object/cuboot-8xx.o
+ ;;
+ *5200*|*-motionpro)
+ platformo=$object/cuboot-52xx.o
+ ;;
+ *-pq2fads|*-ep8248e|*-mpc8272*|*-storcenter)
+ platformo=$object/cuboot-pq2.o
+ ;;
+ *-mpc824*)
+ platformo=$object/cuboot-824x.o
+ ;;
+ *-mpc83*)
+ platformo=$object/cuboot-83xx.o
+ ;;
+ *-tqm8541|*-mpc8560*|*-tqm8560|*-tqm8555*)
+ platformo=$object/cuboot-85xx-cpm2.o
+ ;;
+ *-mpc85*)
+ platformo=$object/cuboot-85xx.o
+ ;;
+ esac
;;
ps3)
platformo="$object/ps3-head.o $object/ps3-hvcall.o $object/ps3.o"
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 31bdbf3f7566..a9807f083bc4 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -186,7 +186,7 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT is not set
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
-# CONFIG_MATH_EMULATION is not set
+CONFIG_MATH_EMULATION=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
@@ -416,14 +416,14 @@ CONFIG_PHYLIB=y
# MII PHY device drivers
#
CONFIG_MARVELL_PHY=y
-# CONFIG_DAVICOM_PHY is not set
+CONFIG_DAVICOM_PHY=y
# CONFIG_QSEMI_PHY is not set
# CONFIG_LXT_PHY is not set
# CONFIG_CICADA_PHY is not set
-# CONFIG_VITESSE_PHY is not set
+CONFIG_VITESSE_PHY=y
# CONFIG_SMSC_PHY is not set
# CONFIG_BROADCOM_PHY is not set
-# CONFIG_ICPLUS_PHY is not set
+CONFIG_ICPLUS_PHY=y
# CONFIG_FIXED_PHY is not set
# CONFIG_MDIO_BITBANG is not set
CONFIG_NET_ETHERNET=y
@@ -436,7 +436,7 @@ CONFIG_MII=y
CONFIG_NETDEV_1000=y
CONFIG_GIANFAR=y
# CONFIG_GFAR_NAPI is not set
-# CONFIG_UCC_GETH is not set
+CONFIG_UCC_GETH=y
CONFIG_NETDEV_10000=y
#
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 58dbfeff9b4d..0662ae46f724 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -2,6 +2,8 @@
# Makefile for the linux kernel.
#
+CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
ifeq ($(CONFIG_PPC64),y)
CFLAGS_prom_init.o += -mno-minimal-toc
endif
@@ -15,7 +17,7 @@ obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
init_task.o process.o systbl.o idle.o \
signal.o
obj-y += vdso32/
-obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
+obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o cpu_setup_ppc970.o \
cpu_setup_pa6t.o \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index ed083feaf6f9..4b749c416464 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -22,6 +22,7 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/suspend.h>
+#include <linux/hrtimer.h>
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
@@ -312,7 +313,7 @@ int main(void)
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
- DEFINE(CLOCK_REALTIME_RES, TICK_NSEC);
+ DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
#ifdef CONFIG_BUG
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
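
CLOCK_REALTIME_RES is one of the constants asm-offsets.c exports for use from assembly (presumably the powerpc VDSO's clock_getres() path), so switching its value from TICK_NSEC to MONOTONIC_RES_NSEC (with <linux/hrtimer.h> pulled in above for the definition) lets user space see hrtimer resolution rather than a jiffy. For readers unfamiliar with the mechanism, here is a minimal sketch of the kbuild DEFINE() trick; it follows the usual idiom rather than quoting the powerpc file, and the symbol names are invented.

/*
 * Compile with "gcc -S asm-offsets-sketch.c"; the generated assembly
 * contains marker lines starting with "->", which Kbuild's sed script
 * turns into #define lines in the generated asm-offsets.h header.
 * Not meant to be assembled or run.
 */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

void export_constants(void)
{
        DEFINE(NSEC_PER_SEC_SYM, 1000000000);
        DEFINE(CLOCK_RES_SYM, 1);       /* stand-in value for illustration */
}
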
diff --git a/arch/powerpc/kernel/binfmt_elf32.c b/arch/powerpc/kernel/binfmt_elf32.c
deleted file mode 100644
index 1d45d7782d4e..000000000000
--- a/arch/powerpc/kernel/binfmt_elf32.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * binfmt_elf32.c: Support 32-bit PPC ELF binaries on Power3 and followons.
- * based on the SPARC64 version.
- * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
- *
- * Copyright (C) 2000,2001 Ken Aaker (kdaaker@rchland.vnet.ibm.com), IBM Corp
- * Copyright (C) 2001 Anton Blanchard (anton@au.ibm.com), IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/processor.h>
-#include <linux/module.h>
-#include <linux/compat.h>
-#include <linux/elfcore-compat.h>
-
-#undef ELF_ARCH
-#undef ELF_CLASS
-#define ELF_CLASS ELFCLASS32
-#define ELF_ARCH EM_PPC
-
-#undef elfhdr
-#undef elf_phdr
-#undef elf_note
-#undef elf_addr_t
-#define elfhdr elf32_hdr
-#define elf_phdr elf32_phdr
-#define elf_note elf32_note
-#define elf_addr_t Elf32_Off
-
-#define elf_prstatus compat_elf_prstatus
-#define elf_prpsinfo compat_elf_prpsinfo
-
-#define elf_core_copy_regs compat_elf_core_copy_regs
-static inline void compat_elf_core_copy_regs(compat_elf_gregset_t *elf_regs,
- struct pt_regs *regs)
-{
- PPC_ELF_CORE_COPY_REGS((*elf_regs), regs);
-}
-
-#define elf_core_copy_task_regs compat_elf_core_copy_task_regs
-static int compat_elf_core_copy_task_regs(struct task_struct *tsk,
- compat_elf_gregset_t *elf_regs)
-{
- struct pt_regs *regs = tsk->thread.regs;
- if (regs)
- compat_elf_core_copy_regs(elf_regs, regs);
- return 1;
-}
-
-#include <linux/time.h>
-
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-static __inline__ void
-cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
-{
- unsigned long jiffies = cputime_to_jiffies(cputime);
- value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
- value->tv_sec = jiffies / HZ;
-}
-
-#define init_elf_binfmt init_elf32_binfmt
-
-#include "../../../fs/binfmt_elf.c"
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index a4c2771b5e62..2a8f5cc5184f 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -959,6 +959,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
+ .num_pmcs = 4,
+ .oprofile_cpu_type = "ppc/e300",
+ .oprofile_type = PPC_OPROFILE_FSL_EMB,
.platform = "ppc603",
},
{ /* e300c4 (e300c1, plus one IU) */
@@ -971,6 +974,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
.machine_check = machine_check_generic,
+ .num_pmcs = 4,
+ .oprofile_cpu_type = "ppc/e300",
+ .oprofile_type = PPC_OPROFILE_FSL_EMB,
.platform = "ppc603",
},
{ /* default match, we assume split I/D cache & TB (non-601)... */
@@ -1435,7 +1441,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500",
- .oprofile_type = PPC_OPROFILE_BOOKE,
+ .oprofile_type = PPC_OPROFILE_FSL_EMB,
.machine_check = machine_check_e500,
.platform = "ppc8540",
},
@@ -1453,7 +1459,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 32,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e500",
- .oprofile_type = PPC_OPROFILE_BOOKE,
+ .oprofile_type = PPC_OPROFILE_FSL_EMB,
.machine_check = machine_check_e500,
.platform = "ppc8548",
},
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c
index 84239076a5b8..3a317cb0636a 100644
--- a/arch/powerpc/kernel/dma_64.c
+++ b/arch/powerpc/kernel/dma_64.c
@@ -31,8 +31,8 @@ static inline unsigned long device_to_mask(struct device *dev)
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- return iommu_alloc_coherent(dev->archdata.dma_data, size, dma_handle,
- device_to_mask(dev), flag,
+ return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
+ dma_handle, device_to_mask(dev), flag,
dev->archdata.numa_node);
}
@@ -52,7 +52,7 @@ static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
size_t size,
enum dma_data_direction direction)
{
- return iommu_map_single(dev->archdata.dma_data, vaddr, size,
+ return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
device_to_mask(dev), direction);
}
@@ -68,7 +68,7 @@ static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- return iommu_map_sg(dev->archdata.dma_data, sglist, nelems,
+ return iommu_map_sg(dev, sglist, nelems,
device_to_mask(dev), direction);
}
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index a3c406aca664..8f1f4e539c4b 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -31,6 +31,7 @@
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
+#include <linux/iommu-helper.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
@@ -81,17 +82,19 @@ static int __init setup_iommu(char *str)
__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
+static unsigned long iommu_range_alloc(struct device *dev,
+ struct iommu_table *tbl,
unsigned long npages,
unsigned long *handle,
unsigned long mask,
unsigned int align_order)
{
- unsigned long n, end, i, start;
+ unsigned long n, end, start;
unsigned long limit;
int largealloc = npages > 15;
int pass = 0;
unsigned long align_mask;
+ unsigned long boundary_size;
align_mask = 0xffffffffffffffffl >> (64 - align_order);
@@ -136,14 +139,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
start &= mask;
}
- n = find_next_zero_bit(tbl->it_map, limit, start);
-
- /* Align allocation */
- n = (n + align_mask) & ~align_mask;
-
- end = n + npages;
+ if (dev)
+ boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ 1 << IOMMU_PAGE_SHIFT);
+ else
+ boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+ /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
- if (unlikely(end >= limit)) {
+ n = iommu_area_alloc(tbl->it_map, limit, start, npages,
+ tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
+ align_mask);
+ if (n == -1) {
if (likely(pass < 2)) {
/* First failure, just rescan the half of the table.
* Second failure, rescan the other half of the table.
@@ -158,14 +164,7 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
}
}
- for (i = n; i < end; i++)
- if (test_bit(i, tbl->it_map)) {
- start = i+1;
- goto again;
- }
-
- for (i = n; i < end; i++)
- __set_bit(i, tbl->it_map);
+ end = n + npages;
/* Bump the hint to a new block for small allocs. */
if (largealloc) {
@@ -184,16 +183,17 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
return n;
}
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
- unsigned int npages, enum dma_data_direction direction,
- unsigned long mask, unsigned int align_order)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+ void *page, unsigned int npages,
+ enum dma_data_direction direction,
+ unsigned long mask, unsigned int align_order)
{
unsigned long entry, flags;
dma_addr_t ret = DMA_ERROR_CODE;
spin_lock_irqsave(&(tbl->it_lock), flags);
- entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);
+ entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
if (unlikely(entry == DMA_ERROR_CODE)) {
spin_unlock_irqrestore(&(tbl->it_lock), flags);
@@ -224,7 +224,6 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned int npages)
{
unsigned long entry, free_entry;
- unsigned long i;
entry = dma_addr >> IOMMU_PAGE_SHIFT;
free_entry = entry - tbl->it_offset;
@@ -246,9 +245,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
}
ppc_md.tce_free(tbl, entry, npages);
-
- for (i = 0; i < npages; i++)
- __clear_bit(free_entry+i, tbl->it_map);
+ iommu_area_free(tbl->it_map, free_entry, npages);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
@@ -270,16 +267,18 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
-int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, unsigned long mask,
enum dma_data_direction direction)
{
+ struct iommu_table *tbl = dev->archdata.dma_data;
dma_addr_t dma_next = 0, dma_addr;
unsigned long flags;
struct scatterlist *s, *outs, *segstart;
int outcount, incount, i;
unsigned int align;
unsigned long handle;
+ unsigned int max_seg_size;
BUG_ON(direction == DMA_NONE);
@@ -298,6 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
spin_lock_irqsave(&(tbl->it_lock), flags);
+ max_seg_size = dma_get_max_seg_size(dev);
for_each_sg(sglist, s, nelems, i) {
unsigned long vaddr, npages, entry, slen;
@@ -314,7 +314,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
(vaddr & ~PAGE_MASK) == 0)
align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
- entry = iommu_range_alloc(tbl, npages, &handle,
+ entry = iommu_range_alloc(dev, tbl, npages, &handle,
mask >> IOMMU_PAGE_SHIFT, align);
DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
@@ -344,7 +344,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
- if (novmerge || (dma_addr != dma_next)) {
+ if (novmerge || (dma_addr != dma_next) ||
+ (outs->dma_length + s->length > max_seg_size)) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
@@ -452,9 +453,6 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
unsigned long sz;
- unsigned long start_index, end_index;
- unsigned long entries_per_4g;
- unsigned long index;
static int welcomed = 0;
struct page *page;
@@ -476,6 +474,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
#ifdef CONFIG_CRASH_DUMP
if (ppc_md.tce_get) {
+ unsigned long index;
unsigned long tceval;
unsigned long tcecount = 0;
@@ -506,23 +505,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif
- /*
- * DMA cannot cross 4 GB boundary. Mark last entry of each 4
- * GB chunk as reserved.
- */
- if (protect4gb) {
- entries_per_4g = 0x100000000l >> IOMMU_PAGE_SHIFT;
-
- /* Mark the last bit before a 4GB boundary as used */
- start_index = tbl->it_offset | (entries_per_4g - 1);
- start_index -= tbl->it_offset;
-
- end_index = tbl->it_size;
-
- for (index = start_index; index < end_index - 1; index += entries_per_4g)
- __set_bit(index, tbl->it_map);
- }
-
if (!welcomed) {
printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
novmerge ? "disabled" : "enabled");
@@ -570,9 +552,9 @@ void iommu_free_table(struct iommu_table *tbl, const char *node_name)
* need not be page aligned, the dma_addr_t returned will point to the same
* byte within the page as vaddr.
*/
-dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
- size_t size, unsigned long mask,
- enum dma_data_direction direction)
+dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
+ void *vaddr, size_t size, unsigned long mask,
+ enum dma_data_direction direction)
{
dma_addr_t dma_handle = DMA_ERROR_CODE;
unsigned long uaddr;
@@ -589,7 +571,7 @@ dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
((unsigned long)vaddr & ~PAGE_MASK) == 0)
align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
- dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
+ dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
mask >> IOMMU_PAGE_SHIFT, align);
if (dma_handle == DMA_ERROR_CODE) {
if (printk_ratelimit()) {
@@ -621,8 +603,9 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
-void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
+void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+ size_t size, dma_addr_t *dma_handle,
+ unsigned long mask, gfp_t flag, int node)
{
void *ret = NULL;
dma_addr_t mapping;
@@ -656,7 +639,7 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
/* Set up tces to cover the allocated range */
nio_pages = size >> IOMMU_PAGE_SHIFT;
io_order = get_iommu_order(size);
- mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
+ mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mask >> IOMMU_PAGE_SHIFT, io_order);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
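
The rewritten iommu_range_alloc() drops its open-coded find_next_zero_bit() scan in favour of iommu_area_alloc() from lib/iommu-helper, passing a boundary size derived from dma_get_seg_boundary() so that no mapping straddles the device's DMA segment boundary (or a 4GB boundary when no device is known, as the iseries comment notes). The stand-alone sketch below shows the core idea of a boundary-aware range search over a byte-per-entry map; it is a simplified illustration, not the lib/iommu-helper.c code, and it leaves out the alignment mask and the large/small allocation hints.

#include <stdio.h>

/*
 * map[] uses one byte per IOMMU page: 0 = free, 1 = in use.  'shift'
 * plays the role of tbl->it_offset in the patch: it is added to the
 * index before the boundary check so the boundary is honoured in bus
 * address terms, not table-index terms.
 */
static long range_alloc(unsigned char *map, unsigned long size,
                        unsigned long start, unsigned long npages,
                        unsigned long shift, unsigned long boundary)
{
        unsigned long n, i;

        for (n = start; n + npages <= size; n++) {
                /* skip candidates that would straddle a boundary */
                if ((n + shift) / boundary !=
                    (n + shift + npages - 1) / boundary)
                        continue;
                for (i = 0; i < npages; i++)
                        if (map[n + i])
                                break;
                if (i == npages) {
                        for (i = 0; i < npages; i++)
                                map[n + i] = 1;
                        return (long)n;
                }
        }
        return -1;      /* mirrors iommu_area_alloc() returning -1 */
}

int main(void)
{
        unsigned char map[32] = { 0 };

        map[6] = 1;     /* pretend entry 6 is already mapped */
        printf("first fit: %ld\n", range_alloc(map, 32, 0, 4, 0, 8));
        printf("next fit:  %ld\n", range_alloc(map, 32, 0, 4, 0, 8));
        return 0;
}

The first call returns entry 0; the second has to skip both the entries taken by the first and any window that would cross the 8-entry boundary, so it lands on entry 8 rather than 4 or 5.
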
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 76b862bd1fe9..61dd17449ddc 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -36,7 +36,8 @@ static struct legacy_serial_info {
static struct __initdata of_device_id parents[] = {
{.type = "soc",},
{.type = "tsi-bridge",},
- {.type = "opb", .compatible = "ibm,opb",},
+ {.type = "opb", },
+ {.compatible = "ibm,opb",},
{.compatible = "simple-bus",},
{.compatible = "wrs,epld-localbus",},
};
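
Splitting the combined { .type = "opb", .compatible = "ibm,opb" } entry into two separate entries changes the matching logic: an of_device_id entry only matches a node when every field the entry specifies matches, so the old single entry required both the device_type and the compatible at once, while the split entries accept either. The toy program below models that AND-within-an-entry, OR-across-the-table behaviour; it is deliberately simplified (the real of_match_node() also checks the node name and walks the node's full compatible list), and the sample node is hypothetical.

#include <stdio.h>
#include <string.h>

struct match {
        const char *type;               /* NULL means "don't care" */
        const char *compatible;
};

static int entry_matches(const struct match *m, const char *node_type,
                         const char *node_compat)
{
        if (m->type &&
            (!node_type || strcmp(m->type, node_type) != 0))
                return 0;
        if (m->compatible &&
            (!node_compat || strcmp(m->compatible, node_compat) != 0))
                return 0;
        return 1;
}

int main(void)
{
        struct match old       = { "opb", "ibm,opb" };  /* old single entry */
        struct match by_type   = { "opb", NULL };       /* new split entries */
        struct match by_compat = { NULL, "ibm,opb" };

        /* hypothetical OPB node with a device_type but no compatible */
        const char *ntype = "opb";
        const char *ncompat = NULL;

        printf("old entry matches:   %d\n",
               entry_matches(&old, ntype, ncompat));
        printf("split entries match: %d\n",
               entry_matches(&by_type, ntype, ncompat) ||
               entry_matches(&by_compat, ntype, ncompat));
        return 0;
}
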
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index ea04e0ab3f2f..0516e2d3e02e 100644
--- a/arch/powerpc/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -26,7 +26,7 @@
static void dummy_perf(struct pt_regs *regs)
{
-#if defined(CONFIG_FSL_BOOKE) && !defined(CONFIG_E200)
+#if defined(CONFIG_FSL_EMB_PERFMON)
mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE);
#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
if (cur_cpu_spec->pmc_type == PPC_PMC_IBM)
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 8b056d2295cc..7673e9865733 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -21,6 +21,8 @@
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
@@ -58,20 +60,38 @@
#define PT_MAX_PUT_REG PT_CCR
#endif
+static unsigned long get_user_msr(struct task_struct *task)
+{
+ return task->thread.regs->msr | task->thread.fpexc_mode;
+}
+
+static int set_user_msr(struct task_struct *task, unsigned long msr)
+{
+ task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
+ task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
+ return 0;
+}
+
+/*
+ * We prevent mucking around with the reserved area of trap,
+ * which is used internally by the kernel.
+ */
+static int set_user_trap(struct task_struct *task, unsigned long trap)
+{
+ task->thread.regs->trap = trap & 0xfff0;
+ return 0;
+}
+
/*
* Get contents of register REGNO in task TASK.
*/
unsigned long ptrace_get_reg(struct task_struct *task, int regno)
{
- unsigned long tmp = 0;
-
if (task->thread.regs == NULL)
return -EIO;
- if (regno == PT_MSR) {
- tmp = ((unsigned long *)task->thread.regs)[PT_MSR];
- return tmp | task->thread.fpexc_mode;
- }
+ if (regno == PT_MSR)
+ return get_user_msr(task);
if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
return ((unsigned long *)task->thread.regs)[regno];
@@ -87,40 +107,134 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
if (task->thread.regs == NULL)
return -EIO;
- if (regno <= PT_MAX_PUT_REG || regno == PT_TRAP) {
- if (regno == PT_MSR)
- data = (data & MSR_DEBUGCHANGE)
- | (task->thread.regs->msr & ~MSR_DEBUGCHANGE);
- /* We prevent mucking around with the reserved area of trap
- * which are used internally by the kernel
- */
- if (regno == PT_TRAP)
- data &= 0xfff0;
+ if (regno == PT_MSR)
+ return set_user_msr(task, data);
+ if (regno == PT_TRAP)
+ return set_user_trap(task, data);
+
+ if (regno <= PT_MAX_PUT_REG) {
((unsigned long *)task->thread.regs)[regno] = data;
return 0;
}
return -EIO;
}
+static int gpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ CHECK_FULL_REGS(target->thread.regs);
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ target->thread.regs,
+ 0, offsetof(struct pt_regs, msr));
+ if (!ret) {
+ unsigned long msr = get_user_msr(target);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
+ offsetof(struct pt_regs, msr),
+ offsetof(struct pt_regs, msr) +
+ sizeof(msr));
+ }
+
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct pt_regs, msr) + sizeof(long));
+
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.regs->orig_gpr3,
+ offsetof(struct pt_regs, orig_gpr3),
+ sizeof(struct pt_regs));
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(struct pt_regs), -1);
+
+ return ret;
+}
+
+static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned long reg;
+ int ret;
+
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ CHECK_FULL_REGS(target->thread.regs);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ target->thread.regs,
+ 0, PT_MSR * sizeof(reg));
-static int get_fpregs(void __user *data, struct task_struct *task,
- int has_fpscr)
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_MSR * sizeof(reg),
+ (PT_MSR + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_msr(target, reg);
+ }
+
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct pt_regs, msr) + sizeof(long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.regs->orig_gpr3,
+ PT_ORIG_R3 * sizeof(reg),
+ (PT_MAX_PUT_REG + 1) * sizeof(reg));
+
+ if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
+ ret = user_regset_copyin_ignore(
+ &pos, &count, &kbuf, &ubuf,
+ (PT_MAX_PUT_REG + 1) * sizeof(reg),
+ PT_TRAP * sizeof(reg));
+
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_TRAP * sizeof(reg),
+ (PT_TRAP + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_trap(target, reg);
+ }
+
+ if (!ret)
+ ret = user_regset_copyin_ignore(
+ &pos, &count, &kbuf, &ubuf,
+ (PT_TRAP + 1) * sizeof(reg), -1);
+
+ return ret;
+}
+
+static int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
{
- unsigned int count = has_fpscr ? 33 : 32;
+ flush_fp_to_thread(target);
- if (copy_to_user(data, task->thread.fpr, count * sizeof(double)))
- return -EFAULT;
- return 0;
+ BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
+ offsetof(struct thread_struct, fpr[32]));
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpr, 0, -1);
}
-static int set_fpregs(void __user *data, struct task_struct *task,
- int has_fpscr)
+static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
{
- unsigned int count = has_fpscr ? 33 : 32;
+ flush_fp_to_thread(target);
- if (copy_from_user(task->thread.fpr, data, count * sizeof(double)))
- return -EFAULT;
- return 0;
+ BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
+ offsetof(struct thread_struct, fpr[32]));
+
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpr, 0, -1);
}
@@ -138,56 +252,74 @@ static int set_fpregs(void __user *data, struct task_struct *task,
* (combined (32- and 64-bit) gdb.
*/
-/*
- * Get contents of AltiVec register state in task TASK
- */
-static int get_vrregs(unsigned long __user *data, struct task_struct *task)
+static int vr_active(struct task_struct *target,
+ const struct user_regset *regset)
{
- unsigned long regsize;
+ flush_altivec_to_thread(target);
+ return target->thread.used_vr ? regset->n : 0;
+}
- /* copy AltiVec registers VR[0] .. VR[31] */
- regsize = 32 * sizeof(vector128);
- if (copy_to_user(data, task->thread.vr, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
+static int vr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
- /* copy VSCR */
- regsize = 1 * sizeof(vector128);
- if (copy_to_user(data, &task->thread.vscr, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
+ flush_altivec_to_thread(target);
- /* copy VRSAVE */
- if (put_user(task->thread.vrsave, (u32 __user *)data))
- return -EFAULT;
+ BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
+ offsetof(struct thread_struct, vr[32]));
- return 0;
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.vr, 0,
+ 33 * sizeof(vector128));
+ if (!ret) {
+ /*
+ * Copy out only the low-order word of vrsave.
+ */
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+ memset(&vrsave, 0, sizeof(vrsave));
+ vrsave.word = target->thread.vrsave;
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
+ 33 * sizeof(vector128), -1);
+ }
+
+ return ret;
}
-/*
- * Write contents of AltiVec register state into task TASK.
- */
-static int set_vrregs(struct task_struct *task, unsigned long __user *data)
+static int vr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
{
- unsigned long regsize;
+ int ret;
- /* copy AltiVec registers VR[0] .. VR[31] */
- regsize = 32 * sizeof(vector128);
- if (copy_from_user(task->thread.vr, data, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
+ flush_altivec_to_thread(target);
- /* copy VSCR */
- regsize = 1 * sizeof(vector128);
- if (copy_from_user(&task->thread.vscr, data, regsize))
- return -EFAULT;
- data += (regsize / sizeof(unsigned long));
+ BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
+ offsetof(struct thread_struct, vr[32]));
- /* copy VRSAVE */
- if (get_user(task->thread.vrsave, (u32 __user *)data))
- return -EFAULT;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.vr, 0, 33 * sizeof(vector128));
+ if (!ret && count > 0) {
+ /*
+ * We use only the first word of vrsave.
+ */
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+ memset(&vrsave, 0, sizeof(vrsave));
+ vrsave.word = target->thread.vrsave;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
+ 33 * sizeof(vector128), -1);
+ if (!ret)
+ target->thread.vrsave = vrsave.word;
+ }
- return 0;
+ return ret;
}
#endif /* CONFIG_ALTIVEC */
@@ -203,57 +335,273 @@ static int set_vrregs(struct task_struct *task, unsigned long __user *data)
* }
*/
-/*
- * Get contents of SPE register state in task TASK.
- */
-static int get_evrregs(unsigned long *data, struct task_struct *task)
+static int evr_active(struct task_struct *target,
+ const struct user_regset *regset)
{
- int i;
+ flush_spe_to_thread(target);
+ return target->thread.used_spe ? regset->n : 0;
+}
- if (!access_ok(VERIFY_WRITE, data, 35 * sizeof(unsigned long)))
- return -EFAULT;
+static int evr_get(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
- /* copy SPEFSCR */
- if (__put_user(task->thread.spefscr, &data[34]))
- return -EFAULT;
+ flush_spe_to_thread(target);
- /* copy SPE registers EVR[0] .. EVR[31] */
- for (i = 0; i < 32; i++, data++)
- if (__put_user(task->thread.evr[i], data))
- return -EFAULT;
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.evr,
+ 0, sizeof(target->thread.evr));
- /* copy ACC */
- if (__put_user64(task->thread.acc, (unsigned long long *)data))
- return -EFAULT;
+ BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
+ offsetof(struct thread_struct, spefscr));
- return 0;
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.acc,
+ sizeof(target->thread.evr), -1);
+
+ return ret;
+}
+
+static int evr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ flush_spe_to_thread(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.evr,
+ 0, sizeof(target->thread.evr));
+
+ BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
+ offsetof(struct thread_struct, spefscr));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.acc,
+ sizeof(target->thread.evr), -1);
+
+ return ret;
}
+#endif /* CONFIG_SPE */
+
/*
- * Write contents of SPE register state into task TASK.
+ * These are our native regset flavors.
*/
-static int set_evrregs(struct task_struct *task, unsigned long *data)
+enum powerpc_regset {
+ REGSET_GPR,
+ REGSET_FPR,
+#ifdef CONFIG_ALTIVEC
+ REGSET_VMX,
+#endif
+#ifdef CONFIG_SPE
+ REGSET_SPE,
+#endif
+};
+
+static const struct user_regset native_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+ .size = sizeof(long), .align = sizeof(long),
+ .get = gpr_get, .set = gpr_set
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .get = fpr_get, .set = fpr_set
+ },
+#ifdef CONFIG_ALTIVEC
+ [REGSET_VMX] = {
+ .core_note_type = NT_PPC_VMX, .n = 34,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = vr_active, .get = vr_get, .set = vr_set
+ },
+#endif
+#ifdef CONFIG_SPE
+ [REGSET_SPE] = {
+ .n = 35,
+ .size = sizeof(u32), .align = sizeof(u32),
+ .active = evr_active, .get = evr_get, .set = evr_set
+ },
+#endif
+};
+
+static const struct user_regset_view user_ppc_native_view = {
+ .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
+ .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
+};
+
+#ifdef CONFIG_PPC64
+#include <linux/compat.h>
+
+static int gpr32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
{
- int i;
+ const unsigned long *regs = &target->thread.regs->gpr[0];
+ compat_ulong_t *k = kbuf;
+ compat_ulong_t __user *u = ubuf;
+ compat_ulong_t reg;
+
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ CHECK_FULL_REGS(target->thread.regs);
- if (!access_ok(VERIFY_READ, data, 35 * sizeof(unsigned long)))
- return -EFAULT;
+ pos /= sizeof(reg);
+ count /= sizeof(reg);
- /* copy SPEFSCR */
- if (__get_user(task->thread.spefscr, &data[34]))
- return -EFAULT;
+ if (kbuf)
+ for (; count > 0 && pos < PT_MSR; --count)
+ *k++ = regs[pos++];
+ else
+ for (; count > 0 && pos < PT_MSR; --count)
+ if (__put_user((compat_ulong_t) regs[pos++], u++))
+ return -EFAULT;
- /* copy SPE registers EVR[0] .. EVR[31] */
- for (i = 0; i < 32; i++, data++)
- if (__get_user(task->thread.evr[i], data))
+ if (count > 0 && pos == PT_MSR) {
+ reg = get_user_msr(target);
+ if (kbuf)
+ *k++ = reg;
+ else if (__put_user(reg, u++))
return -EFAULT;
- /* copy ACC */
- if (__get_user64(task->thread.acc, (unsigned long long*)data))
- return -EFAULT;
+ ++pos;
+ --count;
+ }
- return 0;
+ if (kbuf)
+ for (; count > 0 && pos < PT_REGS_COUNT; --count)
+ *k++ = regs[pos++];
+ else
+ for (; count > 0 && pos < PT_REGS_COUNT; --count)
+ if (__put_user((compat_ulong_t) regs[pos++], u++))
+ return -EFAULT;
+
+ kbuf = k;
+ ubuf = u;
+ pos *= sizeof(reg);
+ count *= sizeof(reg);
+ return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ PT_REGS_COUNT * sizeof(reg), -1);
+}
+
+static int gpr32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned long *regs = &target->thread.regs->gpr[0];
+ const compat_ulong_t *k = kbuf;
+ const compat_ulong_t __user *u = ubuf;
+ compat_ulong_t reg;
+
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ CHECK_FULL_REGS(target->thread.regs);
+
+ pos /= sizeof(reg);
+ count /= sizeof(reg);
+
+ if (kbuf)
+ for (; count > 0 && pos < PT_MSR; --count)
+ regs[pos++] = *k++;
+ else
+ for (; count > 0 && pos < PT_MSR; --count) {
+ if (__get_user(reg, u++))
+ return -EFAULT;
+ regs[pos++] = reg;
+ }
+
+
+ if (count > 0 && pos == PT_MSR) {
+ if (kbuf)
+ reg = *k++;
+ else if (__get_user(reg, u++))
+ return -EFAULT;
+ set_user_msr(target, reg);
+ ++pos;
+ --count;
+ }
+
+ if (kbuf)
+ for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
+ regs[pos++] = *k++;
+ else
+ for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
+ if (__get_user(reg, u++))
+ return -EFAULT;
+ regs[pos++] = reg;
+ }
+
+ if (count > 0 && pos == PT_TRAP) {
+ if (kbuf)
+ reg = *k++;
+ else if (__get_user(reg, u++))
+ return -EFAULT;
+ set_user_trap(target, reg);
+ ++pos;
+ --count;
+ }
+
+ kbuf = k;
+ ubuf = u;
+ pos *= sizeof(reg);
+ count *= sizeof(reg);
+ return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ (PT_TRAP + 1) * sizeof(reg), -1);
+}
+
+/*
+ * These are the regset flavors matching the CONFIG_PPC32 native set.
+ */
+static const struct user_regset compat_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+ .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
+ .get = gpr32_get, .set = gpr32_set
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .get = fpr_get, .set = fpr_set
+ },
+#ifdef CONFIG_ALTIVEC
+ [REGSET_VMX] = {
+ .core_note_type = NT_PPC_VMX, .n = 34,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = vr_active, .get = vr_get, .set = vr_set
+ },
+#endif
+#ifdef CONFIG_SPE
+ [REGSET_SPE] = {
+ .core_note_type = NT_PPC_SPE, .n = 35,
+ .size = sizeof(u32), .align = sizeof(u32),
+ .active = evr_active, .get = evr_get, .set = evr_set
+ },
+#endif
+};
+
+static const struct user_regset_view user_ppc_compat_view = {
+ .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
+ .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
+};
+#endif /* CONFIG_PPC64 */
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_PPC64
+ if (test_tsk_thread_flag(task, TIF_32BIT))
+ return &user_ppc_compat_view;
+#endif
+ return &user_ppc_native_view;
}
-#endif /* CONFIG_SPE */
void user_enable_single_step(struct task_struct *task)
@@ -323,55 +671,29 @@ void ptrace_disable(struct task_struct *child)
static long arch_ptrace_old(struct task_struct *child, long request, long addr,
long data)
{
- int ret = -EPERM;
-
- switch(request) {
- case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
- int i;
- unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
- unsigned long __user *tmp = (unsigned long __user *)addr;
-
- CHECK_FULL_REGS(child->thread.regs);
- for (i = 0; i < 32; i++) {
- ret = put_user(*reg, tmp);
- if (ret)
- break;
- reg++;
- tmp++;
- }
- break;
- }
-
- case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
- int i;
- unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
- unsigned long __user *tmp = (unsigned long __user *)addr;
-
- CHECK_FULL_REGS(child->thread.regs);
- for (i = 0; i < 32; i++) {
- ret = get_user(*reg, tmp);
- if (ret)
- break;
- reg++;
- tmp++;
- }
- break;
- }
-
- case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */
- flush_fp_to_thread(child);
- ret = get_fpregs((void __user *)addr, child, 0);
- break;
- }
-
- case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */
- flush_fp_to_thread(child);
- ret = set_fpregs((void __user *)addr, child, 0);
- break;
+ switch (request) {
+ case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_GPR, 0, 32 * sizeof(long),
+ (void __user *) data);
+
+ case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_GPR, 0, 32 * sizeof(long),
+ (const void __user *) data);
+
+ case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_FPR, 0, 32 * sizeof(double),
+ (void __user *) data);
+
+ case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_FPR, 0, 32 * sizeof(double),
+ (const void __user *) data);
}
- }
- return ret;
+ return -EPERM;
}
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
@@ -379,12 +701,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
int ret = -EPERM;
switch (request) {
- /* when I and D space are separate, these will need to be fixed. */
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA:
- ret = generic_ptrace_peekdata(child, addr, data);
- break;
-
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long index, tmp;
@@ -412,12 +728,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
}
- /* If I and D space are separate, this will have to be fixed. */
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- ret = generic_ptrace_pokedata(child, addr, data);
- break;
-
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR: {
unsigned long index;
@@ -462,85 +772,60 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
#ifdef CONFIG_PPC64
case PTRACE_GETREGS64:
#endif
- case PTRACE_GETREGS: { /* Get all pt_regs from the child. */
- int ui;
- if (!access_ok(VERIFY_WRITE, (void __user *)data,
- sizeof(struct pt_regs))) {
- ret = -EIO;
- break;
- }
- CHECK_FULL_REGS(child->thread.regs);
- ret = 0;
- for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
- ret |= __put_user(ptrace_get_reg(child, ui),
- (unsigned long __user *) data);
- data += sizeof(long);
- }
- break;
- }
+ case PTRACE_GETREGS: /* Get all pt_regs from the child. */
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_GPR,
+ 0, sizeof(struct pt_regs),
+ (void __user *) data);
#ifdef CONFIG_PPC64
case PTRACE_SETREGS64:
#endif
- case PTRACE_SETREGS: { /* Set all gp regs in the child. */
- unsigned long tmp;
- int ui;
- if (!access_ok(VERIFY_READ, (void __user *)data,
- sizeof(struct pt_regs))) {
- ret = -EIO;
- break;
- }
- CHECK_FULL_REGS(child->thread.regs);
- ret = 0;
- for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
- ret = __get_user(tmp, (unsigned long __user *) data);
- if (ret)
- break;
- ptrace_put_reg(child, ui, tmp);
- data += sizeof(long);
- }
- break;
- }
-
- case PTRACE_GETFPREGS: { /* Get the child FPU state (FPR0...31 + FPSCR) */
- flush_fp_to_thread(child);
- ret = get_fpregs((void __user *)data, child, 1);
- break;
- }
-
- case PTRACE_SETFPREGS: { /* Set the child FPU state (FPR0...31 + FPSCR) */
- flush_fp_to_thread(child);
- ret = set_fpregs((void __user *)data, child, 1);
- break;
- }
+ case PTRACE_SETREGS: /* Set all gp regs in the child. */
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_GPR,
+ 0, sizeof(struct pt_regs),
+ (const void __user *) data);
+
+ case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_FPR,
+ 0, sizeof(elf_fpregset_t),
+ (void __user *) data);
+
+ case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_FPR,
+ 0, sizeof(elf_fpregset_t),
+ (const void __user *) data);
#ifdef CONFIG_ALTIVEC
case PTRACE_GETVRREGS:
- /* Get the child altivec register state. */
- flush_altivec_to_thread(child);
- ret = get_vrregs((unsigned long __user *)data, child);
- break;
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_VMX,
+ 0, (33 * sizeof(vector128) +
+ sizeof(u32)),
+ (void __user *) data);
case PTRACE_SETVRREGS:
- /* Set the child altivec register state. */
- flush_altivec_to_thread(child);
- ret = set_vrregs(child, (unsigned long __user *)data);
- break;
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_VMX,
+ 0, (33 * sizeof(vector128) +
+ sizeof(u32)),
+ (const void __user *) data);
#endif
#ifdef CONFIG_SPE
case PTRACE_GETEVRREGS:
/* Get the child spe register state. */
- flush_spe_to_thread(child);
- ret = get_evrregs((unsigned long __user *)data, child);
- break;
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_SPE, 0, 35 * sizeof(u32),
+ (void __user *) data);
case PTRACE_SETEVRREGS:
/* Set the child spe register state. */
- /* this is to clear the MSR_SPE bit to force a reload
- * of register state from memory */
- flush_spe_to_thread(child);
- ret = set_evrregs(child, (unsigned long __user *)data);
- break;
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_SPE, 0, 35 * sizeof(u32),
+ (const void __user *) data);
#endif
/* Old reverse args ptrace calls */
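
The net effect of the conversion above is that every one of the old hand-rolled copy loops now funnels through copy_regset_to_user()/copy_regset_from_user() against the regset tables, while the ptrace requests themselves keep their numbers and buffer layouts. As a sanity check, a tracer along the lines of the sketch below should still receive a complete struct pt_regs from PTRACE_GETREGS; it assumes a powerpc system whose libc and kernel headers expose PTRACE_GETREGS and the user-visible struct pt_regs, and /bin/true is just a convenient tracee.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/ptrace.h>         /* include before asm/ptrace.h */
#include <asm/ptrace.h>         /* user-visible struct pt_regs (powerpc) */

int main(void)
{
        pid_t child = fork();

        if (child < 0)
                return 1;
        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                execl("/bin/true", "true", (char *)NULL);
                _exit(1);
        }

        waitpid(child, NULL, 0);                /* stopped at exec */

        struct pt_regs regs;
        if (ptrace(PTRACE_GETREGS, child, NULL, &regs) == 0)
                printf("child nip=%#lx msr=%#lx\n",
                       (unsigned long)regs.nip, (unsigned long)regs.msr);
        else
                perror("PTRACE_GETREGS");

        ptrace(PTRACE_CONT, child, NULL, NULL);
        waitpid(child, NULL, 0);
        return 0;
}
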
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index fea6206ff90f..4c1de6af4c09 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -24,9 +24,11 @@
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
+#include <linux/regset.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
+#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -45,87 +47,31 @@
static long compat_ptrace_old(struct task_struct *child, long request,
long addr, long data)
{
- int ret = -EPERM;
-
- switch(request) {
- case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */
- int i;
- unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
- unsigned int __user *tmp = (unsigned int __user *)addr;
-
- CHECK_FULL_REGS(child->thread.regs);
- for (i = 0; i < 32; i++) {
- ret = put_user(*reg, tmp);
- if (ret)
- break;
- reg++;
- tmp++;
- }
- break;
- }
-
- case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */
- int i;
- unsigned long *reg = &((unsigned long *)child->thread.regs)[0];
- unsigned int __user *tmp = (unsigned int __user *)addr;
-
- CHECK_FULL_REGS(child->thread.regs);
- for (i = 0; i < 32; i++) {
- ret = get_user(*reg, tmp);
- if (ret)
- break;
- reg++;
- tmp++;
- }
- break;
+ switch (request) {
+ case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
+ return copy_regset_to_user(child,
+ task_user_regset_view(current), 0,
+ 0, 32 * sizeof(compat_long_t),
+ compat_ptr(data));
+
+ case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
+ return copy_regset_from_user(child,
+ task_user_regset_view(current), 0,
+ 0, 32 * sizeof(compat_long_t),
+ compat_ptr(data));
}
- }
- return ret;
+ return -EPERM;
}
-long compat_sys_ptrace(int request, int pid, unsigned long addr,
- unsigned long data)
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ compat_ulong_t caddr, compat_ulong_t cdata)
{
- struct task_struct *child;
+ unsigned long addr = caddr;
+ unsigned long data = cdata;
int ret;
- lock_kernel();
- if (request == PTRACE_TRACEME) {
- ret = ptrace_traceme();
- goto out;
- }
-
- child = ptrace_get_task_struct(pid);
- if (IS_ERR(child)) {
- ret = PTR_ERR(child);
- goto out;
- }
-
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
- goto out_tsk;
- }
-
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0)
- goto out_tsk;
-
switch (request) {
- /* when I and D space are separate, these will need to be fixed. */
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned int tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp, (u32 __user *)data);
- break;
- }
-
/*
* Read 4 bytes of the other process' storage
* data is a pointer specifying where the user wants the
@@ -225,19 +171,6 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr,
break;
}
- /* If I and D space are separate, this will have to be fixed. */
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA: {
- unsigned int tmp;
- tmp = data;
- ret = 0;
- if (access_process_vm(child, addr, &tmp, sizeof(tmp), 1)
- == sizeof(tmp))
- break;
- ret = -EIO;
- break;
- }
-
/*
* Write 4 bytes into the other process' storage
* data is the 4 bytes that the user wants written
@@ -337,46 +270,17 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr,
break;
}
- case PTRACE_GETEVENTMSG:
- ret = put_user(child->ptrace_message, (unsigned int __user *) data);
- break;
+ case PTRACE_GETREGS: /* Get all pt_regs from the child. */
+ return copy_regset_to_user(
+ child, task_user_regset_view(current), 0,
+ 0, PT_REGS_COUNT * sizeof(compat_long_t),
+ compat_ptr(data));
- case PTRACE_GETREGS: { /* Get all pt_regs from the child. */
- int ui;
- if (!access_ok(VERIFY_WRITE, (void __user *)data,
- PT_REGS_COUNT * sizeof(int))) {
- ret = -EIO;
- break;
- }
- CHECK_FULL_REGS(child->thread.regs);
- ret = 0;
- for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
- ret |= __put_user(ptrace_get_reg(child, ui),
- (unsigned int __user *) data);
- data += sizeof(int);
- }
- break;
- }
-
- case PTRACE_SETREGS: { /* Set all gp regs in the child. */
- unsigned long tmp;
- int ui;
- if (!access_ok(VERIFY_READ, (void __user *)data,
- PT_REGS_COUNT * sizeof(int))) {
- ret = -EIO;
- break;
- }
- CHECK_FULL_REGS(child->thread.regs);
- ret = 0;
- for (ui = 0; ui < PT_REGS_COUNT; ui ++) {
- ret = __get_user(tmp, (unsigned int __user *) data);
- if (ret)
- break;
- ptrace_put_reg(child, ui, tmp);
- data += sizeof(int);
- }
- break;
- }
+ case PTRACE_SETREGS: /* Set all gp regs in the child. */
+ return copy_regset_from_user(
+ child, task_user_regset_view(current), 0,
+ 0, PT_REGS_COUNT * sizeof(compat_long_t),
+ compat_ptr(data));
case PTRACE_GETFPREGS:
case PTRACE_SETFPREGS:
@@ -402,12 +306,9 @@ long compat_sys_ptrace(int request, int pid, unsigned long addr,
break;
default:
- ret = ptrace_request(child, request, addr, data);
+ ret = compat_ptrace_request(child, request, addr, data);
break;
}
-out_tsk:
- put_task_struct(child);
-out:
- unlock_kernel();
+
return ret;
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 5cd3db5cae41..3b26fbd6bec9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -66,6 +66,7 @@
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
+#include <asm/cputime.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
@@ -189,6 +190,8 @@ u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
+DEFINE_PER_CPU(unsigned long, cputime_last_delta);
+DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
static void calc_cputime_factors(void)
{
@@ -257,8 +260,8 @@ void account_system_vtime(struct task_struct *tsk)
}
account_system_time(tsk, 0, delta);
account_system_time_scaled(tsk, deltascaled);
- get_paca()->purrdelta = delta;
- get_paca()->spurrdelta = deltascaled;
+ per_cpu(cputime_last_delta, smp_processor_id()) = delta;
+ per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled;
local_irq_restore(flags);
}
@@ -276,10 +279,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
get_paca()->user_time = 0;
account_user_time(tsk, utime);
- /* Estimate the scaled utime by scaling the real utime based
- * on the last spurr to purr ratio */
- utimescaled = utime * get_paca()->spurrdelta / get_paca()->purrdelta;
- get_paca()->spurrdelta = get_paca()->purrdelta = 0;
+ utimescaled = cputime_to_scaled(utime);
account_user_time_scaled(tsk, utimescaled);
}
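
The deleted lines estimated scaled user time by multiplying the measured utime by the last SPURR/PURR delta ratio that account_system_vtime() had stashed in the paca; this patch moves those two deltas into per-CPU variables so the cputime_to_scaled() helper (not shown here) can apply the same ratio without reaching into the paca. Stripped down to the bare arithmetic, the scaling is just this; the divide-by-zero guard is added for the illustration, and the 75% figure is an invented sample.

#include <stdio.h>

/* Scale a user-time delta by the last observed SPURR/PURR ratio,
 * i.e. by how fast the processor was actually running relative to
 * its nominal rate.  Units are timebase ticks in the kernel; plain
 * numbers here. */
static unsigned long scale_utime(unsigned long utime,
                                 unsigned long spurr_delta,
                                 unsigned long purr_delta)
{
        if (purr_delta == 0)            /* nothing sampled yet */
                return utime;
        return utime * spurr_delta / purr_delta;
}

int main(void)
{
        /* processor ran at ~75% of nominal speed since the last sample */
        printf("scaled utime: %lu\n", scale_utime(1000, 750, 1000));
        return 0;
}
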
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 848a20475db8..4b5b7ff4f78b 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -54,7 +54,7 @@
#endif
#include <asm/kexec.h>
-#ifdef CONFIG_DEBUGGER
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs);
int (*__debugger_ipi)(struct pt_regs *regs);
int (*__debugger_bpt)(struct pt_regs *regs);
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index f0bad7070fb5..f98867252ee2 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -176,7 +176,7 @@ static void __devinit vio_dev_release(struct device *dev)
* Returns a pointer to the created vio_dev or NULL if node has
* NULL device_type or compatible fields.
*/
-struct vio_dev * __devinit vio_register_device_node(struct device_node *of_node)
+struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
struct vio_dev *viodev;
const unsigned int *unit_address;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index e8122447f019..93a5c53e3423 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -220,12 +220,13 @@ void __init do_init_bootmem(void)
lmb_size_bytes(&lmb.reserved, i) - 1;
if (addr < total_lowmem)
reserve_bootmem(lmb.reserved.region[i].base,
- lmb_size_bytes(&lmb.reserved, i));
+ lmb_size_bytes(&lmb.reserved, i),
+ BOOTMEM_DEFAULT);
else if (lmb.reserved.region[i].base < total_lowmem) {
unsigned long adjusted_size = total_lowmem -
lmb.reserved.region[i].base;
reserve_bootmem(lmb.reserved.region[i].base,
- adjusted_size);
+ adjusted_size, BOOTMEM_DEFAULT);
}
}
#else
@@ -234,7 +235,8 @@ void __init do_init_bootmem(void)
/* reserve the sections we're already using */
for (i = 0; i < lmb.reserved.cnt; i++)
reserve_bootmem(lmb.reserved.region[i].base,
- lmb_size_bytes(&lmb.reserved, i));
+ lmb_size_bytes(&lmb.reserved, i),
+ BOOTMEM_DEFAULT);
#endif
/* XXX need to clip this if using highmem? */
@@ -483,7 +485,12 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
*/
_tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
- if (!PageReserved(page)
+ /* The _PAGE_USER test should really be _PAGE_EXEC, but
+ * older glibc versions execute some code from no-exec
+ * pages, which for now we are supporting. If exec-only
+ * pages are ever implemented, this will have to change.
+ */
+ if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
&& !test_bit(PG_arch_1, &page->flags)) {
if (vma->vm_mm == current->active_mm) {
__flush_dcache_icache((void *) address);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index c12adc3ddffd..a300d254aac6 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -24,6 +24,8 @@
static int numa_enabled = 1;
+static char *cmdline __initdata;
+
static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
@@ -39,6 +41,53 @@ static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
+static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
+ unsigned int *nid)
+{
+ unsigned long long mem;
+ char *p = cmdline;
+ static unsigned int fake_nid;
+ static unsigned long long curr_boundary;
+
+ /*
+ * Modify node id, iff we started creating NUMA nodes
+ * We want to continue from where we left off the last time
+ */
+ if (fake_nid)
+ *nid = fake_nid;
+ /*
+ * In case there are no more arguments to parse, the
+ * node_id should be the same as the last fake node id
+ * (we've handled this above).
+ */
+ if (!p)
+ return 0;
+
+ mem = memparse(p, &p);
+ if (!mem)
+ return 0;
+
+ if (mem < curr_boundary)
+ return 0;
+
+ curr_boundary = mem;
+
+ if ((end_pfn << PAGE_SHIFT) > mem) {
+ /*
+ * Skip commas and spaces
+ */
+ while (*p == ',' || *p == ' ' || *p == '\t')
+ p++;
+
+ cmdline = p;
+ fake_nid++;
+ *nid = fake_nid;
+ dbg("created new fake_node with id %d\n", fake_nid);
+ return 1;
+ }
+ return 0;
+}
+
static void __cpuinit map_cpu_to_node(int cpu, int node)
{
numa_cpu_lookup_table[cpu] = node;
@@ -344,6 +393,9 @@ static void __init parse_drconf_memory(struct device_node *memory)
if (nid == 0xffff || nid >= MAX_NUMNODES)
nid = default_nid;
}
+
+ fake_numa_create_new_node(((start + lmb_size) >> PAGE_SHIFT),
+ &nid);
node_set_online(nid);
size = numa_enforce_memory_limit(start, lmb_size);
@@ -429,6 +481,8 @@ new_range:
nid = of_node_to_nid_single(memory);
if (nid < 0)
nid = default_nid;
+
+ fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
node_set_online(nid);
if (!(size = numa_enforce_memory_limit(start, size))) {
@@ -461,7 +515,7 @@ static void __init setup_nonnuma(void)
unsigned long top_of_ram = lmb_end_of_DRAM();
unsigned long total_ram = lmb_phys_mem_size();
unsigned long start_pfn, end_pfn;
- unsigned int i;
+ unsigned int i, nid = 0;
printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
@@ -471,9 +525,11 @@ static void __init setup_nonnuma(void)
for (i = 0; i < lmb.memory.cnt; ++i) {
start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
- add_active_range(0, start_pfn, end_pfn);
+
+ fake_numa_create_new_node(end_pfn, &nid);
+ add_active_range(nid, start_pfn, end_pfn);
+ node_set_online(nid);
}
- node_set_online(0);
}
void __init dump_numa_cpu_topology(void)
@@ -675,7 +731,7 @@ void __init do_init_bootmem(void)
dbg("reserve_bootmem %lx %lx\n", physbase,
size);
reserve_bootmem_node(NODE_DATA(nid), physbase,
- size);
+ size, BOOTMEM_DEFAULT);
}
}
@@ -702,6 +758,10 @@ static int __init early_numa(char *p)
if (strstr(p, "debug"))
numa_debug = 1;
+ p = strstr(p, "fake=");
+ if (p)
+ cmdline = p + strlen("fake=");
+
return 0;
}
early_param("numa", early_numa);
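
With the hunks above, booting with something like numa=fake=512M,1G,2G carves physical memory into fake NUMA nodes at those boundaries ("debug" and "off" keep their old meaning). A stand-alone user-space sketch of the same boundary walk, with strtoull() standing in for the kernel's memparse() and fixed 256 MB steps standing in for successive lmb ranges (both assumptions of the sketch, not part of the patch):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* boundaries in bytes, i.e. numa=fake=512M,1G,2G */
		char *p = "536870912,1073741824,2147483648";
		unsigned long long end;
		unsigned int nid = 0;

		/* walk "memory" in 256 MB steps, standing in for lmb ranges */
		for (end = 1ULL << 28; end <= 3ULL << 30; end += 1ULL << 28) {
			char *next = p;
			unsigned long long boundary = *p ? strtoull(p, &next, 0) : 0;

			if (boundary && end > boundary) {
				p = next;		/* boundary crossed: consume it */
				while (*p == ',' || *p == ' ')
					p++;
				nid++;			/* ... and open a new fake node */
			}
			printf("range ending at 0x%llx -> fake node %u\n", end, nid);
		}
		return 0;
	}

Each boundary that a range end crosses opens a new fake node id, which is exactly what fake_numa_create_new_node() does for parse_drconf_memory(), parse_numa_properties() and setup_nonnuma().
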
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 64488723162a..f80f90c4d58b 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -86,7 +86,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
return ret;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_pages((unsigned long)pgd, PGDIR_ORDER);
}
@@ -123,7 +123,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
return ptepage;
}
-void pte_free_kernel(pte_t *pte)
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
#ifdef CONFIG_SMP
hash_page_sync();
@@ -131,7 +131,7 @@ void pte_free_kernel(pte_t *pte)
free_page((unsigned long)pte);
}
-void pte_free(struct page *ptepage)
+void pte_free(struct mm_struct *mm, struct page *ptepage)
{
#ifdef CONFIG_SMP
hash_page_sync();
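
These prototype changes track a tree-wide switch that threads the owning mm_struct down to the page-table free helpers; the 32-bit powerpc bodies ignore the new argument. Callers in generic mm code therefore now read roughly as follows (illustrative, not part of this hunk):

	pgd_free(mm, pgd);		/* was pgd_free(pgd) */
	pte_free_kernel(mm, pte);	/* was pte_free_kernel(pte) */
	pte_free(mm, ptepage);		/* was pte_free(ptepage) */
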
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index c5f64c3bd668..2ef6b0dddd8c 100644
--- a/arch/powerpc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -15,5 +15,5 @@ oprofile-$(CONFIG_OPROFILE_CELL) += op_model_cell.o \
cell/spu_profiler.o cell/vma_map.o \
cell/spu_task_sync.o
oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o op_model_pa6t.o
-oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o
+oprofile-$(CONFIG_FSL_EMB_PERFMON) += op_model_fsl_emb.o
oprofile-$(CONFIG_6xx) += op_model_7450.o
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index a28cce1d6c24..4908dc98f9ca 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -202,9 +202,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
model = &op_model_7450;
break;
#endif
-#ifdef CONFIG_FSL_BOOKE
- case PPC_OPROFILE_BOOKE:
- model = &op_model_fsl_booke;
+#if defined(CONFIG_FSL_EMB_PERFMON)
+ case PPC_OPROFILE_FSL_EMB:
+ model = &op_model_fsl_emb;
break;
#endif
default:
diff --git a/arch/powerpc/oprofile/op_model_fsl_booke.c b/arch/powerpc/oprofile/op_model_fsl_emb.c
index 183a28bb1812..91596f6ba1f4 100644
--- a/arch/powerpc/oprofile/op_model_fsl_booke.c
+++ b/arch/powerpc/oprofile/op_model_fsl_emb.c
@@ -1,7 +1,5 @@
/*
- * arch/powerpc/oprofile/op_model_fsl_booke.c
- *
- * Freescale Book-E oprofile support, based on ppc64 oprofile support
+ * Freescale Embedded oprofile support, based on ppc64 oprofile support
* Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
*
* Copyright (c) 2004 Freescale Semiconductor, Inc
@@ -22,7 +20,7 @@
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
-#include <asm/reg_booke.h>
+#include <asm/reg_fsl_emb.h>
#include <asm/page.h>
#include <asm/pmc.h>
#include <asm/oprofile_impl.h>
@@ -244,7 +242,7 @@ static void dump_pmcs(void)
mfpmr(PMRN_PMLCA3), mfpmr(PMRN_PMLCB3));
}
-static int fsl_booke_cpu_setup(struct op_counter_config *ctr)
+static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
{
int i;
@@ -262,7 +260,7 @@ static int fsl_booke_cpu_setup(struct op_counter_config *ctr)
return 0;
}
-static int fsl_booke_reg_setup(struct op_counter_config *ctr,
+static int fsl_emb_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
int num_ctrs)
{
@@ -281,7 +279,7 @@ static int fsl_booke_reg_setup(struct op_counter_config *ctr,
return 0;
}
-static int fsl_booke_start(struct op_counter_config *ctr)
+static int fsl_emb_start(struct op_counter_config *ctr)
{
int i;
@@ -315,7 +313,7 @@ static int fsl_booke_start(struct op_counter_config *ctr)
return 0;
}
-static void fsl_booke_stop(void)
+static void fsl_emb_stop(void)
{
/* freeze counters */
pmc_stop_ctrs();
@@ -329,7 +327,7 @@ static void fsl_booke_stop(void)
}
-static void fsl_booke_handle_interrupt(struct pt_regs *regs,
+static void fsl_emb_handle_interrupt(struct pt_regs *regs,
struct op_counter_config *ctr)
{
unsigned long pc;
@@ -362,10 +360,10 @@ static void fsl_booke_handle_interrupt(struct pt_regs *regs,
pmc_start_ctrs(1);
}
-struct op_powerpc_model op_model_fsl_booke = {
- .reg_setup = fsl_booke_reg_setup,
- .cpu_setup = fsl_booke_cpu_setup,
- .start = fsl_booke_start,
- .stop = fsl_booke_stop,
- .handle_interrupt = fsl_booke_handle_interrupt,
+struct op_powerpc_model op_model_fsl_emb = {
+ .reg_setup = fsl_emb_reg_setup,
+ .cpu_setup = fsl_emb_cpu_setup,
+ .start = fsl_emb_start,
+ .stop = fsl_emb_stop,
+ .handle_interrupt = fsl_emb_handle_interrupt,
};
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index 74f31177e47a..a9260e21451e 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -72,6 +72,7 @@ config WALNUT
default y
select 405GP
select PCI
+ select OF_RTC
help
This option enables support for the IBM PPC405GP evaluation board.
diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c
index 88b66444dfb2..0422590040db 100644
--- a/arch/powerpc/platforms/40x/virtex.c
+++ b/arch/powerpc/platforms/40x/virtex.c
@@ -37,7 +37,7 @@ static int __init virtex_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- if (!of_flat_dt_is_compatible(root, "xilinx,virtex"))
+ if (!of_flat_dt_is_compatible(root, "xlnx,virtex"))
return 0;
return 1;
diff --git a/arch/powerpc/platforms/40x/walnut.c b/arch/powerpc/platforms/40x/walnut.c
index 5d9edd917f92..b8b257efeb77 100644
--- a/arch/powerpc/platforms/40x/walnut.c
+++ b/arch/powerpc/platforms/40x/walnut.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/of_platform.h>
+#include <linux/rtc.h>
#include <asm/machdep.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 8f01563dbd2a..da5b7b7599db 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -137,7 +137,7 @@ static int __init pika_dtm_start(void)
}
of_node_put(np);
- fpga = ioremap(res.start + 0x20, 4);
+ fpga = ioremap(res.start, 0x24);
if (fpga == NULL)
return -ENOENT;
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
new file mode 100644
index 000000000000..c6fa49e23dc0
--- /dev/null
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -0,0 +1,20 @@
+config PPC_MPC512x
+ bool
+ select FSL_SOC
+ select IPIC
+ default n
+
+config PPC_MPC5121
+ bool
+ select PPC_MPC512x
+ default n
+
+config MPC5121_ADS
+ bool "Freescale MPC5121E ADS"
+ depends on PPC_MULTIPLATFORM && PPC32
+ select DEFAULT_UIMAGE
+ select WANT_DEVICE_TREE
+ select PPC_MPC5121
+ help
+ This option enables support for the MPC5121E ADS board.
+ default n
diff --git a/arch/powerpc/platforms/512x/Makefile b/arch/powerpc/platforms/512x/Makefile
new file mode 100644
index 000000000000..232c89f2039a
--- /dev/null
+++ b/arch/powerpc/platforms/512x/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for the Freescale PowerPC 512x linux kernel.
+#
+obj-$(CONFIG_MPC5121_ADS) += mpc5121_ads.o
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c
new file mode 100644
index 000000000000..50bd3a319022
--- /dev/null
+++ b/arch/powerpc/platforms/512x/mpc5121_ads.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: John Rigby, <jrigby@freescale.com>, Thur Mar 29 2007
+ *
+ * Description:
+ * MPC5121 ADS board setup
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/ipic.h>
+#include <asm/prom.h>
+#include <asm/time.h>
+
+/**
+ * mpc512x_find_ips_freq - Find the IPS bus frequency for a device
+ * @node: device node
+ *
+ * Returns IPS bus frequency, or 0 if the bus frequency cannot be found.
+ */
+unsigned long
+mpc512x_find_ips_freq(struct device_node *node)
+{
+ struct device_node *np;
+ const unsigned int *p_ips_freq = NULL;
+
+ of_node_get(node);
+ while (node) {
+ p_ips_freq = of_get_property(node, "bus-frequency", NULL);
+ if (p_ips_freq)
+ break;
+
+ np = of_get_parent(node);
+ of_node_put(node);
+ node = np;
+ }
+ if (node)
+ of_node_put(node);
+
+ return p_ips_freq ? *p_ips_freq : 0;
+}
+EXPORT_SYMBOL(mpc512x_find_ips_freq);
+
+static struct of_device_id __initdata of_bus_ids[] = {
+ { .name = "soc", },
+ { .name = "localbus", },
+ {},
+};
+
+static void __init mpc5121_ads_declare_of_platform_devices(void)
+{
+ /* Find every child of the SOC node and add it to of_platform */
+ if (of_platform_bus_probe(NULL, of_bus_ids, NULL))
+ printk(KERN_ERR __FILE__ ": "
+ "Error while probing of_platform bus\n");
+}
+
+static void __init mpc5121_ads_init_IRQ(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,ipic");
+ if (!np)
+ return;
+
+ ipic_init(np, 0);
+ of_node_put(np);
+
+ /*
+ * Initialize the default interrupt mapping priorities,
+ * in case the boot rom changed something on us.
+ */
+ ipic_set_default_priority();
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init mpc5121_ads_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "fsl,mpc5121ads");
+}
+
+define_machine(mpc5121_ads) {
+ .name = "MPC5121 ADS",
+ .probe = mpc5121_ads_probe,
+ .init = mpc5121_ads_declare_of_platform_devices,
+ .init_IRQ = mpc5121_ads_init_IRQ,
+ .get_irq = ipic_get_irq,
+ .calibrate_decr = generic_calibrate_decr,
+};
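
mpc512x_find_ips_freq() walks from the given node up through its parents until it finds a "bus-frequency" property, so 512x peripheral drivers can ask for their IPS clock without knowing where in the tree it is described. A hypothetical caller (ofdev and the warning text are assumptions of this sketch):

	unsigned long ips = mpc512x_find_ips_freq(ofdev->node);

	if (!ips)
		dev_warn(&ofdev->dev, "bus-frequency not found\n");
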
diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c
index 3fce6b375dbc..7d3018751988 100644
--- a/arch/powerpc/platforms/82xx/mpc8272_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c
@@ -134,13 +134,12 @@ static void __init mpc8272_ads_setup_arch(void)
}
bcsr = of_iomap(np, 0);
+ of_node_put(np);
if (!bcsr) {
printk(KERN_ERR "Cannot map BCSR registers\n");
return;
}
- of_node_put(np);
-
clrbits32(&bcsr[1], BCSR1_RS232_EN1 | BCSR1_RS232_EN2 | BCSR1_FETHIEN);
setbits32(&bcsr[1], BCSR1_FETH_RST);
diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c
index 68196e349994..e1dceeec4994 100644
--- a/arch/powerpc/platforms/82xx/pq2fads.c
+++ b/arch/powerpc/platforms/82xx/pq2fads.c
@@ -130,13 +130,12 @@ static void __init pq2fads_setup_arch(void)
}
bcsr = of_iomap(np, 0);
+ of_node_put(np);
if (!bcsr) {
printk(KERN_ERR "Cannot map BCSR registers\n");
return;
}
- of_node_put(np);
-
/* Enable the serial and ethernet ports */
clrbits32(&bcsr[1], BCSR1_RS232_EN1 | BCSR1_RS232_EN2 | BCSR1_FETHIEN);
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 9f0fd88b2b1f..e7f706b624fe 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -101,7 +101,7 @@ static void __init mpc832x_rdb_setup_arch(void)
#ifdef CONFIG_QUICC_ENGINE
qe_reset();
- if ((np = of_find_node_by_name(np, "par_io")) != NULL) {
+ if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
par_io_init(np);
of_node_put(np);
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 88bb748aff0d..68065e62fc3d 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -14,6 +14,8 @@
#define MPC83XX_SCCR_USB_DRCM_11 0x00300000
#define MPC83XX_SCCR_USB_DRCM_01 0x00100000
#define MPC83XX_SCCR_USB_DRCM_10 0x00200000
+#define MPC8315_SCCR_USB_MASK 0x00c00000
+#define MPC8315_SCCR_USB_DRCM_11 0x00c00000
#define MPC837X_SCCR_USB_DRCM_11 0x00c00000
/* system i/o configuration register low */
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index 681230a30acd..471fdd8f4108 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -104,6 +104,7 @@ int mpc831x_usb_cfg(void)
u32 temp;
void __iomem *immap, *usb_regs;
struct device_node *np = NULL;
+ struct device_node *immr_node = NULL;
const void *prop;
struct resource res;
int ret = 0;
@@ -124,10 +125,15 @@ int mpc831x_usb_cfg(void)
}
/* Configure clock */
- temp = in_be32(immap + MPC83XX_SCCR_OFFS);
- temp &= ~MPC83XX_SCCR_USB_MASK;
- temp |= MPC83XX_SCCR_USB_DRCM_11; /* 1:3 */
- out_be32(immap + MPC83XX_SCCR_OFFS, temp);
+ immr_node = of_get_parent(np);
+ if (immr_node && of_device_is_compatible(immr_node, "fsl,mpc8315-immr"))
+ clrsetbits_be32(immap + MPC83XX_SCCR_OFFS,
+ MPC8315_SCCR_USB_MASK,
+ MPC8315_SCCR_USB_DRCM_11);
+ else
+ clrsetbits_be32(immap + MPC83XX_SCCR_OFFS,
+ MPC83XX_SCCR_USB_MASK,
+ MPC83XX_SCCR_USB_DRCM_11);
/* Configure pin mux for ULPI. There is no pin mux for UTMI */
if (prop && !strcmp(prop, "ulpi")) {
@@ -144,6 +150,9 @@ int mpc831x_usb_cfg(void)
iounmap(immap);
+ if (immr_node)
+ of_node_put(immr_node);
+
/* Map USB SOC space */
ret = of_address_to_resource(np, 0, &res);
if (ret) {
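
The open-coded read/modify/write of SCCR is replaced with clrsetbits_be32(), and the MPC8315 immr node gets its own mask because its USB clock-mode field sits at different bits (0x00c00000 rather than 0x00300000, per the mpc83xx.h hunk above). For reference, clrsetbits_be32(addr, clear, set) is the usual powerpc I/O helper, equivalent to:

	out_be32(addr, (in_be32(addr) & ~clear) | set);
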
diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c
index c6bc0783c3b0..82363e98f50e 100644
--- a/arch/powerpc/platforms/8xx/adder875.c
+++ b/arch/powerpc/platforms/8xx/adder875.c
@@ -15,12 +15,12 @@
#include <asm/time.h>
#include <asm/machdep.h>
-#include <asm/commproc.h>
+#include <asm/cpm1.h>
#include <asm/fs_pd.h>
#include <asm/udbg.h>
#include <asm/prom.h>
-#include <sysdev/commproc.h>
+#include "mpc8xx.h"
struct cpm_pin {
int port, pin, flags;
@@ -108,7 +108,7 @@ define_machine(adder875) {
.name = "Adder MPC875",
.probe = adder875_probe,
.setup_arch = adder875_setup,
- .init_IRQ = m8xx_pic_init,
+ .init_IRQ = mpc8xx_pics_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c
index a8dffa005775..7d9ac6040d63 100644
--- a/arch/powerpc/platforms/8xx/ep88xc.c
+++ b/arch/powerpc/platforms/8xx/ep88xc.c
@@ -15,7 +15,6 @@
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/udbg.h>
-#include <asm/commproc.h>
#include <asm/cpm1.h>
#include "mpc8xx.h"
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index fdce10c4f074..fcedbec07f94 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -24,6 +24,7 @@ config PPC_83xx
select MPC83xx
select IPIC
select WANT_DEVICE_TREE
+ select FSL_EMB_PERFMON
config PPC_86xx
bool "Freescale 86xx"
@@ -41,6 +42,7 @@ config CLASSIC32
source "arch/powerpc/platforms/pseries/Kconfig"
source "arch/powerpc/platforms/iseries/Kconfig"
source "arch/powerpc/platforms/chrp/Kconfig"
+source "arch/powerpc/platforms/512x/Kconfig"
source "arch/powerpc/platforms/52xx/Kconfig"
source "arch/powerpc/platforms/powermac/Kconfig"
source "arch/powerpc/platforms/prep/Kconfig"
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 7fc41104d53e..69941ba70975 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -14,7 +14,7 @@ choice
There are five families of 32 bit PowerPC chips supported.
The most common ones are the desktop and server CPUs (601, 603,
604, 740, 750, 74xx) CPUs from Freescale and IBM, with their
- embedded 52xx/82xx/83xx/86xx counterparts.
+ embedded 512x/52xx/82xx/83xx/86xx counterparts.
The other embeeded parts, namely 4xx, 8xx, e200 (55xx) and e500
(85xx) each form a family of their own that is not compatible
with the others.
@@ -22,7 +22,7 @@ choice
If unsure, select 52xx/6xx/7xx/74xx/82xx/83xx/86xx.
config 6xx
- bool "52xx/6xx/7xx/74xx/82xx/83xx/86xx"
+ bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx"
select PPC_FPU
config PPC_85xx
@@ -94,6 +94,7 @@ config 8xx
bool
config E500
+ select FSL_EMB_PERFMON
bool
config PPC_FPU
@@ -115,6 +116,9 @@ config FSL_BOOKE
depends on E200 || E500
default y
+config FSL_EMB_PERFMON
+ bool
+
config PTE_64BIT
bool
depends on 44x || E500
@@ -221,7 +225,7 @@ config NR_CPUS
config NOT_COHERENT_CACHE
bool
- depends on 4xx || 8xx || E200
+ depends on 4xx || 8xx || E200 || PPC_MPC512x
default y
config CHECK_CACHE_COHERENCY
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 6d9079da5f5a..a984894466d9 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -11,6 +11,7 @@ endif
obj-$(CONFIG_PPC_CHRP) += chrp/
obj-$(CONFIG_40x) += 40x/
obj-$(CONFIG_44x) += 44x/
+obj-$(CONFIG_PPC_MPC512x) += 512x/
obj-$(CONFIG_PPC_MPC52xx) += 52xx/
obj-$(CONFIG_PPC_8xx) += 8xx/
obj-$(CONFIG_PPC_82xx) += 82xx/
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 3a963b4a9be0..2f169991896d 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -54,6 +54,13 @@ config SPU_FS_64K_LS
uses 4K pages. This can improve performances of applications
using multiple SPEs by lowering the TLB pressure on them.
+config SPU_TRACE
+ tristate "SPU event tracing support"
+ depends on SPU_FS && MARKERS
+ help
+ This option allows reading a trace of spu-related events through
+ the sputrace file in procfs.
+
config SPU_BASE
bool
default n
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 095988f13bf4..d95e71dee91f 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -13,7 +13,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
-#include <linux/reboot.h>
+#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/machdep.h>
@@ -65,14 +65,12 @@
struct axon_msic {
struct irq_host *irq_host;
- __le32 *fifo;
+ __le32 *fifo_virt;
+ dma_addr_t fifo_phys;
dcr_host_t dcr_host;
- struct list_head list;
u32 read_offset;
};
-static LIST_HEAD(axon_msic_list);
-
static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
pr_debug("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
@@ -94,7 +92,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
while (msic->read_offset != write_offset) {
idx = msic->read_offset / sizeof(__le32);
- msi = le32_to_cpu(msic->fifo[idx]);
+ msi = le32_to_cpu(msic->fifo_virt[idx]);
msi &= 0xFFFF;
pr_debug("axon_msi: woff %x roff %x msi %x\n",
@@ -139,6 +137,7 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
tmp = dn;
dn = of_find_node_by_phandle(*ph);
+ of_node_put(tmp);
if (!dn) {
dev_dbg(&dev->dev,
"axon_msi: msi-translator doesn't point to a node\n");
@@ -156,7 +155,6 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
out_error:
of_node_put(dn);
- of_node_put(tmp);
return msic;
}
@@ -292,30 +290,24 @@ static struct irq_host_ops msic_host_ops = {
.map = msic_host_map,
};
-static int axon_msi_notify_reboot(struct notifier_block *nb,
- unsigned long code, void *data)
+static int axon_msi_shutdown(struct of_device *device)
{
- struct axon_msic *msic;
+ struct axon_msic *msic = device->dev.platform_data;
u32 tmp;
- list_for_each_entry(msic, &axon_msic_list, list) {
- pr_debug("axon_msi: disabling %s\n",
- msic->irq_host->of_node->full_name);
- tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
- tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
- msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
- }
+ pr_debug("axon_msi: disabling %s\n",
+ msic->irq_host->of_node->full_name);
+ tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
+ tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
+ msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
return 0;
}
-static struct notifier_block axon_msi_reboot_notifier = {
- .notifier_call = axon_msi_notify_reboot
-};
-
-static int axon_msi_setup_one(struct device_node *dn)
+static int axon_msi_probe(struct of_device *device,
+ const struct of_device_id *device_id)
{
- struct page *page;
+ struct device_node *dn = device->node;
struct axon_msic *msic;
unsigned int virq;
int dcr_base, dcr_len;
@@ -346,16 +338,14 @@ static int axon_msi_setup_one(struct device_node *dn)
goto out_free_msic;
}
- page = alloc_pages_node(of_node_to_nid(dn), GFP_KERNEL,
- get_order(MSIC_FIFO_SIZE_BYTES));
- if (!page) {
+ msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
+ &msic->fifo_phys, GFP_KERNEL);
+ if (!msic->fifo_virt) {
printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
dn->full_name);
goto out_free_msic;
}
- msic->fifo = page_address(page);
-
msic->irq_host = irq_alloc_host(of_node_get(dn), IRQ_HOST_MAP_NOMAP,
NR_IRQS, &msic_host_ops, 0);
if (!msic->irq_host) {
@@ -378,14 +368,18 @@ static int axon_msi_setup_one(struct device_node *dn)
pr_debug("axon_msi: irq 0x%x setup for axon_msi\n", virq);
/* Enable the MSIC hardware */
- msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, (u64)msic->fifo >> 32);
+ msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
- (u64)msic->fifo & 0xFFFFFFFF);
+ msic->fifo_phys & 0xFFFFFFFF);
msic_dcr_write(msic, MSIC_CTRL_REG,
MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
MSIC_CTRL_FIFO_SIZE);
- list_add(&msic->list, &axon_msic_list);
+ device->dev.platform_data = msic;
+
+ ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
+ ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
+ ppc_md.msi_check_device = axon_msi_check_device;
printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);
@@ -394,7 +388,8 @@ static int axon_msi_setup_one(struct device_node *dn)
out_free_host:
kfree(msic->irq_host);
out_free_fifo:
- __free_pages(virt_to_page(msic->fifo), get_order(MSIC_FIFO_SIZE_BYTES));
+ dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
+ msic->fifo_phys);
out_free_msic:
kfree(msic);
out:
@@ -402,28 +397,24 @@ out:
return -1;
}
-static int axon_msi_init(void)
-{
- struct device_node *dn;
- int found = 0;
-
- pr_debug("axon_msi: initialising ...\n");
-
- for_each_compatible_node(dn, NULL, "ibm,axon-msic") {
- if (axon_msi_setup_one(dn) == 0)
- found++;
- }
-
- if (found) {
- ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
- ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
- ppc_md.msi_check_device = axon_msi_check_device;
-
- register_reboot_notifier(&axon_msi_reboot_notifier);
+static const struct of_device_id axon_msi_device_id[] = {
+ {
+ .compatible = "ibm,axon-msic"
+ },
+ {}
+};
- pr_debug("axon_msi: registered callbacks!\n");
- }
+static struct of_platform_driver axon_msi_driver = {
+ .match_table = axon_msi_device_id,
+ .probe = axon_msi_probe,
+ .shutdown = axon_msi_shutdown,
+ .driver = {
+ .name = "axon-msi"
+ },
+};
- return 0;
+static int __init axon_msi_init(void)
+{
+ return of_register_platform_driver(&axon_msi_driver);
}
-arch_initcall(axon_msi_init);
+subsys_initcall(axon_msi_init);
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index e6534b519c9a..a7f609b3b876 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -98,7 +98,7 @@ static int __init cell_publish_devices(void)
}
return 0;
}
-machine_device_initcall(cell, cell_publish_devices);
+machine_subsys_initcall(cell, cell_publish_devices);
static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc)
{
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index d3a349fb42e5..99610a6361f2 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -4,6 +4,8 @@ spufs-y += inode.o file.o context.o syscalls.o coredump.o
spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
spufs-y += switch.o fault.o lscsa_alloc.o
+obj-$(CONFIG_SPU_TRACE) += sputrace.o
+
# Rules to build switch.o with the help of SPU tool chain
SPU_CROSS := spu-
SPU_CC := $(SPU_CROSS)gcc
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 3fcd06418b01..1018acd1746b 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -29,6 +29,7 @@
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
+#include <linux/marker.h>
#include <asm/io.h>
#include <asm/semaphore.h>
@@ -358,6 +359,8 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
struct spu_context *ctx = vma->vm_file->private_data;
unsigned long area, offset = address - vma->vm_start;
+ spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);
+
offset += vma->vm_pgoff << PAGE_SHIFT;
if (offset >= ps_size)
return NOPFN_SIGBUS;
@@ -375,11 +378,14 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
if (ctx->state == SPU_STATE_SAVED) {
up_read(&current->mm->mmap_sem);
+ spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
+ spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
down_read(&current->mm->mmap_sem);
} else {
area = ctx->spu->problem_phys + ps_offs;
vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+ spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
}
spu_release(ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index c0e968a4c211..90784c029f25 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -322,7 +322,7 @@ static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
struct file *filp)
{
- struct spu_context *tmp, *neighbor;
+ struct spu_context *tmp, *neighbor, *err;
int count, node;
int aff_supp;
@@ -354,11 +354,15 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
!list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
!list_entry(neighbor->aff_list.next, struct spu_context,
- aff_list)->aff_head)
- return ERR_PTR(-EEXIST);
+ aff_list)->aff_head) {
+ err = ERR_PTR(-EEXIST);
+ goto out_put_neighbor;
+ }
- if (gang != neighbor->gang)
- return ERR_PTR(-EINVAL);
+ if (gang != neighbor->gang) {
+ err = ERR_PTR(-EINVAL);
+ goto out_put_neighbor;
+ }
count = 1;
list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
@@ -372,11 +376,17 @@ spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
break;
}
- if (node == MAX_NUMNODES)
- return ERR_PTR(-EEXIST);
+ if (node == MAX_NUMNODES) {
+ err = ERR_PTR(-EEXIST);
+ goto out_put_neighbor;
+ }
}
return neighbor;
+
+out_put_neighbor:
+ put_spu_context(neighbor);
+ return err;
}
static void
@@ -454,9 +464,12 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
if (ret)
goto out_aff_unlock;
- if (affinity)
+ if (affinity) {
spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx,
neighbor);
+ if (neighbor)
+ put_spu_context(neighbor);
+ }
/*
* get references for dget and mntget, will be released
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index c01a09da1e56..b4814c740d8a 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -410,8 +410,11 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
* since we have TIF_SINGLESTEP set, thus the kernel will do
* it upon return from the syscall anyawy
*/
- if ((status & SPU_STATUS_STOPPED_BY_STOP)
- && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) {
+ if (unlikely(status & SPU_STATUS_SINGLE_STEP))
+ ret = -ERESTARTSYS;
+
+ else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP)
+ && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) {
force_sig(SIGTRAP, current);
ret = -ERESTARTSYS;
}
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 00d914232af1..5915343e2599 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -39,6 +39,7 @@
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/marker.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
@@ -216,8 +217,8 @@ void do_notify_spus_active(void)
*/
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
- pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
- spu->number, spu->node);
+ spu_context_trace(spu_bind_context__enter, ctx, spu);
+
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
if (ctx->flags & SPU_CREATE_NOSCHED)
@@ -399,8 +400,8 @@ static int has_affinity(struct spu_context *ctx)
*/
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
- pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
- spu->pid, spu->number, spu->node);
+ spu_context_trace(spu_unbind_context__enter, ctx, spu);
+
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
if (spu->ctx->flags & SPU_CREATE_NOSCHED)
@@ -528,6 +529,8 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
struct spu *spu, *aff_ref_spu;
int node, n;
+ spu_context_nospu_trace(spu_get_idle__enter, ctx);
+
if (ctx->gang) {
mutex_lock(&ctx->gang->aff_mutex);
if (has_affinity(ctx)) {
@@ -546,8 +549,7 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
ctx->gang->aff_ref_spu = NULL;
mutex_unlock(&ctx->gang->aff_mutex);
-
- return NULL;
+ goto not_found;
}
mutex_unlock(&ctx->gang->aff_mutex);
}
@@ -565,12 +567,14 @@ static struct spu *spu_get_idle(struct spu_context *ctx)
mutex_unlock(&cbe_spu_info[node].list_mutex);
}
+ not_found:
+ spu_context_nospu_trace(spu_get_idle__not_found, ctx);
return NULL;
found:
spu->alloc_state = SPU_USED;
mutex_unlock(&cbe_spu_info[node].list_mutex);
- pr_debug("Got SPU %d %d\n", spu->number, spu->node);
+ spu_context_trace(spu_get_idle__found, ctx, spu);
spu_init_channels(spu);
return spu;
}
@@ -587,6 +591,8 @@ static struct spu *find_victim(struct spu_context *ctx)
struct spu *spu;
int node, n;
+ spu_context_nospu_trace(spu_find_vitim__enter, ctx);
+
/*
* Look for a possible preemption candidate on the local node first.
* If there is no candidate look at the other nodes. This isn't
@@ -640,6 +646,8 @@ static struct spu *find_victim(struct spu_context *ctx)
goto restart;
}
+ spu_context_trace(__spu_deactivate__unload, ctx, spu);
+
mutex_lock(&cbe_spu_info[node].list_mutex);
cbe_spu_info[node].nr_active--;
spu_unbind_context(spu, victim);
@@ -822,6 +830,7 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
*/
void spu_deactivate(struct spu_context *ctx)
{
+ spu_context_nospu_trace(spu_deactivate__enter, ctx);
__spu_deactivate(ctx, 1, MAX_PRIO);
}
@@ -835,6 +844,7 @@ void spu_deactivate(struct spu_context *ctx)
*/
void spu_yield(struct spu_context *ctx)
{
+ spu_context_nospu_trace(spu_yield__enter, ctx);
if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
mutex_lock(&ctx->state_mutex);
__spu_deactivate(ctx, 0, MAX_PRIO);
@@ -864,11 +874,15 @@ static noinline void spusched_tick(struct spu_context *ctx)
goto out;
spu = ctx->spu;
+
+ spu_context_trace(spusched_tick__preempt, ctx, spu);
+
new = grab_runnable_context(ctx->prio + 1, spu->node);
if (new) {
spu_unschedule(spu, ctx);
spu_add_to_rq(ctx);
} else {
+ spu_context_nospu_trace(spusched_tick__newslice, ctx);
ctx->time_slice++;
}
out:
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0e114038ea6f..795a1b52538b 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -325,4 +325,9 @@ extern void spu_free_lscsa(struct spu_state *csa);
extern void spuctx_switch_state(struct spu_context *ctx,
enum spu_utilization_state new_state);
+#define spu_context_trace(name, ctx, spu) \
+ trace_mark(name, "%p %p", ctx, spu);
+#define spu_context_nospu_trace(name, ctx) \
+ trace_mark(name, "%p", ctx);
+
#endif
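
These wrappers are what the scheduler and fault paths above call; each one drops a marker whose name/format pair must match a probe registered by sputrace.c. For example:

	spu_context_trace(spu_bind_context__enter, ctx, spu);
	/* expands to */
	trace_mark(spu_bind_context__enter, "%p %p", ctx, spu);
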
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
new file mode 100644
index 000000000000..2b1953f6f12e
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2007 IBM Deutschland Entwicklung GmbH
+ * Released under GPL v2.
+ *
+ * Partially based on net/ipv4/tcp_probe.c.
+ *
+ * Simple tracing facility for spu contexts.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/marker.h>
+#include <linux/proc_fs.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+#include <asm/uaccess.h>
+#include "spufs.h"
+
+struct spu_probe {
+ const char *name;
+ const char *format;
+ marker_probe_func *probe_func;
+};
+
+struct sputrace {
+ ktime_t tstamp;
+ int owner_tid; /* owner */
+ int curr_tid;
+ const char *name;
+ int number;
+};
+
+static int bufsize __read_mostly = 16384;
+MODULE_PARM_DESC(bufsize, "Log buffer size (number of records)");
+module_param(bufsize, int, 0);
+
+
+static DEFINE_SPINLOCK(sputrace_lock);
+static DECLARE_WAIT_QUEUE_HEAD(sputrace_wait);
+static ktime_t sputrace_start;
+static unsigned long sputrace_head, sputrace_tail;
+static struct sputrace *sputrace_log;
+
+static int sputrace_used(void)
+{
+ return (sputrace_head - sputrace_tail) % bufsize;
+}
+
+static inline int sputrace_avail(void)
+{
+ return bufsize - sputrace_used();
+}
+
+static int sputrace_sprint(char *tbuf, int n)
+{
+ const struct sputrace *t = sputrace_log + sputrace_tail % bufsize;
+ struct timespec tv =
+ ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));
+
+ return snprintf(tbuf, n,
+ "[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n",
+ (unsigned long) tv.tv_sec,
+ (unsigned long) tv.tv_nsec,
+ t->owner_tid,
+ t->name,
+ t->curr_tid,
+ t->number);
+}
+
+static ssize_t sputrace_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ int error = 0, cnt = 0;
+
+ if (!buf || len < 0)
+ return -EINVAL;
+
+ while (cnt < len) {
+ char tbuf[128];
+ int width;
+
+ error = wait_event_interruptible(sputrace_wait,
+ sputrace_used() > 0);
+ if (error)
+ break;
+
+ spin_lock(&sputrace_lock);
+ if (sputrace_head == sputrace_tail) {
+ spin_unlock(&sputrace_lock);
+ continue;
+ }
+
+ width = sputrace_sprint(tbuf, sizeof(tbuf));
+ if (width < len)
+ sputrace_tail = (sputrace_tail + 1) % bufsize;
+ spin_unlock(&sputrace_lock);
+
+ if (width >= len)
+ break;
+
+ error = copy_to_user(buf + cnt, tbuf, width);
+ if (error)
+ break;
+ cnt += width;
+ }
+
+ return cnt == 0 ? error : cnt;
+}
+
+static int sputrace_open(struct inode *inode, struct file *file)
+{
+ spin_lock(&sputrace_lock);
+ sputrace_head = sputrace_tail = 0;
+ sputrace_start = ktime_get();
+ spin_unlock(&sputrace_lock);
+
+ return 0;
+}
+
+static const struct file_operations sputrace_fops = {
+ .owner = THIS_MODULE,
+ .open = sputrace_open,
+ .read = sputrace_read,
+};
+
+static void sputrace_log_item(const char *name, struct spu_context *ctx,
+ struct spu *spu)
+{
+ spin_lock(&sputrace_lock);
+ if (sputrace_avail() > 1) {
+ struct sputrace *t = sputrace_log + sputrace_head;
+
+ t->tstamp = ktime_get();
+ t->owner_tid = ctx->tid;
+ t->name = name;
+ t->curr_tid = current->pid;
+ t->number = spu ? spu->number : -1;
+
+ sputrace_head = (sputrace_head + 1) % bufsize;
+ } else {
+ printk(KERN_WARNING
+ "sputrace: lost samples due to full buffer.\n");
+ }
+ spin_unlock(&sputrace_lock);
+
+ wake_up(&sputrace_wait);
+}
+
+static void spu_context_event(const struct marker *mdata,
+ void *private, const char *format, ...)
+{
+ struct spu_probe *p = mdata->private;
+ va_list ap;
+ struct spu_context *ctx;
+ struct spu *spu;
+
+ va_start(ap, format);
+ ctx = va_arg(ap, struct spu_context *);
+ spu = va_arg(ap, struct spu *);
+
+ sputrace_log_item(p->name, ctx, spu);
+ va_end(ap);
+}
+
+static void spu_context_nospu_event(const struct marker *mdata,
+ void *private, const char *format, ...)
+{
+ struct spu_probe *p = mdata->private;
+ va_list ap;
+ struct spu_context *ctx;
+
+ va_start(ap, format);
+ ctx = va_arg(ap, struct spu_context *);
+
+ sputrace_log_item(p->name, ctx, NULL);
+ va_end(ap);
+}
+
+struct spu_probe spu_probes[] = {
+ { "spu_bind_context__enter", "%p %p", spu_context_event },
+ { "spu_unbind_context__enter", "%p %p", spu_context_event },
+ { "spu_get_idle__enter", "%p", spu_context_nospu_event },
+ { "spu_get_idle__found", "%p %p", spu_context_event },
+ { "spu_get_idle__not_found", "%p", spu_context_nospu_event },
+ { "spu_find_victim__enter", "%p", spu_context_nospu_event },
+ { "spusched_tick__preempt", "%p %p", spu_context_event },
+ { "spusched_tick__newslice", "%p", spu_context_nospu_event },
+ { "spu_yield__enter", "%p", spu_context_nospu_event },
+ { "spu_deactivate__enter", "%p", spu_context_nospu_event },
+ { "__spu_deactivate__unload", "%p %p", spu_context_event },
+ { "spufs_ps_nopfn__enter", "%p", spu_context_nospu_event },
+ { "spufs_ps_nopfn__sleep", "%p", spu_context_nospu_event },
+ { "spufs_ps_nopfn__wake", "%p %p", spu_context_event },
+ { "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
+ { "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
+ { "destroy_spu_context__enter", "%p", spu_context_nospu_event },
+};
+
+static int __init sputrace_init(void)
+{
+ struct proc_dir_entry *entry;
+ int i, error = -ENOMEM;
+
+ sputrace_log = kcalloc(sizeof(struct sputrace),
+ bufsize, GFP_KERNEL);
+ if (!sputrace_log)
+ goto out;
+
+ entry = create_proc_entry("sputrace", S_IRUSR, NULL);
+ if (!entry)
+ goto out_free_log;
+ entry->proc_fops = &sputrace_fops;
+
+ for (i = 0; i < ARRAY_SIZE(spu_probes); i++) {
+ struct spu_probe *p = &spu_probes[i];
+
+ error = marker_probe_register(p->name, p->format,
+ p->probe_func, p);
+ if (error)
+ printk(KERN_INFO "Unable to register probe %s\n",
+ p->name);
+
+ error = marker_arm(p->name);
+ if (error)
+ printk(KERN_INFO "Unable to arm probe %s\n", p->name);
+ }
+
+ return 0;
+
+out_free_log:
+ kfree(sputrace_log);
+out:
+ return -ENOMEM;
+}
+
+static void __exit sputrace_exit(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spu_probes); i++)
+ marker_probe_unregister(spu_probes[i].name);
+
+ remove_proc_entry("sputrace", NULL);
+ kfree(sputrace_log);
+}
+
+module_init(sputrace_init);
+module_exit(sputrace_exit);
+
+MODULE_LICENSE("GPL");
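
Once the module is loaded, the log can be drained from user space; a minimal reader sketch (the file name and record layout follow sputrace_fops and sputrace_sprint() above):

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/sputrace", "r");

		if (!f) {
			perror("/proc/sputrace");
			return 1;
		}
		/* e.g. "[0.012345678] 1234: spu_yield__enter (thread = 1234, spu = -1)" */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}

sputrace_read() blocks in wait_event_interruptible() until records are available, so the reader simply sits on the file while SPU contexts run.
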
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index e12e9d298716..8864e4884980 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -132,33 +132,18 @@ static void __init storcenter_init_IRQ(void)
paddr = (phys_addr_t)of_translate_address(dnp, prop);
mpic = mpic_alloc(dnp, paddr, MPIC_PRIMARY | MPIC_WANTS_RESET,
- 4, 32, " EPIC ");
+ 16, 32, " OpenPIC ");
of_node_put(dnp);
BUG_ON(mpic == NULL);
- /* PCI IRQs */
/*
- * 2.6.12 patch:
- * openpic_set_sources(0, 5, OpenPIC_Addr + 0x10200);
- * openpic_set_sources(5, 2, OpenPIC_Addr + 0x11120);
- * first_irq, num_irqs, __iomem first_ISR
- * o_ss: i, src: 0, fdf50200
- * o_ss: i, src: 1, fdf50220
- * o_ss: i, src: 2, fdf50240
- * o_ss: i, src: 3, fdf50260
- * o_ss: i, src: 4, fdf50280
- * o_ss: i, src: 5, fdf51120
- * o_ss: i, src: 6, fdf51140
+ * 16 Serial Interrupts followed by 16 Internal Interrupts.
+ * I2C is the second internal, so it is at 17, 0x11020.
*/
mpic_assign_isu(mpic, 0, paddr + 0x10200);
- mpic_assign_isu(mpic, 1, paddr + 0x10220);
- mpic_assign_isu(mpic, 2, paddr + 0x10240);
- mpic_assign_isu(mpic, 3, paddr + 0x10260);
- mpic_assign_isu(mpic, 4, paddr + 0x10280);
- mpic_assign_isu(mpic, 5, paddr + 0x11120);
- mpic_assign_isu(mpic, 6, paddr + 0x11140);
+ mpic_assign_isu(mpic, 1, paddr + 0x11000);
mpic_init(mpic);
}
@@ -178,7 +163,7 @@ static int __init storcenter_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- return of_flat_dt_is_compatible(root, "storcenter");
+ return of_flat_dt_is_compatible(root, "iomega,storcenter");
}
define_machine(storcenter){
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index 6a0c6f6675cd..11fa3c772ed5 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -199,7 +199,7 @@ static struct iommu_table vio_iommu_table;
void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
- return iommu_alloc_coherent(&vio_iommu_table, size, dma_handle,
+ return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
DMA_32BIT_MASK, flag, -1);
}
EXPORT_SYMBOL_GPL(iseries_hv_alloc);
@@ -213,7 +213,7 @@ EXPORT_SYMBOL_GPL(iseries_hv_free);
dma_addr_t iseries_hv_map(void *vaddr, size_t size,
enum dma_data_direction direction)
{
- return iommu_map_single(&vio_iommu_table, vaddr, size,
+ return iommu_map_single(NULL, &vio_iommu_table, vaddr, size,
DMA_32BIT_MASK, direction);
}
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index c04abcc28a7a..792d3ce8112e 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -113,8 +113,6 @@ static inline void debug_calc_bogomips(void)
* result. We backup/restore the value to avoid affecting the
* core cpufreq framework's own calculation.
*/
- extern void calibrate_delay(void);
-
unsigned long save_lpj = loops_per_jiffy;
calibrate_delay();
loops_per_jiffy = save_lpj;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index c4ad54e0f288..1f032483c026 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -58,7 +58,7 @@ static void pseries_mach_cpu_die(void)
{
local_irq_disable();
idle_task_exit();
- xics_teardown_cpu(0);
+ xics_teardown_cpu();
unregister_slb_shadow(hard_smp_processor_id(), __pa(get_slb_shadow()));
rtas_stop_self();
/* Should never get here... */
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 412a5e7aff2d..e9dd5fe081c9 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -54,7 +54,7 @@ void __init setup_kexec_cpu_down_mpic(void)
static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary)
{
pseries_kexec_cpu_down(crash_shutdown, secondary);
- xics_teardown_cpu(secondary);
+ xics_kexec_teardown_cpu(secondary);
}
void __init setup_kexec_cpu_down_xics(void)
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index c02f8742c54d..2800fced8c7c 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -167,6 +167,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
if ((child = of_get_next_child(np, NULL))) {
of_node_put(child);
+ of_node_put(parent);
return -EBUSY;
}
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 8f8dd9c3ca6b..ca52b587166d 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -160,6 +160,46 @@ static inline void lpar_qirr_info(int n_cpu , u8 value)
/* High level handlers and init code */
+static void xics_update_irq_servers(void)
+{
+ int i, j;
+ struct device_node *np;
+ u32 ilen;
+ const u32 *ireg, *isize;
+ u32 hcpuid;
+
+ /* Find the server numbers for the boot cpu. */
+ np = of_get_cpu_node(boot_cpuid, NULL);
+ BUG_ON(!np);
+
+ ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
+ if (!ireg) {
+ of_node_put(np);
+ return;
+ }
+
+ i = ilen / sizeof(int);
+ hcpuid = get_hard_smp_processor_id(boot_cpuid);
+
+ /* Global interrupt distribution server is specified in the last
+ * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
+ * entry from this property for current boot cpu id and use it as
+ * default distribution server
+ */
+ for (j = 0; j < i; j += 2) {
+ if (ireg[j] == hcpuid) {
+ default_server = hcpuid;
+ default_distrib_server = ireg[j+1];
+
+ isize = of_get_property(np,
+ "ibm,interrupt-server#-size", NULL);
+ if (isize)
+ interrupt_server_size = *isize;
+ }
+ }
+
+ of_node_put(np);
+}
#ifdef CONFIG_SMP
static int get_irq_server(unsigned int virq, unsigned int strict_check)
@@ -169,6 +209,9 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
cpumask_t cpumask = irq_desc[virq].affinity;
cpumask_t tmp = CPU_MASK_NONE;
+ if (! cpu_isset(default_server, cpu_online_map))
+ xics_update_irq_servers();
+
if (!distribute_irqs)
return default_server;
@@ -658,39 +701,11 @@ static void __init xics_setup_8259_cascade(void)
set_irq_chained_handler(cascade, pseries_8259_cascade);
}
-static struct device_node *cpuid_to_of_node(int cpu)
-{
- struct device_node *np;
- u32 hcpuid = get_hard_smp_processor_id(cpu);
-
- for_each_node_by_type(np, "cpu") {
- int i, len;
- const u32 *intserv;
-
- intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
- &len);
-
- if (!intserv)
- intserv = of_get_property(np, "reg", &len);
-
- i = len / sizeof(u32);
-
- while (i--)
- if (intserv[i] == hcpuid)
- return np;
- }
-
- return NULL;
-}
-
void __init xics_init_IRQ(void)
{
- int i, j;
struct device_node *np;
- u32 ilen, indx = 0;
- const u32 *ireg, *isize;
+ u32 indx = 0;
int found = 0;
- u32 hcpuid;
ppc64_boot_msg(0x20, "XICS Init");
@@ -709,34 +724,7 @@ void __init xics_init_IRQ(void)
return;
xics_init_host();
-
- /* Find the server numbers for the boot cpu. */
- np = cpuid_to_of_node(boot_cpuid);
- BUG_ON(!np);
- ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
- if (!ireg)
- goto skip_gserver_check;
- i = ilen / sizeof(int);
- hcpuid = get_hard_smp_processor_id(boot_cpuid);
-
- /* Global interrupt distribution server is specified in the last
- * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last
- * entry fom this property for current boot cpu id and use it as
- * default distribution server
- */
- for (j = 0; j < i; j += 2) {
- if (ireg[j] == hcpuid) {
- default_server = hcpuid;
- default_distrib_server = ireg[j+1];
-
- isize = of_get_property(np,
- "ibm,interrupt-server#-size", NULL);
- if (isize)
- interrupt_server_size = *isize;
- }
- }
-skip_gserver_check:
- of_node_put(np);
+ xics_update_irq_servers();
if (firmware_has_feature(FW_FEATURE_LPAR))
ppc_md.get_irq = xics_get_irq_lpar;
@@ -775,11 +763,9 @@ void xics_request_IPIs(void)
}
#endif /* CONFIG_SMP */
-void xics_teardown_cpu(int secondary)
+void xics_teardown_cpu()
{
int cpu = smp_processor_id();
- unsigned int ipi;
- struct irq_desc *desc;
xics_set_cpu_priority(0);
@@ -790,9 +776,17 @@ void xics_teardown_cpu(int secondary)
lpar_qirr_info(cpu, 0xff);
else
direct_qirr_info(cpu, 0xff);
+}
+
+void xics_kexec_teardown_cpu(int secondary)
+{
+ unsigned int ipi;
+ struct irq_desc *desc;
+
+ xics_teardown_cpu();
/*
- * we need to EOI the IPI if we got here from kexec down IPI
+ * we need to EOI the IPI
*
* probably need to check all the other interrupts too
* should we be flagging idle loop instead?
@@ -880,8 +874,8 @@ void xics_migrate_irqs_away(void)
virq, cpu);
/* Reset affinity to all cpus */
+ irq_desc[virq].affinity = CPU_MASK_ALL;
desc->chip->set_affinity(virq, CPU_MASK_ALL);
- irq_desc[irq].affinity = CPU_MASK_ALL;
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
}
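
xics_update_irq_servers() reads "ibm,ppc-interrupt-gserver#s" as (server, global-server) pairs and records the pair matching the boot CPU's hard id; re-running it from get_irq_server() when the recorded default_server is no longer online is what keeps the default route valid across CPU unplug. A hypothetical property for a boot CPU with hard id 4 might look like:

	ibm,ppc-interrupt-gserver#s = <4 0xff>;
	/* ireg[0] == hcpuid -> default_server = 4, default_distrib_server = 0xff */
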
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h
index 9ffd809d29e2..c26bcff47b6d 100644
--- a/arch/powerpc/platforms/pseries/xics.h
+++ b/arch/powerpc/platforms/pseries/xics.h
@@ -16,7 +16,8 @@
extern void xics_init_IRQ(void);
extern void xics_setup_cpu(void);
-extern void xics_teardown_cpu(int secondary);
+extern void xics_teardown_cpu(void);
+extern void xics_kexec_teardown_cpu(int secondary);
extern void xics_cause_IPI(int cpu);
extern void xics_request_IPIs(void);
extern void xics_migrate_irqs_away(void);
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index 427027c7ea0f..437e48d3ae33 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -137,5 +137,6 @@ void dcr_unmap(dcr_host_t host, unsigned int dcr_c)
h.token = NULL;
}
EXPORT_SYMBOL_GPL(dcr_unmap);
-
-#endif /* !defined(CONFIG_PPC_DCR_NATIVE) */
+#else /* defined(CONFIG_PPC_DCR_NATIVE) */
+DEFINE_SPINLOCK(dcr_ind_lock);
+#endif /* !defined(CONFIG_PPC_DCR_NATIVE) */
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index e48b20e934ca..2c5388ce902a 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -1342,7 +1342,7 @@ static int __init of_fsl_spi_probe(char *type, char *compatible, u32 sysclk,
if (ret)
goto unreg;
- ret = platform_device_register(pdev);
+ ret = platform_device_add(pdev);
if (ret)
goto unreg;
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 0e74a4bd9827..5d2d5522ef41 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -174,15 +174,19 @@ int mpc8xx_pic_init(void)
goto out;
siu_reg = ioremap(res.start, res.end - res.start + 1);
- if (siu_reg == NULL)
- return -EINVAL;
+ if (siu_reg == NULL) {
+ ret = -EINVAL;
+ goto out;
+ }
- mpc8xx_pic_host = irq_alloc_host(of_node_get(np), IRQ_HOST_MAP_LINEAR,
+ mpc8xx_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
64, &mpc8xx_pic_host_ops, 64);
if (mpc8xx_pic_host == NULL) {
printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
ret = -ENOMEM;
+ goto out;
}
+ return 0;
out:
of_node_put(np);
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 5ef844da9355..6efbd5e5bb1b 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -66,7 +66,7 @@ phys_addr_t get_qe_base(void)
{
struct device_node *qe;
unsigned int size;
- const void *prop;
+ const u32 *prop;
if (qebase != -1)
return qebase;
@@ -79,7 +79,8 @@ phys_addr_t get_qe_base(void)
}
prop = of_get_property(qe, "reg", &size);
- qebase = of_translate_address(qe, prop);
+ if (prop && size >= sizeof(*prop))
+ qebase = of_translate_address(qe, prop);
of_node_put(qe);
return qebase;
@@ -172,10 +173,9 @@ unsigned int get_brg_clk(void)
}
prop = of_get_property(qe, "brg-frequency", &size);
- if (!prop || size != sizeof(*prop))
- return brg_clk;
+ if (prop && size == sizeof(*prop))
+ brg_clk = *prop;
- brg_clk = *prop;
of_node_put(qe);
return brg_clk;
diff --git a/arch/ppc/8260_io/enet.c b/arch/ppc/8260_io/enet.c
index 25ef55bacd99..ec1defea9c1e 100644
--- a/arch/ppc/8260_io/enet.c
+++ b/arch/ppc/8260_io/enet.c
@@ -418,7 +418,7 @@ scc_enet_rx(struct net_device *dev)
struct sk_buff *skb;
ushort pkt_len;
- cep = (struct scc_enet_private *)dev->priv;
+ cep = dev->priv;
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
index a3a27dafff1f..bcc3aa9d04f3 100644
--- a/arch/ppc/8260_io/fcc_enet.c
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -682,7 +682,7 @@ fcc_enet_rx(struct net_device *dev)
struct sk_buff *skb;
ushort pkt_len;
- cep = (struct fcc_enet_private *)dev->priv;
+ cep = dev->priv;
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S
index 52b64fcbdfc5..8a24bc47eb6c 100644
--- a/arch/ppc/kernel/vmlinux.lds.S
+++ b/arch/ppc/kernel/vmlinux.lds.S
@@ -143,11 +143,6 @@ SECTIONS
. = ALIGN(4096);
__init_end = .;
-
- . = ALIGN(4096);
- _sextratext = .;
- _eextratext = .;
-
__bss_start = .;
.bss :
{
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index fadacfd18806..409fcaa4994a 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -74,7 +74,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
return ret;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_pages((unsigned long)pgd, PGDIR_ORDER);
}
@@ -111,7 +111,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
return ptepage;
}
-void pte_free_kernel(pte_t *pte)
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
#ifdef CONFIG_SMP
hash_page_sync();
@@ -119,7 +119,7 @@ void pte_free_kernel(pte_t *pte)
free_page((unsigned long)pte);
}
-void pte_free(struct page *ptepage)
+void pte_free(struct mm_struct *mm, struct page *ptepage)
{
#ifdef CONFIG_SMP
hash_page_sync();
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index 3c56654bfc6f..38449855d5ff 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -91,20 +91,11 @@ extern void prep_tiger1_setup_pci(char *irq_edge_mask_lo, char *irq_edge_mask_hi
#define cached_21 (((char *)(ppc_cached_irq_mask))[3])
#define cached_A1 (((char *)(ppc_cached_irq_mask))[2])
-#ifdef CONFIG_SOUND_CS4232
-long ppc_cs4232_dma, ppc_cs4232_dma2;
-#endif
-
extern PTE *Hash, *Hash_end;
extern unsigned long Hash_size, Hash_mask;
extern int probingmem;
extern unsigned long loops_per_jiffy;
-#ifdef CONFIG_SOUND_CS4232
-EXPORT_SYMBOL(ppc_cs4232_dma);
-EXPORT_SYMBOL(ppc_cs4232_dma2);
-#endif
-
/* useful ISA ports */
#define PREP_SYSCTL 0x81c
/* present in the IBM reference design; possibly identical in Mot boxes: */
@@ -569,74 +560,6 @@ prep_show_percpuinfo(struct seq_file *m, int i)
return 0;
}
-#ifdef CONFIG_SOUND_CS4232
-static long __init masktoint(unsigned int i)
-{
- int t = -1;
- while (i >> ++t)
- ;
- return (t-1);
-}
-
-/*
- * ppc_cs4232_dma and ppc_cs4232_dma2 are used in include/asm/dma.h
- * to distinguish sound dma-channels from others. This is because
- * blocksize on 16 bit dma-channels 5,6,7 is 128k, but
- * the cs4232.c uses 64k like on 8 bit dma-channels 0,1,2,3
- */
-
-static void __init prep_init_sound(void)
-{
- PPC_DEVICE *audiodevice = NULL;
-
- /*
- * Get the needed resource information from residual data.
- *
- */
- if (have_residual_data)
- audiodevice = residual_find_device(~0, NULL,
- MultimediaController, AudioController, -1, 0);
-
- if (audiodevice != NULL) {
- PnP_TAG_PACKET *pkt;
-
- pkt = PnP_find_packet((unsigned char *)&res->DevicePnPHeap[audiodevice->AllocatedOffset],
- S5_Packet, 0);
- if (pkt != NULL)
- ppc_cs4232_dma = masktoint(pkt->S5_Pack.DMAMask);
- pkt = PnP_find_packet((unsigned char*)&res->DevicePnPHeap[audiodevice->AllocatedOffset],
- S5_Packet, 1);
- if (pkt != NULL)
- ppc_cs4232_dma2 = masktoint(pkt->S5_Pack.DMAMask);
- }
-
- /*
- * These are the PReP specs' defaults for the cs4231. We use these
- * as fallback incase we don't have residual data.
- * At least the IBM Thinkpad 850 with IDE DMA Channels at 6 and 7
- * will use the other values.
- */
- if (audiodevice == NULL) {
- switch (_prep_type) {
- case _PREP_IBM:
- ppc_cs4232_dma = 1;
- ppc_cs4232_dma2 = -1;
- break;
- default:
- ppc_cs4232_dma = 6;
- ppc_cs4232_dma2 = 7;
- }
- }
-
- /*
- * Find a way to push this information to the cs4232 driver
- * Give it out with printk, when not in cmd_line?
- * Append it to cmd_line and boot_command_line?
- * Format is cs4232=io,irq,dma,dma2
- */
-}
-#endif /* CONFIG_SOUND_CS4232 */
-
/*
* Fill out screen_info according to the residual data. This allows us to use
* at least vesafb.
@@ -898,10 +821,6 @@ prep_setup_arch(void)
}
}
-#ifdef CONFIG_SOUND_CS4232
- prep_init_sound();
-#endif /* CONFIG_SOUND_CS4232 */
-
prep_init_vesa();
switch (_prep_type) {
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 82cbffd03654..92a4f7b4323a 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -16,6 +16,9 @@ config LOCKDEP_SUPPORT
config STACKTRACE_SUPPORT
def_bool y
+config HAVE_LATENCYTOP_SUPPORT
+ def_bool y
+
config RWSEM_GENERIC_SPINLOCK
bool
@@ -47,6 +50,11 @@ config NO_IOMEM
config NO_DMA
def_bool y
+config GENERIC_LOCKBREAK
+ bool
+ default y
+ depends on SMP && PREEMPT
+
mainmenu "Linux Kernel Configuration"
config S390
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 2283933a9a93..4599fa06bd82 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -6,4 +6,12 @@ config TRACE_IRQFLAGS_SUPPORT
source "lib/Kconfig.debug"
+config DEBUG_PAGEALLOC
+ bool "Debug page memory allocations"
+ depends on DEBUG_KERNEL
+ help
+ Unmap pages from the kernel linear mapping after free_pages().
+ This results in a slowdown, but helps to find certain types of
+ memory corruptions.
+
endmenu
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 6ee1bedbd1bf..062c3d4c0394 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1698,14 +1698,6 @@ compat_sys_signalfd_wrapper:
llgfr %r4,%r4 # compat_size_t
jg compat_sys_signalfd
- .globl compat_sys_timerfd_wrapper
-compat_sys_timerfd_wrapper:
- lgfr %r2,%r2 # int
- lgfr %r3,%r3 # int
- lgfr %r4,%r4 # int
- llgtr %r5,%r5 # struct compat_itimerspec *
- jg compat_sys_timerfd
-
.globl sys_eventfd_wrapper
sys_eventfd_wrapper:
llgfr %r2,%r2 # unsigned int
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1a6dac8df6fb..6766e37fe8ea 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -11,6 +11,7 @@
#include <linux/sys.h>
#include <linux/linkage.h>
+#include <linux/init.h>
#include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h>
@@ -830,9 +831,7 @@ mcck_return:
* Restart interruption handler, kick starter for additional CPUs
*/
#ifdef CONFIG_SMP
-#ifndef CONFIG_HOTPLUG_CPU
- .section .init.text,"ax"
-#endif
+ __CPUINIT
.globl restart_int_handler
restart_int_handler:
l %r15,__LC_SAVE_AREA+60 # load ksp
@@ -845,9 +844,7 @@ restart_int_handler:
br %r14 # branch to start_secondary
restart_addr:
.long start_secondary
-#ifndef CONFIG_HOTPLUG_CPU
.previous
-#endif
#else
/*
* If we do not run with SMP enabled, let the new CPU crash ...
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index a3e47b893f07..efde6e178f6c 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -11,6 +11,7 @@
#include <linux/sys.h>
#include <linux/linkage.h>
+#include <linux/init.h>
#include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h>
@@ -801,9 +802,7 @@ mcck_return:
* Restart interruption handler, kick starter for additional CPUs
*/
#ifdef CONFIG_SMP
-#ifndef CONFIG_HOTPLUG_CPU
- .section .init.text,"ax"
-#endif
+ __CPUINIT
.globl restart_int_handler
restart_int_handler:
lg %r15,__LC_SAVE_AREA+120 # load ksp
@@ -814,9 +813,7 @@ restart_int_handler:
lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone
stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
jg start_secondary
-#ifndef CONFIG_HOTPLUG_CPU
.previous
-#endif
#else
/*
* If we do not run with SMP enabled, let the new CPU crash ...
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index db28cca81fef..60acdc266db1 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -439,7 +439,7 @@ static void ipl_run(struct shutdown_trigger *trigger)
reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
}
-static int ipl_init(void)
+static int __init ipl_init(void)
{
int rc;
@@ -471,8 +471,11 @@ out:
return 0;
}
-static struct shutdown_action ipl_action = {SHUTDOWN_ACTION_IPL_STR, ipl_run,
- ipl_init};
+static struct shutdown_action __refdata ipl_action = {
+ .name = SHUTDOWN_ACTION_IPL_STR,
+ .fn = ipl_run,
+ .init = ipl_init,
+};
/*
* reipl shutdown action: Reboot Linux on shutdown.
@@ -792,7 +795,7 @@ static int __init reipl_fcp_init(void)
return 0;
}
-static int reipl_init(void)
+static int __init reipl_init(void)
{
int rc;
@@ -819,8 +822,11 @@ static int reipl_init(void)
return 0;
}
-static struct shutdown_action reipl_action = {SHUTDOWN_ACTION_REIPL_STR,
- reipl_run, reipl_init};
+static struct shutdown_action __refdata reipl_action = {
+ .name = SHUTDOWN_ACTION_REIPL_STR,
+ .fn = reipl_run,
+ .init = reipl_init,
+};
/*
* dump shutdown action: Dump Linux on shutdown.
@@ -998,7 +1004,7 @@ static int __init dump_fcp_init(void)
return 0;
}
-static int dump_init(void)
+static int __init dump_init(void)
{
int rc;
@@ -1020,8 +1026,11 @@ static int dump_init(void)
return 0;
}
-static struct shutdown_action dump_action = {SHUTDOWN_ACTION_DUMP_STR,
- dump_run, dump_init};
+static struct shutdown_action __refdata dump_action = {
+ .name = SHUTDOWN_ACTION_DUMP_STR,
+ .fn = dump_run,
+ .init = dump_init,
+};
/*
* vmcmd shutdown action: Trigger vm command on shutdown.
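
[editor note] The ipl.c hunks do two things: the one-time setup routines are marked __init (with __refdata on the structs that reference them), and the struct shutdown_action initializers move from positional to designated form, which stays correct if fields are ever added or reordered. A hedged sketch of that C99 idiom using a made-up struct, not the kernel's definition:

#include <stdio.h>

struct shutdown_action {
	const char *name;
	void (*fn)(void);
	int (*init)(void);
};

static void ipl_run(void)  { puts("re-IPL"); }
static int  ipl_init(void) { return 0; }

/* Designated initializers name each field, so values cannot silently
 * bind to the wrong member when the struct layout changes. */
static struct shutdown_action ipl_action = {
	.name = "ipl",
	.fn   = ipl_run,
	.init = ipl_init,
};

int main(void)
{
	if (ipl_action.init() == 0)
		ipl_action.fn();
	printf("registered action: %s\n", ipl_action.name);
	return 0;
}
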
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 766c783bd7a7..f9f8779022a0 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -77,7 +77,7 @@ unsigned long machine_flags = 0;
unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE];
-struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
+struct mem_chunk __meminitdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
static unsigned long __initdata memory_end;
@@ -145,7 +145,7 @@ __setup("condev=", condev_setup);
static int __init conmode_setup(char *str)
{
-#if defined(CONFIG_SCLP_CONSOLE)
+#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
SET_CONSOLE_SCLP;
#endif
@@ -183,7 +183,7 @@ static void __init conmode_default(void)
*/
cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
if (ptr == NULL) {
-#if defined(CONFIG_SCLP_CONSOLE)
+#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
return;
@@ -193,7 +193,7 @@ static void __init conmode_default(void)
SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
SET_CONSOLE_3215;
-#elif defined(CONFIG_SCLP_CONSOLE)
+#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
} else if (strncmp(ptr + 8, "3215", 4) == 0) {
@@ -201,7 +201,7 @@ static void __init conmode_default(void)
SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
SET_CONSOLE_3270;
-#elif defined(CONFIG_SCLP_CONSOLE)
+#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
}
@@ -212,7 +212,7 @@ static void __init conmode_default(void)
SET_CONSOLE_3270;
#endif
} else {
-#if defined(CONFIG_SCLP_CONSOLE)
+#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
}
@@ -528,7 +528,7 @@ static void __init setup_memory_end(void)
memory_size = 0;
memory_end &= PAGE_MASK;
- max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
+ max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS;
memory_end = min(max_mem, memory_end);
/*
@@ -649,21 +649,24 @@ setup_memory(void)
/*
* Reserve memory used for lowcore/command line/kernel image.
*/
- reserve_bootmem(0, (unsigned long)_ehead);
+ reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
reserve_bootmem((unsigned long)_stext,
- PFN_PHYS(start_pfn) - (unsigned long)_stext);
+ PFN_PHYS(start_pfn) - (unsigned long)_stext,
+ BOOTMEM_DEFAULT);
/*
* Reserve the bootmem bitmap itself as well. We do this in two
* steps (first step was init_bootmem()) because this catches
* the (very unlikely) case of us accidentally initializing the
* bootmem allocator with an invalid RAM area.
*/
- reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);
+ reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
+ BOOTMEM_DEFAULT);
#ifdef CONFIG_BLK_DEV_INITRD
if (INITRD_START && INITRD_SIZE) {
if (INITRD_START + INITRD_SIZE <= memory_end) {
- reserve_bootmem(INITRD_START, INITRD_SIZE);
+ reserve_bootmem(INITRD_START, INITRD_SIZE,
+ BOOTMEM_DEFAULT);
initrd_start = INITRD_START;
initrd_end = initrd_start + INITRD_SIZE;
} else {
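
[editor note] Several hunks in this section (s390, sh, sparc) are mechanical fallout from one bootmem API change: reserve_bootmem() and reserve_bootmem_node() now take a flags argument. BOOTMEM_DEFAULT keeps the old unconditional-reserve behaviour, while the sh crashkernel reservation further below passes BOOTMEM_EXCLUSIVE to get an error back instead of silently double-reserving. A rough user-space model of only the flag semantics; the real kernel functions work on physical address ranges and return -EBUSY-style errors, so names and conventions here are illustrative:

#include <stdio.h>

#define BOOTMEM_DEFAULT   0
#define BOOTMEM_EXCLUSIVE 1
#define NPAGES            16

static unsigned char reserved[NPAGES];       /* toy bitmap: one byte per page */

static int reserve_bootmem(unsigned long page, unsigned long count, int flags)
{
	unsigned long i;

	if (flags == BOOTMEM_EXCLUSIVE)
		for (i = page; i < page + count; i++)
			if (reserved[i])
				return -1;   /* overlap: refuse instead of double-reserving */
	for (i = page; i < page + count; i++)
		reserved[i] = 1;             /* BOOTMEM_DEFAULT: mark reserved regardless */
	return 0;
}

int main(void)
{
	printf("default:   %d\n", reserve_bootmem(2, 4, BOOTMEM_DEFAULT));
	printf("exclusive: %d\n", reserve_bootmem(3, 2, BOOTMEM_EXCLUSIVE));
	return 0;
}
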
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index aa37fa154512..85060659fb12 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -225,12 +225,11 @@ EXPORT_SYMBOL(smp_call_function_single);
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
-int
-smp_call_function_mask(cpumask_t mask,
- void (*func)(void *), void *info,
- int wait)
+int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
+ int wait)
{
preempt_disable();
+ cpu_clear(smp_processor_id(), mask);
__smp_call_function_map(func, info, 0, wait, mask);
preempt_enable();
return 0;
@@ -1008,7 +1007,7 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
.notifier_call = smp_cpu_notify,
};
-static int smp_add_present_cpu(int cpu)
+static int __devinit smp_add_present_cpu(int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct sys_device *s = &c->sysdev;
@@ -1036,8 +1035,8 @@ out:
}
#ifdef CONFIG_HOTPLUG_CPU
-static ssize_t rescan_store(struct sys_device *dev, const char *buf,
- size_t count)
+static ssize_t __ref rescan_store(struct sys_device *dev,
+ const char *buf, size_t count)
{
cpumask_t newcpus;
int cpu;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index da6924729964..85e46a5d0e08 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -14,7 +14,8 @@
static unsigned long save_context_stack(struct stack_trace *trace,
unsigned long sp,
unsigned long low,
- unsigned long high)
+ unsigned long high,
+ int savesched)
{
struct stack_frame *sf;
struct pt_regs *regs;
@@ -47,10 +48,12 @@ static unsigned long save_context_stack(struct stack_trace *trace,
return sp;
regs = (struct pt_regs *)sp;
addr = regs->psw.addr & PSW_ADDR_INSN;
- if (!trace->skip)
- trace->entries[trace->nr_entries++] = addr;
- else
- trace->skip--;
+ if (savesched || !in_sched_functions(addr)) {
+ if (!trace->skip)
+ trace->entries[trace->nr_entries++] = addr;
+ else
+ trace->skip--;
+ }
if (trace->nr_entries >= trace->max_entries)
return sp;
low = sp;
@@ -66,15 +69,27 @@ void save_stack_trace(struct stack_trace *trace)
orig_sp = sp & PSW_ADDR_INSN;
new_sp = save_context_stack(trace, orig_sp,
S390_lowcore.panic_stack - PAGE_SIZE,
- S390_lowcore.panic_stack);
+ S390_lowcore.panic_stack, 1);
if (new_sp != orig_sp)
return;
new_sp = save_context_stack(trace, new_sp,
S390_lowcore.async_stack - ASYNC_SIZE,
- S390_lowcore.async_stack);
+ S390_lowcore.async_stack, 1);
if (new_sp != orig_sp)
return;
save_context_stack(trace, new_sp,
S390_lowcore.thread_info,
- S390_lowcore.thread_info + THREAD_SIZE);
+ S390_lowcore.thread_info + THREAD_SIZE, 1);
+}
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ unsigned long sp, low, high;
+
+ sp = tsk->thread.ksp & PSW_ADDR_INSN;
+ low = (unsigned long) task_stack_page(tsk);
+ high = (unsigned long) task_pt_regs(tsk);
+ save_context_stack(trace, sp, low, high, 0);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
}
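
[editor note] The stacktrace.c change goes with the HAVE_LATENCYTOP_SUPPORT option added earlier in this series: save_context_stack() gains a savesched flag so the new save_stack_trace_tsk() can drop scheduler-internal frames (in_sched_functions()) and terminate the trace with ULONG_MAX. The skip/max_entries bookkeeping itself is generic; a small user-space illustration of it using glibc's backtrace()/backtrace_symbols() (assuming <execinfo.h> is available; compile with -rdynamic to see symbol names):

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ENTRIES 16

static void dump_trace(int skip)
{
	void *entries[MAX_ENTRIES];
	int n = backtrace(entries, MAX_ENTRIES);       /* at most MAX_ENTRIES frames */
	char **names = backtrace_symbols(entries, n);
	int i;

	for (i = skip; i < n; i++)                     /* honour the caller's skip count */
		printf("%s\n", names ? names[i] : "?");
	free(names);
}

static void leaf(void)  { dump_trace(1); }             /* skip dump_trace's own frame */
static void inner(void) { leaf(); }

int main(void)
{
	inner();
	return 0;
}
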
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9e26ed9fe4e7..25eac7802fc4 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -325,5 +325,5 @@ SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
SYSCALL(s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper) /* 315 */
SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper)
-SYSCALL(sys_timerfd,sys_timerfd,compat_sys_timerfd_wrapper)
+NI_SYSCALL /* 317 old sys_timer_fd */
SYSCALL(sys_eventfd,sys_eventfd,sys_eventfd_wrapper)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 52b8342c6bf2..1a2fdb6991df 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -271,7 +271,10 @@ void die(const char * str, struct pt_regs * regs, long err)
printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
- printk("SMP");
+ printk("SMP ");
+#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ printk("DEBUG_PAGEALLOC");
#endif
printk("\n");
notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 7d43c3cd3ef3..b4607155e8d0 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -35,7 +35,7 @@ SECTIONS
KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
- } = 0x0700
+ } :text = 0x0700
_etext = .; /* End of text section */
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index b234bb4a6da7..983ec6ec0e7c 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -167,6 +167,33 @@ void __init mem_init(void)
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long address;
+ int i;
+
+ for (i = 0; i < numpages; i++) {
+ address = page_to_phys(page + i);
+ pgd = pgd_offset_k(address);
+ pud = pud_offset(pgd, address);
+ pmd = pmd_offset(pud, address);
+ pte = pte_offset_kernel(pmd, address);
+ if (!enable) {
+ ptep_invalidate(address, pte);
+ continue;
+ }
+ *pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
+ /* Flush cpu write queue. */
+ mb();
+ }
+}
+#endif
+
void free_initmem(void)
{
unsigned long addr;
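
[editor note] The new kernel_map_pages() is what backs the DEBUG_PAGEALLOC option added in Kconfig.debug above: when pages are freed they are dropped from the kernel linear mapping via ptep_invalidate(), so a stale pointer faults immediately instead of silently reading freed memory. The idea can be sketched in user space with mmap/mprotect, where "freeing" revokes access; this is purely illustrative of the effect, not of the kernel mechanism:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	p[0] = 42;                     /* normal use while "allocated" */

	/* "free": revoke all access, much as DEBUG_PAGEALLOC unmaps freed
	 * pages from the kernel linear mapping. */
	mprotect(p, len, PROT_NONE);

	printf("page poisoned; dereferencing p now would fault immediately\n");
	/* p[0];  <- would SIGSEGV here, catching the use-after-free */

	munmap(p, len);
	return 0;
}
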
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 79d13a166a3d..7c1287ccf788 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -62,7 +62,7 @@ void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
}
}
-static void __init_refok *vmem_alloc_pages(unsigned int order)
+static void __ref *vmem_alloc_pages(unsigned int order)
{
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
@@ -250,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
{
struct memory_segment *tmp;
- if (seg->start + seg->size >= VMALLOC_START ||
+ if (seg->start + seg->size >= VMEM_MAX_PHYS ||
seg->start + seg->size < seg->start)
return -ERANGE;
@@ -360,7 +360,6 @@ void __init vmem_map_init(void)
{
int i;
- BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
NODE_DATA(0)->node_mem_map = VMEM_MAP;
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
diff --git a/arch/sh/boards/landisk/setup.c b/arch/sh/boards/landisk/setup.c
index eda71763ecc5..2b708ec72558 100644
--- a/arch/sh/boards/landisk/setup.c
+++ b/arch/sh/boards/landisk/setup.c
@@ -14,7 +14,7 @@
*/
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/pm.h>
#include <linux/mm.h>
#include <asm/machvec.h>
diff --git a/arch/sh/boards/lboxre2/setup.c b/arch/sh/boards/lboxre2/setup.c
index 9c830fdc411b..c74440d38ee9 100644
--- a/arch/sh/boards/lboxre2/setup.c
+++ b/arch/sh/boards/lboxre2/setup.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <asm/machvec.h>
#include <asm/addrspace.h>
#include <asm/lboxre2.h>
diff --git a/arch/sh/boards/renesas/r7780rp/setup.c b/arch/sh/boards/renesas/r7780rp/setup.c
index a43b47726f54..f7a8d5c9d510 100644
--- a/arch/sh/boards/renesas/r7780rp/setup.c
+++ b/arch/sh/boards/renesas/r7780rp/setup.c
@@ -15,7 +15,7 @@
*/
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/types.h>
#include <net/ax88796.h>
#include <asm/machvec.h>
diff --git a/arch/sh/boards/renesas/rts7751r2d/setup.c b/arch/sh/boards/renesas/rts7751r2d/setup.c
index 3452b072adde..a0ef81b7de37 100644
--- a/arch/sh/boards/renesas/rts7751r2d/setup.c
+++ b/arch/sh/boards/renesas/rts7751r2d/setup.c
@@ -10,7 +10,7 @@
*/
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/serial_8250.h>
#include <linux/sm501.h>
#include <linux/sm501-regs.h>
diff --git a/arch/sh/boards/renesas/sdk7780/setup.c b/arch/sh/boards/renesas/sdk7780/setup.c
index 5df32f201870..acc5932587f1 100644
--- a/arch/sh/boards/renesas/sdk7780/setup.c
+++ b/arch/sh/boards/renesas/sdk7780/setup.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/platform_device.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <asm/machvec.h>
#include <asm/sdk7780.h>
#include <asm/heartbeat.h>
diff --git a/arch/sh/boards/se/7722/setup.c b/arch/sh/boards/se/7722/setup.c
index eb97dca5b736..b1a3d9d0172f 100644
--- a/arch/sh/boards/se/7722/setup.c
+++ b/arch/sh/boards/se/7722/setup.c
@@ -12,7 +12,7 @@
*/
#include <linux/init.h>
#include <linux/platform_device.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <asm/machvec.h>
#include <asm/se7722.h>
#include <asm/io.h>
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 855cdf9d85b1..18a5baf2cbad 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -140,18 +140,26 @@ static void __init reserve_crashkernel(void)
ret = parse_crashkernel(boot_command_line, free_mem,
&crash_size, &crash_base);
if (ret == 0 && crash_size) {
- if (crash_base > 0) {
- printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
- "for crashkernel (System RAM: %ldMB)\n",
- (unsigned long)(crash_size >> 20),
- (unsigned long)(crash_base >> 20),
- (unsigned long)(free_mem >> 20));
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
- reserve_bootmem(crash_base, crash_size);
- } else
+ if (crash_base <= 0) {
printk(KERN_INFO "crashkernel reservation failed - "
"you have to specify a base address\n");
+ return;
+ }
+
+ if (reserve_bootmem(crash_base, crash_size,
+ BOOTMEM_EXCLUSIVE) < 0) {
+ printk(KERN_INFO "crashkernel reservation failed - "
+ "memory is in use\n");
+ return;
+ }
+
+ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
+ "for crashkernel (System RAM: %ldMB)\n",
+ (unsigned long)(crash_size >> 20),
+ (unsigned long)(crash_base >> 20),
+ (unsigned long)(free_mem >> 20));
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
}
}
#else
@@ -184,13 +192,14 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
* an invalid RAM area.
*/
reserve_bootmem(__MEMORY_START+PAGE_SIZE,
- (PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);
+ (PFN_PHYS(free_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START,
+ BOOTMEM_DEFAULT);
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
* enabling clean reboots, SMP operation, laptop functions.
*/
- reserve_bootmem(__MEMORY_START, PAGE_SIZE);
+ reserve_bootmem(__MEMORY_START, PAGE_SIZE, BOOTMEM_DEFAULT);
sparse_memory_present_with_active_regions(0);
@@ -200,7 +209,7 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
if (LOADER_TYPE && INITRD_START) {
if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
reserve_bootmem(INITRD_START + __MEMORY_START,
- INITRD_SIZE);
+ INITRD_SIZE, BOOTMEM_DEFAULT);
initrd_start = INITRD_START + PAGE_OFFSET +
__MEMORY_START;
initrd_end = initrd_start + INITRD_SIZE;
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 10bec45415ba..719e127a7c05 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -338,6 +338,6 @@ ENTRY(sys_call_table)
.long sys_epoll_pwait
.long sys_utimensat /* 320 */
.long sys_signalfd
- .long sys_timerfd
+ .long sys_ni_syscall
.long sys_eventfd
.long sys_fallocate
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 98a93efe3691..12c7340356ae 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -376,6 +376,6 @@ sys_call_table:
.long sys_epoll_pwait
.long sys_utimensat
.long sys_signalfd
- .long sys_timerfd /* 350 */
+ .long sys_ni_syscall /* 350 */
.long sys_eventfd
.long sys_fallocate
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 8aff065dd307..2de7302724fc 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -80,9 +80,9 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
/* Reserve the pgdat and bootmap space with the bootmem allocator */
reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
- sizeof(struct pglist_data));
+ sizeof(struct pglist_data), BOOTMEM_DEFAULT);
reserve_bootmem_node(NODE_DATA(nid), free_pfn << PAGE_SHIFT,
- bootmap_pages << PAGE_SHIFT);
+ bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
/* It's up */
node_set_online(nid);
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 88d2cefd01be..c2eed8f71516 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -1224,23 +1224,6 @@ sys_nis_syscall:
call c_sys_nis_syscall
mov %l5, %o7
- .align 4
- .globl sys_ptrace
-sys_ptrace:
- call do_ptrace
- add %sp, STACKFRAME_SZ, %o0
-
- ld [%curptr + TI_FLAGS], %l5
- andcc %l5, _TIF_SYSCALL_TRACE, %g0
- be 1f
- nop
-
- call syscall_trace
- nop
-
-1:
- RESTORE_ALL
-
.align 4
.globl sys_execve
sys_execve:
diff --git a/arch/sparc/kernel/ptrace.c b/arch/sparc/kernel/ptrace.c
index 7452269bba2a..5b54f11f4e59 100644
--- a/arch/sparc/kernel/ptrace.c
+++ b/arch/sparc/kernel/ptrace.c
@@ -1,6 +1,6 @@
/* ptrace.c: Sparc process tracing support.
*
- * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
*
* Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
* and David Mosberger.
@@ -19,389 +19,343 @@
#include <linux/smp_lock.h>
#include <linux/security.h>
#include <linux/signal.h>
+#include <linux/regset.h>
+#include <linux/elf.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/uaccess.h>
-#define MAGIC_CONSTANT 0x80000000
-
+/* #define ALLOW_INIT_TRACING */
-/* Returning from ptrace is a bit tricky because the syscall return
- * low level code assumes any value returned which is negative and
- * is a valid errno will mean setting the condition codes to indicate
- * an error return. This doesn't work, so we have this hook.
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
*/
-static inline void pt_error_return(struct pt_regs *regs, unsigned long error)
+void ptrace_disable(struct task_struct *child)
{
- regs->u_regs[UREG_I0] = error;
- regs->psr |= PSR_C;
- regs->pc = regs->npc;
- regs->npc += 4;
+ /* nothing to do */
}
-static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
-{
- regs->u_regs[UREG_I0] = value;
- regs->psr &= ~PSR_C;
- regs->pc = regs->npc;
- regs->npc += 4;
-}
+enum sparc_regset {
+ REGSET_GENERAL,
+ REGSET_FP,
+};
-static void
-pt_succ_return_linux(struct pt_regs *regs, unsigned long value, long __user *addr)
+static int genregs32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
{
- if (put_user(value, addr)) {
- pt_error_return(regs, EFAULT);
- return;
+ const struct pt_regs *regs = target->thread.kregs;
+ unsigned long __user *reg_window;
+ unsigned long *k = kbuf;
+ unsigned long __user *u = ubuf;
+ unsigned long reg;
+
+ if (target == current)
+ flush_user_windows();
+
+ pos /= sizeof(reg);
+ count /= sizeof(reg);
+
+ if (kbuf) {
+ for (; count > 0 && pos < 16; count--)
+ *k++ = regs->u_regs[pos++];
+
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(*k++, &reg_window[pos++]))
+ return -EFAULT;
+ }
+ } else {
+ for (; count > 0 && pos < 16; count--) {
+ if (put_user(regs->u_regs[pos++], u++))
+ return -EFAULT;
+ }
+
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, &reg_window[pos++]) ||
+ put_user(reg, u++))
+ return -EFAULT;
+ }
}
- regs->u_regs[UREG_I0] = 0;
- regs->psr &= ~PSR_C;
- regs->pc = regs->npc;
- regs->npc += 4;
-}
+ while (count > 0) {
+ switch (pos) {
+ case 32: /* PSR */
+ reg = regs->psr;
+ break;
+ case 33: /* PC */
+ reg = regs->pc;
+ break;
+ case 34: /* NPC */
+ reg = regs->npc;
+ break;
+ case 35: /* Y */
+ reg = regs->y;
+ break;
+ case 36: /* WIM */
+ case 37: /* TBR */
+ reg = 0;
+ break;
+ default:
+ goto finish;
+ }
-static void
-pt_os_succ_return (struct pt_regs *regs, unsigned long val, long __user *addr)
-{
- if (current->personality == PER_SUNOS)
- pt_succ_return (regs, val);
- else
- pt_succ_return_linux (regs, val, addr);
+ if (kbuf)
+ *k++ = reg;
+ else if (put_user(reg, u++))
+ return -EFAULT;
+ pos++;
+ count--;
+ }
+finish:
+ pos *= sizeof(reg);
+ count *= sizeof(reg);
+
+ return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ 38 * sizeof(reg), -1);
}
-/* Fuck me gently with a chainsaw... */
-static inline void read_sunos_user(struct pt_regs *regs, unsigned long offset,
- struct task_struct *tsk, long __user *addr)
+static int genregs32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
{
- struct pt_regs *cregs = tsk->thread.kregs;
- struct thread_info *t = task_thread_info(tsk);
- int v;
-
- if(offset >= 1024)
- offset -= 1024; /* whee... */
- if(offset & ((sizeof(unsigned long) - 1))) {
- pt_error_return(regs, EIO);
- return;
- }
- if(offset >= 16 && offset < 784) {
- offset -= 16; offset >>= 2;
- pt_os_succ_return(regs, *(((unsigned long *)(&t->reg_window[0]))+offset), addr);
- return;
- }
- if(offset >= 784 && offset < 832) {
- offset -= 784; offset >>= 2;
- pt_os_succ_return(regs, *(((unsigned long *)(&t->rwbuf_stkptrs[0]))+offset), addr);
- return;
- }
- switch(offset) {
- case 0:
- v = t->ksp;
- break;
- case 4:
- v = t->kpc;
- break;
- case 8:
- v = t->kpsr;
- break;
- case 12:
- v = t->uwinmask;
- break;
- case 832:
- v = t->w_saved;
- break;
- case 896:
- v = cregs->u_regs[UREG_I0];
- break;
- case 900:
- v = cregs->u_regs[UREG_I1];
- break;
- case 904:
- v = cregs->u_regs[UREG_I2];
- break;
- case 908:
- v = cregs->u_regs[UREG_I3];
- break;
- case 912:
- v = cregs->u_regs[UREG_I4];
- break;
- case 916:
- v = cregs->u_regs[UREG_I5];
- break;
- case 920:
- v = cregs->u_regs[UREG_I6];
- break;
- case 924:
- if(tsk->thread.flags & MAGIC_CONSTANT)
- v = cregs->u_regs[UREG_G1];
- else
- v = 0;
- break;
- case 940:
- v = cregs->u_regs[UREG_I0];
- break;
- case 944:
- v = cregs->u_regs[UREG_I1];
- break;
+ struct pt_regs *regs = target->thread.kregs;
+ unsigned long __user *reg_window;
+ const unsigned long *k = kbuf;
+ const unsigned long __user *u = ubuf;
+ unsigned long reg;
+
+ if (target == current)
+ flush_user_windows();
+
+ pos /= sizeof(reg);
+ count /= sizeof(reg);
+
+ if (kbuf) {
+ for (; count > 0 && pos < 16; count--)
+ regs->u_regs[pos++] = *k++;
+
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ for (; count > 0 && pos < 32; count--) {
+ if (put_user(*k++, &reg_window[pos++]))
+ return -EFAULT;
+ }
+ } else {
+ for (; count > 0 && pos < 16; count--) {
+ if (get_user(reg, u++))
+ return -EFAULT;
+ regs->u_regs[pos++] = reg;
+ }
- case 948:
- /* Isn't binary compatibility _fun_??? */
- if(cregs->psr & PSR_C)
- v = cregs->u_regs[UREG_I0] << 24;
- else
- v = 0;
- break;
+ reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, u++) ||
+ put_user(reg, &reg_window[pos++]))
+ return -EFAULT;
+ }
+ }
+ while (count > 0) {
+ unsigned long psr;
+
+ if (kbuf)
+ reg = *k++;
+ else if (get_user(reg, u++))
+ return -EFAULT;
+
+ switch (pos) {
+ case 32: /* PSR */
+ psr = regs->psr;
+ psr &= ~PSR_ICC;
+ psr |= (reg & PSR_ICC);
+ regs->psr = psr;
+ break;
+ case 33: /* PC */
+ regs->pc = reg;
+ break;
+ case 34: /* NPC */
+ regs->npc = reg;
+ break;
+ case 35: /* Y */
+ regs->y = reg;
+ break;
+ case 36: /* WIM */
+ case 37: /* TBR */
+ break;
+ default:
+ goto finish;
+ }
- /* Rest of them are completely unsupported. */
- default:
- printk("%s [%d]: Wants to read user offset %ld\n",
- current->comm, task_pid_nr(current), offset);
- pt_error_return(regs, EIO);
- return;
+ pos++;
+ count--;
}
- if (current->personality == PER_SUNOS)
- pt_succ_return (regs, v);
- else
- pt_succ_return_linux (regs, v, addr);
- return;
+finish:
+ pos *= sizeof(reg);
+ count *= sizeof(reg);
+
+ return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 38 * sizeof(reg), -1);
}
-static inline void write_sunos_user(struct pt_regs *regs, unsigned long offset,
- struct task_struct *tsk)
+static int fpregs32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
{
- struct pt_regs *cregs = tsk->thread.kregs;
- struct thread_info *t = task_thread_info(tsk);
- unsigned long value = regs->u_regs[UREG_I3];
-
- if(offset >= 1024)
- offset -= 1024; /* whee... */
- if(offset & ((sizeof(unsigned long) - 1)))
- goto failure;
- if(offset >= 16 && offset < 784) {
- offset -= 16; offset >>= 2;
- *(((unsigned long *)(&t->reg_window[0]))+offset) = value;
- goto success;
- }
- if(offset >= 784 && offset < 832) {
- offset -= 784; offset >>= 2;
- *(((unsigned long *)(&t->rwbuf_stkptrs[0]))+offset) = value;
- goto success;
- }
- switch(offset) {
- case 896:
- cregs->u_regs[UREG_I0] = value;
- break;
- case 900:
- cregs->u_regs[UREG_I1] = value;
- break;
- case 904:
- cregs->u_regs[UREG_I2] = value;
- break;
- case 908:
- cregs->u_regs[UREG_I3] = value;
- break;
- case 912:
- cregs->u_regs[UREG_I4] = value;
- break;
- case 916:
- cregs->u_regs[UREG_I5] = value;
- break;
- case 920:
- cregs->u_regs[UREG_I6] = value;
- break;
- case 924:
- cregs->u_regs[UREG_I7] = value;
- break;
- case 940:
- cregs->u_regs[UREG_I0] = value;
- break;
- case 944:
- cregs->u_regs[UREG_I1] = value;
- break;
+ const unsigned long *fpregs = target->thread.float_regs;
+ int ret = 0;
- /* Rest of them are completely unsupported or "no-touch". */
- default:
- printk("%s [%d]: Wants to write user offset %ld\n",
- current->comm, task_pid_nr(current), offset);
- goto failure;
+#if 0
+ if (target == current)
+ save_and_clear_fpu();
+#endif
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ fpregs,
+ 0, 32 * sizeof(u32));
+
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ 32 * sizeof(u32),
+ 33 * sizeof(u32));
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fsr,
+ 33 * sizeof(u32),
+ 34 * sizeof(u32));
+
+ if (!ret) {
+ unsigned long val;
+
+ val = (1 << 8) | (8 << 16);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &val,
+ 34 * sizeof(u32),
+ 35 * sizeof(u32));
}
-success:
- pt_succ_return(regs, 0);
- return;
-failure:
- pt_error_return(regs, EIO);
- return;
-}
-/* #define ALLOW_INIT_TRACING */
-/* #define DEBUG_PTRACE */
-
-#ifdef DEBUG_PTRACE
-char *pt_rq [] = {
- /* 0 */ "TRACEME", "PEEKTEXT", "PEEKDATA", "PEEKUSR",
- /* 4 */ "POKETEXT", "POKEDATA", "POKEUSR", "CONT",
- /* 8 */ "KILL", "SINGLESTEP", "SUNATTACH", "SUNDETACH",
- /* 12 */ "GETREGS", "SETREGS", "GETFPREGS", "SETFPREGS",
- /* 16 */ "READDATA", "WRITEDATA", "READTEXT", "WRITETEXT",
- /* 20 */ "GETFPAREGS", "SETFPAREGS", "unknown", "unknown",
- /* 24 */ "SYSCALL", ""
-};
-#endif
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ 35 * sizeof(u32), -1);
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
- */
-void ptrace_disable(struct task_struct *child)
-{
- /* nothing to do */
+ return ret;
}
-asmlinkage void do_ptrace(struct pt_regs *regs)
+static int fpregs32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
{
- unsigned long request = regs->u_regs[UREG_I0];
- unsigned long pid = regs->u_regs[UREG_I1];
- unsigned long addr = regs->u_regs[UREG_I2];
- unsigned long data = regs->u_regs[UREG_I3];
- unsigned long addr2 = regs->u_regs[UREG_I4];
- struct task_struct *child;
+ unsigned long *fpregs = target->thread.float_regs;
int ret;
- lock_kernel();
-#ifdef DEBUG_PTRACE
- {
- char *s;
-
- if ((request >= 0) && (request <= 24))
- s = pt_rq [request];
- else
- s = "unknown";
-
- if (request == PTRACE_POKEDATA && data == 0x91d02001){
- printk ("do_ptrace: breakpoint pid=%d, addr=%08lx addr2=%08lx\n",
- pid, addr, addr2);
- } else
- printk("do_ptrace: rq=%s(%d) pid=%d addr=%08lx data=%08lx addr2=%08lx\n",
- s, (int) request, (int) pid, addr, data, addr2);
- }
+#if 0
+ if (target == current)
+ save_and_clear_fpu();
#endif
-
- if (request == PTRACE_TRACEME) {
- ret = ptrace_traceme();
- if (ret < 0)
- pt_error_return(regs, -ret);
- else
- pt_succ_return(regs, 0);
- goto out;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ fpregs,
+ 0, 32 * sizeof(u32));
+ if (!ret)
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 32 * sizeof(u32),
+ 33 * sizeof(u32));
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fsr,
+ 33 * sizeof(u32),
+ 34 * sizeof(u32));
}
- child = ptrace_get_task_struct(pid);
- if (IS_ERR(child)) {
- ret = PTR_ERR(child);
- pt_error_return(regs, -ret);
- goto out;
- }
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 34 * sizeof(u32), -1);
+ return ret;
+}
- if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
- || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
- if (ptrace_attach(child)) {
- pt_error_return(regs, EPERM);
- goto out_tsk;
- }
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
+static const struct user_regset sparc32_regsets[] = {
+ /* Format is:
+ * G0 --> G7
+ * O0 --> O7
+ * L0 --> L7
+ * I0 --> I7
+ * PSR, PC, nPC, Y, WIM, TBR
+ */
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = 38 * sizeof(u32),
+ .size = sizeof(u32), .align = sizeof(u32),
+ .get = genregs32_get, .set = genregs32_set
+ },
+ /* Format is:
+ * F0 --> F31
+ * empty 32-bit word
+ * FSR (32--bit word)
+ * FPU QUEUE COUNT (8-bit char)
+ * FPU QUEUE ENTRYSIZE (8-bit char)
+ * FPU ENABLED (8-bit char)
+ * empty 8-bit char
+ * FPU QUEUE (64 32-bit ints)
+ */
+ [REGSET_FP] = {
+ .core_note_type = NT_PRFPREG,
+ .n = 99 * sizeof(u32),
+ .size = sizeof(u32), .align = sizeof(u32),
+ .get = fpregs32_get, .set = fpregs32_set
+ },
+};
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0) {
- pt_error_return(regs, -ret);
- goto out_tsk;
- }
+static const struct user_regset_view user_sparc32_view = {
+ .name = "sparc", .e_machine = EM_SPARC,
+ .regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
+};
- switch(request) {
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned long tmp;
-
- if (access_process_vm(child, addr,
- &tmp, sizeof(tmp), 0) == sizeof(tmp))
- pt_os_succ_return(regs, tmp, (long __user *)data);
- else
- pt_error_return(regs, EIO);
- goto out_tsk;
- }
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_sparc32_view;
+}
- case PTRACE_PEEKUSR:
- read_sunos_user(regs, addr, child, (long __user *) data);
- goto out_tsk;
-
- case PTRACE_POKEUSR:
- write_sunos_user(regs, addr, child);
- goto out_tsk;
-
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA: {
- if (access_process_vm(child, addr,
- &data, sizeof(data), 1) == sizeof(data))
- pt_succ_return(regs, 0);
- else
- pt_error_return(regs, EIO);
- goto out_tsk;
- }
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+{
+ unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
+ const struct user_regset_view *view;
+ int ret;
+
+ view = task_user_regset_view(child);
+ switch(request) {
case PTRACE_GETREGS: {
struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
- struct pt_regs *cregs = child->thread.kregs;
- int rval;
- if (!access_ok(VERIFY_WRITE, pregs, sizeof(struct pt_regs))) {
- rval = -EFAULT;
- pt_error_return(regs, -rval);
- goto out_tsk;
- }
- __put_user(cregs->psr, (&pregs->psr));
- __put_user(cregs->pc, (&pregs->pc));
- __put_user(cregs->npc, (&pregs->npc));
- __put_user(cregs->y, (&pregs->y));
- for(rval = 1; rval < 16; rval++)
- __put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]));
- pt_succ_return(regs, 0);
-#ifdef DEBUG_PTRACE
- printk ("PC=%x nPC=%x o7=%x\n", cregs->pc, cregs->npc, cregs->u_regs [15]);
-#endif
- goto out_tsk;
+ ret = copy_regset_to_user(child, view, REGSET_GENERAL,
+ 32 * sizeof(u32),
+ 4 * sizeof(u32),
+ &pregs->psr);
+ if (!ret)
+ copy_regset_to_user(child, view, REGSET_GENERAL,
+ 1 * sizeof(u32),
+ 15 * sizeof(u32),
+ &pregs->u_regs[0]);
+ break;
}
case PTRACE_SETREGS: {
struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
- struct pt_regs *cregs = child->thread.kregs;
- unsigned long psr, pc, npc, y;
- int i;
-
- /* Must be careful, tracing process can only set certain
- * bits in the psr.
- */
- if (!access_ok(VERIFY_READ, pregs, sizeof(struct pt_regs))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
- }
- __get_user(psr, (&pregs->psr));
- __get_user(pc, (&pregs->pc));
- __get_user(npc, (&pregs->npc));
- __get_user(y, (&pregs->y));
- psr &= PSR_ICC;
- cregs->psr &= ~PSR_ICC;
- cregs->psr |= psr;
- if (!((pc | npc) & 3)) {
- cregs->pc = pc;
- cregs->npc =npc;
- }
- cregs->y = y;
- for(i = 1; i < 16; i++)
- __get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]));
- pt_succ_return(regs, 0);
- goto out_tsk;
+
+ ret = copy_regset_from_user(child, view, REGSET_GENERAL,
+ 32 * sizeof(u32),
+ 4 * sizeof(u32),
+ &pregs->psr);
+ if (!ret)
+ copy_regset_from_user(child, view, REGSET_GENERAL,
+ 1 * sizeof(u32),
+ 15 * sizeof(u32),
+ &pregs->u_regs[0]);
+ break;
}
case PTRACE_GETFPREGS: {
@@ -417,26 +371,25 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
} fpq[16];
};
struct fps __user *fps = (struct fps __user *) addr;
- int i;
- if (!access_ok(VERIFY_WRITE, fps, sizeof(struct fps))) {
- i = -EFAULT;
- pt_error_return(regs, -i);
- goto out_tsk;
- }
- for(i = 0; i < 32; i++)
- __put_user(child->thread.float_regs[i], (&fps->regs[i]));
- __put_user(child->thread.fsr, (&fps->fsr));
- __put_user(child->thread.fpqdepth, (&fps->fpqd));
- __put_user(0, (&fps->flags));
- __put_user(0, (&fps->extra));
- for(i = 0; i < 16; i++) {
- __put_user(child->thread.fpqueue[i].insn_addr,
- (&fps->fpq[i].insnaddr));
- __put_user(child->thread.fpqueue[i].insn, (&fps->fpq[i].insn));
+ ret = copy_regset_to_user(child, view, REGSET_FP,
+ 0 * sizeof(u32),
+ 32 * sizeof(u32),
+ &fps->regs[0]);
+ if (!ret)
+ ret = copy_regset_to_user(child, view, REGSET_FP,
+ 33 * sizeof(u32),
+ 1 * sizeof(u32),
+ &fps->fsr);
+
+ if (!ret) {
+ if (__put_user(0, &fps->fpqd) ||
+ __put_user(0, &fps->flags) ||
+ __put_user(0, &fps->extra) ||
+ clear_user(fps->fpq, sizeof(fps->fpq)))
+ ret = -EFAULT;
}
- pt_succ_return(regs, 0);
- goto out_tsk;
+ break;
}
case PTRACE_SETFPREGS: {
@@ -452,137 +405,55 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
} fpq[16];
};
struct fps __user *fps = (struct fps __user *) addr;
- int i;
- if (!access_ok(VERIFY_READ, fps, sizeof(struct fps))) {
- i = -EFAULT;
- pt_error_return(regs, -i);
- goto out_tsk;
- }
- copy_from_user(&child->thread.float_regs[0], &fps->regs[0], (32 * sizeof(unsigned long)));
- __get_user(child->thread.fsr, (&fps->fsr));
- __get_user(child->thread.fpqdepth, (&fps->fpqd));
- for(i = 0; i < 16; i++) {
- __get_user(child->thread.fpqueue[i].insn_addr,
- (&fps->fpq[i].insnaddr));
- __get_user(child->thread.fpqueue[i].insn, (&fps->fpq[i].insn));
- }
- pt_succ_return(regs, 0);
- goto out_tsk;
+ ret = copy_regset_from_user(child, view, REGSET_FP,
+ 0 * sizeof(u32),
+ 32 * sizeof(u32),
+ &fps->regs[0]);
+ if (!ret)
+ ret = copy_regset_from_user(child, view, REGSET_FP,
+ 33 * sizeof(u32),
+ 1 * sizeof(u32),
+ &fps->fsr);
+ break;
}
case PTRACE_READTEXT:
- case PTRACE_READDATA: {
- int res = ptrace_readdata(child, addr,
- (void __user *) addr2, data);
-
- if (res == data) {
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
- /* Partial read is an IO failure */
- if (res >= 0)
- res = -EIO;
- pt_error_return(regs, -res);
- goto out_tsk;
- }
+ case PTRACE_READDATA:
+ ret = ptrace_readdata(child, addr,
+ (void __user *) addr2, data);
+
+ if (ret == data)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+ break;
case PTRACE_WRITETEXT:
- case PTRACE_WRITEDATA: {
- int res = ptrace_writedata(child, (void __user *) addr2,
- addr, data);
-
- if (res == data) {
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
- /* Partial write is an IO failure */
- if (res >= 0)
- res = -EIO;
- pt_error_return(regs, -res);
- goto out_tsk;
- }
-
- case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
- addr = 1;
-
- case PTRACE_CONT: { /* restart after signal. */
- if (!valid_signal(data)) {
- pt_error_return(regs, EIO);
- goto out_tsk;
- }
-
- if (request == PTRACE_SYSCALL)
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- else
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
-
- child->exit_code = data;
-#ifdef DEBUG_PTRACE
- printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n",
- child->comm, child->pid, child->exit_code,
- child->thread.kregs->pc,
- child->thread.kregs->npc);
-#endif
- wake_up_process(child);
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
-
-/*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL: {
- if (child->exit_state == EXIT_ZOMBIE) { /* already dead */
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
- wake_up_process(child);
- child->exit_code = SIGKILL;
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
+ case PTRACE_WRITEDATA:
+ ret = ptrace_writedata(child, (void __user *) addr2,
+ addr, data);
+
+ if (ret == data)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+ break;
- case PTRACE_SUNDETACH: { /* detach a process that was attached. */
- int err = ptrace_detach(child, data);
- if (err) {
- pt_error_return(regs, EIO);
- goto out_tsk;
- }
- pt_succ_return(regs, 0);
- goto out_tsk;
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
}
- /* PTRACE_DUMPCORE unsupported... */
-
- default: {
- int err = ptrace_request(child, request, addr, data);
- if (err)
- pt_error_return(regs, -err);
- else
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
- }
-out_tsk:
- if (child)
- put_task_struct(child);
-out:
- unlock_kernel();
+ return ret;
}
asmlinkage void syscall_trace(void)
{
-#ifdef DEBUG_PTRACE
- printk("%s [%d]: syscall_trace\n", current->comm, current->pid);
-#endif
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
- current->thread.flags ^= MAGIC_CONSTANT;
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
@@ -590,10 +461,6 @@ asmlinkage void syscall_trace(void)
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
-#ifdef DEBUG_PTRACE
- printk("%s [%d]: syscall_trace exit= %x\n", current->comm,
- current->pid, current->exit_code);
-#endif
if (current->exit_code) {
send_sig (current->exit_code, current, 1);
current->exit_code = 0;
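
[editor note] The ptrace.c rewrite above replaces the hand-rolled register peek/poke code with the generic user_regset machinery: each regset declares its layout and get/set callbacks, and requests such as PTRACE_GETREGS turn into copy_regset_to_user() calls at the right offsets. The heart of a get callback is just cursor bookkeeping over a position and a remaining count, as in genregs32_get(). A stand-alone toy model of that walk; it counts in register-sized units rather than bytes and elides the PSR/PC/etc. slots, so the names and units are simplifications, not the kernel API:

#include <stdio.h>

/* Positions 0-15 come from the saved pt_regs, 16-31 from the user's
 * register window, and the remaining slots up to 38 are specials. */
static int genregs_get(unsigned int pos, unsigned int count,
		       unsigned int *out,
		       const unsigned int *uregs,      /* 16 entries */
		       const unsigned int *window)     /* 16 entries */
{
	int n = 0;

	for (; count > 0 && pos < 16; count--)
		out[n++] = uregs[pos++];
	for (; count > 0 && pos < 32; count--)
		out[n++] = window[pos++ - 16];
	for (; count > 0 && pos < 38; count--, pos++)
		out[n++] = 0;                           /* PSR/PC/... elided in this mock */
	return n;
}

int main(void)
{
	unsigned int uregs[16], window[16], out[38];
	unsigned int i, n;

	for (i = 0; i < 16; i++) {
		uregs[i] = i;
		window[i] = 100 + i;
	}
	n = genregs_get(14, 6, out, uregs, window);     /* straddles the 16-register boundary */
	for (i = 0; i < n; i++)
		printf("%u ", out[i]);
	printf("\n");
	return 0;
}
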
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 89a6de95070c..0def48158c7d 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -19,12 +19,12 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>
+#include <linux/delay.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq_regs.h>
-#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
@@ -41,8 +41,6 @@
extern ctxd_t *srmmu_ctx_table_phys;
-extern void calibrate_delay(void);
-
static volatile int smp_processors_ready = 0;
static int smp_highest_cpu;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 730eb5796f8e..0b9407267162 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -16,6 +16,8 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/profile.h>
+#include <linux/delay.h>
+
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/irq_regs.h>
@@ -23,7 +25,6 @@
#include <asm/ptrace.h>
#include <asm/atomic.h>
-#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
@@ -39,8 +40,6 @@
extern ctxd_t *srmmu_ctx_table_phys;
-extern void calibrate_delay(void);
-
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern unsigned char boot_cpu_id;
diff --git a/arch/sparc/kernel/systbls.S b/arch/sparc/kernel/systbls.S
index 55722840859c..9064485dc40b 100644
--- a/arch/sparc/kernel/systbls.S
+++ b/arch/sparc/kernel/systbls.S
@@ -79,7 +79,8 @@ sys_call_table:
/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
/*300*/ .long sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
-/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd, sys_eventfd, sys_fallocate
+/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
+/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime
#ifdef CONFIG_SUNOS_EMUL
/* Now the SunOS syscall table. */
@@ -197,6 +198,7 @@ sunos_sys_table:
.long sunos_nosys, sunos_nosys, sunos_nosys
.long sunos_nosys
/*310*/ .long sunos_nosys, sunos_nosys, sunos_nosys
- .long sunos_nosys, sunos_nosys
+ .long sunos_nosys, sunos_nosys, sunos_nosys
+ .long sunos_nosys
#endif
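
[editor note] The syscall-table churn in this section (s390, sh, sparc) comes from a single interface change: the old one-call sys_timerfd entry is retired (NI_SYSCALL / sys_ni_syscall) and the three-call interface sys_timerfd_create, sys_timerfd_settime and sys_timerfd_gettime takes its place, which is what the sparc table wires up here. From user space the resulting API looks roughly like this, assuming a glibc that ships <sys/timerfd.h>:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/timerfd.h>

int main(void)
{
	struct itimerspec its = { {0, 0}, {1, 0} };   /* no interval, fires once after 1s */
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0) {
		perror("timerfd");
		return 1;
	}
	read(fd, &expirations, sizeof(expirations));  /* blocks until the timer fires */
	printf("timer expired %llu time(s)\n", (unsigned long long)expirations);
	close(fd);
	return 0;
}
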
diff --git a/arch/sparc/mm/init.c b/arch/sparc/mm/init.c
index a1bef07755a9..b89837accc88 100644
--- a/arch/sparc/mm/init.c
+++ b/arch/sparc/mm/init.c
@@ -259,7 +259,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
if (initrd_start) {
/* Reserve the initrd image area. */
size = initrd_end - initrd_start;
- reserve_bootmem(initrd_start, size);
+ reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);
*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
@@ -268,7 +268,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
#endif
/* Reserve the kernel text/data/bss. */
size = (start_pfn << PAGE_SHIFT) - phys_base;
- reserve_bootmem(phys_base, size);
+ reserve_bootmem(phys_base, size, BOOTMEM_DEFAULT);
*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
/* Reserve the bootmem map. We do not account for it
@@ -276,7 +276,7 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
* in free_all_bootmem.
*/
size = bootmap_size;
- reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
+ reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
return max_pfn;
diff --git a/arch/sparc64/defconfig b/arch/sparc64/defconfig
index f62d9f6c5e2a..833d74b2b192 100644
--- a/arch/sparc64/defconfig
+++ b/arch/sparc64/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.24-rc4
-# Tue Dec 4 00:37:59 2007
+# Linux kernel version: 2.6.24
+# Tue Feb 5 17:28:19 2008
#
CONFIG_SPARC=y
CONFIG_SPARC64=y
@@ -17,6 +17,7 @@ CONFIG_ARCH_MAY_HAVE_PC_FDC=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_AUDIT_ARCH=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_OF=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
@@ -30,13 +31,15 @@ CONFIG_HZ_100=y
# CONFIG_HZ_300 is not set
# CONFIG_HZ_1000 is not set
CONFIG_HZ=100
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_HOTPLUG_CPU=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
# General setup
#
CONFIG_EXPERIMENTAL=y
-CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION=""
# CONFIG_LOCALVERSION_AUTO is not set
@@ -76,6 +79,7 @@ CONFIG_FUTEX=y
CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_VM_EVENT_COUNTERS=y
@@ -83,6 +87,14 @@ CONFIG_SLUB_DEBUG=y
# CONFIG_SLAB is not set
CONFIG_SLUB=y
# CONFIG_SLOB is not set
+CONFIG_PROFILING=y
+# CONFIG_MARKERS is not set
+CONFIG_OPROFILE=m
+CONFIG_HAVE_OPROFILE=y
+CONFIG_KPROBES=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
@@ -92,6 +104,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_BLK_DEV_BSG=y
@@ -109,6 +122,8 @@ CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PREEMPT_RCU is not set
CONFIG_SYSVIPC_COMPAT=y
CONFIG_GENERIC_HARDIRQS=y
@@ -119,7 +134,8 @@ CONFIG_TICK_ONESHOT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
-# CONFIG_SMP is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=64
# CONFIG_CPU_FREQ is not set
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
@@ -169,9 +185,12 @@ CONFIG_BINFMT_ELF32=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m
CONFIG_SOLARIS_EMUL=y
+CONFIG_SCHED_SMT=y
+CONFIG_SCHED_MC=y
# CONFIG_PREEMPT_NONE is not set
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_PREEMPT is not set
+# CONFIG_RCU_TRACE is not set
# CONFIG_CMDLINE_BOOL is not set
#
@@ -189,6 +208,7 @@ CONFIG_XFRM=y
CONFIG_XFRM_USER=m
# CONFIG_XFRM_SUB_POLICY is not set
CONFIG_XFRM_MIGRATE=y
+# CONFIG_XFRM_STATISTICS is not set
CONFIG_NET_KEY=m
CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
@@ -249,9 +269,9 @@ CONFIG_IP_DCCP_ACKVEC=y
CONFIG_IP_DCCP_CCID2=m
# CONFIG_IP_DCCP_CCID2_DEBUG is not set
CONFIG_IP_DCCP_CCID3=m
-CONFIG_IP_DCCP_TFRC_LIB=m
# CONFIG_IP_DCCP_CCID3_DEBUG is not set
CONFIG_IP_DCCP_CCID3_RTO=100
+CONFIG_IP_DCCP_TFRC_LIB=m
#
# DCCP Kernel Hacking
@@ -279,6 +299,7 @@ CONFIG_VLAN_8021Q=m
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
@@ -343,6 +364,7 @@ CONFIG_BLK_DEV_IDE=y
CONFIG_BLK_DEV_IDEDISK=y
# CONFIG_IDEDISK_MULTI_MODE is not set
CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
# CONFIG_BLK_DEV_IDETAPE is not set
# CONFIG_BLK_DEV_IDEFLOPPY is not set
# CONFIG_BLK_DEV_IDESCSI is not set
@@ -359,7 +381,6 @@ CONFIG_IDE_GENERIC=y
# PCI IDE chipsets support
#
CONFIG_BLK_DEV_IDEPCI=y
-# CONFIG_IDEPCI_SHARE_IRQ is not set
CONFIG_IDEPCI_PCIBUS_ORDER=y
# CONFIG_BLK_DEV_GENERIC is not set
# CONFIG_BLK_DEV_OPTI621 is not set
@@ -389,7 +410,6 @@ CONFIG_BLK_DEV_ALI15X3=y
# CONFIG_BLK_DEV_TRM290 is not set
# CONFIG_BLK_DEV_VIA82CXXX is not set
# CONFIG_BLK_DEV_TC86C001 is not set
-# CONFIG_IDE_ARM is not set
CONFIG_BLK_DEV_IDEDMA=y
CONFIG_IDE_ARCH_OBSOLETE_INIT=y
# CONFIG_BLK_DEV_HD is not set
@@ -501,7 +521,6 @@ CONFIG_NETDEVICES=y
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
# CONFIG_VETH is not set
-# CONFIG_IP1000 is not set
# CONFIG_ARCNET is not set
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
@@ -533,6 +552,7 @@ CONFIG_NET_PCI=y
# CONFIG_NE2K_PCI is not set
# CONFIG_8139CP is not set
# CONFIG_8139TOO is not set
+# CONFIG_R6040 is not set
# CONFIG_SIS900 is not set
# CONFIG_EPIC100 is not set
# CONFIG_SUNDANCE is not set
@@ -545,6 +565,9 @@ CONFIG_E1000=m
CONFIG_E1000_NAPI=y
# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
# CONFIG_E1000E is not set
+# CONFIG_E1000E_ENABLED is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
# CONFIG_MYRI_SBUS is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
@@ -570,6 +593,7 @@ CONFIG_NETDEV_10000=y
CONFIG_NIU=m
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
+# CONFIG_BNX2X is not set
# CONFIG_TR is not set
#
@@ -602,7 +626,6 @@ CONFIG_PPPOE=m
# CONFIG_SLIP is not set
CONFIG_SLHC=m
# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
@@ -679,6 +702,7 @@ CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
# CONFIG_VT_HW_CONSOLE_BINDING is not set
# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
#
# Serial drivers
@@ -747,13 +771,13 @@ CONFIG_I2C_ALGOBIT=y
#
# Miscellaneous I2C Chip support
#
-# CONFIG_SENSORS_DS1337 is not set
-# CONFIG_SENSORS_DS1374 is not set
# CONFIG_DS1682 is not set
# CONFIG_SENSORS_EEPROM is not set
# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
# CONFIG_SENSORS_PCA9539 is not set
# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_TPS65010 is not set
# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
@@ -990,6 +1014,7 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_BT87X is not set
# CONFIG_SND_CA0106 is not set
# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_OXYGEN is not set
# CONFIG_SND_CS4281 is not set
# CONFIG_SND_CS46XX is not set
# CONFIG_SND_DARLA20 is not set
@@ -1014,6 +1039,7 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_HDA_INTEL is not set
# CONFIG_SND_HDSP is not set
# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_HIFIER is not set
# CONFIG_SND_ICE1712 is not set
# CONFIG_SND_ICE1724 is not set
# CONFIG_SND_INTEL8X0 is not set
@@ -1031,6 +1057,7 @@ CONFIG_SND_ALI5451=m
# CONFIG_SND_TRIDENT is not set
# CONFIG_SND_VIA82XX is not set
# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
# CONFIG_SND_VX222 is not set
# CONFIG_SND_YMFPCI is not set
# CONFIG_SND_AC97_POWER_SAVE is not set
@@ -1058,6 +1085,10 @@ CONFIG_SND_SUN_CS4231=m
#
#
+# ALSA SoC audio for Freescale SOCs
+#
+
+#
# Open Sound System
#
# CONFIG_SOUND_PRIME is not set
@@ -1080,6 +1111,7 @@ CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
CONFIG_USB=y
# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
#
# Miscellaneous USB options
@@ -1093,7 +1125,6 @@ CONFIG_USB_DEVICEFS=y
# USB Host Controller Drivers
#
CONFIG_USB_EHCI_HCD=m
-# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
# CONFIG_USB_ISP116X_HCD is not set
@@ -1143,10 +1174,6 @@ CONFIG_USB_STORAGE=m
#
# USB port drivers
#
-
-#
-# USB Serial Converter support
-#
# CONFIG_USB_SERIAL is not set
#
@@ -1172,14 +1199,6 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_TRANCEVIBRATOR is not set
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
-
-#
-# USB DSL modem support
-#
-
-#
-# USB Gadget Support
-#
# CONFIG_USB_GADGET is not set
# CONFIG_MMC is not set
# CONFIG_NEW_LEDS is not set
@@ -1332,11 +1351,6 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_KOI8_U is not set
# CONFIG_NLS_UTF8 is not set
# CONFIG_DLM is not set
-CONFIG_INSTRUMENTATION=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
-# CONFIG_MARKERS is not set
#
# Kernel hacking
@@ -1374,6 +1388,8 @@ CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_FORCED_INLINING=y
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_KPROBES_SANITY_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
# CONFIG_LKDTM is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_SAMPLES is not set
@@ -1396,8 +1412,9 @@ CONFIG_ASYNC_MEMCPY=m
CONFIG_ASYNC_XOR=m
CONFIG_CRYPTO=y
CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD=y
CONFIG_CRYPTO_BLKCIPHER=y
+# CONFIG_CRYPTO_SEQIV is not set
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_HMAC=y
@@ -1416,6 +1433,9 @@ CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_XTS=m
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_CCM is not set
# CONFIG_CRYPTO_CRYPTD is not set
CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_FCRYPT=m
@@ -1431,13 +1451,16 @@ CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_SEED=m
+# CONFIG_CRYPTO_SALSA20 is not set
CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_AUTHENC=m
+# CONFIG_CRYPTO_LZO is not set
CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
#
# Library routines
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index ef50d217432f..4b78b24ef413 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -11,7 +11,7 @@ obj-y := process.o setup.o cpu.o idprom.o \
traps.o auxio.o una_asm.o sysfs.o iommu.o \
irq.o ptrace.o time.o sys_sparc.o signal.o \
unaligned.o central.o pci.o starfire.o semaphore.o \
- power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
+ power.o sbus.o sparc64_ksyms.o chmc.o \
visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/sparc64/kernel/binfmt_elf32.c b/arch/sparc64/kernel/binfmt_elf32.c
index 1587a29a4b0e..d141300e76b7 100644
--- a/arch/sparc64/kernel/binfmt_elf32.c
+++ b/arch/sparc64/kernel/binfmt_elf32.c
@@ -1,7 +1,7 @@
/*
* binfmt_elf32.c: Support 32-bit Sparc ELF binaries on Ultra.
*
- * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1995, 1996, 1997, 1998, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
@@ -9,13 +9,6 @@
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB;
-/* For the most part we present code dumps in the format
- * Solaris does.
- */
-typedef unsigned int elf_greg_t;
-#define ELF_NGREG 38
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
/* Format is:
* G0 --> G7
* O0 --> O7
@@ -23,25 +16,9 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
* I0 --> I7
* PSR, PC, nPC, Y, WIM, TBR
*/
-#include <asm/psrcompat.h>
-#define ELF_CORE_COPY_REGS(__elf_regs, __pt_regs) \
-do { unsigned int *dest = &(__elf_regs[0]); \
- struct pt_regs *src = (__pt_regs); \
- unsigned int __user *sp; \
- int i; \
- for(i = 0; i < 16; i++) \
- dest[i] = (unsigned int) src->u_regs[i];\
- /* Don't try this at home kids... */ \
- sp = (unsigned int __user *) (src->u_regs[14] & \
- 0x00000000fffffffc); \
- for(i = 0; i < 16; i++) \
- __get_user(dest[i+16], &sp[i]); \
- dest[32] = tstate_to_psr(src->tstate); \
- dest[33] = (unsigned int) src->tpc; \
- dest[34] = (unsigned int) src->tnpc; \
- dest[35] = src->y; \
- dest[36] = dest[37] = 0; /* XXX */ \
-} while(0);
+typedef unsigned int elf_greg_t;
+#define ELF_NGREG 38
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct {
union {
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index ea257e828364..6be4d2d2904e 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1477,10 +1477,6 @@ sys32_rt_sigreturn:
add %o7, 1f-.-4, %o7
nop
#endif
-sys_ptrace: add %sp, PTREGS_OFF, %o0
- call do_ptrace
- add %o7, 1f-.-4, %o7
- nop
.align 32
1: ldx [%curptr + TI_FLAGS], %l5
andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index c4147ad8677b..44b105c04dd3 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -632,11 +632,36 @@ tlb_fixup_done:
/* Not reached... */
1:
+ /* If we boot on a non-zero cpu, all of the per-cpu
+ * variable references we make before setting up the
+ * per-cpu areas will use a bogus offset. Put a
+ * compensating factor into __per_cpu_base to handle
+ * this cleanly.
+ *
+ * What the per-cpu code calculates is:
+ *
+ * __per_cpu_base + (cpu << __per_cpu_shift)
+ *
+ * These two variables are zero initially, so to
+ * make it all cancel out to zero we need to put
+ * "0 - (cpu << 0)" into __per_cpu_base so that the
+ * above formula evaluates to zero.
+ *
+ * We cannot even perform a printk() until this stuff
+ * is setup as that calls cpu_clock() which uses
+ * per-cpu variables.
+ */
+ sub %g0, %o0, %o1
+ sethi %hi(__per_cpu_base), %o2
+ stx %o1, [%o2 + %lo(__per_cpu_base)]
#else
mov 0, %o0
#endif
sth %o0, [%g6 + TI_CPU]
+ call prom_init_report
+ nop
+
/* Off we go.... */
call start_kernel
nop
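
The fixup above can be sanity-checked with plain integer arithmetic. The following is a hypothetical userspace model (not kernel code), assuming, as the comment states, that __per_cpu_shift is still zero and __per_cpu_base receives 0 - cpu; the offset computed for the booting cpu then cancels to zero.

    #include <assert.h>

    /* Model of the early-boot state described in the comment above:
     * both variables start out zero, and the fixup stores -boot_cpu
     * into the base while the shift is still zero.
     */
    static unsigned long __per_cpu_base;
    static unsigned long __per_cpu_shift;

    static unsigned long per_cpu_offset(unsigned long cpu)
    {
            return __per_cpu_base + (cpu << __per_cpu_shift);
    }

    int main(void)
    {
            unsigned long boot_cpu = 5;     /* any non-zero boot cpu */

            __per_cpu_base = 0UL - (boot_cpu << 0);

            /* Early per-cpu references made by the boot cpu now
             * resolve to offset 0, as the pre-setup code expects.
             */
            assert(per_cpu_offset(boot_cpu) == 0);
            return 0;
    }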
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index 070a4846c0cb..5623a4d59dff 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -472,94 +472,15 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
spin_unlock_irqrestore(&iommu->lock, flags);
}
-#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
-
-static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
- int nused, int nelems,
- unsigned long iopte_protection)
-{
- struct scatterlist *dma_sg = sg;
- int i;
-
- for (i = 0; i < nused; i++) {
- unsigned long pteval = ~0UL;
- u32 dma_npages;
-
- dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
- dma_sg->dma_length +
- ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
- do {
- unsigned long offset;
- signed int len;
-
- /* If we are here, we know we have at least one
- * more page to map. So walk forward until we
- * hit a page crossing, and begin creating new
- * mappings from that spot.
- */
- for (;;) {
- unsigned long tmp;
-
- tmp = SG_ENT_PHYS_ADDRESS(sg);
- len = sg->length;
- if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = tmp & IO_PAGE_MASK;
- offset = tmp & (IO_PAGE_SIZE - 1UL);
- break;
- }
- if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
- offset = 0UL;
- len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
- break;
- }
- sg = sg_next(sg);
- nelems--;
- }
-
- pteval = iopte_protection | (pteval & IOPTE_PAGE);
- while (len > 0) {
- *iopte++ = __iopte(pteval);
- pteval += IO_PAGE_SIZE;
- len -= (IO_PAGE_SIZE - offset);
- offset = 0;
- dma_npages--;
- }
-
- pteval = (pteval & IOPTE_PAGE) + len;
- sg = sg_next(sg);
- nelems--;
-
- /* Skip over any tail mappings we've fully mapped,
- * adjusting pteval along the way. Stop when we
- * detect a page crossing event.
- */
- while (nelems &&
- (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
- (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
- ((pteval ^
- (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
- pteval += sg->length;
- sg = sg_next(sg);
- nelems--;
- }
- if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
- pteval = ~0UL;
- } while (dma_npages != 0);
- dma_sg = sg_next(dma_sg);
- }
-}
-
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- struct iommu *iommu;
+ unsigned long flags, ctx, i, npages, iopte_protection;
+ struct scatterlist *sg;
struct strbuf *strbuf;
- unsigned long flags, ctx, npages, iopte_protection;
+ struct iommu *iommu;
iopte_t *base;
u32 dma_base;
- struct scatterlist *sgtmp;
- int used;
/* Fast path single entry scatterlists. */
if (nelems == 1) {
@@ -578,11 +499,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
- /* Step 1: Prepare scatter list. */
-
- npages = prepare_sg(sglist, nelems);
-
- /* Step 2: Allocate a cluster and context, if necessary. */
+ npages = calc_npages(sglist, nelems);
spin_lock_irqsave(&iommu->lock, flags);
@@ -599,18 +516,6 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
dma_base = iommu->page_table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT);
- /* Step 3: Normalize DMA addresses. */
- used = nelems;
-
- sgtmp = sglist;
- while (used && sgtmp->dma_length) {
- sgtmp->dma_address += dma_base;
- sgtmp = sg_next(sgtmp);
- used--;
- }
- used = nelems - used;
-
- /* Step 4: Create the mappings. */
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
@@ -618,13 +523,27 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
if (direction != DMA_TO_DEVICE)
iopte_protection |= IOPTE_WRITE;
- fill_sg(base, sglist, used, nelems, iopte_protection);
+ for_each_sg(sglist, sg, nelems, i) {
+ unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
+ unsigned long slen = sg->length;
+ unsigned long this_npages;
-#ifdef VERIFY_SG
- verify_sglist(sglist, nelems, base, npages);
-#endif
+ this_npages = iommu_num_pages(paddr, slen);
- return used;
+ sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
+ sg->dma_length = slen;
+
+ paddr &= IO_PAGE_MASK;
+ while (this_npages--) {
+ iopte_val(*base) = iopte_protection | paddr;
+
+ base++;
+ paddr += IO_PAGE_SIZE;
+ dma_base += IO_PAGE_SIZE;
+ }
+ }
+
+ return nelems;
bad:
iommu_free_ctx(iommu, ctx);
@@ -637,11 +556,10 @@ bad_no_ctx:
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
- struct iommu *iommu;
+ unsigned long flags, ctx, i, npages;
struct strbuf *strbuf;
+ struct iommu *iommu;
iopte_t *base;
- unsigned long flags, ctx, i, npages;
- struct scatterlist *sg, *sgprv;
u32 bus_addr;
if (unlikely(direction == DMA_NONE)) {
@@ -654,15 +572,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
bus_addr = sglist->dma_address & IO_PAGE_MASK;
- sgprv = NULL;
- for_each_sg(sglist, sg, nelems, i) {
- if (sg->dma_length == 0)
- break;
- sgprv = sg;
- }
-
- npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
- bus_addr) >> IO_PAGE_SHIFT;
+ npages = calc_npages(sglist, nelems);
base = iommu->page_table +
((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
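
The rewritten dma_4u_map_sg() drops the old coalescing pass and maps each scatterlist entry page by page. The sketch below is a hypothetical userspace model of the address arithmetic it performs (page-size constants copied from the sparc64 headers, the address values invented): the DMA address keeps the sub-page offset of the physical address, and one IOPTE slot is consumed per IO page spanned, which is exactly what iommu_num_pages() counts.

    #include <stdio.h>

    #define IO_PAGE_SHIFT   13
    #define IO_PAGE_SIZE    (1UL << IO_PAGE_SHIFT)
    #define IO_PAGE_MASK    (~(IO_PAGE_SIZE - 1UL))
    #define IO_PAGE_ALIGN(x) (((x) + IO_PAGE_SIZE - 1UL) & IO_PAGE_MASK)

    /* Same computation as iommu_num_pages() in iommu_common.h. */
    static unsigned long num_io_pages(unsigned long paddr, unsigned long len)
    {
            return (IO_PAGE_ALIGN(paddr + len) - (paddr & IO_PAGE_MASK))
                    >> IO_PAGE_SHIFT;
    }

    int main(void)
    {
            unsigned long dma_base = 0x80000000UL;  /* hypothetical arena base */
            unsigned long paddr    = 0x12345678UL;  /* hypothetical buffer     */
            unsigned long len      = 3 * IO_PAGE_SIZE;

            /* dma_address keeps the sub-page offset of the physical address */
            unsigned long dma_addr = dma_base | (paddr & ~IO_PAGE_MASK);

            printf("dma_addr=%#lx iopte slots=%lu\n",
                   dma_addr, num_io_pages(paddr, len));
            return 0;
    }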
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c
deleted file mode 100644
index efd5dff85f60..000000000000
--- a/arch/sparc64/kernel/iommu_common.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/* $Id: iommu_common.c,v 1.9 2001/12/17 07:05:09 davem Exp $
- * iommu_common.c: UltraSparc SBUS/PCI common iommu code.
- *
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
- */
-
-#include "iommu_common.h"
-
-/* You are _strongly_ advised to enable the following debugging code
- * any time you make changes to the sg code below, run it for a while
- * with filesystems mounted read-only before buying the farm... -DaveM
- */
-
-#ifdef VERIFY_SG
-static int verify_lengths(struct scatterlist *sglist, int nents, int npages)
-{
- int sg_len, dma_len;
- int i, pgcount;
- struct scatterlist *sg;
-
- sg_len = 0;
- for_each_sg(sglist, sg, nents, i)
- sg_len += sg->length;
-
- dma_len = 0;
- for_each_sg(sglist, sg, nents, i) {
- if (!sg->dma_length)
- break;
- dma_len += sg->dma_length;
- }
-
- if (sg_len != dma_len) {
- printk("verify_lengths: Error, different, sg[%d] dma[%d]\n",
- sg_len, dma_len);
- return -1;
- }
-
- pgcount = 0;
- for_each_sg(sglist, sg, nents, i) {
- unsigned long start, end;
-
- if (!sg->dma_length)
- break;
-
- start = sg->dma_address;
- start = start & IO_PAGE_MASK;
-
- end = sg->dma_address + sg->dma_length;
- end = (end + (IO_PAGE_SIZE - 1)) & IO_PAGE_MASK;
-
- pgcount += ((end - start) >> IO_PAGE_SHIFT);
- }
-
- if (pgcount != npages) {
- printk("verify_lengths: Error, page count wrong, "
- "npages[%d] pgcount[%d]\n",
- npages, pgcount);
- return -1;
- }
-
- /* This test passes... */
- return 0;
-}
-
-static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg, int nents, iopte_t **__iopte)
-{
- struct scatterlist *sg = *__sg;
- iopte_t *iopte = *__iopte;
- u32 dlen = dma_sg->dma_length;
- u32 daddr;
- unsigned int sglen;
- unsigned long sgaddr;
-
- daddr = dma_sg->dma_address;
- sglen = sg->length;
- sgaddr = (unsigned long) sg_virt(sg);
- while (dlen > 0) {
- unsigned long paddr;
-
- /* SG and DMA_SG must begin at the same sub-page boundary. */
- if ((sgaddr & ~IO_PAGE_MASK) != (daddr & ~IO_PAGE_MASK)) {
- printk("verify_one_map: Wrong start offset "
- "sg[%08lx] dma[%08x]\n",
- sgaddr, daddr);
- nents = -1;
- goto out;
- }
-
- /* Verify the IOPTE points to the right page. */
- paddr = iopte_val(*iopte) & IOPTE_PAGE;
- if ((paddr + PAGE_OFFSET) != (sgaddr & IO_PAGE_MASK)) {
- printk("verify_one_map: IOPTE[%08lx] maps the "
- "wrong page, should be [%08lx]\n",
- iopte_val(*iopte), (sgaddr & IO_PAGE_MASK) - PAGE_OFFSET);
- nents = -1;
- goto out;
- }
-
- /* If this SG crosses a page, adjust to that next page
- * boundary and loop.
- */
- if ((sgaddr & IO_PAGE_MASK) ^ ((sgaddr + sglen - 1) & IO_PAGE_MASK)) {
- unsigned long next_page, diff;
-
- next_page = (sgaddr + IO_PAGE_SIZE) & IO_PAGE_MASK;
- diff = next_page - sgaddr;
- sgaddr += diff;
- daddr += diff;
- sglen -= diff;
- dlen -= diff;
- if (dlen > 0)
- iopte++;
- continue;
- }
-
- /* SG wholly consumed within this page. */
- daddr += sglen;
- dlen -= sglen;
-
- if (dlen > 0 && ((daddr & ~IO_PAGE_MASK) == 0))
- iopte++;
-
- sg = sg_next(sg);
- if (--nents <= 0)
- break;
- sgaddr = (unsigned long) sg_virt(sg);
- sglen = sg->length;
- }
- if (dlen < 0) {
- /* Transfer overrun, big problems. */
- printk("verify_one_map: Transfer overrun by %d bytes.\n",
- -dlen);
- nents = -1;
- } else {
- /* Advance to next dma_sg implies that the next iopte will
- * begin it.
- */
- iopte++;
- }
-
-out:
- *__sg = sg;
- *__iopte = iopte;
- return nents;
-}
-
-static int verify_maps(struct scatterlist *sg, int nents, iopte_t *iopte)
-{
- struct scatterlist *dma_sg = sg;
- struct scatterlist *orig_dma_sg = dma_sg;
- int orig_nents = nents;
-
- for (;;) {
- nents = verify_one_map(dma_sg, &sg, nents, &iopte);
- if (nents <= 0)
- break;
- dma_sg = sg_next(dma_sg);
- if (dma_sg->dma_length == 0)
- break;
- }
-
- if (nents > 0) {
- printk("verify_maps: dma maps consumed by some sgs remain (%d)\n",
- nents);
- return -1;
- }
-
- if (nents < 0) {
- printk("verify_maps: Error, messed up mappings, "
- "at sg %d dma_sg %d\n",
- (int) (orig_nents + nents), (int) (dma_sg - orig_dma_sg));
- return -1;
- }
-
- /* This test passes... */
- return 0;
-}
-
-void verify_sglist(struct scatterlist *sglist, int nents, iopte_t *iopte, int npages)
-{
- struct scatterlist *sg;
-
- if (verify_lengths(sglist, nents, npages) < 0 ||
- verify_maps(sglist, nents, iopte) < 0) {
- int i;
-
- printk("verify_sglist: Crap, messed up mappings, dumping, iodma at ");
- printk("%016lx.\n", sglist->dma_address & IO_PAGE_MASK);
-
- for_each_sg(sglist, sg, nents, i) {
- printk("sg(%d): page_addr(%p) off(%x) length(%x) "
- "dma_address[%016x] dma_length[%016x]\n",
- i,
- page_address(sg_page(sg)), sg->offset,
- sg->length,
- sg->dma_address, sg->dma_length);
- }
- }
-
- /* Seems to be ok */
-}
-#endif
-
-unsigned long prepare_sg(struct scatterlist *sg, int nents)
-{
- struct scatterlist *dma_sg = sg;
- unsigned long prev;
- u32 dent_addr, dent_len;
-
- prev = (unsigned long) sg_virt(sg);
- prev += (unsigned long) (dent_len = sg->length);
- dent_addr = (u32) ((unsigned long)(sg_virt(sg)) & (IO_PAGE_SIZE - 1UL));
- while (--nents) {
- unsigned long addr;
-
- sg = sg_next(sg);
- addr = (unsigned long) sg_virt(sg);
- if (! VCONTIG(prev, addr)) {
- dma_sg->dma_address = dent_addr;
- dma_sg->dma_length = dent_len;
- dma_sg = sg_next(dma_sg);
-
- dent_addr = ((dent_addr +
- dent_len +
- (IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT);
- dent_addr <<= IO_PAGE_SHIFT;
- dent_addr += addr & (IO_PAGE_SIZE - 1UL);
- dent_len = 0;
- }
- dent_len += sg->length;
- prev = addr + sg->length;
- }
- dma_sg->dma_address = dent_addr;
- dma_sg->dma_length = dent_len;
-
- if (dma_sg != sg) {
- dma_sg = sg_next(dma_sg);
- dma_sg->dma_length = 0;
- }
-
- return ((unsigned long) dent_addr +
- (unsigned long) dent_len +
- (IO_PAGE_SIZE - 1UL)) >> IO_PAGE_SHIFT;
-}
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
index 75b5a5814522..4b5cafa2877a 100644
--- a/arch/sparc64/kernel/iommu_common.h
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
+#include <linux/device.h>
#include <asm/iommu.h>
#include <asm/scatterlist.h>
@@ -29,6 +30,32 @@
*/
#define IOMMU_PAGE_SHIFT 13
+#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
+
+static inline unsigned long iommu_num_pages(unsigned long vaddr,
+ unsigned long slen)
+{
+ unsigned long npages;
+
+ npages = IO_PAGE_ALIGN(vaddr + slen) - (vaddr & IO_PAGE_MASK);
+ npages >>= IO_PAGE_SHIFT;
+
+ return npages;
+}
+
+static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
+{
+ unsigned long i, npages = 0;
+ struct scatterlist *sg;
+
+ for_each_sg(sglist, sg, nelems, i) {
+ unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
+ npages += iommu_num_pages(paddr, sg->length);
+ }
+
+ return npages;
+}
+
/* You are _strongly_ advised to enable the following debugging code
* any time you make changes to the sg code below, run it for a while
* with filesystems mounted read-only before buying the farm... -DaveM
@@ -46,4 +73,4 @@ extern void verify_sglist(struct scatterlist *sg, int nents, iopte_t *iopte, int
#define VCONTIG(__X, __Y) (((__X) == (__Y)) || \
(((__X) | (__Y)) << (64UL - PAGE_SHIFT)) == 0UL)
-extern unsigned long prepare_sg(struct scatterlist *sg, int nents);
+extern unsigned long prepare_sg(struct device *dev, struct scatterlist *sg, int nents);
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 1aa8e044b105..61baf8dc095e 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -365,113 +365,14 @@ static void dma_4v_unmap_single(struct device *dev, dma_addr_t bus_addr,
spin_unlock_irqrestore(&iommu->lock, flags);
}
-#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
-
-static long fill_sg(long entry, struct device *dev,
- struct scatterlist *sg,
- int nused, int nelems, unsigned long prot)
-{
- struct scatterlist *dma_sg = sg;
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
-
- iommu_batch_start(dev, prot, entry);
-
- for (i = 0; i < nused; i++) {
- unsigned long pteval = ~0UL;
- u32 dma_npages;
-
- dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
- dma_sg->dma_length +
- ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
- do {
- unsigned long offset;
- signed int len;
-
- /* If we are here, we know we have at least one
- * more page to map. So walk forward until we
- * hit a page crossing, and begin creating new
- * mappings from that spot.
- */
- for (;;) {
- unsigned long tmp;
-
- tmp = SG_ENT_PHYS_ADDRESS(sg);
- len = sg->length;
- if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = tmp & IO_PAGE_MASK;
- offset = tmp & (IO_PAGE_SIZE - 1UL);
- break;
- }
- if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
- pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
- offset = 0UL;
- len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
- break;
- }
- sg = sg_next(sg);
- nelems--;
- }
-
- pteval = (pteval & IOPTE_PAGE);
- while (len > 0) {
- long err;
-
- err = iommu_batch_add(pteval);
- if (unlikely(err < 0L))
- goto iommu_map_failed;
-
- pteval += IO_PAGE_SIZE;
- len -= (IO_PAGE_SIZE - offset);
- offset = 0;
- dma_npages--;
- }
-
- pteval = (pteval & IOPTE_PAGE) + len;
- sg = sg_next(sg);
- nelems--;
-
- /* Skip over any tail mappings we've fully mapped,
- * adjusting pteval along the way. Stop when we
- * detect a page crossing event.
- */
- while (nelems &&
- (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
- (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
- ((pteval ^
- (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
- pteval += sg->length;
- sg = sg_next(sg);
- nelems--;
- }
- if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
- pteval = ~0UL;
- } while (dma_npages != 0);
- dma_sg = sg_next(dma_sg);
- }
-
- if (unlikely(iommu_batch_end() < 0L))
- goto iommu_map_failed;
-
- local_irq_restore(flags);
- return 0;
-
-iommu_map_failed:
- local_irq_restore(flags);
- return -1L;
-}
-
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
+ unsigned long flags, npages, i, prot;
+ struct scatterlist *sg;
struct iommu *iommu;
- unsigned long flags, npages, prot;
- u32 dma_base;
- struct scatterlist *sgtmp;
long entry, err;
- int used;
+ u32 dma_base;
/* Fast path single entry scatterlists. */
if (nelems == 1) {
@@ -489,10 +390,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
if (unlikely(direction == DMA_NONE))
goto bad;
- /* Step 1: Prepare scatter list. */
- npages = prepare_sg(sglist, nelems);
+ npages = calc_npages(sglist, nelems);
- /* Step 2: Allocate a cluster and context, if necessary. */
spin_lock_irqsave(&iommu->lock, flags);
entry = arena_alloc(&iommu->arena, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -503,27 +402,45 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
dma_base = iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT);
- /* Step 3: Normalize DMA addresses. */
- used = nelems;
-
- sgtmp = sglist;
- while (used && sgtmp->dma_length) {
- sgtmp->dma_address += dma_base;
- sgtmp = sg_next(sgtmp);
- used--;
- }
- used = nelems - used;
-
- /* Step 4: Create the mappings. */
prot = HV_PCI_MAP_ATTR_READ;
if (direction != DMA_TO_DEVICE)
prot |= HV_PCI_MAP_ATTR_WRITE;
- err = fill_sg(entry, dev, sglist, used, nelems, prot);
+ local_irq_save(flags);
+
+ iommu_batch_start(dev, prot, entry);
+
+ for_each_sg(sglist, sg, nelems, i) {
+ unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
+ unsigned long slen = sg->length;
+ unsigned long this_npages;
+
+ this_npages = iommu_num_pages(paddr, slen);
+
+ sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
+ sg->dma_length = slen;
+
+ paddr &= IO_PAGE_MASK;
+ while (this_npages--) {
+ err = iommu_batch_add(paddr);
+ if (unlikely(err < 0L)) {
+ local_irq_restore(flags);
+ goto iommu_map_failed;
+ }
+
+ paddr += IO_PAGE_SIZE;
+ dma_base += IO_PAGE_SIZE;
+ }
+ }
+
+ err = iommu_batch_end();
+
+ local_irq_restore(flags);
+
if (unlikely(err < 0L))
goto iommu_map_failed;
- return used;
+ return nelems;
bad:
if (printk_ratelimit())
@@ -541,12 +458,11 @@ iommu_map_failed:
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
+ unsigned long flags, npages;
struct pci_pbm_info *pbm;
+ u32 devhandle, bus_addr;
struct iommu *iommu;
- unsigned long flags, i, npages;
- struct scatterlist *sg, *sgprv;
long entry;
- u32 devhandle, bus_addr;
if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
@@ -558,16 +474,8 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
devhandle = pbm->devhandle;
bus_addr = sglist->dma_address & IO_PAGE_MASK;
- sgprv = NULL;
- for_each_sg(sglist, sg, nelems, i) {
- if (sg->dma_length == 0)
- break;
-
- sgprv = sg;
- }
- npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
- bus_addr) >> IO_PAGE_SHIFT;
+ npages = calc_npages(sglist, nelems);
entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
@@ -625,8 +533,8 @@ static void __init pci_sun4v_scan_bus(struct pci_pbm_info *pbm)
/* XXX register error interrupt handlers XXX */
}
-static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
- struct iommu *iommu)
+static unsigned long __init probe_existing_entries(struct pci_pbm_info *pbm,
+ struct iommu *iommu)
{
struct iommu_arena *arena = &iommu->arena;
unsigned long i, cnt = 0;
@@ -653,7 +561,7 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
return cnt;
}
-static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
+static void __init pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
struct iommu *iommu = pbm->iommu;
struct property *prop;
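
The sun4v variant keeps the same page-by-page walk but funnels each entry through iommu_batch_add(), so many IOPTEs are handed to the hypervisor at once instead of one call per page. The fragment below is a simplified, hypothetical model of that batching idea, not the kernel's iommu_batch_* implementation; the names and batch size are invented for illustration.

    #define BATCH_NPAGES 64

    struct pte_batch {
            unsigned long pte[BATCH_NPAGES];
            unsigned long npages;
    };

    /* Flush the queued entries in one (expensive) call. */
    static long batch_flush(struct pte_batch *b)
    {
            /* one hypervisor call covering b->npages entries would go here */
            b->npages = 0;
            return 0;
    }

    /* Queue one entry; flush automatically when the buffer fills up. */
    static long batch_add(struct pte_batch *b, unsigned long pte)
    {
            b->pte[b->npages++] = pte;
            if (b->npages == BATCH_NPAGES)
                    return batch_flush(b);
            return 0;
    }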
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 81111a12f0a8..51f012410f9d 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -1,6 +1,6 @@
/* ptrace.c: Sparc process tracing support.
*
- * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
+ * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*
* Based upon code written by Ross Biro, Linus Torvalds, Bob Manson,
@@ -22,6 +22,9 @@
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <linux/signal.h>
+#include <linux/regset.h>
+#include <linux/compat.h>
+#include <linux/elf.h>
#include <asm/asi.h>
#include <asm/pgtable.h>
@@ -33,70 +36,7 @@
#include <asm/page.h>
#include <asm/cpudata.h>
-/* Returning from ptrace is a bit tricky because the syscall return
- * low level code assumes any value returned which is negative and
- * is a valid errno will mean setting the condition codes to indicate
- * an error return. This doesn't work, so we have this hook.
- */
-static inline void pt_error_return(struct pt_regs *regs, unsigned long error)
-{
- regs->u_regs[UREG_I0] = error;
- regs->tstate |= (TSTATE_ICARRY | TSTATE_XCARRY);
- regs->tpc = regs->tnpc;
- regs->tnpc += 4;
-}
-
-static inline void pt_succ_return(struct pt_regs *regs, unsigned long value)
-{
- regs->u_regs[UREG_I0] = value;
- regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
- regs->tpc = regs->tnpc;
- regs->tnpc += 4;
-}
-
-static inline void
-pt_succ_return_linux(struct pt_regs *regs, unsigned long value, void __user *addr)
-{
- if (test_thread_flag(TIF_32BIT)) {
- if (put_user(value, (unsigned int __user *) addr)) {
- pt_error_return(regs, EFAULT);
- return;
- }
- } else {
- if (put_user(value, (long __user *) addr)) {
- pt_error_return(regs, EFAULT);
- return;
- }
- }
- regs->u_regs[UREG_I0] = 0;
- regs->tstate &= ~(TSTATE_ICARRY | TSTATE_XCARRY);
- regs->tpc = regs->tnpc;
- regs->tnpc += 4;
-}
-
-static void
-pt_os_succ_return (struct pt_regs *regs, unsigned long val, void __user *addr)
-{
- if (current->personality == PER_SUNOS)
- pt_succ_return (regs, val);
- else
- pt_succ_return_linux (regs, val, addr);
-}
-
/* #define ALLOW_INIT_TRACING */
-/* #define DEBUG_PTRACE */
-
-#ifdef DEBUG_PTRACE
-char *pt_rq [] = {
- /* 0 */ "TRACEME", "PEEKTEXT", "PEEKDATA", "PEEKUSR",
- /* 4 */ "POKETEXT", "POKEDATA", "POKEUSR", "CONT",
- /* 8 */ "KILL", "SINGLESTEP", "SUNATTACH", "SUNDETACH",
- /* 12 */ "GETREGS", "SETREGS", "GETFPREGS", "SETFPREGS",
- /* 16 */ "READDATA", "WRITEDATA", "READTEXT", "WRITETEXT",
- /* 20 */ "GETFPAREGS", "SETFPAREGS", "unknown", "unknown",
- /* 24 */ "SYSCALL", ""
-};
-#endif
/*
* Called by kernel/ptrace.c when detaching..
@@ -167,267 +107,709 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
}
}
-asmlinkage void do_ptrace(struct pt_regs *regs)
+enum sparc_regset {
+ REGSET_GENERAL,
+ REGSET_FP,
+};
+
+static int genregs64_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
{
- int request = regs->u_regs[UREG_I0];
- pid_t pid = regs->u_regs[UREG_I1];
- unsigned long addr = regs->u_regs[UREG_I2];
- unsigned long data = regs->u_regs[UREG_I3];
- unsigned long addr2 = regs->u_regs[UREG_I4];
- struct task_struct *child;
+ const struct pt_regs *regs = task_pt_regs(target);
int ret;
- if (test_thread_flag(TIF_32BIT)) {
- addr &= 0xffffffffUL;
- data &= 0xffffffffUL;
- addr2 &= 0xffffffffUL;
+ if (target == current)
+ flushw_user();
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs->u_regs,
+ 0, 16 * sizeof(u64));
+ if (!ret) {
+ unsigned long __user *reg_window = (unsigned long __user *)
+ (regs->u_regs[UREG_I6] + STACK_BIAS);
+ unsigned long window[16];
+
+ if (copy_from_user(window, reg_window, sizeof(window)))
+ return -EFAULT;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ window,
+ 16 * sizeof(u64),
+ 32 * sizeof(u64));
}
- lock_kernel();
-#ifdef DEBUG_PTRACE
- {
- char *s;
- if ((request >= 0) && (request <= 24))
- s = pt_rq [request];
- else
- s = "unknown";
+ if (!ret) {
+ /* TSTATE, TPC, TNPC */
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &regs->tstate,
+ 32 * sizeof(u64),
+ 35 * sizeof(u64));
+ }
+
+ if (!ret) {
+ unsigned long y = regs->y;
- if (request == PTRACE_POKEDATA && data == 0x91d02001){
- printk ("do_ptrace: breakpoint pid=%d, addr=%016lx addr2=%016lx\n",
- pid, addr, addr2);
- } else
- printk("do_ptrace: rq=%s(%d) pid=%d addr=%016lx data=%016lx addr2=%016lx\n",
- s, request, pid, addr, data, addr2);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &y,
+ 35 * sizeof(u64),
+ 36 * sizeof(u64));
}
-#endif
- if (request == PTRACE_TRACEME) {
- ret = ptrace_traceme();
- if (ret < 0)
- pt_error_return(regs, -ret);
+
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ 36 * sizeof(u64), -1);
+
+ return ret;
+}
+
+static int genregs64_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ if (target == current)
+ flushw_user();
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs->u_regs,
+ 0, 16 * sizeof(u64));
+ if (!ret && count > 0) {
+ unsigned long __user *reg_window = (unsigned long __user *)
+ (regs->u_regs[UREG_I6] + STACK_BIAS);
+ unsigned long window[16];
+
+ if (copy_from_user(window, reg_window, sizeof(window)))
+ return -EFAULT;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ window,
+ 16 * sizeof(u64),
+ 32 * sizeof(u64));
+ if (!ret &&
+ copy_to_user(reg_window, window, sizeof(window)))
+ return -EFAULT;
+ }
+
+ if (!ret && count > 0) {
+ unsigned long tstate;
+
+ /* TSTATE */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &tstate,
+ 32 * sizeof(u64),
+ 33 * sizeof(u64));
+ if (!ret) {
+ /* Only the condition codes can be modified
+ * in the %tstate register.
+ */
+ tstate &= (TSTATE_ICC | TSTATE_XCC);
+ regs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
+ regs->tstate |= tstate;
+ }
+ }
+
+ if (!ret) {
+ /* TPC, TNPC */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->tpc,
+ 33 * sizeof(u64),
+ 35 * sizeof(u64));
+ }
+
+ if (!ret) {
+ unsigned long y;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &y,
+ 35 * sizeof(u64),
+ 36 * sizeof(u64));
+ if (!ret)
+ regs->y = y;
+ }
+
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 36 * sizeof(u64), -1);
+
+ return ret;
+}
+
+static int fpregs64_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const unsigned long *fpregs = task_thread_info(target)->fpregs;
+ unsigned long fprs, fsr, gsr;
+ int ret;
+
+ if (target == current)
+ save_and_clear_fpu();
+
+ fprs = task_thread_info(target)->fpsaved[0];
+
+ if (fprs & FPRS_DL)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ fpregs,
+ 0, 16 * sizeof(u64));
+ else
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ 0,
+ 16 * sizeof(u64));
+
+ if (!ret) {
+ if (fprs & FPRS_DU)
+ ret = user_regset_copyout(&pos, &count,
+ &kbuf, &ubuf,
+ fpregs + 16,
+ 16 * sizeof(u64),
+ 32 * sizeof(u64));
else
- pt_succ_return(regs, 0);
- goto out;
+ ret = user_regset_copyout_zero(&pos, &count,
+ &kbuf, &ubuf,
+ 16 * sizeof(u64),
+ 32 * sizeof(u64));
}
- child = ptrace_get_task_struct(pid);
- if (IS_ERR(child)) {
- ret = PTR_ERR(child);
- pt_error_return(regs, -ret);
- goto out;
+ if (fprs & FPRS_FEF) {
+ fsr = task_thread_info(target)->xfsr[0];
+ gsr = task_thread_info(target)->gsr[0];
+ } else {
+ fsr = gsr = 0;
}
- if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
- || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
- if (ptrace_attach(child)) {
- pt_error_return(regs, EPERM);
- goto out_tsk;
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &fsr,
+ 32 * sizeof(u64),
+ 33 * sizeof(u64));
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &gsr,
+ 33 * sizeof(u64),
+ 34 * sizeof(u64));
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &fprs,
+ 34 * sizeof(u64),
+ 35 * sizeof(u64));
+
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ 35 * sizeof(u64), -1);
+
+ return ret;
+}
+
+static int fpregs64_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned long *fpregs = task_thread_info(target)->fpregs;
+ unsigned long fprs;
+ int ret;
+
+ if (target == current)
+ save_and_clear_fpu();
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ fpregs,
+ 0, 32 * sizeof(u64));
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ task_thread_info(target)->xfsr,
+ 32 * sizeof(u64),
+ 33 * sizeof(u64));
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ task_thread_info(target)->gsr,
+ 33 * sizeof(u64),
+ 34 * sizeof(u64));
+
+ fprs = task_thread_info(target)->fpsaved[0];
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &fprs,
+ 34 * sizeof(u64),
+ 35 * sizeof(u64));
+ }
+
+ fprs |= (FPRS_FEF | FPRS_DL | FPRS_DU);
+ task_thread_info(target)->fpsaved[0] = fprs;
+
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 35 * sizeof(u64), -1);
+ return ret;
+}
+
+static const struct user_regset sparc64_regsets[] = {
+ /* Format is:
+ * G0 --> G7
+ * O0 --> O7
+ * L0 --> L7
+ * I0 --> I7
+ * TSTATE, TPC, TNPC, Y
+ */
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = 36 * sizeof(u64),
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = genregs64_get, .set = genregs64_set
+ },
+ /* Format is:
+ * F0 --> F63
+ * FSR
+ * GSR
+ * FPRS
+ */
+ [REGSET_FP] = {
+ .core_note_type = NT_PRFPREG,
+ .n = 35 * sizeof(u64),
+ .size = sizeof(u64), .align = sizeof(u64),
+ .get = fpregs64_get, .set = fpregs64_set
+ },
+};
+
+static const struct user_regset_view user_sparc64_view = {
+ .name = "sparc64", .e_machine = EM_SPARCV9,
+ .regsets = sparc64_regsets, .n = ARRAY_SIZE(sparc64_regsets)
+};
+
+static int genregs32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ compat_ulong_t __user *reg_window;
+ compat_ulong_t *k = kbuf;
+ compat_ulong_t __user *u = ubuf;
+ compat_ulong_t reg;
+
+ if (target == current)
+ flushw_user();
+
+ pos /= sizeof(reg);
+ count /= sizeof(reg);
+
+ if (kbuf) {
+ for (; count > 0 && pos < 16; count--)
+ *k++ = regs->u_regs[pos++];
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(*k++, &reg_window[pos++]))
+ return -EFAULT;
+ }
+ } else {
+ for (; count > 0 && pos < 16; count--) {
+ if (put_user((compat_ulong_t) regs->u_regs[pos++], u++))
+ return -EFAULT;
+ }
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, &reg_window[pos++]) ||
+ put_user(reg, u++))
+ return -EFAULT;
}
- pt_succ_return(regs, 0);
- goto out_tsk;
}
+ while (count > 0) {
+ switch (pos) {
+ case 32: /* PSR */
+ reg = tstate_to_psr(regs->tstate);
+ break;
+ case 33: /* PC */
+ reg = regs->tpc;
+ break;
+ case 34: /* NPC */
+ reg = regs->tnpc;
+ break;
+ case 35: /* Y */
+ reg = regs->y;
+ break;
+ case 36: /* WIM */
+ case 37: /* TBR */
+ reg = 0;
+ break;
+ default:
+ goto finish;
+ }
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
- if (ret < 0) {
- pt_error_return(regs, -ret);
- goto out_tsk;
+ if (kbuf)
+ *k++ = reg;
+ else if (put_user(reg, u++))
+ return -EFAULT;
+ pos++;
+ count--;
}
+finish:
+ pos *= sizeof(reg);
+ count *= sizeof(reg);
+
+ return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ 38 * sizeof(reg), -1);
+}
- if (!(test_thread_flag(TIF_32BIT)) &&
- ((request == PTRACE_READDATA64) ||
- (request == PTRACE_WRITEDATA64) ||
- (request == PTRACE_READTEXT64) ||
- (request == PTRACE_WRITETEXT64) ||
- (request == PTRACE_PEEKTEXT64) ||
- (request == PTRACE_POKETEXT64) ||
- (request == PTRACE_PEEKDATA64) ||
- (request == PTRACE_POKEDATA64))) {
- addr = regs->u_regs[UREG_G2];
- addr2 = regs->u_regs[UREG_G3];
- request -= 30; /* wheee... */
+static int genregs32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ compat_ulong_t __user *reg_window;
+ const compat_ulong_t *k = kbuf;
+ const compat_ulong_t __user *u = ubuf;
+ compat_ulong_t reg;
+
+ if (target == current)
+ flushw_user();
+
+ pos /= sizeof(reg);
+ count /= sizeof(reg);
+
+ if (kbuf) {
+ for (; count > 0 && pos < 16; count--)
+ regs->u_regs[pos++] = *k++;
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ for (; count > 0 && pos < 32; count--) {
+ if (put_user(*k++, &reg_window[pos++]))
+ return -EFAULT;
+ }
+ } else {
+ for (; count > 0 && pos < 16; count--) {
+ if (get_user(reg, u++))
+ return -EFAULT;
+ regs->u_regs[pos++] = reg;
+ }
+
+ reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ for (; count > 0 && pos < 32; count--) {
+ if (get_user(reg, u++) ||
+ put_user(reg, &reg_window[pos++]))
+ return -EFAULT;
+ }
}
+ while (count > 0) {
+ unsigned long tstate;
+
+ if (kbuf)
+ reg = *k++;
+ else if (get_user(reg, u++))
+ return -EFAULT;
+
+ switch (pos) {
+ case 32: /* PSR */
+ tstate = regs->tstate;
+ tstate &= ~(TSTATE_ICC | TSTATE_XCC);
+ tstate |= psr_to_tstate_icc(reg);
+ regs->tstate = tstate;
+ break;
+ case 33: /* PC */
+ regs->tpc = reg;
+ break;
+ case 34: /* NPC */
+ regs->tnpc = reg;
+ break;
+ case 35: /* Y */
+ regs->y = reg;
+ break;
+ case 36: /* WIM */
+ case 37: /* TBR */
+ break;
+ default:
+ goto finish;
+ }
+
+ pos++;
+ count--;
+ }
+finish:
+ pos *= sizeof(reg);
+ count *= sizeof(reg);
+
+ return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 38 * sizeof(reg), -1);
+}
+
+static int fpregs32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const unsigned long *fpregs = task_thread_info(target)->fpregs;
+ compat_ulong_t enabled;
+ unsigned long fprs;
+ compat_ulong_t fsr;
+ int ret = 0;
+
+ if (target == current)
+ save_and_clear_fpu();
+
+ fprs = task_thread_info(target)->fpsaved[0];
+ if (fprs & FPRS_FEF) {
+ fsr = task_thread_info(target)->xfsr[0];
+ enabled = 1;
+ } else {
+ fsr = 0;
+ enabled = 0;
+ }
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ fpregs,
+ 0, 32 * sizeof(u32));
+
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ 32 * sizeof(u32),
+ 33 * sizeof(u32));
+ if (!ret)
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &fsr,
+ 33 * sizeof(u32),
+ 34 * sizeof(u32));
+
+ if (!ret) {
+ compat_ulong_t val;
+
+ val = (enabled << 8) | (8 << 16);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &val,
+ 34 * sizeof(u32),
+ 35 * sizeof(u32));
+ }
+
+ if (!ret)
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ 35 * sizeof(u32), -1);
+
+ return ret;
+}
+
+static int fpregs32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned long *fpregs = task_thread_info(target)->fpregs;
+ unsigned long fprs;
+ int ret;
+
+ if (target == current)
+ save_and_clear_fpu();
+
+ fprs = task_thread_info(target)->fpsaved[0];
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ fpregs,
+ 0, 32 * sizeof(u32));
+ if (!ret)
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 32 * sizeof(u32),
+ 33 * sizeof(u32));
+ if (!ret && count > 0) {
+ compat_ulong_t fsr;
+ unsigned long val;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &fsr,
+ 33 * sizeof(u32),
+ 34 * sizeof(u32));
+ if (!ret) {
+ val = task_thread_info(target)->xfsr[0];
+ val &= 0xffffffff00000000UL;
+ val |= fsr;
+ task_thread_info(target)->xfsr[0] = val;
+ }
+ }
+
+ fprs |= (FPRS_FEF | FPRS_DL);
+ task_thread_info(target)->fpsaved[0] = fprs;
+
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ 34 * sizeof(u32), -1);
+ return ret;
+}
+
+static const struct user_regset sparc32_regsets[] = {
+ /* Format is:
+ * G0 --> G7
+ * O0 --> O7
+ * L0 --> L7
+ * I0 --> I7
+ * PSR, PC, nPC, Y, WIM, TBR
+ */
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = 38 * sizeof(u32),
+ .size = sizeof(u32), .align = sizeof(u32),
+ .get = genregs32_get, .set = genregs32_set
+ },
+ /* Format is:
+ * F0 --> F31
+ * empty 32-bit word
+	 * FSR (32-bit word)

+ * FPU QUEUE COUNT (8-bit char)
+ * FPU QUEUE ENTRYSIZE (8-bit char)
+ * FPU ENABLED (8-bit char)
+ * empty 8-bit char
+ * FPU QUEUE (64 32-bit ints)
+ */
+ [REGSET_FP] = {
+ .core_note_type = NT_PRFPREG,
+ .n = 99 * sizeof(u32),
+ .size = sizeof(u32), .align = sizeof(u32),
+ .get = fpregs32_get, .set = fpregs32_set
+ },
+};
+
+static const struct user_regset_view user_sparc32_view = {
+ .name = "sparc", .e_machine = EM_SPARC,
+ .regsets = sparc32_regsets, .n = ARRAY_SIZE(sparc32_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ if (test_tsk_thread_flag(task, TIF_32BIT))
+ return &user_sparc32_view;
+ return &user_sparc64_view;
+}
+
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
+{
+ long addr2 = task_pt_regs(current)->u_regs[UREG_I4];
+ const struct user_regset_view *view;
+ int ret;
+
+ if (test_thread_flag(TIF_32BIT))
+ addr2 &= 0xffffffffUL;
+
+ view = task_user_regset_view(child);
switch(request) {
case PTRACE_PEEKUSR:
- if (addr != 0)
- pt_error_return(regs, EIO);
- else
- pt_succ_return(regs, 0);
- goto out_tsk;
+ ret = (addr != 0) ? -EIO : 0;
+ break;
case PTRACE_PEEKTEXT: /* read word at location addr. */
case PTRACE_PEEKDATA: {
unsigned long tmp64;
unsigned int tmp32;
- int res, copied;
+ int copied;
- res = -EIO;
+ ret = -EIO;
if (test_thread_flag(TIF_32BIT)) {
copied = access_process_vm(child, addr,
&tmp32, sizeof(tmp32), 0);
- tmp64 = (unsigned long) tmp32;
if (copied == sizeof(tmp32))
- res = 0;
+ ret = put_user(tmp32,
+ (unsigned int __user *) data);
} else {
copied = access_process_vm(child, addr,
&tmp64, sizeof(tmp64), 0);
if (copied == sizeof(tmp64))
- res = 0;
+ ret = put_user(tmp64,
+ (unsigned long __user *) data);
}
- if (res < 0)
- pt_error_return(regs, -res);
- else
- pt_os_succ_return(regs, tmp64, (void __user *) data);
- goto out_tsk;
+ break;
}
case PTRACE_POKETEXT: /* write the word at location addr. */
case PTRACE_POKEDATA: {
unsigned long tmp64;
unsigned int tmp32;
- int copied, res = -EIO;
+ int copied;
+ ret = -EIO;
if (test_thread_flag(TIF_32BIT)) {
tmp32 = data;
copied = access_process_vm(child, addr,
&tmp32, sizeof(tmp32), 1);
if (copied == sizeof(tmp32))
- res = 0;
+ ret = 0;
} else {
tmp64 = data;
copied = access_process_vm(child, addr,
&tmp64, sizeof(tmp64), 1);
if (copied == sizeof(tmp64))
- res = 0;
+ ret = 0;
}
- if (res < 0)
- pt_error_return(regs, -res);
- else
- pt_succ_return(regs, res);
- goto out_tsk;
+ break;
}
case PTRACE_GETREGS: {
struct pt_regs32 __user *pregs =
(struct pt_regs32 __user *) addr;
- struct pt_regs *cregs = task_pt_regs(child);
- int rval;
-
- if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
- __put_user(cregs->tpc, (&pregs->pc)) ||
- __put_user(cregs->tnpc, (&pregs->npc)) ||
- __put_user(cregs->y, (&pregs->y))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
- }
- for (rval = 1; rval < 16; rval++)
- if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
- }
- pt_succ_return(regs, 0);
-#ifdef DEBUG_PTRACE
- printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
-#endif
- goto out_tsk;
+
+ ret = copy_regset_to_user(child, view, REGSET_GENERAL,
+ 32 * sizeof(u32),
+ 4 * sizeof(u32),
+ &pregs->psr);
+ if (!ret)
+ ret = copy_regset_to_user(child, view, REGSET_GENERAL,
+ 1 * sizeof(u32),
+ 15 * sizeof(u32),
+ &pregs->u_regs[0]);
+ break;
}
case PTRACE_GETREGS64: {
struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
- struct pt_regs *cregs = task_pt_regs(child);
- unsigned long tpc = cregs->tpc;
- int rval;
-
- if ((task_thread_info(child)->flags & _TIF_32BIT) != 0)
- tpc &= 0xffffffff;
- if (__put_user(cregs->tstate, (&pregs->tstate)) ||
- __put_user(tpc, (&pregs->tpc)) ||
- __put_user(cregs->tnpc, (&pregs->tnpc)) ||
- __put_user(cregs->y, (&pregs->y))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
+
+ ret = copy_regset_to_user(child, view, REGSET_GENERAL,
+ 1 * sizeof(u64),
+ 15 * sizeof(u64),
+ &pregs->u_regs[0]);
+ if (!ret) {
+ /* XXX doesn't handle 'y' register correctly XXX */
+ ret = copy_regset_to_user(child, view, REGSET_GENERAL,
+ 32 * sizeof(u64),
+ 4 * sizeof(u64),
+ &pregs->tstate);
}
- for (rval = 1; rval < 16; rval++)
- if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
- }
- pt_succ_return(regs, 0);
-#ifdef DEBUG_PTRACE
- printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
-#endif
- goto out_tsk;
+ break;
}
case PTRACE_SETREGS: {
struct pt_regs32 __user *pregs =
(struct pt_regs32 __user *) addr;
- struct pt_regs *cregs = task_pt_regs(child);
- unsigned int psr, pc, npc, y;
- int i;
-
- /* Must be careful, tracing process can only set certain
- * bits in the psr.
- */
- if (__get_user(psr, (&pregs->psr)) ||
- __get_user(pc, (&pregs->pc)) ||
- __get_user(npc, (&pregs->npc)) ||
- __get_user(y, (&pregs->y))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
- }
- cregs->tstate &= ~(TSTATE_ICC);
- cregs->tstate |= psr_to_tstate_icc(psr);
- if (!((pc | npc) & 3)) {
- cregs->tpc = pc;
- cregs->tnpc = npc;
- }
- cregs->y = y;
- for (i = 1; i < 16; i++) {
- if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
- }
- }
- pt_succ_return(regs, 0);
- goto out_tsk;
+
+ ret = copy_regset_from_user(child, view, REGSET_GENERAL,
+ 32 * sizeof(u32),
+ 4 * sizeof(u32),
+ &pregs->psr);
+ if (!ret)
+ ret = copy_regset_from_user(child, view, REGSET_GENERAL,
+ 1 * sizeof(u32),
+ 15 * sizeof(u32),
+ &pregs->u_regs[0]);
+ break;
}
case PTRACE_SETREGS64: {
struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
- struct pt_regs *cregs = task_pt_regs(child);
- unsigned long tstate, tpc, tnpc, y;
- int i;
-
- /* Must be careful, tracing process can only set certain
- * bits in the psr.
- */
- if (__get_user(tstate, (&pregs->tstate)) ||
- __get_user(tpc, (&pregs->tpc)) ||
- __get_user(tnpc, (&pregs->tnpc)) ||
- __get_user(y, (&pregs->y))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
- }
- if ((task_thread_info(child)->flags & _TIF_32BIT) != 0) {
- tpc &= 0xffffffff;
- tnpc &= 0xffffffff;
- }
- tstate &= (TSTATE_ICC | TSTATE_XCC);
- cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
- cregs->tstate |= tstate;
- if (!((tpc | tnpc) & 3)) {
- cregs->tpc = tpc;
- cregs->tnpc = tnpc;
- }
- cregs->y = y;
- for (i = 1; i < 16; i++) {
- if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
- }
+
+ ret = copy_regset_from_user(child, view, REGSET_GENERAL,
+ 1 * sizeof(u64),
+ 15 * sizeof(u64),
+ &pregs->u_regs[0]);
+ if (!ret) {
+ /* XXX doesn't handle 'y' register correctly XXX */
+ ret = copy_regset_from_user(child, view, REGSET_GENERAL,
+ 32 * sizeof(u64),
+ 4 * sizeof(u64),
+ &pregs->tstate);
}
- pt_succ_return(regs, 0);
- goto out_tsk;
+ break;
}
case PTRACE_GETFPREGS: {
@@ -443,20 +825,24 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
} fpq[16];
};
struct fps __user *fps = (struct fps __user *) addr;
- unsigned long *fpregs = task_thread_info(child)->fpregs;
-
- if (copy_to_user(&fps->regs[0], fpregs,
- (32 * sizeof(unsigned int))) ||
- __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr)) ||
- __put_user(0, (&fps->fpqd)) ||
- __put_user(0, (&fps->flags)) ||
- __put_user(0, (&fps->extra)) ||
- clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
+
+ ret = copy_regset_to_user(child, view, REGSET_FP,
+ 0 * sizeof(u32),
+ 32 * sizeof(u32),
+ &fps->regs[0]);
+ if (!ret)
+ ret = copy_regset_to_user(child, view, REGSET_FP,
+ 33 * sizeof(u32),
+ 1 * sizeof(u32),
+ &fps->fsr);
+ if (!ret) {
+ if (__put_user(0, &fps->flags) ||
+ __put_user(0, &fps->extra) ||
+ __put_user(0, &fps->fpqd) ||
+ clear_user(&fps->fpq[0], 32 * sizeof(unsigned int)))
+ ret = -EFAULT;
}
- pt_succ_return(regs, 0);
- goto out_tsk;
+ break;
}
case PTRACE_GETFPREGS64: {
@@ -465,16 +851,12 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned long fsr;
};
struct fps __user *fps = (struct fps __user *) addr;
- unsigned long *fpregs = task_thread_info(child)->fpregs;
- if (copy_to_user(&fps->regs[0], fpregs,
- (64 * sizeof(unsigned int))) ||
- __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
- }
- pt_succ_return(regs, 0);
- goto out_tsk;
+ ret = copy_regset_to_user(child, view, REGSET_FP,
+ 0 * sizeof(u64),
+ 33 * sizeof(u64),
+ fps);
+ break;
}
case PTRACE_SETFPREGS: {
@@ -490,22 +872,17 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
} fpq[16];
};
struct fps __user *fps = (struct fps __user *) addr;
- unsigned long *fpregs = task_thread_info(child)->fpregs;
- unsigned fsr;
-
- if (copy_from_user(fpregs, &fps->regs[0],
- (32 * sizeof(unsigned int))) ||
- __get_user(fsr, (&fps->fsr))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
- }
- task_thread_info(child)->xfsr[0] &= 0xffffffff00000000UL;
- task_thread_info(child)->xfsr[0] |= fsr;
- if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
- task_thread_info(child)->gsr[0] = 0;
- task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
- pt_succ_return(regs, 0);
- goto out_tsk;
+
+ ret = copy_regset_from_user(child, view, REGSET_FP,
+ 0 * sizeof(u32),
+ 32 * sizeof(u32),
+ &fps->regs[0]);
+ if (!ret)
+ ret = copy_regset_from_user(child, view, REGSET_FP,
+ 33 * sizeof(u32),
+ 1 * sizeof(u32),
+ &fps->fsr);
+ break;
}
case PTRACE_SETFPREGS64: {
@@ -514,134 +891,50 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
unsigned long fsr;
};
struct fps __user *fps = (struct fps __user *) addr;
- unsigned long *fpregs = task_thread_info(child)->fpregs;
- if (copy_from_user(fpregs, &fps->regs[0],
- (64 * sizeof(unsigned int))) ||
- __get_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
- pt_error_return(regs, EFAULT);
- goto out_tsk;
- }
- if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
- task_thread_info(child)->gsr[0] = 0;
- task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
- pt_succ_return(regs, 0);
- goto out_tsk;
+ ret = copy_regset_to_user(child, view, REGSET_FP,
+ 0 * sizeof(u64),
+ 33 * sizeof(u64),
+ fps);
+ break;
}
case PTRACE_READTEXT:
- case PTRACE_READDATA: {
- int res = ptrace_readdata(child, addr,
- (char __user *)addr2, data);
- if (res == data) {
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
- if (res >= 0)
- res = -EIO;
- pt_error_return(regs, -res);
- goto out_tsk;
- }
+ case PTRACE_READDATA:
+ ret = ptrace_readdata(child, addr,
+ (char __user *)addr2, data);
+ if (ret == data)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+ break;
case PTRACE_WRITETEXT:
- case PTRACE_WRITEDATA: {
- int res = ptrace_writedata(child, (char __user *) addr2,
- addr, data);
- if (res == data) {
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
- if (res >= 0)
- res = -EIO;
- pt_error_return(regs, -res);
- goto out_tsk;
- }
- case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
- addr = 1;
-
- case PTRACE_CONT: { /* restart after signal. */
- if (!valid_signal(data)) {
- pt_error_return(regs, EIO);
- goto out_tsk;
- }
-
- if (request == PTRACE_SYSCALL) {
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- } else {
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- }
-
- child->exit_code = data;
-#ifdef DEBUG_PTRACE
- printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
- child->pid, child->exit_code,
- task_pt_regs(child)->tpc,
- task_pt_regs(child)->tnpc);
-
-#endif
- wake_up_process(child);
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
-
-/*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL: {
- if (child->exit_state == EXIT_ZOMBIE) { /* already dead */
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
- child->exit_code = SIGKILL;
- wake_up_process(child);
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
-
- case PTRACE_SUNDETACH: { /* detach a process that was attached. */
- int error = ptrace_detach(child, data);
- if (error) {
- pt_error_return(regs, EIO);
- goto out_tsk;
- }
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
-
- /* PTRACE_DUMPCORE unsupported... */
+ case PTRACE_WRITEDATA:
+ ret = ptrace_writedata(child, (char __user *) addr2,
+ addr, data);
+ if (ret == data)
+ ret = 0;
+ else if (ret >= 0)
+ ret = -EIO;
+ break;
case PTRACE_GETEVENTMSG: {
- int err;
-
if (test_thread_flag(TIF_32BIT))
- err = put_user(child->ptrace_message,
+ ret = put_user(child->ptrace_message,
(unsigned int __user *) data);
else
- err = put_user(child->ptrace_message,
+ ret = put_user(child->ptrace_message,
(unsigned long __user *) data);
- if (err)
- pt_error_return(regs, -err);
- else
- pt_succ_return(regs, 0);
break;
}
- default: {
- int err = ptrace_request(child, request, addr, data);
- if (err)
- pt_error_return(regs, -err);
- else
- pt_succ_return(regs, 0);
- goto out_tsk;
- }
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
}
-out_tsk:
- if (child)
- put_task_struct(child);
-out:
- unlock_kernel();
+
+ return ret;
}
asmlinkage void syscall_trace(struct pt_regs *regs, int syscall_exit_p)
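
The regset conversion fixes the user-visible NT_PRSTATUS layout for 64-bit tasks at 36 eight-byte slots: sixteen globals/outs taken from pt_regs, sixteen locals/ins read from the register window on the user stack, then TSTATE, TPC, TNPC and Y. The enum below is only an illustrative offset map matching the copyout ranges in genregs64_get()/genregs64_set(); it is not part of any kernel ABI header.

    enum sparc64_genreg_slot {
            GR_G0     = 0,          /* slots  0..7 : %g0..%g7, from pt_regs    */
            GR_O0     = 8,          /* slots  8..15: %o0..%o7, from pt_regs    */
            GR_L0     = 16,         /* slots 16..23: %l0..%l7, from user stack */
            GR_I0     = 24,         /* slots 24..31: %i0..%i7, from user stack */
            GR_TSTATE = 32,
            GR_TPC    = 33,
            GR_TNPC   = 34,
            GR_Y      = 35,
            GR_NSLOTS = 36,         /* 36 * sizeof(u64) bytes in total         */
    };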
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c39944927f1a..a8052b76df41 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -46,8 +46,6 @@
#include <asm/ldc.h>
#include <asm/hypervisor.h>
-extern void calibrate_delay(void);
-
int sparc64_multi_core __read_mostly;
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 60765e314bd8..8649635d6d74 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -277,6 +277,7 @@ EXPORT_SYMBOL(sys_getpid);
EXPORT_SYMBOL(sys_geteuid);
EXPORT_SYMBOL(sys_getuid);
EXPORT_SYMBOL(sys_getegid);
+EXPORT_SYMBOL(sysctl_nr_open);
EXPORT_SYMBOL(sys_getgid);
EXPORT_SYMBOL(svr4_getcontext);
EXPORT_SYMBOL(svr4_setcontext);
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S
index 06d10907d8ce..adc62f490f36 100644
--- a/arch/sparc64/kernel/systbls.S
+++ b/arch/sparc64/kernel/systbls.S
@@ -80,7 +80,8 @@ sys_call_table32:
.word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare
/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy
.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
-/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, compat_sys_timerfd, sys_eventfd, compat_sys_fallocate
+/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
+ .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime
#endif /* CONFIG_COMPAT */
@@ -152,7 +153,8 @@ sys_call_table:
.word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
/*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
-/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd, sys_eventfd, sys_fallocate
+/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
+ .word sys_timerfd_settime, sys_timerfd_gettime
#if defined(CONFIG_SUNOS_EMUL) || defined(CONFIG_SOLARIS_EMUL) || \
defined(CONFIG_SOLARIS_EMUL_MODULE)
@@ -271,6 +273,7 @@ sunos_sys_table:
.word sunos_nosys, sunos_nosys, sunos_nosys
.word sunos_nosys
/*310*/ .word sunos_nosys, sunos_nosys, sunos_nosys
- .word sunos_nosys, sunos_nosys
+ .word sunos_nosys, sunos_nosys, sunos_nosys
+ .word sunos_nosys
#endif
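
The table change retires the transitional sys_timerfd slot and wires up the final timerfd_create/timerfd_settime/timerfd_gettime trio. As a userspace illustration of the API those slots expose (standard timerfd usage, nothing specific to this patch):

    #include <sys/timerfd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
            struct itimerspec its = {
                    .it_value    = { .tv_sec = 1 }, /* first expiry after 1s */
                    .it_interval = { .tv_sec = 1 }, /* then every second     */
            };
            uint64_t expirations;
            int fd;

            fd = timerfd_create(CLOCK_MONOTONIC, 0);
            if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0)
                    return 1;

            /* read() returns the number of expirations since the last read */
            if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
                    printf("timer fired %llu time(s)\n",
                           (unsigned long long)expirations);

            close(fd);
            return 0;
    }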
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 4352ee4d8dac..d204f1ab1d4c 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -1707,6 +1707,11 @@ static void __exit rtc_mini_exit(void)
misc_deregister(&rtc_mini_dev);
}
+int __devinit read_current_timer(unsigned long *timer_val)
+{
+ *timer_val = tick_ops->get_tick();
+ return 0;
+}
module_init(rtc_mini_init);
module_exit(rtc_mini_exit);
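
read_current_timer() gives generic code a cheap monotonic counter, here simply the current %tick value; the generic delay-loop calibration is the expected consumer. The fragment below is a kernel-context illustration only, with an invented helper name, showing how such a counter might be used to time a short operation.

    extern int read_current_timer(unsigned long *timer_val);

    /* Illustration: roughly how many tick counts does fn() take? */
    static unsigned long ticks_elapsed(void (*fn)(void))
    {
            unsigned long t0, t1;

            if (read_current_timer(&t0))
                    return 0;       /* counter not usable */
            fn();
            if (read_current_timer(&t1))
                    return 0;
            return t1 - t0;
    }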
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 523e993ee90c..e726c45645ff 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -997,7 +997,7 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
initrd_start, initrd_end);
#endif
- reserve_bootmem(initrd_start, size);
+ reserve_bootmem(initrd_start, size, BOOTMEM_DEFAULT);
initrd_start += PAGE_OFFSET;
initrd_end += PAGE_OFFSET;
@@ -1007,7 +1007,7 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
#ifdef CONFIG_DEBUG_BOOTMEM
prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
- reserve_bootmem(kern_base, kern_size);
+ reserve_bootmem(kern_base, kern_size, BOOTMEM_DEFAULT);
*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;
/* Add back in the initmem pages. */
@@ -1024,7 +1024,7 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
(bootmap_pfn << PAGE_SHIFT), size);
#endif
- reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
+ reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size, BOOTMEM_DEFAULT);
for (i = 0; i < pavail_ents; i++) {
unsigned long start_pfn, end_pfn;
@@ -1489,7 +1489,7 @@ static void __init taint_real_pages(void)
goto do_next_page;
}
}
- reserve_bootmem(old_start, PAGE_SIZE);
+ reserve_bootmem(old_start, PAGE_SIZE, BOOTMEM_DEFAULT);
do_next_page:
old_start += PAGE_SIZE;
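
All four reserve_bootmem() calls above are adjusted for the interface change that adds a flags argument. A minimal sketch of the new calling convention, assuming the 2.6.25-era prototype int reserve_bootmem(unsigned long addr, unsigned long size, int flags):

#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static int __init example_reserve(unsigned long base, unsigned long size)
{
	/* BOOTMEM_DEFAULT keeps the old behaviour; callers may ignore the result. */
	reserve_bootmem(base, size, BOOTMEM_DEFAULT);

	/* BOOTMEM_EXCLUSIVE lets a caller detect an already-reserved range. */
	if (reserve_bootmem(base + size, size, BOOTMEM_EXCLUSIVE) < 0) {
		printk(KERN_WARNING "bootmem range already reserved\n");
		return -EBUSY;
	}
	return 0;
}
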
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c
index 1c0db842a6f4..87e7c7ea0ee6 100644
--- a/arch/sparc64/prom/init.c
+++ b/arch/sparc64/prom/init.c
@@ -48,7 +48,10 @@ void __init prom_init(void *cif_handler, void *cif_stack)
prom_getstring(node, "version", prom_version, sizeof(prom_version));
prom_printf("\n");
+}
+void __init prom_init_report(void)
+{
printk("PROMLIB: Sun IEEE Boot Prom '%s'\n", prom_version);
printk("PROMLIB: Root node compatible: %s\n", prom_root_compatible);
}
diff --git a/arch/sparc64/solaris/fs.c b/arch/sparc64/solaris/fs.c
index 61be597bf430..9311bfe4f2f7 100644
--- a/arch/sparc64/solaris/fs.c
+++ b/arch/sparc64/solaris/fs.c
@@ -624,7 +624,7 @@ asmlinkage int solaris_ulimit(int cmd, int val)
case 3: /* UL_GMEMLIM */
return current->signal->rlim[RLIMIT_DATA].rlim_cur;
case 4: /* UL_GDESLIM */
- return NR_OPEN;
+ return sysctl_nr_open;
}
return -EINVAL;
}
diff --git a/arch/sparc64/solaris/timod.c b/arch/sparc64/solaris/timod.c
index a9d32ceabf26..f53123c02c2b 100644
--- a/arch/sparc64/solaris/timod.c
+++ b/arch/sparc64/solaris/timod.c
@@ -859,7 +859,8 @@ asmlinkage int solaris_getmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
SOLD("entry");
lock_kernel();
- if(fd >= NR_OPEN) goto out;
+ if (fd >= sysctl_nr_open)
+ goto out;
fdt = files_fdtable(current->files);
filp = fdt->fd[fd];
@@ -927,7 +928,8 @@ asmlinkage int solaris_putmsg(unsigned int fd, u32 arg1, u32 arg2, u32 arg3)
SOLD("entry");
lock_kernel();
- if(fd >= NR_OPEN) goto out;
+ if (fd >= sysctl_nr_open)
+ goto out;
fdt = files_fdtable(current->files);
filp = fdt->fd[fd];
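
The Solaris-emulation hunks above swap the compile-time NR_OPEN constant for sysctl_nr_open, the runtime limit behind /proc/sys/fs/nr_open (the sparc64 ksyms hunk earlier exports it for exactly this use). A minimal sketch of the pattern, assuming the declaration lives in linux/file.h in this era:

#include <linux/file.h>		/* assumed home of sysctl_nr_open here */

static int example_fd_in_range(unsigned int fd)
{
	/* was: return fd < NR_OPEN; */
	return fd < sysctl_nr_open;
}
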
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 55945db1313c..99e51d059a02 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -68,6 +68,10 @@ config IRQ_RELEASE_METHOD
bool
default y
+config HZ
+ int
+ default 100
+
menu "UML-specific options"
config STATIC_LINK
@@ -95,23 +99,6 @@ config LD_SCRIPT_DYN
default y
depends on !LD_SCRIPT_STATIC
-config NET
- bool "Networking support"
- help
- Unless you really know what you are doing, you should say Y here.
- The reason is that some programs need kernel networking support even
- when running on a stand-alone machine that isn't connected to any
- other computer. If you are upgrading from an older kernel, you
- should consider updating your networking tools too because changes
- in the kernel and the tools often go hand in hand. The tools are
- contained in the package net-tools, the location and version number
- of which are given in <file:Documentation/Changes>.
-
- For a general introduction to Linux networking, it is highly
- recommended to read the NET-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
-
source "fs/Kconfig.binfmt"
config HOSTFS
@@ -145,7 +132,7 @@ config HPPFS
by removing or changing anything in /proc which gives away the
identity of a UML.
- See <http://user-mode-linux.sf.net/hppfs.html> for more information.
+ See <http://user-mode-linux.sf.net/old/hppfs.html> for more information.
You only need this if you are setting up a UML honeypot. Otherwise,
it is safe to say 'N' here.
@@ -189,8 +176,7 @@ config MAGIC_SYSRQ
config SMP
bool "Symmetric multi-processing support (EXPERIMENTAL)"
default n
- #SMP_BROKEN is for x86_64.
- depends on EXPERIMENTAL && (!SMP_BROKEN || (BROKEN && SMP_BROKEN))
+ depends on BROKEN
help
This option enables UML SMP support.
It is NOT related to having a real SMP box. Not directly, at least.
diff --git a/arch/um/Kconfig.char b/arch/um/Kconfig.char
index 9a78d354f0b4..3a4b396d7979 100644
--- a/arch/um/Kconfig.char
+++ b/arch/um/Kconfig.char
@@ -18,7 +18,7 @@ config SSL
lines on the UML that are usually made to show up on the host as
ttys or ptys.
- See <http://user-mode-linux.sourceforge.net/input.html> for more
+ See <http://user-mode-linux.sourceforge.net/old/input.html> for more
information and command line examples of how to use this facility.
Unless you have a specific reason for disabling this, say Y.
diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
index 1f6462ffd3e8..8fce5e536b0f 100644
--- a/arch/um/Kconfig.debug
+++ b/arch/um/Kconfig.debug
@@ -4,12 +4,12 @@ source "lib/Kconfig.debug"
config GPROF
bool "Enable gprof support"
- depends on DEBUG_INFO
+ depends on DEBUG_INFO && FRAME_POINTER
help
This allows profiling of a User-Mode Linux kernel with the gprof
utility.
- See <http://user-mode-linux.sourceforge.net/gprof.html> for more
+ See <http://user-mode-linux.sourceforge.net/old/gprof.html> for more
details.
If you're involved in UML kernel development and want to use gprof,
@@ -22,7 +22,7 @@ config GCOV
This option allows developers to retrieve coverage data from a UML
session.
- See <http://user-mode-linux.sourceforge.net/gprof.html> for more
+ See <http://user-mode-linux.sourceforge.net/old/gprof.html> for more
details.
If you're involved in UML kernel development and want to use gcov,
diff --git a/arch/um/Kconfig.net b/arch/um/Kconfig.net
index 66e50026ade9..9e9a4aaa703d 100644
--- a/arch/um/Kconfig.net
+++ b/arch/um/Kconfig.net
@@ -14,7 +14,7 @@ config UML_NET
For more information, including explanations of the networking and
sample configurations, see
- <http://user-mode-linux.sourceforge.net/networking.html>.
+ <http://user-mode-linux.sourceforge.net/old/networking.html>.
If you'd like to be able to enable networking in the User-Mode
linux environment, say Y; otherwise say N. Note that you must
@@ -38,7 +38,7 @@ config UML_NET_ETHERTAP
CONFIG_NETLINK_DEV configured as Y or M.
For more information, see
- <http://user-mode-linux.sourceforge.net/networking.html> That site
+ <http://user-mode-linux.sourceforge.net/old/networking.html> That site
has examples of the UML command line to use to enable Ethertap
networking.
@@ -72,7 +72,7 @@ config UML_NET_SLIP
To use this, your host must support slip devices.
For more information, see
- <http://user-mode-linux.sourceforge.net/networking.html>. That site
+	  <http://user-mode-linux.sourceforge.net/old/networking.html>. That site
has examples of the UML command line to use to enable slip
networking, and details of a few quirks with it.
@@ -96,7 +96,7 @@ config UML_NET_DAEMON
networking daemon on the host.
For more information, see
- <http://user-mode-linux.sourceforge.net/networking.html> That site
+ <http://user-mode-linux.sourceforge.net/old/networking.html> That site
has examples of the UML command line to use to enable Daemon
networking.
@@ -144,7 +144,7 @@ config UML_NET_MCAST
To use this, your host kernel(s) must support IP Multicasting.
For more information, see
- <http://user-mode-linux.sourceforge.net/networking.html> That site
+ <http://user-mode-linux.sourceforge.net/old/networking.html> That site
has examples of the UML command line to use to enable Multicast
networking, and notes about the security of this approach.
@@ -165,7 +165,7 @@ config UML_NET_PCAP
installed in order to build the pcap transport into UML.
For more information, see
- <http://user-mode-linux.sourceforge.net/networking.html> That site
+ <http://user-mode-linux.sourceforge.net/old/networking.html> That site
has examples of the UML command line to use to enable this option.
If you intend to use UML as a network monitor for the host, say
diff --git a/arch/um/Makefile b/arch/um/Makefile
index ba6813a4aa37..cb4af9bf2074 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -49,7 +49,7 @@ SYS_DIR := $(ARCH_DIR)/include/sysdep-$(SUBARCH)
#
# These apply to USER_CFLAGS to.
-KBUILD_CFLAGS += $(CFLAGS-y) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \
+KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ -DSUBARCH=\"$(SUBARCH)\" \
$(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \
-Din6addr_loopback=kernel_in6addr_loopback \
-Din6addr_any=kernel_in6addr_any
@@ -58,7 +58,7 @@ KBUILD_AFLAGS += $(ARCH_INCLUDE)
USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
$(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
- -D_FILE_OFFSET_BITS=64
+ $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
@@ -130,7 +130,9 @@ CPPFLAGS_vmlinux.lds = -U$(SUBARCH) -DSTART=$(START) -DELF_ARCH=$(ELF_ARCH) \
# The wrappers will select whether using "malloc" or the kernel allocator.
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
-CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS)
+LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
+
+CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
define cmd_vmlinux__
$(CC) $(CFLAGS_vmlinux) -o $@ \
-Wl,-T,$(vmlinux-lds) $(vmlinux-init) \
@@ -158,7 +160,7 @@ ifneq ($(KBUILD_SRC),)
$(Q)mkdir -p $(objtree)/include/asm-um
$(Q)ln -fsn $(srctree)/include/asm-um/$(basename $(notdir $@))-$(SUBARCH)$(suffix $@) $@
else
- $(Q)cd $(TOPDIR)/$(dir $@) ; \
+ $(Q)cd $(srctree)/$(dir $@) ; \
ln -sf $(basename $(notdir $@))-$(SUBARCH)$(suffix $@) $(notdir $@)
endif
@@ -168,7 +170,7 @@ ifneq ($(KBUILD_SRC),)
$(Q)mkdir -p $(objtree)/include/asm-um
$(Q)ln -fsn $(srctree)/include/asm-$(HEADER_ARCH) include/asm-um/arch
else
- $(Q)cd $(TOPDIR)/include/asm-um && ln -fsn ../asm-$(HEADER_ARCH) arch
+ $(Q)cd $(srctree)/include/asm-um && ln -fsn ../asm-$(HEADER_ARCH) arch
endif
$(objtree)/$(ARCH_DIR)/include:
diff --git a/arch/um/Makefile-tt b/arch/um/Makefile-tt
deleted file mode 100644
index 03f7b10cfd0b..000000000000
--- a/arch/um/Makefile-tt
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
-# Licensed under the GPL
-#
-
diff --git a/arch/um/defconfig b/arch/um/defconfig
index f609edede065..86db2862f222 100644
--- a/arch/um/defconfig
+++ b/arch/um/defconfig
@@ -77,7 +77,7 @@ CONFIG_LD_SCRIPT_DYN=y
CONFIG_NET=y
CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m
-# CONFIG_HOSTFS is not set
+CONFIG_HOSTFS=y
# CONFIG_HPPFS is not set
CONFIG_MCONSOLE=y
CONFIG_MAGIC_SYSRQ=y
@@ -188,7 +188,7 @@ CONFIG_CON_CHAN="xterm"
CONFIG_SSL_CHAN="pts"
CONFIG_UNIX98_PTYS=y
CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
+CONFIG_LEGACY_PTY_COUNT=32
# CONFIG_WATCHDOG is not set
CONFIG_UML_SOUND=m
CONFIG_SOUND=m
@@ -508,7 +508,7 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set
-CONFIG_DEBUG_SLAB=y
+# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_SLAB_LEAK is not set
# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 83bf15a3dda8..2c898c4d6b6a 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -8,6 +8,7 @@
#include "chan_kern.h"
#include "irq_kern.h"
#include "irq_user.h"
+#include "kern_util.h"
#include "os.h"
#define LINE_BUFSIZE 4096
@@ -48,7 +49,7 @@ static int write_room(struct line *line)
n = line->head - line->tail;
if (n <= 0)
- n = LINE_BUFSIZE + n; /* The other case */
+ n += LINE_BUFSIZE; /* The other case */
return n - 1;
}
@@ -58,17 +59,10 @@ int line_write_room(struct tty_struct *tty)
unsigned long flags;
int room;
- if (tty->stopped)
- return 0;
-
spin_lock_irqsave(&line->lock, flags);
room = write_room(line);
spin_unlock_irqrestore(&line->lock, flags);
- /*XXX: Warning to remove */
- if (0 == room)
- printk(KERN_DEBUG "%s: %s: no room left in buffer\n",
- __FUNCTION__,tty->name);
return room;
}
@@ -79,8 +73,7 @@ int line_chars_in_buffer(struct tty_struct *tty)
int ret;
spin_lock_irqsave(&line->lock, flags);
-
- /*write_room subtracts 1 for the needed NULL, so we readd it.*/
+ /* write_room subtracts 1 for the needed NULL, so we readd it.*/
ret = LINE_BUFSIZE - (write_room(line) + 1);
spin_unlock_irqrestore(&line->lock, flags);
@@ -184,10 +177,6 @@ void line_flush_buffer(struct tty_struct *tty)
unsigned long flags;
int err;
- /*XXX: copied from line_write, verify if it is correct!*/
- if (tty->stopped)
- return;
-
spin_lock_irqsave(&line->lock, flags);
err = flush_buffer(line);
spin_unlock_irqrestore(&line->lock, flags);
@@ -213,9 +202,6 @@ int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
unsigned long flags;
int n, ret = 0;
- if (tty->stopped)
- return 0;
-
spin_lock_irqsave(&line->lock, flags);
if (line->head != line->tail)
ret = buffer_data(line, buf, len);
@@ -788,9 +774,11 @@ static irqreturn_t winch_interrupt(int irq, void *data)
tty = winch->tty;
if (tty != NULL) {
line = tty->driver_data;
- chan_window_size(&line->chan_list, &tty->winsize.ws_row,
- &tty->winsize.ws_col);
- kill_pgrp(tty->pgrp, SIGWINCH, 1);
+ if (line != NULL) {
+ chan_window_size(&line->chan_list, &tty->winsize.ws_row,
+ &tty->winsize.ws_col);
+ kill_pgrp(tty->pgrp, SIGWINCH, 1);
+ }
}
out:
if (winch->fd != -1)
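
Besides dropping the tty->stopped checks and the leftover debug printk, the line.c hunk above rewrites the ring-buffer free-space computation as n += LINE_BUFSIZE. An illustrative sketch of that arithmetic with made-up names (not UML's own helpers):

#define EXAMPLE_BUFSIZE 4096

/* Free space between two pointers into a circular buffer of
 * EXAMPLE_BUFSIZE bytes; one byte stays unused so that an empty and a
 * full buffer can be told apart. */
static int example_write_room(char *head, char *tail)
{
	int n = head - tail;

	if (n <= 0)
		n += EXAMPLE_BUFSIZE;	/* the gap wraps past the end */

	return n - 1;
}
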
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 0f3c7d14a6e3..ebb265c07e4d 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -1,23 +1,25 @@
/*
* Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org)
- * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Copyright (C) 2001 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
-#include "linux/console.h"
-#include "linux/ctype.h"
-#include "linux/interrupt.h"
-#include "linux/list.h"
-#include "linux/mm.h"
-#include "linux/module.h"
-#include "linux/notifier.h"
-#include "linux/reboot.h"
-#include "linux/proc_fs.h"
-#include "linux/slab.h"
-#include "linux/syscalls.h"
-#include "linux/utsname.h"
-#include "linux/workqueue.h"
-#include "asm/uaccess.h"
+#include <linux/console.h>
+#include <linux/ctype.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/utsname.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <asm/uaccess.h>
+
#include "init.h"
#include "irq_kern.h"
#include "irq_user.h"
@@ -305,7 +307,9 @@ void mconsole_stop(struct mc_request *req)
deactivate_fd(req->originating_fd, MCONSOLE_IRQ);
os_set_fd_block(req->originating_fd, 1);
mconsole_reply(req, "stopped", 0, 0);
- while (mconsole_get_request(req->originating_fd, req)) {
+ for (;;) {
+ if (!mconsole_get_request(req->originating_fd, req))
+ continue;
if (req->cmd->handler == mconsole_go)
break;
if (req->cmd->handler == mconsole_stop) {
@@ -358,7 +362,7 @@ struct unplugged_pages {
void *pages[UNPLUGGED_PER_PAGE];
};
-static DECLARE_MUTEX(plug_mem_mutex);
+static DEFINE_MUTEX(plug_mem_mutex);
static unsigned long long unplugged_pages_count = 0;
static LIST_HEAD(unplugged_pages);
static int unplug_index = UNPLUGGED_PER_PAGE;
@@ -394,7 +398,7 @@ static int mem_config(char *str, char **error_out)
diff /= PAGE_SIZE;
- down(&plug_mem_mutex);
+ mutex_lock(&plug_mem_mutex);
for (i = 0; i < diff; i++) {
struct unplugged_pages *unplugged;
void *addr;
@@ -451,7 +455,7 @@ static int mem_config(char *str, char **error_out)
err = 0;
out_unlock:
- up(&plug_mem_mutex);
+ mutex_unlock(&plug_mem_mutex);
out:
return err;
}
@@ -741,7 +745,6 @@ void mconsole_stack(struct mc_request *req)
{
char *ptr = req->request.data;
int pid_requested= -1;
- struct task_struct *from = NULL;
struct task_struct *to = NULL;
/*
@@ -763,9 +766,7 @@ void mconsole_stack(struct mc_request *req)
return;
}
- from = current;
-
- to = find_task_by_pid(pid_requested);
+ to = find_task_by_pid_ns(pid_requested, &init_pid_ns);
if ((to == NULL) || (pid_requested == 0)) {
mconsole_reply(req, "Couldn't find that pid", 1, 0);
return;
@@ -795,6 +796,8 @@ static int __init mconsole_init(void)
printk(KERN_ERR "Failed to initialize management console\n");
return 1;
}
+ if (os_set_fd_block(sock, 0))
+ goto out;
register_reboot_notifier(&reboot_notifier);
@@ -803,7 +806,7 @@ static int __init mconsole_init(void)
"mconsole", (void *)sock);
if (err) {
printk(KERN_ERR "Failed to get IRQ for management console\n");
- return 1;
+ goto out;
}
if (notify_socket != NULL) {
@@ -819,6 +822,10 @@ static int __init mconsole_init(void)
printk(KERN_INFO "mconsole (version %d) initialized on %s\n",
MCONSOLE_VERSION, mconsole_socket_name);
return 0;
+
+ out:
+ os_close_file(sock);
+ return 1;
}
__initcall(mconsole_init);
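
The plug_mem_mutex change above follows the tree-wide pattern of turning semaphores that were only ever used as mutexes into real mutexes. A minimal sketch of the before/after shape (example_lock is a made-up name):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* was: static DECLARE_MUTEX(example_lock); */

static void example_critical_section(void)
{
	mutex_lock(&example_lock);	/* was: down(&example_lock); */
	/* ... touch the state guarded by example_lock ... */
	mutex_unlock(&example_lock);	/* was: up(&example_lock); */
}
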
diff --git a/arch/um/drivers/mconsole_user.c b/arch/um/drivers/mconsole_user.c
index 430c024a19b0..13af2f03ed84 100644
--- a/arch/um/drivers/mconsole_user.c
+++ b/arch/um/drivers/mconsole_user.c
@@ -83,9 +83,8 @@ int mconsole_get_request(int fd, struct mc_request *req)
int len;
req->originlen = sizeof(req->origin);
- req->len = recvfrom(fd, &req->request, sizeof(req->request),
- MSG_DONTWAIT, (struct sockaddr *) req->origin,
- &req->originlen);
+ req->len = recvfrom(fd, &req->request, sizeof(req->request), 0,
+ (struct sockaddr *) req->origin, &req->originlen);
if (req->len < 0)
return 0;
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 3c6c44ca1ffa..1e8f41a99511 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -318,7 +318,7 @@ static void setup_etheraddr(char *str, unsigned char *addr, char *name)
if (str == NULL)
goto random;
- for (i = 0;i < 6; i++) {
+ for (i = 0; i < 6; i++) {
addr[i] = simple_strtoul(str, &end, 16);
if ((end == str) ||
((*end != ':') && (*end != ',') && (*end != '\0'))) {
@@ -343,14 +343,13 @@ static void setup_etheraddr(char *str, unsigned char *addr, char *name)
}
if (!is_local_ether_addr(addr)) {
printk(KERN_WARNING
- "Warning: attempt to assign a globally valid ethernet "
+ "Warning: Assigning a globally valid ethernet "
"address to a device\n");
- printk(KERN_WARNING "You should better enable the 2nd "
- "rightmost bit in the first byte of the MAC,\n");
+ printk(KERN_WARNING "You should set the 2nd rightmost bit in "
+ "the first byte of the MAC,\n");
printk(KERN_WARNING "i.e. %02x:%02x:%02x:%02x:%02x:%02x\n",
addr[0] | 0x02, addr[1], addr[2], addr[3], addr[4],
addr[5]);
- goto random;
}
return;
@@ -368,7 +367,6 @@ static struct platform_driver uml_net_driver = {
.name = DRIVER_NAME,
},
};
-static int driver_registered;
static void net_device_release(struct device *dev)
{
@@ -383,6 +381,12 @@ static void net_device_release(struct device *dev)
free_netdev(netdev);
}
+/*
+ * Ensures that platform_driver_register is called only once by
+ * eth_configure. Will be set in an initcall.
+ */
+static int driver_registered;
+
static void eth_configure(int n, void *init, char *mac,
struct transport *transport)
{
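
The reworded warning above refers to the locally administered bit of an Ethernet MAC address: bit 1 of the first octet. Setting it marks the address as locally assigned, so it cannot collide with a vendor-allocated, globally unique address. A hedged sketch with a made-up helper name:

static void example_make_locally_administered(unsigned char *addr)
{
	addr[0] |= 0x02;	/* the "2nd rightmost bit in the first byte" */
	addr[0] &= ~0x01;	/* keep it unicast rather than multicast */
}
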
diff --git a/arch/um/drivers/net_user.c b/arch/um/drivers/net_user.c
index 29185cad9fff..abf2653f5517 100644
--- a/arch/um/drivers/net_user.c
+++ b/arch/um/drivers/net_user.c
@@ -201,7 +201,7 @@ static int change_tramp(char **argv, char *output, int output_len)
close(fds[1]);
if (pid > 0)
- helper_wait(pid, 0, "change_tramp");
+ helper_wait(pid);
return pid;
}
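
helper_wait() loses its nohang and name arguments here (the new one-argument prototype appears later in the os.h hunk). A rough host-side sketch of what such a helper-reaping call can look like; this is illustrative only, not UML's actual implementation:

#include <sys/wait.h>
#include <errno.h>
#include <stdio.h>

static int example_helper_wait(int pid)
{
	int status;

	if (waitpid(pid, &status, 0) < 0) {
		fprintf(stderr, "waitpid(%d) failed, errno = %d\n", pid, errno);
		return -errno;
	}
	return 0;
}
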
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index 330543b3129b..19930081d3d8 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -6,6 +6,7 @@
#include "linux/completion.h"
#include "linux/interrupt.h"
#include "linux/list.h"
+#include "linux/mutex.h"
#include "asm/atomic.h"
#include "init.h"
#include "irq_kern.h"
@@ -120,7 +121,7 @@ static int port_accept(struct port_list *port)
return 0;
}
-static DECLARE_MUTEX(ports_sem);
+static DEFINE_MUTEX(ports_mutex);
static LIST_HEAD(ports);
static void port_work_proc(struct work_struct *unused)
@@ -161,7 +162,7 @@ void *port_data(int port_num)
struct port_dev *dev = NULL;
int fd;
- down(&ports_sem);
+ mutex_lock(&ports_mutex);
list_for_each(ele, &ports) {
port = list_entry(ele, struct port_list, list);
if (port->port == port_num)
@@ -216,7 +217,7 @@ void *port_data(int port_num)
out_free:
kfree(port);
out:
- up(&ports_sem);
+ mutex_unlock(&ports_mutex);
return dev;
}
diff --git a/arch/um/drivers/random.c b/arch/um/drivers/random.c
index e942e836f995..71f0959c1535 100644
--- a/arch/um/drivers/random.c
+++ b/arch/um/drivers/random.c
@@ -5,6 +5,7 @@
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*/
+#include <linux/sched.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
diff --git a/arch/um/drivers/slip_user.c b/arch/um/drivers/slip_user.c
index b8711e50da80..8b80505a3fb0 100644
--- a/arch/um/drivers/slip_user.c
+++ b/arch/um/drivers/slip_user.c
@@ -109,7 +109,7 @@ static int slip_tramp(char **argv, int fd)
read_output(fds[0], output, output_len);
printk("%s", output);
- err = helper_wait(pid, 0, argv[0]);
+ err = helper_wait(pid);
close(fds[0]);
out_free:
diff --git a/arch/um/drivers/slirp_user.c b/arch/um/drivers/slirp_user.c
index 89c1be225fda..a0ada8fec72a 100644
--- a/arch/um/drivers/slirp_user.c
+++ b/arch/um/drivers/slirp_user.c
@@ -98,7 +98,7 @@ static void slirp_close(int fd, void *data)
"(%d)\n", pri->pid, errno);
}
#endif
- err = helper_wait(pri->pid, 1, "slirp_close");
+ err = helper_wait(pri->pid);
if (err < 0)
return;
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 875d60d0c6a2..f1786e64607f 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -15,7 +15,6 @@
#include "line.h"
#include "ssl.h"
#include "chan_kern.h"
-#include "kern_util.h"
#include "kern.h"
#include "init.h"
#include "irq_user.h"
diff --git a/arch/um/drivers/stdio_console.c b/arch/um/drivers/stdio_console.c
index 656036e90b19..cec0c33cdd39 100644
--- a/arch/um/drivers/stdio_console.c
+++ b/arch/um/drivers/stdio_console.c
@@ -22,7 +22,6 @@
#include "stdio_console.h"
#include "line.h"
#include "chan_kern.h"
-#include "kern_util.h"
#include "irq_user.h"
#include "mconsole_kern.h"
#include "init.h"
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 99f9f9605e9c..be3a2797dac4 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -49,6 +49,7 @@
#include "irq_user.h"
#include "irq_kern.h"
#include "ubd_user.h"
+#include "kern_util.h"
#include "os.h"
#include "mem.h"
#include "mem_kern.h"
@@ -229,7 +230,7 @@ static int proc_ide_read_media(char *page, char **start, off_t off, int count,
return len;
}
-static void make_ide_entries(char *dev_name)
+static void make_ide_entries(const char *dev_name)
{
struct proc_dir_entry *dir, *ent;
char name[64];
@@ -244,7 +245,7 @@ static void make_ide_entries(char *dev_name)
ent->data = NULL;
ent->read_proc = proc_ide_read_media;
ent->write_proc = NULL;
- sprintf(name,"ide0/%s", dev_name);
+ snprintf(name, sizeof(name), "ide0/%s", dev_name);
proc_symlink(dev_name, proc_ide_root, name);
}
@@ -437,7 +438,10 @@ __uml_help(ubd_setup,
" machine by running 'dd' on the device. <n> must be in the range\n"
" 0 to 7. Appending an 'r' to the number will cause that device\n"
" to be mounted read-only. For example ubd1r=./ext_fs. Appending\n"
-" an 's' will cause data to be written to disk on the host immediately.\n\n"
+" an 's' will cause data to be written to disk on the host immediately.\n"
+" 'c' will cause the device to be treated as being shared between multiple\n"
+" UMLs and file locking will be turned off - this is appropriate for a\n"
+" cluster filesystem and inappropriate at almost all other times.\n\n"
);
static int udb_setup(char *str)
@@ -456,20 +460,6 @@ __uml_help(udb_setup,
" in the boot output.\n\n"
);
-static int fakehd_set = 0;
-static int fakehd(char *str)
-{
- printk(KERN_INFO "fakehd : Changing ubd name to \"hd\".\n");
- fakehd_set = 1;
- return 1;
-}
-
-__setup("fakehd", fakehd);
-__uml_help(fakehd,
-"fakehd\n"
-" Change the ubd device name to \"hd\".\n\n"
-);
-
static void do_ubd_request(struct request_queue * q);
/* Only changed by ubd_init, which is an initcall. */
@@ -718,8 +708,10 @@ static int ubd_add(int n, char **error_out)
ubd_disk_register(fake_major, ubd_dev->size, n,
&fake_gendisk[n]);
- /* perhaps this should also be under the "if (fake_major)" above */
- /* using the fake_disk->disk_name and also the fakehd_set name */
+ /*
+ * Perhaps this should also be under the "if (fake_major)" above
+ * using the fake_disk->disk_name
+ */
if (fake_ide)
make_ide_entries(ubd_gendisk[n]->disk_name);
diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c
index 48fc7452bc1d..b591bb9c41dd 100644
--- a/arch/um/drivers/ubd_user.c
+++ b/arch/um/drivers/ubd_user.c
@@ -16,7 +16,6 @@
#include <sys/mman.h>
#include <sys/param.h>
#include "asm/types.h"
-#include "kern_util.h"
#include "user.h"
#include "ubd_user.h"
#include "os.h"
diff --git a/arch/um/drivers/vde_user.c b/arch/um/drivers/vde_user.c
index d9941fe5f931..56533db25343 100644
--- a/arch/um/drivers/vde_user.c
+++ b/arch/um/drivers/vde_user.c
@@ -80,7 +80,7 @@ void vde_init_libstuff(struct vde_data *vpri, struct vde_init *init)
vpri->args = kmalloc(sizeof(struct vde_open_args), UM_GFP_KERNEL);
if (vpri->args == NULL) {
- printk(UM_KERN_ERR "vde_init_libstuff - vde_open_args"
+ printk(UM_KERN_ERR "vde_init_libstuff - vde_open_args "
"allocation failed");
return;
}
diff --git a/arch/um/include/arch.h b/arch/um/include/arch.h
index 49c601ff2bac..2de92a08a76b 100644
--- a/arch/um/include/arch.h
+++ b/arch/um/include/arch.h
@@ -10,6 +10,6 @@
extern void arch_check_bugs(void);
extern int arch_fixup(unsigned long address, struct uml_pt_regs *regs);
-extern int arch_handle_signal(int sig, struct uml_pt_regs *regs);
+extern void arch_examine_signal(int sig, struct uml_pt_regs *regs);
#endif
diff --git a/arch/um/include/as-layout.h b/arch/um/include/as-layout.h
index a5cdf953e04a..606bb5c7fdf6 100644
--- a/arch/um/include/as-layout.h
+++ b/arch/um/include/as-layout.h
@@ -10,23 +10,31 @@
#include "kern_constants.h"
/*
- * Assembly doesn't want any casting, but C does, so define these
- * without casts here, and define new symbols with casts inside the C
- * section.
+ * Stolen from linux/const.h, which can't be directly included since
+ * this is used in userspace code, which has no access to the kernel
+ * headers. Changed to be suitable for adding casts to the start,
+ * rather than "UL" to the end.
*/
-#define ASM_STUB_CODE (UML_CONFIG_TOP_ADDR - 2 * UM_KERN_PAGE_SIZE)
-#define ASM_STUB_DATA (UML_CONFIG_TOP_ADDR - UM_KERN_PAGE_SIZE)
-#define ASM_STUB_START ASM_STUB_CODE
-/*
- * This file is included by the assembly stubs, which just want the
- * definitions above.
+/* Some constant macros are used in both assembler and
+ * C code. Therefore we cannot annotate them always with
+ * 'UL' and other type specifiers unilaterally. We
+ * use the following macros to deal with this.
*/
-#ifndef __ASSEMBLY__
-#define STUB_CODE ((unsigned long) ASM_STUB_CODE)
-#define STUB_DATA ((unsigned long) ASM_STUB_DATA)
-#define STUB_START ((unsigned long) ASM_STUB_START)
+#ifdef __ASSEMBLY__
+#define _AC(X, Y) (Y)
+#else
+#define __AC(X, Y) (X (Y))
+#define _AC(X, Y) __AC(X, Y)
+#endif
+
+#define STUB_START _AC(, 0x100000)
+#define STUB_CODE _AC((unsigned long), STUB_START)
+#define STUB_DATA _AC((unsigned long), STUB_CODE + UM_KERN_PAGE_SIZE)
+#define STUB_END _AC((unsigned long), STUB_DATA + UM_KERN_PAGE_SIZE)
+
+#ifndef __ASSEMBLY__
#include "sysdep/ptrace.h"
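
The _AC() helper introduced above (borrowed from linux/const.h, as the comment notes) applies a cast only when the header is compiled as C, so assembler sources can keep sharing the same constants. A small hedged illustration with made-up names:

#define EXAMPLE_PAGE	_AC(, 0x1000)
#define EXAMPLE_BASE	_AC((unsigned long), 0x100000)
#define EXAMPLE_TOP	_AC((unsigned long), EXAMPLE_BASE + EXAMPLE_PAGE)

/* Compiled as C, these pick up an (unsigned long) cast around the value;
 * included from assembler, they collapse to plain parenthesised numbers
 * with no cast syntax to trip over. */
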
diff --git a/arch/um/include/chan_user.h b/arch/um/include/chan_user.h
index 5a2263e05bb2..9b9ced85b703 100644
--- a/arch/um/include/chan_user.h
+++ b/arch/um/include/chan_user.h
@@ -48,7 +48,7 @@ extern void register_winch_irq(int fd, int tty_fd, int pid,
#define __channel_help(fn, prefix) \
__uml_help(fn, prefix "[0-9]*=<channel description>\n" \
" Attach a console or serial line to a host channel. See\n" \
-" http://user-mode-linux.sourceforge.net/input.html for a complete\n" \
+" http://user-mode-linux.sourceforge.net/old/input.html for a complete\n" \
" description of this switch.\n\n" \
);
diff --git a/arch/um/include/common-offsets.h b/arch/um/include/common-offsets.h
index 0edab695ed4e..b54bd35585c2 100644
--- a/arch/um/include/common-offsets.h
+++ b/arch/um/include/common-offsets.h
@@ -18,6 +18,7 @@ DEFINE_STR(UM_KERN_WARNING, KERN_WARNING);
DEFINE_STR(UM_KERN_NOTICE, KERN_NOTICE);
DEFINE_STR(UM_KERN_INFO, KERN_INFO);
DEFINE_STR(UM_KERN_DEBUG, KERN_DEBUG);
+DEFINE_STR(UM_KERN_CONT, KERN_CONT);
DEFINE(UM_ELF_CLASS, ELF_CLASS);
DEFINE(UM_ELFCLASS32, ELFCLASS32);
diff --git a/arch/um/include/init.h b/arch/um/include/init.h
index cebc6cae9190..b00a95741d41 100644
--- a/arch/um/include/init.h
+++ b/arch/um/include/init.h
@@ -40,6 +40,20 @@
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
+#ifndef __KERNEL__
+#ifndef __section
+# define __section(S) __attribute__ ((__section__(#S)))
+#endif
+
+#if __GNUC_MINOR__ >= 3
+# define __used __attribute__((__used__))
+#else
+# define __used __attribute__((__unused__))
+#endif
+
+#else
+#include <linux/compiler.h>
+#endif
/* These are for everybody (although not all archs will actually
discard it in modules) */
#define __init __section(.init.text)
@@ -127,14 +141,3 @@ extern struct uml_param __uml_setup_start, __uml_setup_end;
#endif
#endif /* _LINUX_UML_INIT_H */
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/include/irq_user.h b/arch/um/include/irq_user.h
index 884a9c17eea0..e60b31873de1 100644
--- a/arch/um/include/irq_user.h
+++ b/arch/um/include/irq_user.h
@@ -14,7 +14,6 @@ struct irq_fd {
int fd;
int type;
int irq;
- int pid;
int events;
int current_events;
};
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h
index 74ce8e5370a6..3c341222d252 100644
--- a/arch/um/include/kern_util.h
+++ b/arch/um/include/kern_util.h
@@ -9,107 +9,61 @@
#include "sysdep/ptrace.h"
#include "sysdep/faultinfo.h"
-typedef void (*kern_hndl)(int, struct uml_pt_regs *);
-
-struct kern_handlers {
- kern_hndl relay_signal;
- kern_hndl winch;
- kern_hndl bus_handler;
- kern_hndl page_fault;
- kern_hndl sigio_handler;
- kern_hndl timer_handler;
-};
-
-extern const struct kern_handlers handlinfo_kern;
+extern int uml_exitcode;
extern int ncpus;
-extern char *gdb_init;
extern int kmalloc_ok;
-extern int jail;
-extern int nsyscalls;
-#define UML_ROUND_DOWN(addr) ((void *)(((unsigned long) addr) & PAGE_MASK))
#define UML_ROUND_UP(addr) \
- UML_ROUND_DOWN(((unsigned long) addr) + PAGE_SIZE - 1)
+ ((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)
-extern int kernel_fork(unsigned long flags, int (*fn)(void *), void * arg);
-extern int kernel_thread_proc(void *data);
-extern void syscall_segv(int sig);
-extern int current_pid(void);
extern unsigned long alloc_stack(int order, int atomic);
+extern void free_stack(unsigned long stack, int order);
+
extern int do_signal(void);
-extern int is_stack_fault(unsigned long sp);
+extern void copy_sc(struct uml_pt_regs *regs, void *from);
+extern void interrupt_end(void);
+extern void relay_signal(int sig, struct uml_pt_regs *regs);
+
extern unsigned long segv(struct faultinfo fi, unsigned long ip,
int is_user, struct uml_pt_regs *regs);
extern int handle_page_fault(unsigned long address, unsigned long ip,
int is_write, int is_user, int *code_out);
-extern void syscall_ready(void);
-extern void set_tracing(void *t, int tracing);
-extern int is_tracing(void *task);
-extern int segv_syscall(void);
-extern void kern_finish_exec(void *task, int new_pid, unsigned long stack);
-extern unsigned long page_mask(void);
-extern int need_finish_fork(void);
-extern void free_stack(unsigned long stack, int order);
-extern void add_input_request(int op, void (*proc)(int), void *arg);
-extern char *current_cmd(void);
-extern void timer_handler(int sig, struct uml_pt_regs *regs);
-extern int set_signals(int enable);
-extern int pid_to_processor_id(int pid);
-extern void deliver_signals(void *t);
-extern int next_trap_index(int max);
-extern void default_idle(void);
-extern void finish_fork(void);
-extern void paging_init(void);
-extern void init_flush_vm(void);
-extern void *syscall_sp(void *t);
-extern void syscall_trace(struct uml_pt_regs *regs, int entryexit);
+
extern unsigned int do_IRQ(int irq, struct uml_pt_regs *regs);
-extern void interrupt_end(void);
-extern void initial_thread_cb(void (*proc)(void *), void *arg);
-extern int debugger_signal(int status, int pid);
-extern void debugger_parent_signal(int status, int pid);
-extern void child_signal(int pid, int status);
-extern int init_ptrace_proxy(int idle_pid, int startup, int stop);
-extern int init_parent_proxy(int pid);
-extern int singlestepping(void *t);
-extern void check_stack_overflow(void *ptr);
-extern void relay_signal(int sig, struct uml_pt_regs *regs);
-extern int user_context(unsigned long sp);
-extern void timer_irq(struct uml_pt_regs *regs);
-extern void do_uml_exitcalls(void);
-extern int attach_debugger(int idle_pid, int pid, int stop);
-extern int config_gdb(char *str);
-extern int remove_gdb(void);
-extern char *uml_strdup(char *string);
-extern void unprotect_kernel_mem(void);
-extern void protect_kernel_mem(void);
-extern void uml_cleanup(void);
-extern void lock_signalled_task(void *t);
-extern void IPI_handler(int cpu);
-extern int jail_setup(char *line, int *add);
-extern void *get_init_task(void);
-extern int clear_user_proc(void *buf, int size);
-extern int copy_to_user_proc(void *to, void *from, int size);
-extern int copy_from_user_proc(void *to, void *from, int size);
-extern int strlen_user_proc(char *str);
-extern long execute_syscall(void *r);
extern int smp_sigio_handler(void);
-extern void *get_current(void);
-extern struct task_struct *get_task(int pid, int require);
-extern void machine_halt(void);
+extern void initial_thread_cb(void (*proc)(void *), void *arg);
extern int is_syscall(unsigned long addr);
+extern void timer_handler(int sig, struct uml_pt_regs *regs);
-extern void free_irq(unsigned int, void *);
-extern int cpu(void);
+extern void timer_handler(int sig, struct uml_pt_regs *regs);
-extern void time_init_kern(void);
+extern int start_uml(void);
+extern void paging_init(void);
-/* Are we disallowed to sleep? Used to choose between GFP_KERNEL and GFP_ATOMIC. */
+extern void uml_cleanup(void);
+extern void do_uml_exitcalls(void);
+
+/*
+ * Are we disallowed to sleep? Used to choose between GFP_KERNEL and
+ * GFP_ATOMIC.
+ */
extern int __cant_sleep(void);
-extern void sigio_handler(int sig, struct uml_pt_regs *regs);
-extern void copy_sc(struct uml_pt_regs *regs, void *from);
+extern void *get_current(void);
+extern int copy_from_user_proc(void *to, void *from, int size);
+extern int cpu(void);
+extern char *uml_strdup(const char *string);
+
extern unsigned long to_irq_stack(unsigned long *mask_out);
-unsigned long from_irq_stack(int nested);
-extern int start_uml(void);
+extern unsigned long from_irq_stack(int nested);
+
+extern void syscall_trace(struct uml_pt_regs *regs, int entryexit);
+extern int singlestepping(void *t);
+
+extern void segv_handler(int sig, struct uml_pt_regs *regs);
+extern void bus_handler(int sig, struct uml_pt_regs *regs);
+extern void winch(int sig, struct uml_pt_regs *regs);
+extern void fatal_sigsegv(void) __attribute__ ((noreturn));
+
+
#endif
diff --git a/arch/um/include/mem_user.h b/arch/um/include/mem_user.h
index a54514d2cc3a..46384acd547b 100644
--- a/arch/um/include/mem_user.h
+++ b/arch/um/include/mem_user.h
@@ -46,9 +46,6 @@ extern int iomem_size;
#define ROUND_4M(n) ((((unsigned long) (n)) + (1 << 22)) & ~((1 << 22) - 1))
-extern unsigned long host_task_size;
-extern unsigned long task_size;
-
extern int init_mem_user(void);
extern void setup_memory(void *entry);
extern unsigned long find_iomem(char *driver, unsigned long *len_out);
@@ -59,9 +56,7 @@ extern void setup_physmem(unsigned long start, unsigned long usable,
unsigned long len, unsigned long long highmem);
extern void add_iomem(char *name, int fd, unsigned long size);
extern unsigned long phys_offset(unsigned long phys);
-extern void unmap_physmem(void);
extern void map_memory(unsigned long virt, unsigned long phys,
unsigned long len, int r, int w, int x);
-extern unsigned long get_kmem_end(void);
#endif
diff --git a/arch/um/include/misc_constants.h b/arch/um/include/misc_constants.h
deleted file mode 100644
index 989bc08de36e..000000000000
--- a/arch/um/include/misc_constants.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __MISC_CONSTANT_H_
-#define __MISC_CONSTANT_H_
-
-#include <user_constants.h>
-
-#endif
diff --git a/arch/um/include/os.h b/arch/um/include/os.h
index 6f0d1c741bca..0b6b62733303 100644
--- a/arch/um/include/os.h
+++ b/arch/um/include/os.h
@@ -8,7 +8,6 @@
#include <stdarg.h>
#include "irq_user.h"
-#include "kern_util.h"
#include "longjmp.h"
#include "mm_id.h"
#include "sysdep/tls.h"
@@ -128,33 +127,31 @@ static inline struct openflags of_cloexec(struct openflags flags)
extern int os_stat_file(const char *file_name, struct uml_stat *buf);
extern int os_stat_fd(const int fd, struct uml_stat *buf);
extern int os_access(const char *file, int mode);
-extern int os_get_exec_close(int fd, int *close_on_exec);
extern int os_set_exec_close(int fd);
extern int os_ioctl_generic(int fd, unsigned int cmd, unsigned long arg);
extern int os_get_ifname(int fd, char *namebuf);
extern int os_set_slip(int fd);
-extern int os_set_owner(int fd, int pid);
extern int os_mode_fd(int fd, int mode);
extern int os_seek_file(int fd, unsigned long long offset);
-extern int os_open_file(char *file, struct openflags flags, int mode);
+extern int os_open_file(const char *file, struct openflags flags, int mode);
extern int os_read_file(int fd, void *buf, int len);
extern int os_write_file(int fd, const void *buf, int count);
-extern int os_file_size(char *file, unsigned long long *size_out);
-extern int os_file_modtime(char *file, unsigned long *modtime);
+extern int os_file_size(const char *file, unsigned long long *size_out);
+extern int os_file_modtime(const char *file, unsigned long *modtime);
extern int os_pipe(int *fd, int stream, int close_on_exec);
-extern int os_set_fd_async(int fd, int owner);
+extern int os_set_fd_async(int fd);
extern int os_clear_fd_async(int fd);
extern int os_set_fd_block(int fd, int blocking);
extern int os_accept_connection(int fd);
-extern int os_create_unix_socket(char *file, int len, int close_on_exec);
+extern int os_create_unix_socket(const char *file, int len, int close_on_exec);
extern int os_shutdown_socket(int fd, int r, int w);
extern void os_close_file(int fd);
extern int os_rcv_fd(int fd, int *helper_pid_out);
extern int create_unix_socket(char *file, int len, int close_on_exec);
-extern int os_connect_socket(char *name);
+extern int os_connect_socket(const char *name);
extern int os_file_type(char *file);
-extern int os_file_mode(char *file, struct openflags *mode_out);
+extern int os_file_mode(const char *file, struct openflags *mode_out);
extern int os_lock_file(int fd, int excl);
extern void os_flush_stdout(void);
extern int os_stat_filesystem(char *path, long *bsize_out,
@@ -168,14 +165,10 @@ extern int os_fchange_dir(int fd);
/* start_up.c */
extern void os_early_checks(void);
-extern int can_do_skas(void);
+extern void can_do_skas(void);
extern void os_check_bugs(void);
extern void check_host_supports_tls(int *supports_tls, int *tls_min);
-/* Make sure they are clear when running in TT mode. Required by
- * SEGV_MAYBE_FIXABLE */
-#define clear_can_do_skas() do { ptrace_faultinfo = proc_mm = 0; } while (0)
-
/* mem.c */
extern int create_mem_file(unsigned long long len);
@@ -214,7 +207,7 @@ extern int execvp_noalloc(char *buf, const char *file, char *const argv[]);
extern int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv);
extern int run_helper_thread(int (*proc)(void *), void *arg,
unsigned int flags, unsigned long *stack_out);
-extern int helper_wait(int pid, int nohang, char *pname);
+extern int helper_wait(int pid);
/* tls.c */
@@ -237,16 +230,12 @@ extern void unblock_signals(void);
extern int get_signals(void);
extern int set_signals(int enable);
-/* trap.c */
-extern void os_fill_handlinfo(struct kern_handlers h);
-
/* util.c */
extern void stack_protections(unsigned long address);
extern int raw(int fd);
extern void setup_machinename(char *machine_out);
extern void setup_hostinfo(char *buf, int len);
-extern int setjmp_wrapper(void (*proc)(void *, void *), ...);
-extern void os_dump_core(void);
+extern void os_dump_core(void) __attribute__ ((noreturn));
/* time.c */
extern void idle_sleep(unsigned long long nsecs);
@@ -275,11 +264,9 @@ extern int protect(struct mm_id * mm_idp, unsigned long addr,
extern int is_skas_winch(int pid, int fd, void *data);
extern int start_userspace(unsigned long stub_stack);
extern int copy_context_skas0(unsigned long stack, int pid);
-extern void save_registers(int pid, struct uml_pt_regs *regs);
-extern void restore_registers(int pid, struct uml_pt_regs *regs);
extern void userspace(struct uml_pt_regs *regs);
-extern void map_stub_pages(int fd, unsigned long code,
- unsigned long data, unsigned long stack);
+extern int map_stub_pages(int fd, unsigned long code, unsigned long data,
+ unsigned long stack);
extern void new_thread(void *stack, jmp_buf *buf, void (*handler)(void));
extern void switch_threads(jmp_buf *me, jmp_buf *you);
extern int start_idle_thread(void *stack, jmp_buf *switch_buf);
@@ -298,16 +285,12 @@ extern void os_free_irq_later(struct irq_fd *active_fds,
extern int os_get_pollfd(int i);
extern void os_set_pollfd(int i, int fd);
extern void os_set_ioignore(void);
-extern void init_irq_signals(int on_sigstack);
/* sigio.c */
extern int add_sigio_fd(int fd);
extern int ignore_sigio_fd(int fd);
extern void maybe_sigio_broken(int fd, int read);
-/* skas/trap */
-extern void sig_handler_common_skas(int sig, void *sc_ptr);
-
/* sys-x86_64/prctl.c */
extern int os_arch_prctl(int pid, int code, unsigned long *addr);
diff --git a/arch/um/include/ptrace_user.h b/arch/um/include/ptrace_user.h
index f3450e6bc18d..4bce6e012889 100644
--- a/arch/um/include/ptrace_user.h
+++ b/arch/um/include/ptrace_user.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
@@ -10,12 +10,6 @@
extern int ptrace_getregs(long pid, unsigned long *regs_out);
extern int ptrace_setregs(long pid, unsigned long *regs_in);
-extern int ptrace_getfpregs(long pid, unsigned long *regs_out);
-extern int ptrace_setfpregs(long pid, unsigned long *regs);
-extern void arch_enter_kernel(void *task, int pid);
-extern void arch_leave_kernel(void *task, int pid);
-extern void ptrace_pokeuser(unsigned long addr, unsigned long data);
-
/* syscall emulation path in ptrace */
@@ -54,7 +48,8 @@ extern int sysemu_supported;
(((int[3][3] ) { \
{ PTRACE_SYSCALL, PTRACE_SYSCALL, PTRACE_SINGLESTEP }, \
{ PTRACE_SYSEMU, PTRACE_SYSEMU, PTRACE_SINGLESTEP }, \
- { PTRACE_SYSEMU, PTRACE_SYSEMU_SINGLESTEP, PTRACE_SYSEMU_SINGLESTEP }}) \
+ { PTRACE_SYSEMU, PTRACE_SYSEMU_SINGLESTEP, \
+ PTRACE_SYSEMU_SINGLESTEP } }) \
[sysemu_mode][singlestep_mode])
#endif
diff --git a/arch/um/include/registers.h b/arch/um/include/registers.h
index 0e27406a43a4..9ea1ae3c8f46 100644
--- a/arch/um/include/registers.h
+++ b/arch/um/include/registers.h
@@ -9,14 +9,13 @@
#include "sysdep/ptrace.h"
#include "sysdep/archsetjmp.h"
-extern void init_thread_registers(struct uml_pt_regs *to);
extern int save_fp_registers(int pid, unsigned long *fp_regs);
extern int restore_fp_registers(int pid, unsigned long *fp_regs);
extern int save_fpx_registers(int pid, unsigned long *fp_regs);
extern int restore_fpx_registers(int pid, unsigned long *fp_regs);
-extern void save_registers(int pid, struct uml_pt_regs *regs);
-extern void restore_registers(int pid, struct uml_pt_regs *regs);
-extern void init_registers(int pid);
+extern int save_registers(int pid, struct uml_pt_regs *regs);
+extern int restore_registers(int pid, struct uml_pt_regs *regs);
+extern int init_registers(int pid);
extern void get_safe_registers(unsigned long *regs);
extern unsigned long get_thread_reg(int reg, jmp_buf *buf);
diff --git a/arch/um/include/signal_kern.h b/arch/um/include/signal_kern.h
deleted file mode 100644
index aeb5d5ab1dfd..000000000000
--- a/arch/um/include/signal_kern.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SIGNAL_KERN_H__
-#define __SIGNAL_KERN_H__
-
-extern int have_signals(void *t);
-
-#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/include/skas/mode-skas.h b/arch/um/include/skas/mode-skas.h
deleted file mode 100644
index e065feb000df..000000000000
--- a/arch/um/include/skas/mode-skas.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
- * Licensed under the GPL
- */
-
-#ifndef __MODE_SKAS_H__
-#define __MODE_SKAS_H__
-
-extern void kill_off_processes_skas(void);
-
-#endif
diff --git a/arch/um/include/sysdep-i386/syscalls.h b/arch/um/include/sysdep-i386/syscalls.h
index 57bd79efbee3..905698197e35 100644
--- a/arch/um/include/sysdep-i386/syscalls.h
+++ b/arch/um/include/sysdep-i386/syscalls.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2008 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
@@ -18,7 +18,8 @@ extern syscall_handler_t old_mmap_i386;
extern syscall_handler_t *sys_call_table[];
#define EXECUTE_SYSCALL(syscall, regs) \
- ((long (*)(struct syscall_args)) (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
+ ((long (*)(struct syscall_args)) \
+ (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
extern long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
diff --git a/arch/um/include/sysdep-x86_64/kernel-offsets.h b/arch/um/include/sysdep-x86_64/kernel-offsets.h
index c978b589df41..a307237b7964 100644
--- a/arch/um/include/sysdep-x86_64/kernel-offsets.h
+++ b/arch/um/include/sysdep-x86_64/kernel-offsets.h
@@ -17,16 +17,7 @@
#define OFFSET(sym, str, mem) \
DEFINE(sym, offsetof(struct str, mem));
-#define __NO_STUBS 1
-#undef __SYSCALL
-#undef _ASM_X86_64_UNISTD_H_
-#define __SYSCALL(nr, sym) [nr] = 1,
-static char syscalls[] = {
-#include <asm/arch/unistd.h>
-};
-
void foo(void)
{
#include <common-offsets.h>
-DEFINE(UM_NR_syscall_max, sizeof(syscalls) - 1);
}
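
The hunk above removes the designated-initializer trick that derived UM_NR_syscall_max (the matching NR_syscalls definition disappears in the next file). For readers unfamiliar with the idiom, here is a self-contained illustration of how it counts syscalls; the three example entries are made up:

/* Each __SYSCALL(nr, sym) expands to "[nr] = 1," inside the array, so the
 * array is sized by its highest designated index plus one. */
#define __SYSCALL(nr, sym) [nr] = 1,

static char syscalls[] = {
	__SYSCALL(0, sys_read)
	__SYSCALL(1, sys_write)
	__SYSCALL(60, sys_exit)
};

/* sizeof(syscalls) == 61, i.e. the highest syscall number (60) plus one */
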
diff --git a/arch/um/include/sysdep-x86_64/syscalls.h b/arch/um/include/sysdep-x86_64/syscalls.h
index cf72256609e4..7cfb0b085655 100644
--- a/arch/um/include/sysdep-x86_64/syscalls.h
+++ b/arch/um/include/sysdep-x86_64/syscalls.h
@@ -30,6 +30,4 @@ extern long old_mmap(unsigned long addr, unsigned long len,
extern syscall_handler_t sys_modify_ldt;
extern syscall_handler_t sys_arch_prctl;
-#define NR_syscalls (UM_NR_syscall_max + 1)
-
#endif
diff --git a/arch/um/include/um_mmu.h b/arch/um/include/um_mmu.h
index 8855d8df512f..82865fcf6872 100644
--- a/arch/um/include/um_mmu.h
+++ b/arch/um/include/um_mmu.h
@@ -12,10 +12,6 @@
typedef struct mm_context {
struct mm_id id;
- unsigned long last_page_table;
-#ifdef CONFIG_3_LEVEL_PGTABLES
- unsigned long last_pmd;
-#endif
struct uml_ldt ldt;
} mm_context_t;
diff --git a/arch/um/include/um_uaccess.h b/arch/um/include/um_uaccess.h
index fdfc06b85605..2b6fc8e0f071 100644
--- a/arch/um/include/um_uaccess.h
+++ b/arch/um/include/um_uaccess.h
@@ -6,7 +6,9 @@
#ifndef __ARCH_UM_UACCESS_H
#define __ARCH_UM_UACCESS_H
-#include "asm/fixmap.h"
+#include <asm/elf.h>
+#include <asm/fixmap.h>
+#include "sysdep/archsetjmp.h"
#define __under_task_size(addr, size) \
(((unsigned long) (addr) < TASK_SIZE) && \
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 8196450451cd..76a62c0cb2bc 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -19,12 +19,13 @@
void flush_thread(void)
{
void *data = NULL;
- unsigned long end = proc_mm ? task_size : STUB_START;
int ret;
arch_flush_thread(&current->thread.arch);
- ret = unmap(&current->mm->context.id, 0, end, 1, &data);
+ ret = unmap(&current->mm->context.id, 0, STUB_START, 0, &data);
+ ret = ret || unmap(&current->mm->context.id, STUB_END,
+ TASK_SIZE - STUB_END, 1, &data);
if (ret) {
printk(KERN_ERR "flush_thread - clearing address space failed, "
"err = %d\n", ret);
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c
index c716b5a6db13..984f80e668ca 100644
--- a/arch/um/kernel/exitcode.c
+++ b/arch/um/kernel/exitcode.c
@@ -1,15 +1,17 @@
/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
-#include "linux/kernel.h"
-#include "linux/init.h"
-#include "linux/ctype.h"
-#include "linux/proc_fs.h"
-#include "asm/uaccess.h"
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/types.h>
+#include <asm/uaccess.h>
-/* If read and write race, the read will still atomically read a valid
+/*
+ * If read and write race, the read will still atomically read a valid
* value.
*/
int uml_exitcode = 0;
@@ -19,18 +21,19 @@ static int read_proc_exitcode(char *page, char **start, off_t off,
{
int len, val;
- /* Save uml_exitcode in a local so that we don't need to guarantee
+ /*
+ * Save uml_exitcode in a local so that we don't need to guarantee
* that sprintf accesses it atomically.
*/
val = uml_exitcode;
len = sprintf(page, "%d\n", val);
len -= off;
- if(len <= off+count)
+ if (len <= off+count)
*eof = 1;
*start = page + off;
- if(len > count)
+ if (len > count)
len = count;
- if(len < 0)
+ if (len < 0)
len = 0;
return len;
}
@@ -41,11 +44,11 @@ static int write_proc_exitcode(struct file *file, const char __user *buffer,
char *end, buf[sizeof("nnnnn\0")];
int tmp;
- if(copy_from_user(buf, buffer, count))
+ if (copy_from_user(buf, buffer, count))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0);
- if((*end != '\0') && !isspace(*end))
+ if ((*end != '\0') && !isspace(*end))
return -EINVAL;
uml_exitcode = tmp;
@@ -57,7 +60,7 @@ static int make_proc_exitcode(void)
struct proc_dir_entry *ent;
ent = create_proc_entry("exitcode", 0600, &proc_root);
- if(ent == NULL){
+ if (ent == NULL) {
printk(KERN_WARNING "make_proc_exitcode : Failed to register "
"/proc/exitcode\n");
return 0;
diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c
index 734f873cab12..72eccd2a4113 100644
--- a/arch/um/kernel/gmon_syms.c
+++ b/arch/um/kernel/gmon_syms.c
@@ -1,5 +1,5 @@
-/*
- * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
+/*
+ * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
@@ -8,12 +8,13 @@
extern void __bb_init_func(void *) __attribute__((weak));
EXPORT_SYMBOL(__bb_init_func);
-/* This is defined (and referred to in profiling stub code) only by some GCC
+/*
+ * This is defined (and referred to in profiling stub code) only by some GCC
* versions in libgcov.
*
* Since SuSE backported the fix, we cannot handle it depending on GCC version.
- * So, unconditionally export it. But also give it a weak declaration, which will
- * be overridden by any other one.
+ * So, unconditionally export it. But also give it a weak declaration, which
+ * will be overridden by any other one.
*/
extern void __gcov_init(void *) __attribute__((weak));
diff --git a/arch/um/kernel/gprof_syms.c b/arch/um/kernel/gprof_syms.c
index 9244f018d44c..e2f043d0de6c 100644
--- a/arch/um/kernel/gprof_syms.c
+++ b/arch/um/kernel/gprof_syms.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
@@ -7,14 +7,3 @@
extern void mcount(void);
EXPORT_SYMBOL(mcount);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
index 16dc43e9d940..fa015565001b 100644
--- a/arch/um/kernel/initrd.c
+++ b/arch/um/kernel/initrd.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
@@ -7,7 +7,6 @@
#include "linux/bootmem.h"
#include "linux/initrd.h"
#include "asm/types.h"
-#include "kern_util.h"
#include "initrd.h"
#include "init.h"
#include "os.h"
@@ -21,18 +20,27 @@ static int __init read_initrd(void)
long long size;
int err;
- if(initrd == NULL)
+ if (initrd == NULL)
return 0;
err = os_file_size(initrd, &size);
- if(err)
+ if (err)
return 0;
+ /*
+ * This is necessary because alloc_bootmem craps out if you
+ * ask for no memory.
+ */
+ if (size == 0) {
+		printk(KERN_ERR "\"%s\" is a zero-size initrd\n", initrd);
+ return 0;
+ }
+
area = alloc_bootmem(size);
- if(area == NULL)
+ if (area == NULL)
return 0;
- if(load_initrd(initrd, area, size) == -1)
+ if (load_initrd(initrd, area, size) == -1)
return 0;
initrd_start = (unsigned long) area;
@@ -59,13 +67,15 @@ int load_initrd(char *filename, void *buf, int size)
int fd, n;
fd = os_open_file(filename, of_read(OPENFLAGS()), 0);
- if(fd < 0){
- printk("Opening '%s' failed - err = %d\n", filename, -fd);
+ if (fd < 0) {
+ printk(KERN_ERR "Opening '%s' failed - err = %d\n", filename,
+ -fd);
return -1;
}
n = os_read_file(fd, buf, size);
- if(n != size){
- printk("Read of %d bytes from '%s' failed, err = %d\n", size,
+ if (n != size) {
+ printk(KERN_ERR "Read of %d bytes from '%s' failed, "
+ "err = %d\n", size,
filename, -n);
return -1;
}
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index ba11ccd6a8a3..91587f8db340 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -107,10 +107,9 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
struct pollfd *tmp_pfd;
struct irq_fd *new_fd, *irq_fd;
unsigned long flags;
- int pid, events, err, n;
+ int events, err, n;
- pid = os_getpid();
- err = os_set_fd_async(fd, pid);
+ err = os_set_fd_async(fd);
if (err < 0)
goto out;
@@ -127,7 +126,6 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
.fd = fd,
.type = type,
.irq = irq,
- .pid = pid,
.events = events,
.current_events = 0 } );
diff --git a/arch/um/kernel/ksyms.c b/arch/um/kernel/ksyms.c
index 7c7142ba3bd7..5311ee93ede3 100644
--- a/arch/um/kernel/ksyms.c
+++ b/arch/um/kernel/ksyms.c
@@ -18,15 +18,11 @@ EXPORT_SYMBOL(set_signals);
EXPORT_SYMBOL(get_signals);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(sys_waitpid);
-EXPORT_SYMBOL(task_size);
EXPORT_SYMBOL(flush_tlb_range);
-EXPORT_SYMBOL(host_task_size);
EXPORT_SYMBOL(arch_validate);
-EXPORT_SYMBOL(get_kmem_end);
EXPORT_SYMBOL(high_physmem);
EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(um_virt_to_phys);
EXPORT_SYMBOL(handle_page_fault);
EXPORT_SYMBOL(find_iomem);
@@ -40,7 +36,6 @@ EXPORT_SYMBOL(uml_strdup);
EXPORT_SYMBOL(os_stat_fd);
EXPORT_SYMBOL(os_stat_file);
EXPORT_SYMBOL(os_access);
-EXPORT_SYMBOL(os_get_exec_close);
EXPORT_SYMBOL(os_set_exec_close);
EXPORT_SYMBOL(os_getpid);
EXPORT_SYMBOL(os_open_file);
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 59822dee438a..d872fdce1d7e 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -1,49 +1,41 @@
/*
- * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
-#include "linux/stddef.h"
-#include "linux/kernel.h"
-#include "linux/mm.h"
-#include "linux/bootmem.h"
-#include "linux/swap.h"
-#include "linux/highmem.h"
-#include "linux/gfp.h"
-#include "asm/page.h"
-#include "asm/fixmap.h"
-#include "asm/pgalloc.h"
-#include "kern_util.h"
+#include <linux/stddef.h>
+#include <linux/bootmem.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <asm/fixmap.h>
+#include <asm/page.h>
#include "as-layout.h"
+#include "init.h"
#include "kern.h"
+#include "kern_util.h"
#include "mem_user.h"
-#include "um_uaccess.h"
#include "os.h"
-#include "linux/types.h"
-#include "linux/string.h"
-#include "init.h"
-#include "kern_constants.h"
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
/* allocated in paging_init and unchanged thereafter */
unsigned long *empty_bad_page = NULL;
+
+/*
+ * Initialized during boot, and readonly for initializing page tables
+ * afterwards
+ */
pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
int kmalloc_ok = 0;
+/* Used during early boot */
static unsigned long brk_end;
-void unmap_physmem(void)
-{
- os_unmap_memory((void *) brk_end, uml_reserved - brk_end);
-}
-
-static void map_cb(void *unused)
-{
- map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
-}
-
#ifdef CONFIG_HIGHMEM
static void setup_highmem(unsigned long highmem_start,
unsigned long highmem_len)
@@ -53,7 +45,7 @@ static void setup_highmem(unsigned long highmem_start,
int i;
highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
- for(i = 0; i < highmem_len >> PAGE_SHIFT; i++){
+ for (i = 0; i < highmem_len >> PAGE_SHIFT; i++) {
page = &mem_map[highmem_pfn + i];
ClearPageReserved(page);
init_page_count(page);
@@ -65,14 +57,13 @@ static void setup_highmem(unsigned long highmem_start,
void __init mem_init(void)
{
/* clear the zero-page */
- memset((void *) empty_zero_page, 0, PAGE_SIZE);
+ memset(empty_zero_page, 0, PAGE_SIZE);
/* Map in the area just after the brk now that kmalloc is about
* to be turned on.
*/
brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
- map_cb(NULL);
- initial_thread_cb(map_cb, NULL);
+ map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
free_bootmem(__pa(brk_end), uml_reserved - brk_end);
uml_reserved = brk_end;
@@ -85,7 +76,7 @@ void __init mem_init(void)
#endif
num_physpages = totalram_pages;
max_pfn = totalram_pages;
- printk(KERN_INFO "Memory: %luk available\n",
+ printk(KERN_INFO "Memory: %luk available\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
kmalloc_ok = 1;
@@ -119,7 +110,7 @@ static void __init one_md_table_init(pud_t *pud)
#endif
}
-static void __init fixrange_init(unsigned long start, unsigned long end,
+static void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
{
pgd_t *pgd;
@@ -138,7 +129,7 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
if (pud_none(*pud))
one_md_table_init(pud);
pmd = pmd_offset(pud, vaddr);
- for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
+ for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
one_page_table_init(pmd);
vaddr += PMD_SIZE;
}
@@ -152,7 +143,7 @@ pgprot_t kmap_prot;
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
- (vaddr)), (vaddr))
+ (vaddr)), (vaddr))
static void __init kmap_init(void)
{
@@ -197,21 +188,23 @@ static void __init fixaddr_user_init( void)
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- unsigned long paddr, vaddr = FIXADDR_USER_START;
+ phys_t p;
+ unsigned long v, vaddr = FIXADDR_USER_START;
- if ( ! size )
+ if (!size)
return;
fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
- paddr = (unsigned long)alloc_bootmem_low_pages( size);
- memcpy( (void *)paddr, (void *)FIXADDR_USER_START, size);
- paddr = __pa(paddr);
- for ( ; size > 0; size-=PAGE_SIZE, vaddr+=PAGE_SIZE, paddr+=PAGE_SIZE){
+ v = (unsigned long) alloc_bootmem_low_pages(size);
+ memcpy((void *) v, (void *) FIXADDR_USER_START, size);
+ p = __pa(v);
+ for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
+ p += PAGE_SIZE) {
pgd = swapper_pg_dir + pgd_index(vaddr);
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
- pte_set_val( (*pte), paddr, PAGE_READONLY);
+ pte_set_val(*pte, p, PAGE_READONLY);
}
#endif
}
@@ -223,7 +216,7 @@ void __init paging_init(void)
empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
- for(i = 0; i < ARRAY_SIZE(zones_size); i++)
+ for (i = 0; i < ARRAY_SIZE(zones_size); i++)
zones_size[i] = 0;
zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
@@ -253,32 +246,33 @@ struct page *arch_validate(struct page *page, gfp_t mask, int order)
int i;
again:
- if(page == NULL)
+ if (page == NULL)
return page;
- if(PageHighMem(page))
+ if (PageHighMem(page))
return page;
addr = (unsigned long) page_address(page);
- for(i = 0; i < (1 << order); i++){
+ for (i = 0; i < (1 << order); i++) {
current->thread.fault_addr = (void *) addr;
- if(__do_copy_to_user((void __user *) addr, &zero,
+ if (__do_copy_to_user((void __user *) addr, &zero,
sizeof(zero),
&current->thread.fault_addr,
- &current->thread.fault_catcher)){
- if(!(mask & __GFP_WAIT))
+ &current->thread.fault_catcher)) {
+ if (!(mask & __GFP_WAIT))
return NULL;
else break;
}
addr += PAGE_SIZE;
}
- if(i == (1 << order))
+ if (i == (1 << order))
return page;
page = alloc_pages(mask, order);
goto again;
}
-/* This can't do anything because nothing in the kernel image can be freed
+/*
+ * This can't do anything because nothing in the kernel image can be freed
* since it's not in kernel physical memory.
*/
@@ -290,8 +284,8 @@ void free_initmem(void)
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (start < end)
- printk ("Freeing initrd memory: %ldk freed\n",
- (end - start) >> 10);
+ printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
+ (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
init_page_count(virt_to_page(start));
@@ -308,32 +302,31 @@ void show_mem(void)
int highmem = 0;
struct page *page;
- printk("Mem-info:\n");
+ printk(KERN_INFO "Mem-info:\n");
show_free_areas();
- printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+ printk(KERN_INFO "Free swap: %6ldkB\n",
+ nr_swap_pages<<(PAGE_SHIFT-10));
pfn = max_mapnr;
- while(pfn-- > 0) {
+ while (pfn-- > 0) {
page = pfn_to_page(pfn);
total++;
- if(PageHighMem(page))
+ if (PageHighMem(page))
highmem++;
- if(PageReserved(page))
+ if (PageReserved(page))
reserved++;
- else if(PageSwapCache(page))
+ else if (PageSwapCache(page))
cached++;
- else if(page_count(page))
+ else if (page_count(page))
shared += page_count(page) - 1;
}
- printk("%d pages of RAM\n", total);
- printk("%d pages of HIGHMEM\n", highmem);
- printk("%d reserved pages\n", reserved);
- printk("%d pages shared\n", shared);
- printk("%d pages swap cached\n", cached);
+ printk(KERN_INFO "%d pages of RAM\n", total);
+ printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
+ printk(KERN_INFO "%d reserved pages\n", reserved);
+ printk(KERN_INFO "%d pages shared\n", shared);
+ printk(KERN_INFO "%d pages swap cached\n", cached);
}
-/*
- * Allocate and free page tables.
- */
+/* Allocate and free page tables. */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
@@ -341,14 +334,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
if (pgd) {
memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
- memcpy(pgd + USER_PTRS_PER_PGD,
- swapper_pg_dir + USER_PTRS_PER_PGD,
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}
return pgd;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long) pgd);
}
@@ -368,3 +361,15 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
return pte;
}
+
+#ifdef CONFIG_3_LEVEL_PGTABLES
+pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
+
+ if (pmd)
+ memset(pmd, 0, PAGE_SIZE);
+
+ return pmd;
+}
+#endif
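
A minimal user-space sketch of why several loops in this patch (fixrange_init above, and the update_pte/pmd/pud_range walkers in tlb.c further down) change their termination test from "addr != end" to "addr < end": stepping by a fixed stride from a start that is not stride-aligned can jump over end without ever equalling it. The stride of 4 and the addresses are invented for illustration; in the kernel the stride is PMD_SIZE or PAGE_SIZE.

/* not kernel code - a plain C demonstration of the loop-bound change */
#include <stdio.h>

int main(void)
{
	unsigned long stride = 4, start = 3, end = 10;
	unsigned long v;
	int steps = 0;

	/* with 'v != end' this loop would run forever: v goes 3, 7, 11, ... */
	for (v = start; v < end; v += stride)
		steps++;

	printf("stopped at %lu after %d steps\n", v, steps);
	return 0;
}
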
diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c
index e66432f42485..9757085a0220 100644
--- a/arch/um/kernel/physmem.c
+++ b/arch/um/kernel/physmem.c
@@ -55,16 +55,6 @@ int __init init_maps(unsigned long physmem, unsigned long iomem,
return 0;
}
-/* Changed during early boot */
-static unsigned long kmem_top = 0;
-
-unsigned long get_kmem_end(void)
-{
- if (kmem_top == 0)
- kmem_top = host_task_size - 1024 * 1024;
- return kmem_top;
-}
-
void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
int r, int w, int x)
{
@@ -174,10 +164,10 @@ __uml_setup("iomem=", parse_iomem,
* setup_iomem, both of which run during early boot. Afterwards, it's
* unchanged.
*/
-struct iomem_region *iomem_regions = NULL;
+struct iomem_region *iomem_regions;
-/* Initialized in parse_iomem */
-int iomem_size = 0;
+/* Initialized in parse_iomem and unchanged thereafter */
+int iomem_size;
unsigned long find_iomem(char *driver, unsigned long *len_out)
{
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 0eae00b3e588..c07961bedb75 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -4,19 +4,21 @@
* Licensed under the GPL
*/
-#include "linux/stddef.h"
-#include "linux/err.h"
-#include "linux/hardirq.h"
-#include "linux/mm.h"
-#include "linux/personality.h"
-#include "linux/proc_fs.h"
-#include "linux/ptrace.h"
-#include "linux/random.h"
-#include "linux/sched.h"
-#include "linux/tick.h"
-#include "linux/threads.h"
-#include "asm/pgtable.h"
-#include "asm/uaccess.h"
+#include <linux/stddef.h>
+#include <linux/err.h>
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/personality.h>
+#include <linux/proc_fs.h>
+#include <linux/ptrace.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/threads.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
@@ -30,7 +32,7 @@
*/
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
-static inline int external_pid(struct task_struct *task)
+static inline int external_pid(void)
{
/* FIXME: Need to look up userspace_pid by cpu */
return userspace_pid[0];
@@ -40,7 +42,7 @@ int pid_to_processor_id(int pid)
{
int i;
- for(i = 0; i < ncpus; i++) {
+ for (i = 0; i < ncpus; i++) {
if (cpu_tasks[i].pid == pid)
return i;
}
@@ -60,8 +62,6 @@ unsigned long alloc_stack(int order, int atomic)
if (atomic)
flags = GFP_ATOMIC;
page = __get_free_pages(flags, order);
- if (page == 0)
- return 0;
return page;
}
@@ -80,15 +80,15 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
static inline void set_current(struct task_struct *task)
{
cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
- { external_pid(task), task });
+ { external_pid(), task });
}
-extern void arch_switch_to(struct task_struct *from, struct task_struct *to);
+extern void arch_switch_to(struct task_struct *to);
void *_switch_to(void *prev, void *next, void *last)
{
struct task_struct *from = prev;
- struct task_struct *to= next;
+ struct task_struct *to = next;
to->thread.prev_sched = from;
set_current(to);
@@ -99,13 +99,13 @@ void *_switch_to(void *prev, void *next, void *last)
switch_threads(&from->thread.switch_buf,
&to->thread.switch_buf);
- arch_switch_to(current->thread.prev_sched, current);
+ arch_switch_to(current);
if (current->thread.saved_task)
show_regs(&(current->thread.regs));
- next= current->thread.saved_task;
- prev= current;
- } while(current->thread.saved_task);
+ to = current->thread.saved_task;
+ from = current;
+ } while (current->thread.saved_task);
return current->thread.prev_sched;
@@ -163,8 +163,6 @@ void new_thread_handler(void)
void fork_handler(void)
{
force_flush_all();
- if (current->thread.prev_sched == NULL)
- panic("blech");
schedule_tail(current->thread.prev_sched);
@@ -173,7 +171,7 @@ void fork_handler(void)
* arch_switch_to isn't needed. We could want to apply this to
* improve performance. -bb
*/
- arch_switch_to(current->thread.prev_sched, current);
+ arch_switch_to(current);
current->thread.prev_sched = NULL;
@@ -204,7 +202,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
arch_copy_thread(&current->thread.arch, &p->thread.arch);
}
else {
- init_thread_registers(&p->thread.regs.regs);
+ get_safe_registers(p->thread.regs.regs.gp);
p->thread.request.u.thread = current->thread.request.u.thread;
handler = new_thread_handler;
}
@@ -237,7 +235,7 @@ void default_idle(void)
{
unsigned long long nsecs;
- while(1) {
+ while (1) {
/* endless idle loop with no priority at all */
/*
@@ -256,53 +254,10 @@ void default_idle(void)
void cpu_idle(void)
{
- cpu_tasks[current_thread->cpu].pid = os_getpid();
+ cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
default_idle();
}
-void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
- pte_t *pte_out)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- pte_t ptent;
-
- if (task->mm == NULL)
- return ERR_PTR(-EINVAL);
- pgd = pgd_offset(task->mm, addr);
- if (!pgd_present(*pgd))
- return ERR_PTR(-EINVAL);
-
- pud = pud_offset(pgd, addr);
- if (!pud_present(*pud))
- return ERR_PTR(-EINVAL);
-
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd))
- return ERR_PTR(-EINVAL);
-
- pte = pte_offset_kernel(pmd, addr);
- ptent = *pte;
- if (!pte_present(ptent))
- return ERR_PTR(-EINVAL);
-
- if (pte_out != NULL)
- *pte_out = ptent;
- return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
-}
-
-char *current_cmd(void)
-{
-#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
- return "(Unknown)";
-#else
- void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
- return IS_ERR(addr) ? "(Unknown)": __va((unsigned long) addr);
-#endif
-}
-
void dump_thread(struct pt_regs *regs, struct user *u)
{
}
@@ -317,7 +272,7 @@ int user_context(unsigned long sp)
unsigned long stack;
stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
- return stack != (unsigned long) current_thread;
+ return stack != (unsigned long) current_thread_info();
}
extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
@@ -331,7 +286,7 @@ void do_uml_exitcalls(void)
(*call)();
}
-char *uml_strdup(char *string)
+char *uml_strdup(const char *string)
{
return kstrdup(string, GFP_KERNEL);
}
@@ -359,7 +314,7 @@ int strlen_user_proc(char __user *str)
int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
- int cpu = current_thread->cpu;
+ int cpu = current_thread_info()->cpu;
IPI_handler(cpu);
if (cpu != 0)
return 1;
@@ -369,7 +324,7 @@ int smp_sigio_handler(void)
int cpu(void)
{
- return current_thread->cpu;
+ return current_thread_info()->cpu;
}
static atomic_t using_sysemu = ATOMIC_INIT(0);
@@ -435,7 +390,7 @@ int singlestepping(void * t)
{
struct task_struct *task = t ? t : current;
- if ( ! (task->ptrace & PT_DTRACE) )
+ if (!(task->ptrace & PT_DTRACE))
return 0;
if (task->thread.singlestep_syscall)
@@ -459,3 +414,46 @@ unsigned long arch_align_stack(unsigned long sp)
return sp & ~0xf;
}
#endif
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long stack_page, sp, ip;
+ bool seen_sched = false;
+
+ if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
+ return 0;
+
+ stack_page = (unsigned long) task_stack_page(p);
+ /* Bail if the process has no kernel stack for some reason */
+ if (stack_page == 0)
+ return 0;
+
+ sp = p->thread.switch_buf->JB_SP;
+ /*
+ * Bail if the stack pointer is below the bottom of the kernel
+ * stack for some reason
+ */
+ if (sp < stack_page)
+ return 0;
+
+ while (sp < stack_page + THREAD_SIZE) {
+ ip = *((unsigned long *) sp);
+ if (in_sched_functions(ip))
+ /* Ignore everything until we're above the scheduler */
+ seen_sched = true;
+ else if (kernel_text_address(ip) && seen_sched)
+ return ip;
+
+ sp += sizeof(unsigned long);
+ }
+
+ return 0;
+}
+
+int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
+{
+ int cpu = current_thread_info()->cpu;
+
+ return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
+}
+
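
A user-space sketch of the scan the new get_wchan() above performs: walk the saved kernel stack one word at a time, ignore everything until a word that in_sched_functions() would accept has been seen, then report the first plausible text address after that. The two predicates and the fake stack contents below are invented stand-ins for in_sched_functions() and kernel_text_address().

#include <stdbool.h>
#include <stdio.h>

/* pretend addresses in [0x2000, 0x3000) belong to the scheduler ... */
static bool looks_like_sched(unsigned long w) { return w >= 0x2000 && w < 0x3000; }
/* ... and addresses in [0x1000, 0x9000) are kernel text */
static bool looks_like_text(unsigned long w) { return w >= 0x1000 && w < 0x9000; }

static unsigned long scan(const unsigned long *stack, int nwords)
{
	bool seen_sched = false;
	int i;

	for (i = 0; i < nwords; i++) {
		if (looks_like_sched(stack[i]))
			seen_sched = true;	/* still inside the scheduler */
		else if (looks_like_text(stack[i]) && seen_sched)
			return stack[i];	/* first caller above it */
	}
	return 0;
}

int main(void)
{
	unsigned long stack[] = { 0xdead, 0x2040, 0x1ffc, 0x4100 };

	printf("wchan = %#lx\n", scan(stack, 4));	/* prints 0x1ffc */
	return 0;
}
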
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 04cebcf0679f..00197d3d21ec 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -4,6 +4,7 @@
*/
#include "linux/sched.h"
+#include "kern_util.h"
#include "os.h"
#include "skas.h"
@@ -11,7 +12,7 @@ void (*pm_power_off)(void);
static void kill_off_processes(void)
{
- if(proc_mm)
+ if (proc_mm)
/*
* FIXME: need to loop over userspace_pids
*/
@@ -21,8 +22,8 @@ static void kill_off_processes(void)
int pid, me;
me = os_getpid();
- for_each_process(p){
- if(p->mm == NULL)
+ for_each_process(p) {
+ if (p->mm == NULL)
continue;
pid = p->mm->context.id.u.pid;
diff --git a/arch/um/kernel/sigio.c b/arch/um/kernel/sigio.c
index 89f9866a1354..2b272b63b514 100644
--- a/arch/um/kernel/sigio.c
+++ b/arch/um/kernel/sigio.c
@@ -1,18 +1,12 @@
/*
- * Copyright (C) 2002 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
* Licensed under the GPL
*/
-#include "linux/kernel.h"
-#include "linux/list.h"
-#include "linux/slab.h"
-#include "linux/signal.h"
-#include "linux/interrupt.h"
-#include "init.h"
-#include "sigio.h"
-#include "irq_user.h"
+#include <linux/interrupt.h>
#include "irq_kern.h"
#include "os.h"
+#include "sigio.h"
/* Protected by sigio_lock() called from write_sigio_workaround */
static int sigio_irq_fd = -1;
@@ -33,9 +27,9 @@ int write_sigio_irq(int fd)
err = um_request_irq(SIGIO_WRITE_IRQ, fd, IRQ_READ, sigio_interrupt,
IRQF_DISABLED|IRQF_SAMPLE_RANDOM, "write sigio",
NULL);
- if(err){
- printk("write_sigio_irq : um_request_irq failed, err = %d\n",
- err);
+ if (err) {
+ printk(KERN_ERR "write_sigio_irq : um_request_irq failed, "
+ "err = %d\n", err);
return -1;
}
sigio_irq_fd = fd;
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 19cb97733937..b0fce720c4d0 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -3,12 +3,12 @@
* Licensed under the GPL
*/
-#include "linux/module.h"
-#include "linux/ptrace.h"
-#include "linux/sched.h"
-#include "asm/siginfo.h"
-#include "asm/signal.h"
-#include "asm/unistd.h"
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+#include <asm/unistd.h>
#include "frame_kern.h"
#include "kern_util.h"
#include "sigcontext.h"
@@ -36,7 +36,7 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
/* Did we come from a system call? */
if (PT_REGS_SYSCALL_NR(regs) >= 0) {
/* If so, check system call restarting.. */
- switch(PT_REGS_SYSCALL_RET(regs)) {
+ switch (PT_REGS_SYSCALL_RET(regs)) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
PT_REGS_SYSCALL_RET(regs) = -EINTR;
@@ -116,7 +116,7 @@ static int kern_do_signal(struct pt_regs *regs)
/* Did we come from a system call? */
if (!handled_sig && (PT_REGS_SYSCALL_NR(regs) >= 0)) {
/* Restart the system call - no handlers present */
- switch(PT_REGS_SYSCALL_RET(regs)) {
+ switch (PT_REGS_SYSCALL_RET(regs)) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
diff --git a/arch/um/kernel/skas/clone.c b/arch/um/kernel/skas/clone.c
index 8d07a7acb909..2c8583c1a344 100644
--- a/arch/um/kernel/skas/clone.c
+++ b/arch/um/kernel/skas/clone.c
@@ -1,17 +1,20 @@
-#include <sched.h>
+/*
+ * Copyright (C) 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ * Licensed under the GPL
+ */
+
#include <signal.h>
-#include <sys/mman.h>
-#include <sys/time.h>
+#include <sched.h>
#include <asm/unistd.h>
+#include <sys/time.h>
#include "as-layout.h"
+#include "kern_constants.h"
#include "ptrace_user.h"
-#include "skas.h"
#include "stub-data.h"
-#include "uml-config.h"
#include "sysdep/stub.h"
-#include "kern_constants.h"
-/* This is in a separate file because it needs to be compiled with any
+/*
+ * This is in a separate file because it needs to be compiled with any
* extraneous gcc flags (-pg, -fprofile-arcs, -ftest-coverage) disabled
*
* Use UM_KERN_PAGE_SIZE instead of PAGE_SIZE because that calls getpagesize
@@ -26,25 +29,26 @@ stub_clone_handler(void)
err = stub_syscall2(__NR_clone, CLONE_PARENT | CLONE_FILES | SIGCHLD,
STUB_DATA + UM_KERN_PAGE_SIZE / 2 - sizeof(void *));
- if(err != 0)
+ if (err != 0)
goto out;
err = stub_syscall4(__NR_ptrace, PTRACE_TRACEME, 0, 0, 0);
- if(err)
+ if (err)
goto out;
- err = stub_syscall3(__NR_setitimer, ITIMER_VIRTUAL,
+ err = stub_syscall3(__NR_setitimer, ITIMER_VIRTUAL,
(long) &data->timer, 0);
- if(err)
+ if (err)
goto out;
remap_stack(data->fd, data->offset);
goto done;
out:
- /* save current result.
- * Parent: pid;
- * child: retcode of mmap already saved and it jumps around this
+ /*
+ * save current result.
+ * Parent: pid;
+ * child: retcode of mmap already saved and it jumps around this
* assignment
*/
data->err = err;
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index f859ec306cd5..78b3e9f69d57 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -34,33 +34,14 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
if (!pte)
goto out_pte;
- /*
- * There's an interaction between the skas0 stub pages, stack
- * randomization, and the BUG at the end of exit_mmap. exit_mmap
- * checks that the number of page tables freed is the same as had
- * been allocated. If the stack is on the last page table page,
- * then the stack pte page will be freed, and if not, it won't. To
- * avoid having to know where the stack is, or if the process mapped
- * something at the top of its address space for some other reason,
- * we set TASK_SIZE to end at the start of the last page table.
- * This keeps exit_mmap off the last page, but introduces a leak
- * of that page. So, we hang onto it here and free it in
- * destroy_context_skas.
- */
-
- mm->context.last_page_table = pmd_page_vaddr(*pmd);
-#ifdef CONFIG_3_LEVEL_PGTABLES
- mm->context.last_pmd = (unsigned long) __va(pud_val(*pud));
-#endif
-
*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
*pte = pte_mkread(*pte);
return 0;
out_pmd:
- pud_free(pud);
+ pud_free(mm, pud);
out_pte:
- pmd_free(pmd);
+ pmd_free(mm, pmd);
out:
return -ENOMEM;
}
@@ -76,24 +57,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
stack = get_zeroed_page(GFP_KERNEL);
if (stack == 0)
goto out;
-
- /*
- * This zeros the entry that pgd_alloc didn't, needed since
- * we are about to reinitialize it, and want mm.nr_ptes to
- * be accurate.
- */
- mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);
-
- ret = init_stub_pte(mm, STUB_CODE,
- (unsigned long) &__syscall_stub_start);
- if (ret)
- goto out_free;
-
- ret = init_stub_pte(mm, STUB_DATA, stack);
- if (ret)
- goto out_free;
-
- mm->nr_ptes--;
}
to_mm->id.stack = stack;
@@ -114,6 +77,11 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
to_mm->id.u.pid = copy_context_skas0(stack,
from_mm->id.u.pid);
else to_mm->id.u.pid = start_userspace(stack);
+
+ if (to_mm->id.u.pid < 0) {
+ ret = to_mm->id.u.pid;
+ goto out_free;
+ }
}
ret = init_new_ldt(to_mm, from_mm);
@@ -132,24 +100,87 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
return ret;
}
+void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+ struct page **pages;
+ int err, ret;
+
+ if (!skas_needs_stub)
+ return;
+
+ ret = init_stub_pte(mm, STUB_CODE,
+ (unsigned long) &__syscall_stub_start);
+ if (ret)
+ goto out;
+
+ ret = init_stub_pte(mm, STUB_DATA, mm->context.id.stack);
+ if (ret)
+ goto out;
+
+ pages = kmalloc(2 * sizeof(struct page *), GFP_KERNEL);
+ if (pages == NULL) {
+ printk(KERN_ERR "arch_dup_mmap failed to allocate 2 page "
+ "pointers\n");
+ goto out;
+ }
+
+ pages[0] = virt_to_page(&__syscall_stub_start);
+ pages[1] = virt_to_page(mm->context.id.stack);
+
+ /* dup_mmap already holds mmap_sem */
+ err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
+ VM_READ | VM_MAYREAD | VM_EXEC |
+ VM_MAYEXEC | VM_DONTCOPY, pages);
+ if (err) {
+ printk(KERN_ERR "install_special_mapping returned %d\n", err);
+ goto out_free;
+ }
+ return;
+
+out_free:
+ kfree(pages);
+out:
+ force_sigsegv(SIGSEGV, current);
+}
+
+void arch_exit_mmap(struct mm_struct *mm)
+{
+ pte_t *pte;
+
+ pte = virt_to_pte(mm, STUB_CODE);
+ if (pte != NULL)
+ pte_clear(mm, STUB_CODE, pte);
+
+ pte = virt_to_pte(mm, STUB_DATA);
+ if (pte == NULL)
+ return;
+
+ pte_clear(mm, STUB_DATA, pte);
+}
+
void destroy_context(struct mm_struct *mm)
{
struct mm_context *mmu = &mm->context;
if (proc_mm)
os_close_file(mmu->id.u.mm_fd);
- else
+ else {
+ /*
+ * If init_new_context wasn't called, this will be
+ * zero, resulting in a kill(0), which will result in the
+ * whole UML suddenly dying. Also, cover negative and
+ * 1 cases, since they shouldn't happen either.
+ */
+ if (mmu->id.u.pid < 2) {
+ printk(KERN_ERR "corrupt mm_context - pid = %d\n",
+ mmu->id.u.pid);
+ return;
+ }
os_kill_ptraced_process(mmu->id.u.pid, 1);
+ }
- if (!proc_mm || !ptrace_faultinfo) {
+ if (skas_needs_stub)
free_page(mmu->id.stack);
- pte_lock_deinit(virt_to_page(mmu->last_page_table));
- pte_free_kernel((pte_t *) mmu->last_page_table);
- dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
-#ifdef CONFIG_3_LEVEL_PGTABLES
- pmd_free((pmd_t *) mmu->last_pmd);
-#endif
- }
free_ldt(mmu);
}
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index fce389c2342f..2e9852c0d487 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -6,19 +6,25 @@
#include "linux/init.h"
#include "linux/sched.h"
#include "as-layout.h"
+#include "kern.h"
#include "os.h"
#include "skas.h"
int new_mm(unsigned long stack)
{
- int fd;
+ int fd, err;
fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
if (fd < 0)
return fd;
- if (skas_needs_stub)
- map_stub_pages(fd, STUB_CODE, STUB_DATA, stack);
+ if (skas_needs_stub) {
+ err = map_stub_pages(fd, STUB_CODE, STUB_DATA, stack);
+ if (err) {
+ os_close_file(fd);
+ return err;
+ }
+ }
return fd;
}
@@ -49,8 +55,14 @@ int __init start_uml(void)
{
stack_protections((unsigned long) &cpu0_irqstack);
set_sigstack(cpu0_irqstack, THREAD_SIZE);
- if (proc_mm)
+ if (proc_mm) {
userspace_pid[0] = start_userspace(0);
+ if (userspace_pid[0] < 0) {
+ printf("start_uml - start_userspace returned %d\n",
+ userspace_pid[0]);
+ exit(1);
+ }
+ }
init_new_thread_signals();
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 50b476f2b38d..4e3b820bd2be 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -9,6 +9,9 @@
#include "sysdep/ptrace.h"
#include "sysdep/syscalls.h"
+extern int syscall_table_size;
+#define NR_syscalls (syscall_table_size / sizeof(void *))
+
void handle_syscall(struct uml_pt_regs *r)
{
struct pt_regs *regs = container_of(r, struct pt_regs, regs);
@@ -17,9 +20,6 @@ void handle_syscall(struct uml_pt_regs *r)
syscall_trace(r, 0);
- current->thread.nsyscalls++;
- nsyscalls++;
-
/*
* This should go in the declaration of syscall, but when I do that,
* strace -f -c bash -c 'ls ; ls' breaks, sometimes not tracing
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 1d8b119f2d0e..e22c96993db3 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -3,128 +3,130 @@
* Licensed under the GPL
*/
-#include "linux/err.h"
-#include "linux/highmem.h"
-#include "linux/mm.h"
-#include "asm/current.h"
-#include "asm/page.h"
-#include "asm/pgtable.h"
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/current.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
#include "kern_util.h"
#include "os.h"
-extern void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
- pte_t *pte_out);
-
-static unsigned long maybe_map(unsigned long virt, int is_write)
+pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
- pte_t pte;
- int err;
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+
+ if (mm == NULL)
+ return NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (!pgd_present(*pgd))
+ return NULL;
+
+ pud = pud_offset(pgd, addr);
+ if (!pud_present(*pud))
+ return NULL;
- void *phys = um_virt_to_phys(current, virt, &pte);
- int dummy_code;
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd))
+ return NULL;
+
+ return pte_offset_kernel(pmd, addr);
+}
+
+static pte_t *maybe_map(unsigned long virt, int is_write)
+{
+ pte_t *pte = virt_to_pte(current->mm, virt);
+ int err, dummy_code;
- if (IS_ERR(phys) || (is_write && !pte_write(pte))) {
+ if ((pte == NULL) || !pte_present(*pte) ||
+ (is_write && !pte_write(*pte))) {
err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
if (err)
- return -1UL;
- phys = um_virt_to_phys(current, virt, NULL);
+ return NULL;
+ pte = virt_to_pte(current->mm, virt);
}
- if (IS_ERR(phys))
- phys = (void *) -1;
+ if (!pte_present(*pte))
+ pte = NULL;
- return (unsigned long) phys;
+ return pte;
}
static int do_op_one_page(unsigned long addr, int len, int is_write,
int (*op)(unsigned long addr, int len, void *arg), void *arg)
{
+ jmp_buf buf;
struct page *page;
- int n;
+ pte_t *pte;
+ int n, faulted;
- addr = maybe_map(addr, is_write);
- if (addr == -1UL)
+ pte = maybe_map(addr, is_write);
+ if (pte == NULL)
return -1;
- page = phys_to_page(addr);
+ page = pte_page(*pte);
addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
(addr & ~PAGE_MASK);
- n = (*op)(addr, len, arg);
+ current->thread.fault_catcher = &buf;
+
+ faulted = UML_SETJMP(&buf);
+ if (faulted == 0)
+ n = (*op)(addr, len, arg);
+ else
+ n = -1;
+
+ current->thread.fault_catcher = NULL;
kunmap_atomic(page, KM_UML_USERCOPY);
return n;
}
-static void do_buffer_op(void *jmpbuf, void *arg_ptr)
+static int buffer_op(unsigned long addr, int len, int is_write,
+ int (*op)(unsigned long, int, void *), void *arg)
{
- va_list args;
- unsigned long addr;
- int len, is_write, size, remain, n;
- int (*op)(unsigned long, int, void *);
- void *arg;
- int *res;
-
- va_copy(args, *(va_list *)arg_ptr);
- addr = va_arg(args, unsigned long);
- len = va_arg(args, int);
- is_write = va_arg(args, int);
- op = va_arg(args, void *);
- arg = va_arg(args, void *);
- res = va_arg(args, int *);
- va_end(args);
+ int size, remain, n;
+
size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
remain = len;
- current->thread.fault_catcher = jmpbuf;
n = do_op_one_page(addr, size, is_write, op, arg);
if (n != 0) {
- *res = (n < 0 ? remain : 0);
+ remain = (n < 0 ? remain : 0);
goto out;
}
addr += size;
remain -= size;
- if (remain == 0) {
- *res = 0;
+ if (remain == 0)
goto out;
- }
- while(addr < ((addr + remain) & PAGE_MASK)) {
+ while (addr < ((addr + remain) & PAGE_MASK)) {
n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
if (n != 0) {
- *res = (n < 0 ? remain : 0);
+ remain = (n < 0 ? remain : 0);
goto out;
}
addr += PAGE_SIZE;
remain -= PAGE_SIZE;
}
- if (remain == 0) {
- *res = 0;
+ if (remain == 0)
goto out;
- }
n = do_op_one_page(addr, remain, is_write, op, arg);
- if (n != 0)
- *res = (n < 0 ? remain : 0);
- else *res = 0;
- out:
- current->thread.fault_catcher = NULL;
-}
-
-static int buffer_op(unsigned long addr, int len, int is_write,
- int (*op)(unsigned long addr, int len, void *arg),
- void *arg)
-{
- int faulted, res;
-
- faulted = setjmp_wrapper(do_buffer_op, addr, len, is_write, op, arg,
- &res);
- if (!faulted)
- return res;
+ if (n != 0) {
+ remain = (n < 0 ? remain : 0);
+ goto out;
+ }
- return addr + len - (unsigned long) current->thread.fault_addr;
+ return 0;
+ out:
+ return remain;
}
static int copy_chunk_from_user(unsigned long from, int len, void *arg)
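
A user-space sketch of the chunking the rewritten buffer_op() above performs: an arbitrary [addr, addr+len) range is split into a partial leading piece up to the next page boundary, whole pages, and a tail, so each piece can be mapped and faulted in independently. PAGE_SIZE and the example range are assumptions for the demo; the printf stands in for do_op_one_page().

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)	(((a) + PAGE_SIZE - 1) & PAGE_MASK)

static void chunk(unsigned long addr, unsigned long len)
{
	unsigned long size = PAGE_ALIGN(addr) - addr;	/* up to next boundary */
	unsigned long remain = len;

	if (size > len)
		size = len;
	printf("op at %#lx for %lu bytes\n", addr, size);
	addr += size;
	remain -= size;

	while (addr < ((addr + remain) & PAGE_MASK)) {	/* whole pages */
		printf("op at %#lx for %lu bytes\n", addr, PAGE_SIZE);
		addr += PAGE_SIZE;
		remain -= PAGE_SIZE;
	}

	if (remain)					/* the tail */
		printf("op at %#lx for %lu bytes\n", addr, remain);
}

int main(void)
{
	chunk(0x1f00, 0x1200);	/* spans three pages */
	return 0;
}
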
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index 36d89cf8d20b..e1062ec36d40 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -21,7 +21,6 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
#include "asm/smp.h"
#include "asm/processor.h"
#include "asm/spinlock.h"
-#include "kern_util.h"
#include "kern.h"
#include "irq_user.h"
#include "os.h"
@@ -61,7 +60,7 @@ void smp_send_stop(void)
continue;
os_write_file(cpu_data[i].ipi_pipe[1], "S", 1);
}
- printk(KERN_INFO "done\n");
+ printk(KERN_CONT "done\n");
}
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
@@ -75,8 +74,7 @@ static int idle_proc(void *cpup)
if (err < 0)
panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err);
- os_set_fd_async(cpu_data[cpu].ipi_pipe[0],
- current->thread.mode.tt.extern_pid);
+ os_set_fd_async(cpu_data[cpu].ipi_pipe[0]);
wmb();
if (cpu_test_and_set(cpu, cpu_callin_map)) {
@@ -129,8 +127,7 @@ void smp_prepare_cpus(unsigned int maxcpus)
if (err < 0)
panic("CPU#0 failed to create IPI pipe, errno = %d", -err);
- os_set_fd_async(cpu_data[me].ipi_pipe[0],
- current->thread.mode.tt.extern_pid);
+ os_set_fd_async(cpu_data[me].ipi_pipe[0]);
for (cpu = 1; cpu < ncpus; cpu++) {
printk(KERN_INFO "Booting processor %d...\n", cpu);
@@ -143,9 +140,8 @@ void smp_prepare_cpus(unsigned int maxcpus)
while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
cpu_relax();
- if (cpu_isset(cpu, cpu_callin_map))
- printk(KERN_INFO "done\n");
- else printk(KERN_INFO "failed\n");
+ printk(KERN_INFO "%s\n",
+ cpu_isset(cpu, cpu_callin_map) ? "done" : "failed");
}
}
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index b9d92b2089ae..9cffc628a37e 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -13,9 +13,6 @@
#include "asm/uaccess.h"
#include "asm/unistd.h"
-/* Unlocked, I don't care if this is a bit off */
-int nsyscalls = 0;
-
long sys_fork(void)
{
long ret;
diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c
index 93263571d813..56d43d0a3960 100644
--- a/arch/um/kernel/sysrq.c
+++ b/arch/um/kernel/sysrq.c
@@ -1,38 +1,37 @@
-/*
- * Copyright (C) 2001 Jeff Dike (jdike@karaya.com)
+/*
+ * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
-#include "linux/sched.h"
-#include "linux/kernel.h"
-#include "linux/module.h"
-#include "linux/kallsyms.h"
-#include "asm/page.h"
-#include "asm/processor.h"
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
#include "sysrq.h"
/* Catch non-i386 SUBARCH's. */
#if !defined(CONFIG_UML_X86) || defined(CONFIG_64BIT)
void show_trace(struct task_struct *task, unsigned long * stack)
{
- unsigned long addr;
+ unsigned long addr;
- if (!stack) {
+ if (!stack) {
stack = (unsigned long*) &stack;
WARN_ON(1);
}
- printk("Call Trace: \n");
- while (((long) stack & (THREAD_SIZE-1)) != 0) {
- addr = *stack;
+ printk(KERN_INFO "Call Trace: \n");
+ while (((long) stack & (THREAD_SIZE-1)) != 0) {
+ addr = *stack;
if (__kernel_text_address(addr)) {
- printk("%08lx: [<%08lx>]", (unsigned long) stack, addr);
- print_symbol(" %s", addr);
- printk("\n");
- }
- stack++;
- }
- printk("\n");
+ printk(KERN_INFO "%08lx: [<%08lx>]",
+ (unsigned long) stack, addr);
+ print_symbol(KERN_CONT " %s", addr);
+ printk(KERN_CONT "\n");
+ }
+ stack++;
+ }
+ printk(KERN_INFO "\n");
}
#endif
@@ -67,14 +66,13 @@ void show_stack(struct task_struct *task, unsigned long *esp)
}
stack = esp;
- for(i = 0; i < kstack_depth_to_print; i++) {
+ for (i = 0; i < kstack_depth_to_print; i++) {
if (kstack_end(stack))
break;
if (i && ((i % 8) == 0))
- printk("\n ");
+ printk("\n" KERN_INFO " ");
printk("%08lx ", *stack++);
}
- printk("Call Trace: \n");
show_trace(task, esp);
}
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 1ac746a9eae1..e066e84493b1 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -3,12 +3,12 @@
* Licensed under the GPL
*/
-#include "linux/clockchips.h"
-#include "linux/interrupt.h"
-#include "linux/jiffies.h"
-#include "linux/threads.h"
-#include "asm/irq.h"
-#include "asm/param.h"
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/threads.h>
+#include <asm/irq.h>
+#include <asm/param.h>
#include "kern_util.h"
#include "os.h"
@@ -32,7 +32,7 @@ void timer_handler(int sig, struct uml_pt_regs *regs)
static void itimer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- switch(mode) {
+ switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
set_interval();
break;
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index f4a0e407eee4..d175d0566af0 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -3,9 +3,10 @@
* Licensed under the GPL
*/
-#include "linux/mm.h"
-#include "asm/pgtable.h"
-#include "asm/tlbflush.h"
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
@@ -56,7 +57,7 @@ static int do_ops(struct host_vm_change *hvc, int end,
for (i = 0; i < end && !ret; i++) {
op = &hvc->ops[i];
- switch(op->type) {
+ switch (op->type) {
case MMAP:
ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
op->u.mmap.prot, op->u.mmap.fd,
@@ -183,27 +184,30 @@ static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
pte = pte_offset_kernel(pmd, addr);
do {
+ if ((addr >= STUB_START) && (addr < STUB_END))
+ continue;
+
r = pte_read(*pte);
w = pte_write(*pte);
x = pte_exec(*pte);
if (!pte_young(*pte)) {
r = 0;
w = 0;
- } else if (!pte_dirty(*pte)) {
+ } else if (!pte_dirty(*pte))
w = 0;
- }
+
prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
(x ? UM_PROT_EXEC : 0));
if (hvc->force || pte_newpage(*pte)) {
if (pte_present(*pte))
ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
PAGE_SIZE, prot, hvc);
- else ret = add_munmap(addr, PAGE_SIZE, hvc);
- }
- else if (pte_newprot(*pte))
+ else
+ ret = add_munmap(addr, PAGE_SIZE, hvc);
+ } else if (pte_newprot(*pte))
ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
*pte = pte_mkuptodate(*pte);
- } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
+ } while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
return ret;
}
@@ -225,7 +229,7 @@ static inline int update_pmd_range(pud_t *pud, unsigned long addr,
}
}
else ret = update_pte_range(pmd, addr, next, hvc);
- } while (pmd++, addr = next, ((addr != end) && !ret));
+ } while (pmd++, addr = next, ((addr < end) && !ret));
return ret;
}
@@ -247,7 +251,7 @@ static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
}
}
else ret = update_pmd_range(pud, addr, next, hvc);
- } while (pud++, addr = next, ((addr != end) && !ret));
+ } while (pud++, addr = next, ((addr < end) && !ret));
return ret;
}
@@ -270,7 +274,7 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
}
}
else ret = update_pud_range(pgd, addr, next, &hvc);
- } while (pgd++, addr = next, ((addr != end_addr) && !ret));
+ } while (pgd++, addr = next, ((addr < end_addr) && !ret));
if (!ret)
ret = do_ops(&hvc, hvc.index, 1);
@@ -485,9 +489,6 @@ void __flush_tlb_one(unsigned long addr)
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
unsigned long end_addr, int force)
{
- if (!proc_mm && (end_addr > STUB_START))
- end_addr = STUB_START;
-
fix_range_common(mm, start_addr, end_addr, force);
}
@@ -499,10 +500,9 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
else fix_range(vma->vm_mm, start, end, 0);
}
-void flush_tlb_mm(struct mm_struct *mm)
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
- unsigned long end;
-
/*
* Don't bother flushing if this address space is about to be
* destroyed.
@@ -510,8 +510,17 @@ void flush_tlb_mm(struct mm_struct *mm)
if (atomic_read(&mm->mm_users) == 0)
return;
- end = proc_mm ? task_size : STUB_START;
- fix_range(mm, 0, end, 0);
+ fix_range(mm, start, end, 0);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma = mm->mmap;
+
+ while (vma != NULL) {
+ fix_range(mm, vma->vm_start, vma->vm_end, 0);
+ vma = vma->vm_next;
+ }
}
void force_flush_all(void)
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index cb3321f8e0a9..44e490419495 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -13,6 +13,7 @@
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
+#include "skas.h"
#include "sysdep/sigcontext.h"
/*
@@ -128,7 +129,19 @@ static void bad_segv(struct faultinfo fi, unsigned long ip)
force_sig_info(SIGSEGV, &si, current);
}
-static void segv_handler(int sig, struct uml_pt_regs *regs)
+void fatal_sigsegv(void)
+{
+ force_sigsegv(SIGSEGV, current);
+ do_signal();
+ /*
+ * This is to tell gcc that we're not returning - do_signal
+ * can, in general, return, but in this case, it's not, since
+ * we just got a fatal SIGSEGV queued.
+ */
+ os_dump_core();
+}
+
+void segv_handler(int sig, struct uml_pt_regs *regs)
{
struct faultinfo * fi = UPT_FAULTINFO(regs);
@@ -216,9 +229,6 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
void relay_signal(int sig, struct uml_pt_regs *regs)
{
- if (arch_handle_signal(sig, regs))
- return;
-
if (!UPT_IS_USER(regs)) {
if (sig == SIGBUS)
printk(KERN_ERR "Bus error - the host /dev/shm or /tmp "
@@ -226,31 +236,24 @@ void relay_signal(int sig, struct uml_pt_regs *regs)
panic("Kernel mode signal %d", sig);
}
+ arch_examine_signal(sig, regs);
+
current->thread.arch.faultinfo = *UPT_FAULTINFO(regs);
force_sig(sig, current);
}
-static void bus_handler(int sig, struct uml_pt_regs *regs)
+void bus_handler(int sig, struct uml_pt_regs *regs)
{
if (current->thread.fault_catcher != NULL)
UML_LONGJMP(current->thread.fault_catcher, 1);
else relay_signal(sig, regs);
}
-static void winch(int sig, struct uml_pt_regs *regs)
+void winch(int sig, struct uml_pt_regs *regs)
{
do_IRQ(WINCH_IRQ, regs);
}
-const struct kern_handlers handlinfo_kern = {
- .relay_signal = relay_signal,
- .winch = winch,
- .bus_handler = bus_handler,
- .page_fault = segv_handler,
- .sigio_handler = sigio_handler,
- .timer_handler = timer_handler
-};
-
void trap_init(void)
{
}
diff --git a/arch/um/kernel/uaccess.c b/arch/um/kernel/uaccess.c
index d7436aacd26f..f0f4b040d7c5 100644
--- a/arch/um/kernel/uaccess.c
+++ b/arch/um/kernel/uaccess.c
@@ -1,10 +1,11 @@
/*
* Copyright (C) 2001 Chris Emerson (cemerson@chiark.greenend.org.uk)
- * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
-/* These are here rather than tt/uaccess.c because skas mode needs them in
+/*
+ * These are here rather than tt/uaccess.c because skas mode needs them in
* order to do SIGBUS recovery when a tmpfs mount runs out of room.
*/
@@ -25,6 +26,8 @@ int __do_copy_to_user(void *to, const void *from, int n,
fault = __do_user_copy(to, from, n, fault_addr, fault_catcher,
__do_copy, &faulted);
- if(!faulted) return(0);
- else return(n - (fault - (unsigned long) to));
+ if (!faulted)
+ return 0;
+ else
+ return n - (fault - (unsigned long) to);
}
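
The hunk above keeps the copy_to_user-style return convention: 0 when the whole buffer was copied, otherwise the number of bytes that were not copied, computed from the faulting address inside the destination. A tiny sketch of that arithmetic; the function name and addresses are invented for illustration.

#include <assert.h>

/* n bytes were requested starting at dst; the copy faulted at fault_addr */
static unsigned long uncopied(unsigned long dst, unsigned long n,
			      unsigned long fault_addr)
{
	return n - (fault_addr - dst);
}

int main(void)
{
	/* fault 100 bytes into a 256-byte copy -> 156 bytes left uncopied */
	assert(uncopied(0x1000, 256, 0x1000 + 100) == 156);
	return 0;
}
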
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index f1c71393f578..468aba990dbd 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -3,22 +3,23 @@
* Licensed under the GPL
*/
-#include "linux/delay.h"
-#include "linux/mm.h"
-#include "linux/module.h"
-#include "linux/seq_file.h"
-#include "linux/string.h"
-#include "linux/utsname.h"
-#include "asm/pgtable.h"
-#include "asm/processor.h"
-#include "asm/setup.h"
-#include "arch.h"
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/utsname.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
#include "as-layout.h"
+#include "arch.h"
#include "init.h"
#include "kern.h"
+#include "kern_util.h"
#include "mem_user.h"
#include "os.h"
-#include "skas.h"
#define DEFAULT_COMMAND_LINE "root=98:0"
@@ -100,8 +101,6 @@ const struct seq_operations cpuinfo_op = {
};
/* Set in linux_main */
-unsigned long host_task_size;
-unsigned long task_size;
unsigned long uml_physmem;
unsigned long uml_reserved; /* Also modified in mem_init */
unsigned long start_vm;
@@ -197,20 +196,19 @@ __uml_setup("--help", Usage,
" Prints this message.\n\n"
);
-static int __init uml_checksetup(char *line, int *add)
+static void __init uml_checksetup(char *line, int *add)
{
struct uml_param *p;
p = &__uml_setup_start;
- while(p < &__uml_setup_end) {
+ while (p < &__uml_setup_end) {
int n;
n = strlen(p->str);
if (!strncmp(line, p->str, n) && p->setup_func(line + n, add))
- return 1;
+ return;
p++;
}
- return 0;
}
static void __init uml_postsetup(void)
@@ -218,13 +216,30 @@ static void __init uml_postsetup(void)
initcall_t *p;
p = &__uml_postsetup_start;
- while(p < &__uml_postsetup_end) {
+ while (p < &__uml_postsetup_end) {
(*p)();
p++;
}
return;
}
+static int panic_exit(struct notifier_block *self, unsigned long unused1,
+ void *unused2)
+{
+ bust_spinlocks(1);
+ show_regs(&(current->thread.regs));
+ bust_spinlocks(0);
+ uml_exitcode = 1;
+ os_dump_core();
+ return 0;
+}
+
+static struct notifier_block panic_exit_notifier = {
+ .notifier_call = panic_exit,
+ .next = NULL,
+ .priority = 0
+};
+
/* Set during early boot */
unsigned long brk_start;
unsigned long end_iomem;
@@ -234,20 +249,6 @@ EXPORT_SYMBOL(end_iomem);
extern char __binary_start;
-static unsigned long set_task_sizes_skas(unsigned long *task_size_out)
-{
- /* Round up to the nearest 4M */
- unsigned long host_task_size = ROUND_4M((unsigned long)
- &host_task_size);
-
- if (!skas_needs_stub)
- *task_size_out = host_task_size;
- else
- *task_size_out = STUB_START & PGDIR_MASK;
-
- return host_task_size;
-}
-
int __init linux_main(int argc, char **argv)
{
unsigned long avail, diff;
@@ -278,13 +279,6 @@ int __init linux_main(int argc, char **argv)
printf("UML running in %s mode\n", mode);
- host_task_size = set_task_sizes_skas(&task_size);
-
- /*
- * Setting up handlers to 'sig_info' struct
- */
- os_fill_handlinfo(handlinfo_kern);
-
brk_start = (unsigned long) sbrk(0);
/*
@@ -309,7 +303,7 @@ int __init linux_main(int argc, char **argv)
highmem = 0;
iomem_size = (iomem_size + PAGE_SIZE - 1) & PAGE_MASK;
- max_physmem = get_kmem_end() - uml_physmem - iomem_size - MIN_VMALLOC;
+ max_physmem = CONFIG_TOP_ADDR - uml_physmem - iomem_size - MIN_VMALLOC;
/*
* Zones have to begin on a 1 << MAX_ORDER page boundary,
@@ -341,7 +335,7 @@ int __init linux_main(int argc, char **argv)
}
virtmem_size = physmem_size;
- avail = get_kmem_end() - start_vm;
+ avail = CONFIG_TOP_ADDR - start_vm;
if (physmem_size > avail)
virtmem_size = avail;
end_vm = start_vm + virtmem_size;
@@ -350,6 +344,9 @@ int __init linux_main(int argc, char **argv)
printf("Kernel virtual memory size shrunk to %lu bytes\n",
virtmem_size);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &panic_exit_notifier);
+
uml_postsetup();
stack_protections((unsigned long) &init_thread_info);
@@ -358,29 +355,8 @@ int __init linux_main(int argc, char **argv)
return start_uml();
}
-extern int uml_exitcode;
-
-static int panic_exit(struct notifier_block *self, unsigned long unused1,
- void *unused2)
-{
- bust_spinlocks(1);
- show_regs(&(current->thread.regs));
- bust_spinlocks(0);
- uml_exitcode = 1;
- os_dump_core();
- return 0;
-}
-
-static struct notifier_block panic_exit_notifier = {
- .notifier_call = panic_exit,
- .next = NULL,
- .priority = 0
-};
-
void __init setup_arch(char **cmdline_p)
{
- atomic_notifier_chain_register(&panic_notifier_list,
- &panic_exit_notifier);
paging_init();
strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
diff --git a/arch/um/kernel/umid.c b/arch/um/kernel/umid.c
index 039e16efcd55..81e07e2be3ae 100644
--- a/arch/um/kernel/umid.c
+++ b/arch/um/kernel/umid.c
@@ -1,13 +1,12 @@
-/*
- * Copyright (C) 2001, 2002 Jeff Dike (jdike@karaya.com)
+/*
+ * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
-#include "asm/errno.h"
+#include <asm/errno.h>
#include "init.h"
-#include "os.h"
#include "kern.h"
-#include "linux/kernel.h"
+#include "os.h"
/* Changed by set_umid_arg */
static int umid_inited = 0;
@@ -16,16 +15,16 @@ static int __init set_umid_arg(char *name, int *add)
{
int err;
- if(umid_inited){
+ if (umid_inited) {
printf("umid already set\n");
return 0;
}
*add = 0;
err = set_umid(name);
- if(err == -EEXIST)
+ if (err == -EEXIST)
printf("umid '%s' already in use\n", name);
- else if(!err)
+ else if (!err)
umid_inited = 1;
return 0;
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index 8e129af8170d..8a48d6a30064 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -4,7 +4,7 @@
#
obj-y = aio.o elf_aux.o execvp.o file.o helper.o irq.o main.o mem.o process.o \
- registers.o sigio.o signal.o start_up.o time.o trap.o tty.o uaccess.o \
+ registers.o sigio.o signal.o start_up.o time.o tty.o uaccess.o \
umid.o tls.o user_syms.o util.o drivers/ sys-$(SUBARCH)/ skas/
obj-$(CONFIG_TTY_LOG) += tty_log.o
@@ -12,7 +12,7 @@ user-objs-$(CONFIG_TTY_LOG) += tty_log.o
USER_OBJS := $(user-objs-y) aio.o elf_aux.o execvp.o file.o helper.o irq.o \
main.o mem.o process.o registers.o sigio.o signal.o start_up.o time.o \
- trap.o tty.o tls.o uaccess.o umid.o util.o
+ tty.o tls.o uaccess.o umid.o util.o
CFLAGS_user_syms.o += -DSUBARCH_$(SUBARCH)
diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c
index 93dc0c80ebaf..b8d8c9ca8d4a 100644
--- a/arch/um/os-Linux/aio.c
+++ b/arch/um/os-Linux/aio.c
@@ -12,6 +12,7 @@
#include "aio.h"
#include "init.h"
#include "kern_constants.h"
+#include "kern_util.h"
#include "os.h"
#include "user.h"
diff --git a/arch/um/os-Linux/drivers/ethertap_user.c b/arch/um/os-Linux/drivers/ethertap_user.c
index 07ca0cb472ac..6fb0b174f538 100644
--- a/arch/um/os-Linux/drivers/ethertap_user.c
+++ b/arch/um/os-Linux/drivers/ethertap_user.c
@@ -131,7 +131,7 @@ static int etap_tramp(char *dev, char *gate, int control_me,
}
if (c != 1) {
printk(UM_KERN_ERR "etap_tramp : uml_net failed\n");
- err = helper_wait(pid, 0, "uml_net");
+ err = helper_wait(pid);
}
return err;
}
diff --git a/arch/um/os-Linux/drivers/tuntap_user.c b/arch/um/os-Linux/drivers/tuntap_user.c
index 1037a3b6386e..2448be03fd7a 100644
--- a/arch/um/os-Linux/drivers/tuntap_user.c
+++ b/arch/um/os-Linux/drivers/tuntap_user.c
@@ -14,6 +14,7 @@
#include <sys/wait.h>
#include <sys/uio.h>
#include "kern_constants.h"
+#include "kern_util.h"
#include "os.h"
#include "tuntap.h"
#include "user.h"
@@ -107,7 +108,7 @@ static int tuntap_open_tramp(char *gate, int *fd_out, int me, int remote,
"errno = %d\n", errno);
return err;
}
- helper_wait(pid, 0, "tuntap_open_tramp");
+ helper_wait(pid);
cmsg = CMSG_FIRSTHDR(&msg);
if (cmsg == NULL) {
@@ -148,7 +149,7 @@ static int tuntap_open(void *data)
memset(&ifr, 0, sizeof(ifr));
ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
strlcpy(ifr.ifr_name, pri->dev_name, sizeof(ifr.ifr_name));
- if (ioctl(pri->fd, TUNSETIFF, (void *) &ifr) < 0) {
+ if (ioctl(pri->fd, TUNSETIFF, &ifr) < 0) {
err = -errno;
printk(UM_KERN_ERR "TUNSETIFF failed, errno = %d\n",
errno);
diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
index f83462758627..b5afcfd0f861 100644
--- a/arch/um/os-Linux/file.c
+++ b/arch/um/os-Linux/file.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
@@ -8,18 +8,16 @@
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/socket.h>
-#include <sys/un.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
-#include <sys/uio.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/un.h>
+#include "kern_constants.h"
#include "os.h"
#include "user.h"
-#include "kern_util.h"
-static void copy_stat(struct uml_stat *dst, struct stat64 *src)
+static void copy_stat(struct uml_stat *dst, const struct stat64 *src)
{
*dst = ((struct uml_stat) {
.ust_dev = src->st_dev, /* device */
@@ -43,10 +41,10 @@ int os_stat_fd(const int fd, struct uml_stat *ubuf)
int err;
CATCH_EINTR(err = fstat64(fd, &sbuf));
- if(err < 0)
+ if (err < 0)
return -errno;
- if(ubuf != NULL)
+ if (ubuf != NULL)
copy_stat(ubuf, &sbuf);
return err;
}
@@ -56,27 +54,26 @@ int os_stat_file(const char *file_name, struct uml_stat *ubuf)
struct stat64 sbuf;
int err;
- do {
- err = stat64(file_name, &sbuf);
- } while((err < 0) && (errno == EINTR)) ;
-
- if(err < 0)
+ CATCH_EINTR(err = stat64(file_name, &sbuf));
+ if (err < 0)
return -errno;
- if(ubuf != NULL)
+ if (ubuf != NULL)
copy_stat(ubuf, &sbuf);
return err;
}
-int os_access(const char* file, int mode)
+int os_access(const char *file, int mode)
{
int amode, err;
- amode=(mode&OS_ACC_R_OK ? R_OK : 0) | (mode&OS_ACC_W_OK ? W_OK : 0) |
- (mode&OS_ACC_X_OK ? X_OK : 0) | (mode&OS_ACC_F_OK ? F_OK : 0) ;
+ amode = (mode & OS_ACC_R_OK ? R_OK : 0) |
+ (mode & OS_ACC_W_OK ? W_OK : 0) |
+ (mode & OS_ACC_X_OK ? X_OK : 0) |
+ (mode & OS_ACC_F_OK ? F_OK : 0);
err = access(file, amode);
- if(err < 0)
+ if (err < 0)
return -errno;
return 0;
@@ -88,7 +85,7 @@ int os_ioctl_generic(int fd, unsigned int cmd, unsigned long arg)
int err;
err = ioctl(fd, cmd, arg);
- if(err < 0)
+ if (err < 0)
return -errno;
return err;
@@ -97,7 +94,7 @@ int os_ioctl_generic(int fd, unsigned int cmd, unsigned long arg)
/* FIXME: ensure namebuf in os_get_if_name is big enough */
int os_get_ifname(int fd, char* namebuf)
{
- if(ioctl(fd, SIOCGIFNAME, namebuf) < 0)
+ if (ioctl(fd, SIOCGIFNAME, namebuf) < 0)
return -errno;
return 0;
@@ -108,37 +105,22 @@ int os_set_slip(int fd)
int disc, sencap;
disc = N_SLIP;
- if(ioctl(fd, TIOCSETD, &disc) < 0)
+ if (ioctl(fd, TIOCSETD, &disc) < 0)
return -errno;
sencap = 0;
- if(ioctl(fd, SIOCSIFENCAP, &sencap) < 0)
+ if (ioctl(fd, SIOCSIFENCAP, &sencap) < 0)
return -errno;
return 0;
}
-int os_set_owner(int fd, int pid)
-{
- if(fcntl(fd, F_SETOWN, pid) < 0){
- int save_errno = errno;
-
- if(fcntl(fd, F_GETOWN, 0) != pid)
- return -save_errno;
- }
-
- return 0;
-}
-
int os_mode_fd(int fd, int mode)
{
int err;
- do {
- err = fchmod(fd, mode);
- } while((err < 0) && (errno==EINTR)) ;
-
- if(err < 0)
+ CATCH_EINTR(err = fchmod(fd, mode));
+ if (err < 0)
return -errno;
return 0;
@@ -150,64 +132,73 @@ int os_file_type(char *file)
int err;
err = os_stat_file(file, &buf);
- if(err < 0)
+ if (err < 0)
return err;
- if(S_ISDIR(buf.ust_mode))
+ if (S_ISDIR(buf.ust_mode))
return OS_TYPE_DIR;
- else if(S_ISLNK(buf.ust_mode))
+ else if (S_ISLNK(buf.ust_mode))
return OS_TYPE_SYMLINK;
- else if(S_ISCHR(buf.ust_mode))
+ else if (S_ISCHR(buf.ust_mode))
return OS_TYPE_CHARDEV;
- else if(S_ISBLK(buf.ust_mode))
+ else if (S_ISBLK(buf.ust_mode))
return OS_TYPE_BLOCKDEV;
- else if(S_ISFIFO(buf.ust_mode))
+ else if (S_ISFIFO(buf.ust_mode))
return OS_TYPE_FIFO;
- else if(S_ISSOCK(buf.ust_mode))
+ else if (S_ISSOCK(buf.ust_mode))
return OS_TYPE_SOCK;
else return OS_TYPE_FILE;
}
-int os_file_mode(char *file, struct openflags *mode_out)
+int os_file_mode(const char *file, struct openflags *mode_out)
{
int err;
*mode_out = OPENFLAGS();
err = access(file, W_OK);
- if(err && (errno != EACCES))
+ if (err && (errno != EACCES))
return -errno;
- else if(!err)
+ else if (!err)
*mode_out = of_write(*mode_out);
err = access(file, R_OK);
- if(err && (errno != EACCES))
+ if (err && (errno != EACCES))
return -errno;
- else if(!err)
+ else if (!err)
*mode_out = of_read(*mode_out);
return err;
}
-int os_open_file(char *file, struct openflags flags, int mode)
+int os_open_file(const char *file, struct openflags flags, int mode)
{
int fd, err, f = 0;
- if(flags.r && flags.w) f = O_RDWR;
- else if(flags.r) f = O_RDONLY;
- else if(flags.w) f = O_WRONLY;
+ if (flags.r && flags.w)
+ f = O_RDWR;
+ else if (flags.r)
+ f = O_RDONLY;
+ else if (flags.w)
+ f = O_WRONLY;
else f = 0;
- if(flags.s) f |= O_SYNC;
- if(flags.c) f |= O_CREAT;
- if(flags.t) f |= O_TRUNC;
- if(flags.e) f |= O_EXCL;
+ if (flags.s)
+ f |= O_SYNC;
+ if (flags.c)
+ f |= O_CREAT;
+ if (flags.t)
+ f |= O_TRUNC;
+ if (flags.e)
+ f |= O_EXCL;
+ if (flags.a)
+ f |= O_APPEND;
fd = open64(file, f, mode);
- if(fd < 0)
+ if (fd < 0)
return -errno;
- if(flags.cl && fcntl(fd, F_SETFD, 1)){
+ if (flags.cl && fcntl(fd, F_SETFD, 1)) {
err = -errno;
close(fd);
return err;
@@ -216,7 +207,7 @@ int os_open_file(char *file, struct openflags flags, int mode)
return fd;
}
-int os_connect_socket(char *name)
+int os_connect_socket(const char *name)
{
struct sockaddr_un sock;
int fd, err;
@@ -225,13 +216,13 @@ int os_connect_socket(char *name)
snprintf(sock.sun_path, sizeof(sock.sun_path), "%s", name);
fd = socket(AF_UNIX, SOCK_STREAM, 0);
- if(fd < 0) {
+ if (fd < 0) {
err = -errno;
goto out;
}
err = connect(fd, (struct sockaddr *) &sock, sizeof(sock));
- if(err) {
+ if (err) {
err = -errno;
goto out_close;
}
@@ -254,7 +245,7 @@ int os_seek_file(int fd, unsigned long long offset)
unsigned long long actual;
actual = lseek64(fd, offset, SEEK_SET);
- if(actual != offset)
+ if (actual != offset)
return -errno;
return 0;
}
@@ -263,7 +254,7 @@ int os_read_file(int fd, void *buf, int len)
{
int n = read(fd, buf, len);
- if(n < 0)
+ if (n < 0)
return -errno;
return n;
}
@@ -272,37 +263,38 @@ int os_write_file(int fd, const void *buf, int len)
{
int n = write(fd, (void *) buf, len);
- if(n < 0)
+ if (n < 0)
return -errno;
return n;
}
-int os_file_size(char *file, unsigned long long *size_out)
+int os_file_size(const char *file, unsigned long long *size_out)
{
struct uml_stat buf;
int err;
err = os_stat_file(file, &buf);
- if(err < 0){
- printk("Couldn't stat \"%s\" : err = %d\n", file, -err);
+ if (err < 0) {
+ printk(UM_KERN_ERR "Couldn't stat \"%s\" : err = %d\n", file,
+ -err);
return err;
}
- if(S_ISBLK(buf.ust_mode)){
+ if (S_ISBLK(buf.ust_mode)) {
int fd;
long blocks;
fd = open(file, O_RDONLY, 0);
- if(fd < 0) {
+ if (fd < 0) {
err = -errno;
- printk("Couldn't open \"%s\", errno = %d\n", file,
- errno);
+ printk(UM_KERN_ERR "Couldn't open \"%s\", "
+ "errno = %d\n", file, errno);
return err;
}
- if(ioctl(fd, BLKGETSIZE, &blocks) < 0){
+ if (ioctl(fd, BLKGETSIZE, &blocks) < 0) {
err = -errno;
- printk("Couldn't get the block size of \"%s\", "
- "errno = %d\n", file, errno);
+ printk(UM_KERN_ERR "Couldn't get the block size of "
+ "\"%s\", errno = %d\n", file, errno);
close(fd);
return err;
}
@@ -314,14 +306,15 @@ int os_file_size(char *file, unsigned long long *size_out)
return 0;
}
-int os_file_modtime(char *file, unsigned long *modtime)
+int os_file_modtime(const char *file, unsigned long *modtime)
{
struct uml_stat buf;
int err;
err = os_stat_file(file, &buf);
- if(err < 0){
- printk("Couldn't stat \"%s\" : err = %d\n", file, -err);
+ if (err < 0) {
+ printk(UM_KERN_ERR "Couldn't stat \"%s\" : err = %d\n", file,
+ -err);
return err;
}
@@ -329,26 +322,13 @@ int os_file_modtime(char *file, unsigned long *modtime)
return 0;
}
-int os_get_exec_close(int fd, int *close_on_exec)
-{
- int ret;
-
- CATCH_EINTR(ret = fcntl(fd, F_GETFD));
-
- if(ret < 0)
- return -errno;
-
- *close_on_exec = (ret & FD_CLOEXEC) ? 1 : 0;
- return ret;
-}
-
int os_set_exec_close(int fd)
{
int err;
CATCH_EINTR(err = fcntl(fd, F_SETFD, FD_CLOEXEC));
- if(err < 0)
+ if (err < 0)
return -errno;
return err;
}
@@ -358,53 +338,51 @@ int os_pipe(int *fds, int stream, int close_on_exec)
int err, type = stream ? SOCK_STREAM : SOCK_DGRAM;
err = socketpair(AF_UNIX, type, 0, fds);
- if(err < 0)
+ if (err < 0)
return -errno;
- if(!close_on_exec)
+ if (!close_on_exec)
return 0;
err = os_set_exec_close(fds[0]);
- if(err < 0)
+ if (err < 0)
goto error;
err = os_set_exec_close(fds[1]);
- if(err < 0)
+ if (err < 0)
goto error;
return 0;
error:
- printk("os_pipe : Setting FD_CLOEXEC failed, err = %d\n", -err);
+ printk(UM_KERN_ERR "os_pipe : Setting FD_CLOEXEC failed, err = %d\n",
+ -err);
close(fds[1]);
close(fds[0]);
return err;
}
-int os_set_fd_async(int fd, int owner)
+int os_set_fd_async(int fd)
{
- int err;
+ int err, flags;
+
+ flags = fcntl(fd, F_GETFL);
+ if (flags < 0)
+ return -errno;
- /* XXX This should do F_GETFL first */
- if(fcntl(fd, F_SETFL, O_ASYNC | O_NONBLOCK) < 0){
+ flags |= O_ASYNC | O_NONBLOCK;
+ if (fcntl(fd, F_SETFL, flags) < 0) {
err = -errno;
- printk("os_set_fd_async : failed to set O_ASYNC and "
- "O_NONBLOCK on fd # %d, errno = %d\n", fd, errno);
+ printk(UM_KERN_ERR "os_set_fd_async : failed to set O_ASYNC "
+ "and O_NONBLOCK on fd # %d, errno = %d\n", fd, errno);
return err;
}
-#ifdef notdef
- if(fcntl(fd, F_SETFD, 1) < 0){
- printk("os_set_fd_async : Setting FD_CLOEXEC failed, "
- "errno = %d\n", errno);
- }
-#endif
- if((fcntl(fd, F_SETSIG, SIGIO) < 0) ||
- (fcntl(fd, F_SETOWN, owner) < 0)){
+ if ((fcntl(fd, F_SETSIG, SIGIO) < 0) ||
+ (fcntl(fd, F_SETOWN, os_getpid()) < 0)) {
err = -errno;
- printk("os_set_fd_async : Failed to fcntl F_SETOWN "
- "(or F_SETSIG) fd %d to pid %d, errno = %d\n", fd,
- owner, errno);
+ printk(UM_KERN_ERR "os_set_fd_async : Failed to fcntl F_SETOWN "
+ "(or F_SETSIG) fd %d, errno = %d\n", fd, errno);
return err;
}
@@ -413,10 +391,14 @@ int os_set_fd_async(int fd, int owner)
int os_clear_fd_async(int fd)
{
- int flags = fcntl(fd, F_GETFL);
+ int flags;
+
+ flags = fcntl(fd, F_GETFL);
+ if (flags < 0)
+ return -errno;
flags &= ~(O_ASYNC | O_NONBLOCK);
- if(fcntl(fd, F_SETFL, flags) < 0)
+ if (fcntl(fd, F_SETFL, flags) < 0)
return -errno;
return 0;
}
@@ -426,11 +408,15 @@ int os_set_fd_block(int fd, int blocking)
int flags;
flags = fcntl(fd, F_GETFL);
+ if (flags < 0)
+ return -errno;
- if(blocking) flags &= ~O_NONBLOCK;
- else flags |= O_NONBLOCK;
+ if (blocking)
+ flags &= ~O_NONBLOCK;
+ else
+ flags |= O_NONBLOCK;
- if(fcntl(fd, F_SETFL, flags) < 0)
+ if (fcntl(fd, F_SETFL, flags) < 0)
return -errno;
return 0;
@@ -441,7 +427,7 @@ int os_accept_connection(int fd)
int new;
new = accept(fd, NULL, 0);
- if(new < 0)
+ if (new < 0)
return -errno;
return new;
}
@@ -462,15 +448,17 @@ int os_shutdown_socket(int fd, int r, int w)
{
int what, err;
- if(r && w) what = SHUT_RDWR;
- else if(r) what = SHUT_RD;
- else if(w) what = SHUT_WR;
- else {
- printk("os_shutdown_socket : neither r or w was set\n");
+ if (r && w)
+ what = SHUT_RDWR;
+ else if (r)
+ what = SHUT_RD;
+ else if (w)
+ what = SHUT_WR;
+ else
return -EINVAL;
- }
+
err = shutdown(fd, what);
- if(err < 0)
+ if (err < 0)
return -errno;
return 0;
}
@@ -494,19 +482,20 @@ int os_rcv_fd(int fd, int *helper_pid_out)
msg.msg_flags = 0;
n = recvmsg(fd, &msg, 0);
- if(n < 0)
+ if (n < 0)
return -errno;
- else if(n != iov.iov_len)
+ else if (n != iov.iov_len)
*helper_pid_out = -1;
cmsg = CMSG_FIRSTHDR(&msg);
- if(cmsg == NULL){
- printk("rcv_fd didn't receive anything, error = %d\n", errno);
+ if (cmsg == NULL) {
+ printk(UM_KERN_ERR "rcv_fd didn't receive anything, "
+ "error = %d\n", errno);
return -1;
}
- if((cmsg->cmsg_level != SOL_SOCKET) ||
- (cmsg->cmsg_type != SCM_RIGHTS)){
- printk("rcv_fd didn't receive a descriptor\n");
+ if ((cmsg->cmsg_level != SOL_SOCKET) ||
+ (cmsg->cmsg_type != SCM_RIGHTS)) {
+ printk(UM_KERN_ERR "rcv_fd didn't receive a descriptor\n");
return -1;
}
@@ -514,29 +503,28 @@ int os_rcv_fd(int fd, int *helper_pid_out)
return new;
}
-int os_create_unix_socket(char *file, int len, int close_on_exec)
+int os_create_unix_socket(const char *file, int len, int close_on_exec)
{
struct sockaddr_un addr;
int sock, err;
sock = socket(PF_UNIX, SOCK_DGRAM, 0);
- if(sock < 0)
+ if (sock < 0)
return -errno;
- if(close_on_exec) {
+ if (close_on_exec) {
err = os_set_exec_close(sock);
- if(err < 0)
- printk("create_unix_socket : close_on_exec failed, "
- "err = %d", -err);
+ if (err < 0)
+ printk(UM_KERN_ERR "create_unix_socket : "
+ "close_on_exec failed, err = %d", -err);
}
addr.sun_family = AF_UNIX;
- /* XXX Be more careful about overflow */
snprintf(addr.sun_path, len, "%s", file);
err = bind(sock, (struct sockaddr *) &addr, sizeof(addr));
- if(err < 0)
+ if (err < 0)
return -errno;
return sock;
@@ -557,17 +545,18 @@ int os_lock_file(int fd, int excl)
int err, save;
err = fcntl(fd, F_SETLK, &lock);
- if(!err)
+ if (!err)
goto out;
save = -errno;
err = fcntl(fd, F_GETLK, &lock);
- if(err){
+ if (err) {
err = -errno;
goto out;
}
- printk("F_SETLK failed, file already locked by pid %d\n", lock.l_pid);
+ printk(UM_KERN_ERR "F_SETLK failed, file already locked by pid %d\n",
+ lock.l_pid);
err = save;
out:
return err;
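
The os_set_fd_async()/os_clear_fd_async()/os_set_fd_block() changes above all adopt the same read-modify-write of the descriptor flags instead of clobbering them with a bare F_SETFL. A minimal, self-contained sketch of that host-side pattern (the helper name and the use of getpid() rather than os_getpid() are illustrative assumptions, not UML code):

/*
 * Sketch only: preserve the existing file status flags, then OR in the
 * async/non-blocking bits, as the patched os_set_fd_async() does.
 */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int set_fd_async(int fd)
{
        int flags = fcntl(fd, F_GETFL);

        if (flags < 0)
                return -errno;

        if (fcntl(fd, F_SETFL, flags | O_ASYNC | O_NONBLOCK) < 0)
                return -errno;

        /* Deliver SIGIO to this process (the real code also sets F_SETSIG). */
        if (fcntl(fd, F_SETOWN, getpid()) < 0)
                return -errno;

        return 0;
}
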
diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c
index fba3f0fefeef..f4bd349d4412 100644
--- a/arch/um/os-Linux/helper.c
+++ b/arch/um/os-Linux/helper.c
@@ -1,22 +1,19 @@
/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
-#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>
-#include <limits.h>
-#include <sys/signal.h>
-#include <sys/wait.h>
#include <sys/socket.h>
-#include "user.h"
+#include <sys/wait.h>
+#include "kern_constants.h"
#include "kern_util.h"
#include "os.h"
#include "um_malloc.h"
-#include "kern_constants.h"
+#include "user.h"
struct helper_data {
void (*pre_exec)(void*);
@@ -30,21 +27,19 @@ static int helper_child(void *arg)
{
struct helper_data *data = arg;
char **argv = data->argv;
- int errval;
+ int err;
if (data->pre_exec != NULL)
(*data->pre_exec)(data->pre_data);
- errval = execvp_noalloc(data->buf, argv[0], argv);
- printk("helper_child - execvp of '%s' failed - errno = %d\n", argv[0],
- -errval);
- write(data->fd, &errval, sizeof(errval));
- kill(os_getpid(), SIGKILL);
+ err = execvp_noalloc(data->buf, argv[0], argv);
+
+ /* If the exec succeeds, we don't get here */
+ write(data->fd, &err, sizeof(err));
+
return 0;
}
-/* Returns either the pid of the child process we run or -E* on failure.
- * XXX The alloc_stack here breaks if this is called in the tracing thread, so
- * we need to receive a preallocated stack (a local buffer is ok). */
+/* Returns either the pid of the child process we run or -E* on failure. */
int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
{
struct helper_data data;
@@ -58,14 +53,15 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
ret = socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
if (ret < 0) {
ret = -errno;
- printk("run_helper : pipe failed, errno = %d\n", errno);
+ printk(UM_KERN_ERR "run_helper : pipe failed, errno = %d\n",
+ errno);
goto out_free;
}
ret = os_set_exec_close(fds[1]);
if (ret < 0) {
- printk("run_helper : setting FD_CLOEXEC failed, ret = %d\n",
- -ret);
+ printk(UM_KERN_ERR "run_helper : setting FD_CLOEXEC failed, "
+ "ret = %d\n", -ret);
goto out_close;
}
@@ -79,7 +75,8 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
pid = clone(helper_child, (void *) sp, CLONE_VM, &data);
if (pid < 0) {
ret = -errno;
- printk("run_helper : clone failed, errno = %d\n", errno);
+ printk(UM_KERN_ERR "run_helper : clone failed, errno = %d\n",
+ errno);
goto out_free2;
}
@@ -96,10 +93,9 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
} else {
if (n < 0) {
n = -errno;
- printk("run_helper : read on pipe failed, ret = %d\n",
- -n);
+ printk(UM_KERN_ERR "run_helper : read on pipe failed, "
+ "ret = %d\n", -n);
ret = n;
- kill(pid, SIGKILL);
}
CATCH_EINTR(waitpid(pid, NULL, __WCLONE));
}
@@ -129,50 +125,40 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
pid = clone(proc, (void *) sp, flags, arg);
if (pid < 0) {
err = -errno;
- printk("run_helper_thread : clone failed, errno = %d\n",
- errno);
+ printk(UM_KERN_ERR "run_helper_thread : clone failed, "
+ "errno = %d\n", errno);
return err;
}
if (stack_out == NULL) {
CATCH_EINTR(pid = waitpid(pid, &status, __WCLONE));
if (pid < 0) {
err = -errno;
- printk("run_helper_thread - wait failed, errno = %d\n",
- errno);
+ printk(UM_KERN_ERR "run_helper_thread - wait failed, "
+ "errno = %d\n", errno);
pid = err;
}
if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0))
- printk("run_helper_thread - thread returned status "
- "0x%x\n", status);
+ printk(UM_KERN_ERR "run_helper_thread - thread "
+ "returned status 0x%x\n", status);
free_stack(stack, 0);
} else
*stack_out = stack;
return pid;
}
-int helper_wait(int pid, int nohang, char *pname)
+int helper_wait(int pid)
{
int ret, status;
int wflags = __WCLONE;
- if (nohang)
- wflags |= WNOHANG;
-
- if (!pname)
- pname = "helper_wait";
-
CATCH_EINTR(ret = waitpid(pid, &status, wflags));
if (ret < 0) {
- printk(UM_KERN_ERR "%s : waitpid process %d failed, "
- "errno = %d\n", pname, pid, errno);
+ printk(UM_KERN_ERR "helper_wait : waitpid process %d failed, "
+ "errno = %d\n", pid, errno);
return -errno;
- } else if (nohang && ret == 0) {
- printk(UM_KERN_ERR "%s : process %d has not exited\n",
- pname, pid);
- return -ECHILD;
} else if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
- printk(UM_KERN_ERR "%s : process %d didn't exit with "
- "status 0\n", pname, pid);
+ printk(UM_KERN_ERR "helper_wait : process %d exited with "
+ "status 0x%x\n", pid, status);
return -ECHILD;
} else
return 0;
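
helper_child() now relies entirely on the close-on-exec pipe to report failure: if execvp_noalloc() succeeds the descriptor is closed and the parent's read() sees EOF, otherwise the error code arrives down the pipe. A hedged sketch of that idiom using fork()/pipe() instead of clone() and a preallocated stack (function name illustrative, not UML code):

/*
 * Sketch of the exec-failure-over-a-pipe pattern behind run_helper().
 * Simplified: fork() instead of clone(), no pre_exec callback.
 * Returns the child pid on success or -errno if the exec failed.
 */
#include <errno.h>
#include <fcntl.h>
#include <sys/wait.h>
#include <unistd.h>

static int run_child(char *const argv[])
{
        int fds[2], err;
        pid_t pid;
        ssize_t n;

        if (pipe(fds) < 0)
                return -errno;
        /* A successful exec closes the write end for us. */
        fcntl(fds[1], F_SETFD, FD_CLOEXEC);

        pid = fork();
        if (pid < 0)
                return -errno;
        if (pid == 0) {
                close(fds[0]);
                execvp(argv[0], argv);
                err = -errno;                 /* only reached on failure */
                write(fds[1], &err, sizeof(err));
                _exit(127);
        }

        close(fds[1]);
        n = read(fds[0], &err, sizeof(err));  /* 0 bytes == exec succeeded */
        close(fds[0]);
        if (n == (ssize_t) sizeof(err)) {
                waitpid(pid, NULL, 0);
                return err;
        }
        return pid;
}
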
diff --git a/arch/um/os-Linux/irq.c b/arch/um/os-Linux/irq.c
index 6aa6f95d6524..0348b975e81c 100644
--- a/arch/um/os-Linux/irq.c
+++ b/arch/um/os-Linux/irq.c
@@ -1,23 +1,19 @@
/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <stdlib.h>
-#include <unistd.h>
#include <errno.h>
+#include <poll.h>
#include <signal.h>
#include <string.h>
-#include <sys/poll.h>
-#include <sys/types.h>
-#include <sys/time.h>
-#include "kern_util.h"
-#include "user.h"
-#include "process.h"
-#include "sigio.h"
#include "irq_user.h"
+#include "kern_constants.h"
#include "os.h"
+#include "process.h"
#include "um_malloc.h"
+#include "user.h"
/*
* Locked by irq_lock in arch/um/kernel/irq.c. Changed by os_create_pollfd
@@ -36,7 +32,7 @@ int os_waiting_for_events(struct irq_fd *active_fds)
if (n < 0) {
err = -errno;
if (errno != EINTR)
- printk("sigio_handler: os_waiting_for_events:"
+ printk(UM_KERN_ERR "os_waiting_for_events:"
" poll returned %d, errno = %d\n", n, errno);
return err;
}
@@ -95,24 +91,26 @@ void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
struct irq_fd *old_fd = *prev;
if ((pollfds[i].fd != -1) &&
(pollfds[i].fd != (*prev)->fd)) {
- printk("os_free_irq_by_cb - mismatch between "
- "active_fds and pollfds, fd %d vs %d\n",
+ printk(UM_KERN_ERR "os_free_irq_by_cb - "
+ "mismatch between active_fds and "
+ "pollfds, fd %d vs %d\n",
(*prev)->fd, pollfds[i].fd);
goto out;
}
pollfds_num--;
- /* This moves the *whole* array after pollfds[i]
+ /*
+ * This moves the *whole* array after pollfds[i]
* (though it doesn't look like it)!
*/
memmove(&pollfds[i], &pollfds[i + 1],
(pollfds_num - i) * sizeof(pollfds[0]));
- if(*last_irq_ptr2 == &old_fd->next)
+ if (*last_irq_ptr2 == &old_fd->next)
*last_irq_ptr2 = prev;
*prev = (*prev)->next;
- if(old_fd->type == IRQ_WRITE)
+ if (old_fd->type == IRQ_WRITE)
ignore_sigio_fd(old_fd->fd);
kfree(old_fd);
continue;
@@ -138,14 +136,3 @@ void os_set_ioignore(void)
{
signal(SIGIO, SIG_IGN);
}
-
-void init_irq_signals(int on_sigstack)
-{
- int flags;
-
- flags = on_sigstack ? SA_ONSTACK : 0;
-
- set_handler(SIGIO, (__sighandler_t) sig_handler, flags | SA_RESTART,
- SIGUSR1, SIGIO, SIGWINCH, SIGVTALRM, -1);
- signal(SIGWINCH, SIG_IGN);
-}
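
The memmove() in os_free_irq_by_cb() above compacts the pollfd array in place when an fd is dropped; the comment warns that it shifts the whole tail, not just one entry. A tiny standalone illustration of that removal idiom (hypothetical helper, not the UML data structures):

/* Remove entry i by shifting everything after it one slot forward. */
#include <poll.h>
#include <string.h>

static void pollfds_remove(struct pollfd *fds, int *nfds, int i)
{
        (*nfds)--;
        memmove(&fds[i], &fds[i + 1], (*nfds - i) * sizeof(fds[0]));
}
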
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index 82c3778627b8..abb9b0ffd960 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -73,7 +73,7 @@ static void install_fatal_handler(int sig)
action.sa_handler = last_ditch_exit;
if (sigaction(sig, &action, NULL) < 0) {
printf("failed to install handler for signal %d - errno = %d\n",
- errno);
+ sig, errno);
exit(1);
}
}
@@ -92,7 +92,8 @@ static void setup_env_path(void)
* just use the default + /usr/lib/uml
*/
if (!old_path || (path_len = strlen(old_path)) == 0) {
- putenv("PATH=:/bin:/usr/bin/" UML_LIB_PATH);
+ if (putenv("PATH=:/bin:/usr/bin/" UML_LIB_PATH))
+ perror("couldn't putenv");
return;
}
@@ -100,15 +101,16 @@ static void setup_env_path(void)
path_len += strlen("PATH=" UML_LIB_PATH) + 1;
new_path = malloc(path_len);
if (!new_path) {
- perror("coudn't malloc to set a new PATH");
+ perror("couldn't malloc to set a new PATH");
return;
}
snprintf(new_path, path_len, "PATH=%s" UML_LIB_PATH, old_path);
- putenv(new_path);
+ if (putenv(new_path)) {
+ perror("couldn't putenv to set a new PATH");
+ free(new_path);
+ }
}
-extern int uml_exitcode;
-
extern void scan_elf_aux( char **envp);
int __init main(int argc, char **argv, char **envp)
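
The setup_env_path() fixes above (checking putenv()'s return and freeing the buffer only on failure) follow from how putenv() works: the string becomes part of the environment, so it must stay allocated while it is in use. A minimal sketch with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void append_to_path(const char *extra)
{
        const char *old = getenv("PATH");
        size_t len;
        char *new_path;

        if (old == NULL)
                old = "";
        len = strlen("PATH=") + strlen(old) + strlen(extra) + 2;
        new_path = malloc(len);
        if (new_path == NULL) {
                perror("couldn't malloc to set a new PATH");
                return;
        }
        snprintf(new_path, len, "PATH=%s:%s", old, extra);
        if (putenv(new_path)) {
                /* Only on failure does the buffer stay ours to free. */
                perror("couldn't putenv to set a new PATH");
                free(new_path);
        }
}
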
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index 436f8d20b20f..eedc2d88ef8a 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -9,7 +9,6 @@
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/statfs.h>
-#include "kern_util.h"
#include "user.h"
#include "mem_user.h"
#include "init.h"
@@ -30,7 +29,7 @@ static char *tempdir = NULL;
static void __init find_tempdir(void)
{
- char *dirs[] = { "TMP", "TEMP", "TMPDIR", NULL };
+ const char *dirs[] = { "TMP", "TEMP", "TMPDIR", NULL };
int i;
char *dir = NULL;
@@ -59,9 +58,10 @@ static void __init find_tempdir(void)
* read the file as needed. If there's an error, -errno is returned;
* if the end of the file is reached, 0 is returned.
*/
-static int next(int fd, char *buf, int size, char c)
+static int next(int fd, char *buf, size_t size, char c)
{
- int n, len;
+ ssize_t n;
+ size_t len;
char *ptr;
while((ptr = strchr(buf, c)) == NULL){
@@ -172,13 +172,15 @@ int __init make_tempfile(const char *template, char **out_tempname,
which_tmpdir();
tempname = malloc(MAXPATHLEN);
+ if (!tempname)
+ goto out;
find_tempdir();
if (template[0] != '/')
strcpy(tempname, tempdir);
else
tempname[0] = '\0';
- strcat(tempname, template);
+ strncat(tempname, template, MAXPATHLEN-1-strlen(tempname));
fd = mkstemp(tempname);
if(fd < 0){
fprintf(stderr, "open - cannot create %s: %s\n", tempname,
@@ -268,6 +270,7 @@ void __init check_tmpexec(void)
if(addr == MAP_FAILED){
err = errno;
perror("failed");
+ close(fd);
if(err == EPERM)
printf("%s must be not mounted noexec\n",tempdir);
exit(1);
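
make_tempfile() above now checks the malloc() and bounds the template copy. The underlying pattern is standard: pick a directory from the environment, append an XXXXXX template, and let mkstemp() create a unique file. A hedged sketch (the environment-variable order follows find_tempdir() above; the /tmp fallback and the uml- prefix are assumptions):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int make_temp(char *path, size_t len)
{
        const char *vars[] = { "TMP", "TEMP", "TMPDIR", NULL };
        const char *dir = "/tmp";
        int i, fd;

        for (i = 0; vars[i] != NULL; i++) {
                if (getenv(vars[i]) != NULL) {
                        dir = getenv(vars[i]);
                        break;
                }
        }

        if (snprintf(path, len, "%s/uml-XXXXXX", dir) >= (int) len)
                return -ENAMETOOLONG;

        fd = mkstemp(path);     /* fills in XXXXXX and opens the file */
        if (fd < 0)
                return -errno;
        return fd;
}
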
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index bda5c3150d6c..abf6beae3df1 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -249,7 +249,10 @@ void init_new_thread_signals(void)
SIGUSR1, SIGIO, SIGWINCH, SIGVTALRM, -1);
signal(SIGHUP, SIG_IGN);
- init_irq_signals(1);
+ set_handler(SIGIO, (__sighandler_t) sig_handler,
+ SA_ONSTACK | SA_RESTART, SIGUSR1, SIGIO, SIGWINCH, SIGALRM,
+ SIGVTALRM, -1);
+ signal(SIGWINCH, SIG_IGN);
}
int run_kernel_thread(int (*fn)(void *), void *arg, jmp_buf **jmp_ptr)
diff --git a/arch/um/os-Linux/registers.c b/arch/um/os-Linux/registers.c
index a32ba6ab1211..830fe6a1518a 100644
--- a/arch/um/os-Linux/registers.c
+++ b/arch/um/os-Linux/registers.c
@@ -8,47 +8,41 @@
#include <string.h>
#include <sys/ptrace.h>
#include "sysdep/ptrace.h"
-#include "user.h"
-/* This is set once at boot time and not changed thereafter */
-
-static unsigned long exec_regs[MAX_REG_NR];
-
-void init_thread_registers(struct uml_pt_regs *to)
-{
- memcpy(to->gp, exec_regs, sizeof(to->gp));
-}
-
-void save_registers(int pid, struct uml_pt_regs *regs)
+int save_registers(int pid, struct uml_pt_regs *regs)
{
int err;
err = ptrace(PTRACE_GETREGS, pid, 0, regs->gp);
if (err < 0)
- panic("save_registers - saving registers failed, errno = %d\n",
- errno);
+ return -errno;
+ return 0;
}
-void restore_registers(int pid, struct uml_pt_regs *regs)
+int restore_registers(int pid, struct uml_pt_regs *regs)
{
int err;
err = ptrace(PTRACE_SETREGS, pid, 0, regs->gp);
if (err < 0)
- panic("restore_registers - saving registers failed, "
- "errno = %d\n", errno);
+ return -errno;
+ return 0;
}
-void init_registers(int pid)
+/* This is set once at boot time and not changed thereafter */
+
+static unsigned long exec_regs[MAX_REG_NR];
+
+int init_registers(int pid)
{
int err;
err = ptrace(PTRACE_GETREGS, pid, 0, exec_regs);
- if (err)
- panic("check_ptrace : PTRACE_GETREGS failed, errno = %d",
- errno);
+ if (err < 0)
+ return -errno;
arch_init_registers(pid);
+ return 0;
}
void get_safe_registers(unsigned long *regs)
diff --git a/arch/um/os-Linux/sigio.c b/arch/um/os-Linux/sigio.c
index dc03e9cccb63..abf47a7c4abd 100644
--- a/arch/um/os-Linux/sigio.c
+++ b/arch/um/os-Linux/sigio.c
@@ -1,34 +1,33 @@
/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <unistd.h>
-#include <stdlib.h>
-#include <termios.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
#include <pty.h>
+#include <sched.h>
#include <signal.h>
-#include <fcntl.h>
-#include <errno.h>
#include <string.h>
-#include <sched.h>
-#include <sys/socket.h>
-#include <sys/poll.h>
-#include "init.h"
-#include "user.h"
+#include "kern_constants.h"
#include "kern_util.h"
-#include "sigio.h"
+#include "init.h"
#include "os.h"
+#include "sigio.h"
#include "um_malloc.h"
-#include "init.h"
+#include "user.h"
-/* Protected by sigio_lock(), also used by sigio_cleanup, which is an
+/*
+ * Protected by sigio_lock(), also used by sigio_cleanup, which is an
* exitcall.
*/
static int write_sigio_pid = -1;
static unsigned long write_sigio_stack;
-/* These arrays are initialized before the sigio thread is started, and
+/*
+ * These arrays are initialized before the sigio thread is started, and
* the descriptors closed after it is killed. So, it can't see them change.
* On the UML side, they are changed under the sigio_lock.
*/
@@ -43,7 +42,8 @@ struct pollfds {
int used;
};
-/* Protected by sigio_lock(). Used by the sigio thread, but the UML thread
+/*
+ * Protected by sigio_lock(). Used by the sigio thread, but the UML thread
* synchronizes with it.
*/
static struct pollfds current_poll;
@@ -57,23 +57,26 @@ static int write_sigio_thread(void *unused)
int i, n, respond_fd;
char c;
- signal(SIGWINCH, SIG_IGN);
+ signal(SIGWINCH, SIG_IGN);
fds = &current_poll;
- while(1){
+ while (1) {
n = poll(fds->poll, fds->used, -1);
- if(n < 0){
- if(errno == EINTR) continue;
- printk("write_sigio_thread : poll returned %d, "
- "errno = %d\n", n, errno);
+ if (n < 0) {
+ if (errno == EINTR)
+ continue;
+ printk(UM_KERN_ERR "write_sigio_thread : poll returned "
+ "%d, errno = %d\n", n, errno);
}
- for(i = 0; i < fds->used; i++){
+ for (i = 0; i < fds->used; i++) {
p = &fds->poll[i];
- if(p->revents == 0) continue;
- if(p->fd == sigio_private[1]){
+ if (p->revents == 0)
+ continue;
+ if (p->fd == sigio_private[1]) {
CATCH_EINTR(n = read(sigio_private[1], &c,
sizeof(c)));
- if(n != sizeof(c))
- printk("write_sigio_thread : "
+ if (n != sizeof(c))
+ printk(UM_KERN_ERR
+ "write_sigio_thread : "
"read on socket failed, "
"err = %d\n", errno);
tmp = current_poll;
@@ -89,9 +92,10 @@ static int write_sigio_thread(void *unused)
}
CATCH_EINTR(n = write(respond_fd, &c, sizeof(c)));
- if(n != sizeof(c))
- printk("write_sigio_thread : write on socket "
- "failed, err = %d\n", errno);
+ if (n != sizeof(c))
+ printk(UM_KERN_ERR "write_sigio_thread : "
+ "write on socket failed, err = %d\n",
+ errno);
}
}
@@ -102,12 +106,13 @@ static int need_poll(struct pollfds *polls, int n)
{
struct pollfd *new;
- if(n <= polls->size)
+ if (n <= polls->size)
return 0;
new = kmalloc(n * sizeof(struct pollfd), UM_GFP_ATOMIC);
- if(new == NULL){
- printk("need_poll : failed to allocate new pollfds\n");
+ if (new == NULL) {
+ printk(UM_KERN_ERR "need_poll : failed to allocate new "
+ "pollfds\n");
return -ENOMEM;
}
@@ -119,7 +124,8 @@ static int need_poll(struct pollfds *polls, int n)
return 0;
}
-/* Must be called with sigio_lock held, because it's needed by the marked
+/*
+ * Must be called with sigio_lock held, because it's needed by the marked
* critical section.
*/
static void update_thread(void)
@@ -129,15 +135,17 @@ static void update_thread(void)
char c;
flags = set_signals(0);
- n = write(sigio_private[0], &c, sizeof(c));
- if(n != sizeof(c)){
- printk("update_thread : write failed, err = %d\n", errno);
+ CATCH_EINTR(n = write(sigio_private[0], &c, sizeof(c)));
+ if (n != sizeof(c)) {
+ printk(UM_KERN_ERR "update_thread : write failed, err = %d\n",
+ errno);
goto fail;
}
CATCH_EINTR(n = read(sigio_private[0], &c, sizeof(c)));
- if(n != sizeof(c)){
- printk("update_thread : read failed, err = %d\n", errno);
+ if (n != sizeof(c)) {
+ printk(UM_KERN_ERR "update_thread : read failed, err = %d\n",
+ errno);
goto fail;
}
@@ -164,23 +172,23 @@ int add_sigio_fd(int fd)
int err = 0, i, n;
sigio_lock();
- for(i = 0; i < all_sigio_fds.used; i++){
- if(all_sigio_fds.poll[i].fd == fd)
+ for (i = 0; i < all_sigio_fds.used; i++) {
+ if (all_sigio_fds.poll[i].fd == fd)
break;
}
- if(i == all_sigio_fds.used)
+ if (i == all_sigio_fds.used)
goto out;
p = &all_sigio_fds.poll[i];
- for(i = 0; i < current_poll.used; i++){
- if(current_poll.poll[i].fd == fd)
+ for (i = 0; i < current_poll.used; i++) {
+ if (current_poll.poll[i].fd == fd)
goto out;
}
n = current_poll.used;
err = need_poll(&next_poll, n + 1);
- if(err)
+ if (err)
goto out;
memcpy(next_poll.poll, current_poll.poll,
@@ -198,27 +206,29 @@ int ignore_sigio_fd(int fd)
struct pollfd *p;
int err = 0, i, n = 0;
- /* This is called from exitcalls elsewhere in UML - if
+ /*
+ * This is called from exitcalls elsewhere in UML - if
* sigio_cleanup has already run, then update_thread will hang
* or fail because the thread is no longer running.
*/
- if(write_sigio_pid == -1)
+ if (write_sigio_pid == -1)
return -EIO;
sigio_lock();
- for(i = 0; i < current_poll.used; i++){
- if(current_poll.poll[i].fd == fd) break;
+ for (i = 0; i < current_poll.used; i++) {
+ if (current_poll.poll[i].fd == fd)
+ break;
}
- if(i == current_poll.used)
+ if (i == current_poll.used)
goto out;
err = need_poll(&next_poll, current_poll.used - 1);
- if(err)
+ if (err)
goto out;
- for(i = 0; i < current_poll.used; i++){
+ for (i = 0; i < current_poll.used; i++) {
p = &current_poll.poll[i];
- if(p->fd != fd)
+ if (p->fd != fd)
next_poll.poll[n++] = *p;
}
next_poll.used = current_poll.used - 1;
@@ -235,7 +245,8 @@ static struct pollfd *setup_initial_poll(int fd)
p = kmalloc(sizeof(struct pollfd), UM_GFP_KERNEL);
if (p == NULL) {
- printk("setup_initial_poll : failed to allocate poll\n");
+ printk(UM_KERN_ERR "setup_initial_poll : failed to allocate "
+ "poll\n");
return NULL;
}
*p = ((struct pollfd) { .fd = fd,
@@ -261,27 +272,29 @@ static void write_sigio_workaround(void)
return;
err = os_pipe(l_write_sigio_fds, 1, 1);
- if(err < 0){
- printk("write_sigio_workaround - os_pipe 1 failed, "
+ if (err < 0) {
+ printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 1 failed, "
"err = %d\n", -err);
return;
}
err = os_pipe(l_sigio_private, 1, 1);
- if(err < 0){
- printk("write_sigio_workaround - os_pipe 2 failed, "
+ if (err < 0) {
+ printk(UM_KERN_ERR "write_sigio_workaround - os_pipe 2 failed, "
"err = %d\n", -err);
goto out_close1;
}
p = setup_initial_poll(l_sigio_private[1]);
- if(!p)
+ if (!p)
goto out_close2;
sigio_lock();
- /* Did we race? Don't try to optimize this, please, it's not so likely
- * to happen, and no more than once at the boot. */
- if(write_sigio_pid != -1)
+ /*
+ * Did we race? Don't try to optimize this, please - it's not likely
+ * to happen, and happens at most once, at boot.
+ */
+ if (write_sigio_pid != -1)
goto out_free;
current_poll = ((struct pollfds) { .poll = p,
@@ -333,19 +346,19 @@ void maybe_sigio_broken(int fd, int read)
{
int err;
- if(!isatty(fd))
+ if (!isatty(fd))
return;
- if((read || pty_output_sigio) && (!read || pty_close_sigio))
+ if ((read || pty_output_sigio) && (!read || pty_close_sigio))
return;
write_sigio_workaround();
sigio_lock();
err = need_poll(&all_sigio_fds, all_sigio_fds.used + 1);
- if(err){
- printk("maybe_sigio_broken - failed to add pollfd for "
- "descriptor %d\n", fd);
+ if (err) {
+ printk(UM_KERN_ERR "maybe_sigio_broken - failed to add pollfd "
+ "for descriptor %d\n", fd);
goto out;
}
@@ -388,7 +401,7 @@ static void openpty_cb(void *arg)
struct openpty_arg *info = arg;
info->err = 0;
- if(openpty(&info->master, &info->slave, NULL, NULL, NULL))
+ if (openpty(&info->master, &info->slave, NULL, NULL, NULL))
info->err = -errno;
}
@@ -397,17 +410,17 @@ static int async_pty(int master, int slave)
int flags;
flags = fcntl(master, F_GETFL);
- if(flags < 0)
+ if (flags < 0)
return -errno;
- if((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
- (fcntl(master, F_SETOWN, os_getpid()) < 0))
+ if ((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
+ (fcntl(master, F_SETOWN, os_getpid()) < 0))
return -errno;
- if((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
+ if ((fcntl(slave, F_SETFL, flags | O_NONBLOCK) < 0))
return -errno;
- return(0);
+ return 0;
}
static void __init check_one_sigio(void (*proc)(int, int))
@@ -417,34 +430,49 @@ static void __init check_one_sigio(void (*proc)(int, int))
int master, slave, err;
initial_thread_cb(openpty_cb, &pty);
- if(pty.err){
- printk("openpty failed, errno = %d\n", -pty.err);
+ if (pty.err) {
+ printk(UM_KERN_ERR "check_one_sigio failed, errno = %d\n",
+ -pty.err);
return;
}
master = pty.master;
slave = pty.slave;
- if((master == -1) || (slave == -1)){
- printk("openpty failed to allocate a pty\n");
+ if ((master == -1) || (slave == -1)) {
+ printk(UM_KERN_ERR "check_one_sigio failed to allocate a "
+ "pty\n");
return;
}
/* Not now, but complain so we know where we failed. */
err = raw(master);
- if (err < 0)
- panic("check_sigio : __raw failed, errno = %d\n", -err);
+ if (err < 0) {
+ printk(UM_KERN_ERR "check_one_sigio : raw failed, errno = %d\n",
+ -err);
+ return;
+ }
err = async_pty(master, slave);
- if(err < 0)
- panic("tty_fds : sigio_async failed, err = %d\n", -err);
+ if (err < 0) {
+ printk(UM_KERN_ERR "check_one_sigio : sigio_async failed, "
+ "err = %d\n", -err);
+ return;
+ }
+
+ if (sigaction(SIGIO, NULL, &old) < 0) {
+ printk(UM_KERN_ERR "check_one_sigio : sigaction 1 failed, "
+ "errno = %d\n", errno);
+ return;
+ }
- if(sigaction(SIGIO, NULL, &old) < 0)
- panic("check_sigio : sigaction 1 failed, errno = %d\n", errno);
new = old;
new.sa_handler = handler;
- if(sigaction(SIGIO, &new, NULL) < 0)
- panic("check_sigio : sigaction 2 failed, errno = %d\n", errno);
+ if (sigaction(SIGIO, &new, NULL) < 0) {
+ printk(UM_KERN_ERR "check_one_sigio : sigaction 2 failed, "
+ "errno = %d\n", errno);
+ return;
+ }
got_sigio = 0;
(*proc)(master, slave);
@@ -452,8 +480,9 @@ static void __init check_one_sigio(void (*proc)(int, int))
close(master);
close(slave);
- if(sigaction(SIGIO, &old, NULL) < 0)
- panic("check_sigio : sigaction 3 failed, errno = %d\n", errno);
+ if (sigaction(SIGIO, &old, NULL) < 0)
+ printk(UM_KERN_ERR "check_one_sigio : sigaction 3 failed, "
+ "errno = %d\n", errno);
}
static void tty_output(int master, int slave)
@@ -461,42 +490,45 @@ static void tty_output(int master, int slave)
int n;
char buf[512];
- printk("Checking that host ptys support output SIGIO...");
+ printk(UM_KERN_INFO "Checking that host ptys support output SIGIO...");
memset(buf, 0, sizeof(buf));
- while(write(master, buf, sizeof(buf)) > 0) ;
- if(errno != EAGAIN)
- panic("tty_output : write failed, errno = %d\n", errno);
- while(((n = read(slave, buf, sizeof(buf))) > 0) && !got_sigio) ;
+ while (write(master, buf, sizeof(buf)) > 0) ;
+ if (errno != EAGAIN)
+ printk(UM_KERN_ERR "tty_output : write failed, errno = %d\n",
+ errno);
+ while (((n = read(slave, buf, sizeof(buf))) > 0) && !got_sigio)
+ ;
- if(got_sigio){
- printk("Yes\n");
+ if (got_sigio) {
+ printk(UM_KERN_CONT "Yes\n");
pty_output_sigio = 1;
- }
- else if(n == -EAGAIN)
- printk("No, enabling workaround\n");
- else panic("tty_output : read failed, err = %d\n", n);
+ } else if (n == -EAGAIN)
+ printk(UM_KERN_CONT "No, enabling workaround\n");
+ else
+ printk(UM_KERN_CONT "tty_output : read failed, err = %d\n", n);
}
static void tty_close(int master, int slave)
{
- printk("Checking that host ptys support SIGIO on close...");
+ printk(UM_KERN_INFO "Checking that host ptys support SIGIO on "
+ "close...");
close(slave);
- if(got_sigio){
- printk("Yes\n");
+ if (got_sigio) {
+ printk(UM_KERN_CONT "Yes\n");
pty_close_sigio = 1;
- }
- else printk("No, enabling workaround\n");
+ } else
+ printk(UM_KERN_CONT "No, enabling workaround\n");
}
void __init check_sigio(void)
{
- if((os_access("/dev/ptmx", OS_ACC_R_OK) < 0) &&
- (os_access("/dev/ptyp0", OS_ACC_R_OK) < 0)){
- printk("No pseudo-terminals available - skipping pty SIGIO "
- "check\n");
+ if ((access("/dev/ptmx", R_OK) < 0) &&
+ (access("/dev/ptyp0", R_OK) < 0)) {
+ printk(UM_KERN_WARNING "No pseudo-terminals available - "
+ "skipping pty SIGIO check\n");
return;
}
check_one_sigio(tty_output);
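
The SIGIO workaround above is built around one convention: the polling thread keeps a private descriptor in its own pollfd set, and update_thread() writes a byte to it whenever the fd list changes, then waits for a byte back. A simplified sketch of that control-descriptor idiom (the reload callback and the names are hypothetical; the real code swaps current_poll/next_poll under sigio_lock):

/*
 * Sketch of the wake-the-poller pattern used by write_sigio_thread().
 * ctrl_fd is one end of a socketpair; the other end is written by the
 * updater (as in update_thread()) and then read for an acknowledgement.
 */
#include <errno.h>
#include <poll.h>
#include <unistd.h>

static void poll_loop(struct pollfd *fds, int nfds, int ctrl_fd,
                      int (*reload)(struct pollfd *, int *))
{
        char c;
        int i, n;

        for (;;) {
                n = poll(fds, nfds, -1);
                if (n < 0) {
                        if (errno == EINTR)
                                continue;
                        return;
                }
                for (i = 0; i < nfds; i++) {
                        if (fds[i].revents == 0)
                                continue;
                        if (fds[i].fd != ctrl_fd) {
                                /* A watched fd is ready - here the real
                                 * thread raises SIGIO at the UML kernel. */
                                continue;
                        }
                        /* Drain the wakeup byte, pick up the new list. */
                        if (read(ctrl_fd, &c, sizeof(c)) != (ssize_t) sizeof(c))
                                break;
                        reload(fds, &nfds);
                        /* Ack so the updater's read() completes. */
                        write(ctrl_fd, &c, sizeof(c));
                        break;          /* re-poll with the updated set */
                }
        }
}
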
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index e9800b0b5689..0fb0cc8d4757 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -9,11 +9,47 @@
#include <errno.h>
#include <signal.h>
#include <strings.h>
+#include "as-layout.h"
+#include "kern_util.h"
#include "os.h"
#include "sysdep/barrier.h"
#include "sysdep/sigcontext.h"
#include "user.h"
+/* Copied from linux/compiler-gcc.h since we can't include it directly */
+#define barrier() __asm__ __volatile__("": : :"memory")
+
+void (*sig_info[NSIG])(int, struct uml_pt_regs *) = {
+ [SIGTRAP] = relay_signal,
+ [SIGFPE] = relay_signal,
+ [SIGILL] = relay_signal,
+ [SIGWINCH] = winch,
+ [SIGBUS] = bus_handler,
+ [SIGSEGV] = segv_handler,
+ [SIGIO] = sigio_handler,
+ [SIGVTALRM] = timer_handler };
+
+static void sig_handler_common(int sig, struct sigcontext *sc)
+{
+ struct uml_pt_regs r;
+ int save_errno = errno;
+
+ r.is_user = 0;
+ if (sig == SIGSEGV) {
+ /* For segfaults, we want the data from the sigcontext. */
+ copy_sc(&r, sc);
+ GET_FAULTINFO_FROM_SC(r.faultinfo, sc);
+ }
+
+ /* enable signals if sig isn't an IRQ signal */
+ if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGVTALRM))
+ unblock_signals();
+
+ (*sig_info[sig])(sig, &r);
+
+ errno = save_errno;
+}
+
/*
* These are the asynchronous signals. SIGPROF is excluded because we want to
* be able to profile all of UML, not just the non-critical sections. If
@@ -26,13 +62,8 @@
#define SIGVTALRM_BIT 1
#define SIGVTALRM_MASK (1 << SIGVTALRM_BIT)
-/*
- * These are used by both the signal handlers and
- * block/unblock_signals. I don't want modifications cached in a
- * register - they must go straight to memory.
- */
-static volatile int signals_enabled = 1;
-static volatile int pending = 0;
+static int signals_enabled;
+static unsigned int signals_pending;
void sig_handler(int sig, struct sigcontext *sc)
{
@@ -40,13 +71,13 @@ void sig_handler(int sig, struct sigcontext *sc)
enabled = signals_enabled;
if (!enabled && (sig == SIGIO)) {
- pending |= SIGIO_MASK;
+ signals_pending |= SIGIO_MASK;
return;
}
block_signals();
- sig_handler_common_skas(sig, sc);
+ sig_handler_common(sig, sc);
set_signals(enabled);
}
@@ -68,7 +99,7 @@ void alarm_handler(int sig, struct sigcontext *sc)
enabled = signals_enabled;
if (!signals_enabled) {
- pending |= SIGVTALRM_MASK;
+ signals_pending |= SIGVTALRM_MASK;
return;
}
@@ -94,16 +125,6 @@ void set_sigstack(void *sig_stack, int size)
panic("enabling signal stack failed, errno = %d\n", errno);
}
-void remove_sigstack(void)
-{
- stack_t stack = ((stack_t) { .ss_flags = SS_DISABLE,
- .ss_sp = NULL,
- .ss_size = 0 });
-
- if (sigaltstack(&stack, NULL) != 0)
- panic("disabling signal stack failed, errno = %d\n", errno);
-}
-
void (*handlers[_NSIG])(int sig, struct sigcontext *sc);
void handle_signal(int sig, struct sigcontext *sc)
@@ -166,6 +187,9 @@ void set_handler(int sig, void (*handler)(int), int flags, ...)
sigaddset(&action.sa_mask, mask);
va_end(ap);
+ if (sig == SIGSEGV)
+ flags |= SA_NODEFER;
+
action.sa_flags = flags;
action.sa_restorer = NULL;
if (sigaction(sig, &action, NULL) < 0)
@@ -179,12 +203,14 @@ void set_handler(int sig, void (*handler)(int), int flags, ...)
int change_sig(int signal, int on)
{
- sigset_t sigset, old;
+ sigset_t sigset;
sigemptyset(&sigset);
sigaddset(&sigset, signal);
- sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, &old);
- return !sigismember(&old, signal);
+ if (sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, NULL) < 0)
+ return -errno;
+
+ return 0;
}
void block_signals(void)
@@ -196,7 +222,7 @@ void block_signals(void)
* This might matter if gcc figures out how to inline this and
* decides to shuffle this code into the caller.
*/
- mb();
+ barrier();
}
void unblock_signals(void)
@@ -209,36 +235,26 @@ void unblock_signals(void)
/*
* We loop because the IRQ handler returns with interrupts off. So,
* interrupts may have arrived and we need to re-enable them and
- * recheck pending.
+ * recheck signals_pending.
*/
while(1) {
/*
* Save and reset save_pending after enabling signals. This
- * way, pending won't be changed while we're reading it.
+ * way, signals_pending won't be changed while we're reading it.
*/
signals_enabled = 1;
/*
- * Setting signals_enabled and reading pending must
+ * Setting signals_enabled and reading signals_pending must
* happen in this order.
*/
- mb();
-
- save_pending = pending;
- if (save_pending == 0) {
- /*
- * This must return with signals enabled, so
- * this barrier ensures that writes are
- * flushed out before the return. This might
- * matter if gcc figures out how to inline
- * this (unlikely, given its size) and decides
- * to shuffle this code into the caller.
- */
- mb();
+ barrier();
+
+ save_pending = signals_pending;
+ if (save_pending == 0)
return;
- }
- pending = 0;
+ signals_pending = 0;
/*
* We have pending interrupts, so disable signals, as the
@@ -254,7 +270,7 @@ void unblock_signals(void)
* back here.
*/
if (save_pending & SIGIO_MASK)
- sig_handler_common_skas(SIGIO, NULL);
+ sig_handler_common(SIGIO, NULL);
if (save_pending & SIGVTALRM_MASK)
real_alarm_handler(NULL);
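
block_signals()/unblock_signals() above are UML's userspace version of disabling interrupts: a handler that fires while signals are "off" only records a bit in signals_pending, and unblock_signals() replays the recorded work in a loop. A deliberately simplified model of that bookkeeping (it ignores the compiler-barrier and re-entrancy issues the real code comments on):

/* Simplified model of signals_enabled/signals_pending - not UML code. */
#define MY_SIGIO_BIT   0
#define MY_ALARM_BIT   1

static int enabled = 1;
static unsigned int pending;

static void deliver_sigio(void) { /* real handler body lives elsewhere */ }
static void deliver_alarm(void) { /* real handler body lives elsewhere */ }

static void on_signal(int bit)
{
        if (!enabled) {
                pending |= 1U << bit;   /* remember it for later */
                return;
        }
        enabled = 0;                    /* "interrupts off" while handling */
        if (bit == MY_SIGIO_BIT)
                deliver_sigio();
        else
                deliver_alarm();
        enabled = 1;
}

static void unblock(void)
{
        unsigned int save;

        while (1) {
                enabled = 1;
                save = pending;
                if (save == 0)
                        return;
                pending = 0;
                enabled = 0;            /* replay with "interrupts off" */
                if (save & (1U << MY_SIGIO_BIT))
                        deliver_sigio();
                if (save & (1U << MY_ALARM_BIT))
                        deliver_alarm();
        }
}
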
diff --git a/arch/um/os-Linux/skas/Makefile b/arch/um/os-Linux/skas/Makefile
index 5fd8d4dad66a..d2ea3409e072 100644
--- a/arch/um/os-Linux/skas/Makefile
+++ b/arch/um/os-Linux/skas/Makefile
@@ -1,10 +1,10 @@
#
-# Copyright (C) 2002 - 2004 Jeff Dike (jdike@addtoit.com)
+# Copyright (C) 2002 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
# Licensed under the GPL
#
-obj-y := mem.o process.o trap.o
+obj-y := mem.o process.o
-USER_OBJS := mem.o process.o trap.o
+USER_OBJS := $(obj-y)
include arch/um/scripts/Makefile.rules
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index e8b7a97e83d3..d36c89c24a45 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -15,6 +15,7 @@
#include "as-layout.h"
#include "chan_user.h"
#include "kern_constants.h"
+#include "kern_util.h"
#include "mem.h"
#include "os.h"
#include "process.h"
@@ -37,27 +38,27 @@ int is_skas_winch(int pid, int fd, void *data)
static int ptrace_dump_regs(int pid)
{
- unsigned long regs[MAX_REG_NR];
- int i;
+ unsigned long regs[MAX_REG_NR];
+ int i;
- if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
- return -errno;
+ if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0)
+ return -errno;
printk(UM_KERN_ERR "Stub registers -\n");
for (i = 0; i < ARRAY_SIZE(regs); i++)
printk(UM_KERN_ERR "\t%d - %lx\n", i, regs[i]);
- return 0;
+ return 0;
}
/*
* Signals that are OK to receive in the stub - we'll just continue it.
* SIGWINCH will happen when UML is inside a detached screen.
*/
-#define STUB_SIG_MASK ((1 << SIGVTALRM) | (1 << SIGWINCH))
+#define STUB_SIG_MASK (1 << SIGVTALRM)
/* Signals that the stub will finish with - anything else is an error */
-#define STUB_DONE_MASK ((1 << SIGUSR1) | (1 << SIGTRAP))
+#define STUB_DONE_MASK (1 << SIGTRAP)
void wait_stub_done(int pid)
{
@@ -72,9 +73,11 @@ void wait_stub_done(int pid)
break;
err = ptrace(PTRACE_CONT, pid, 0, 0);
- if (err)
- panic("wait_stub_done : continue failed, errno = %d\n",
- errno);
+ if (err) {
+ printk(UM_KERN_ERR "wait_stub_done : continue failed, "
+ "errno = %d\n", errno);
+ fatal_sigsegv();
+ }
}
if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
@@ -85,8 +88,10 @@ bad_wait:
if (err)
printk(UM_KERN_ERR "Failed to get registers from stub, "
"errno = %d\n", -err);
- panic("wait_stub_done : failed to wait for SIGUSR1/SIGTRAP, pid = %d, "
- "n = %d, errno = %d, status = 0x%x\n", pid, n, errno, status);
+ printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
+ "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
+ status);
+ fatal_sigsegv();
}
extern unsigned long current_stub_stack(void);
@@ -97,9 +102,11 @@ void get_skas_faultinfo(int pid, struct faultinfo * fi)
if (ptrace_faultinfo) {
err = ptrace(PTRACE_FAULTINFO, pid, 0, fi);
- if (err)
- panic("get_skas_faultinfo - PTRACE_FAULTINFO failed, "
- "errno = %d\n", errno);
+ if (err) {
+ printk(UM_KERN_ERR "get_skas_faultinfo - "
+ "PTRACE_FAULTINFO failed, errno = %d\n", errno);
+ fatal_sigsegv();
+ }
/* Special handling for i386, which has different structs */
if (sizeof(struct ptrace_faultinfo) < sizeof(struct faultinfo))
@@ -109,9 +116,11 @@ void get_skas_faultinfo(int pid, struct faultinfo * fi)
}
else {
err = ptrace(PTRACE_CONT, pid, 0, SIGSEGV);
- if (err)
- panic("Failed to continue stub, pid = %d, errno = %d\n",
- pid, errno);
+ if (err) {
+ printk(UM_KERN_ERR "Failed to continue stub, pid = %d, "
+ "errno = %d\n", pid, errno);
+ fatal_sigsegv();
+ }
wait_stub_done(pid);
/*
@@ -137,6 +146,9 @@ static void handle_trap(int pid, struct uml_pt_regs *regs,
{
int err, status;
+ if ((UPT_IP(regs) >= STUB_START) && (UPT_IP(regs) < STUB_END))
+ fatal_sigsegv();
+
/* Mark this as a syscall */
UPT_SYSCALL_NR(regs) = PT_SYSCALL_NR(regs->gp);
@@ -144,25 +156,31 @@ static void handle_trap(int pid, struct uml_pt_regs *regs,
{
err = ptrace(PTRACE_POKEUSR, pid, PT_SYSCALL_NR_OFFSET,
__NR_getpid);
- if (err < 0)
- panic("handle_trap - nullifying syscall failed, "
- "errno = %d\n", errno);
+ if (err < 0) {
+ printk(UM_KERN_ERR "handle_trap - nullifying syscall "
+ "failed, errno = %d\n", errno);
+ fatal_sigsegv();
+ }
err = ptrace(PTRACE_SYSCALL, pid, 0, 0);
- if (err < 0)
- panic("handle_trap - continuing to end of syscall "
- "failed, errno = %d\n", errno);
+ if (err < 0) {
+ printk(UM_KERN_ERR "handle_trap - continuing to end of "
+ "syscall failed, errno = %d\n", errno);
+ fatal_sigsegv();
+ }
CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
if ((err < 0) || !WIFSTOPPED(status) ||
- (WSTOPSIG(status) != SIGTRAP + 0x80)) {
- err = ptrace_dump_regs(pid);
- if (err)
- printk(UM_KERN_ERR "Failed to get registers "
+ (WSTOPSIG(status) != SIGTRAP + 0x80)) {
+ err = ptrace_dump_regs(pid);
+ if (err)
+ printk(UM_KERN_ERR "Failed to get registers "
"from process, errno = %d\n", -err);
- panic("handle_trap - failed to wait at end of syscall, "
- "errno = %d, status = %d\n", errno, status);
- }
+ printk(UM_KERN_ERR "handle_trap - failed to wait at "
+ "end of syscall, errno = %d, status = %d\n",
+ errno, status);
+ fatal_sigsegv();
+ }
}
handle_syscall(regs);
@@ -178,10 +196,13 @@ static int userspace_tramp(void *stack)
ptrace(PTRACE_TRACEME, 0, 0, 0);
signal(SIGTERM, SIG_DFL);
+ signal(SIGWINCH, SIG_IGN);
err = set_interval();
- if (err)
- panic("userspace_tramp - setting timer failed, errno = %d\n",
- err);
+ if (err) {
+ printk(UM_KERN_ERR "userspace_tramp - setting timer failed, "
+ "errno = %d\n", err);
+ exit(1);
+ }
if (!proc_mm) {
/*
@@ -221,16 +242,14 @@ static int userspace_tramp(void *stack)
set_sigstack((void *) STUB_DATA, UM_KERN_PAGE_SIZE);
sigemptyset(&sa.sa_mask);
- sigaddset(&sa.sa_mask, SIGIO);
- sigaddset(&sa.sa_mask, SIGWINCH);
- sigaddset(&sa.sa_mask, SIGVTALRM);
- sigaddset(&sa.sa_mask, SIGUSR1);
- sa.sa_flags = SA_ONSTACK;
+ sa.sa_flags = SA_ONSTACK | SA_NODEFER;
sa.sa_handler = (void *) v;
sa.sa_restorer = NULL;
- if (sigaction(SIGSEGV, &sa, NULL) < 0)
- panic("userspace_tramp - setting SIGSEGV handler "
- "failed - errno = %d\n", errno);
+ if (sigaction(SIGSEGV, &sa, NULL) < 0) {
+ printk(UM_KERN_ERR "userspace_tramp - setting SIGSEGV "
+ "handler failed - errno = %d\n", errno);
+ exit(1);
+ }
}
kill(os_getpid(), SIGSTOP);
@@ -246,13 +265,18 @@ int start_userspace(unsigned long stub_stack)
{
void *stack;
unsigned long sp;
- int pid, status, n, flags;
+ int pid, status, n, flags, err;
stack = mmap(NULL, UM_KERN_PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (stack == MAP_FAILED)
- panic("start_userspace : mmap failed, errno = %d", errno);
+ if (stack == MAP_FAILED) {
+ err = -errno;
+ printk(UM_KERN_ERR "start_userspace : mmap failed, "
+ "errno = %d\n", errno);
+ return err;
+ }
+
sp = (unsigned long) stack + UM_KERN_PAGE_SIZE - sizeof(void *);
flags = CLONE_FILES;
@@ -262,29 +286,50 @@ int start_userspace(unsigned long stub_stack)
flags |= SIGCHLD;
pid = clone(userspace_tramp, (void *) sp, flags, (void *) stub_stack);
- if (pid < 0)
- panic("start_userspace : clone failed, errno = %d", errno);
+ if (pid < 0) {
+ err = -errno;
+ printk(UM_KERN_ERR "start_userspace : clone failed, "
+ "errno = %d\n", errno);
+ return err;
+ }
do {
CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
- if (n < 0)
- panic("start_userspace : wait failed, errno = %d",
- errno);
+ if (n < 0) {
+ err = -errno;
+ printk(UM_KERN_ERR "start_userspace : wait failed, "
+ "errno = %d\n", errno);
+ goto out_kill;
+ }
} while (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGVTALRM));
- if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP))
- panic("start_userspace : expected SIGSTOP, got status = %d",
- status);
+ if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) {
+ err = -EINVAL;
+ printk(UM_KERN_ERR "start_userspace : expected SIGSTOP, got "
+ "status = %d\n", status);
+ goto out_kill;
+ }
if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
- (void *) PTRACE_O_TRACESYSGOOD) < 0)
- panic("start_userspace : PTRACE_OLDSETOPTIONS failed, "
- "errno = %d\n", errno);
+ (void *) PTRACE_O_TRACESYSGOOD) < 0) {
+ err = -errno;
+ printk(UM_KERN_ERR "start_userspace : PTRACE_OLDSETOPTIONS "
+ "failed, errno = %d\n", errno);
+ goto out_kill;
+ }
- if (munmap(stack, UM_KERN_PAGE_SIZE) < 0)
- panic("start_userspace : munmap failed, errno = %d\n", errno);
+ if (munmap(stack, UM_KERN_PAGE_SIZE) < 0) {
+ err = -errno;
+ printk(UM_KERN_ERR "start_userspace : munmap failed, "
+ "errno = %d\n", errno);
+ goto out_kill;
+ }
return pid;
+
+ out_kill:
+ os_kill_ptraced_process(pid, 1);
+ return err;
}
void userspace(struct uml_pt_regs *regs)
@@ -302,7 +347,16 @@ void userspace(struct uml_pt_regs *regs)
nsecs += os_nsecs();
while (1) {
- restore_registers(pid, regs);
+ /*
+ * This can legitimately fail if the process loads a
+ * bogus value into a segment register. It will
+ * segfault and PTRACE_GETREGS will read that value
+ * out of the process. However, PTRACE_SETREGS will
+ * fail. In this case, there is nothing to do but
+ * just kill the process.
+ */
+ if (ptrace(PTRACE_SETREGS, pid, 0, regs->gp))
+ fatal_sigsegv();
/* Now we set local_using_sysemu to be used for one loop */
local_using_sysemu = get_using_sysemu();
@@ -310,19 +364,26 @@ void userspace(struct uml_pt_regs *regs)
op = SELECT_PTRACE_OPERATION(local_using_sysemu,
singlestepping(NULL));
- err = ptrace(op, pid, 0, 0);
- if (err)
- panic("userspace - could not resume userspace process, "
- "pid=%d, ptrace operation = %d, errno = %d\n",
- pid, op, errno);
+ if (ptrace(op, pid, 0, 0)) {
+ printk(UM_KERN_ERR "userspace - ptrace continue "
+ "failed, op = %d, errno = %d\n", op, errno);
+ fatal_sigsegv();
+ }
CATCH_EINTR(err = waitpid(pid, &status, WUNTRACED | __WALL));
- if (err < 0)
- panic("userspace - waitpid failed, errno = %d\n",
- errno);
+ if (err < 0) {
+ printk(UM_KERN_ERR "userspace - wait failed, "
+ "errno = %d\n", errno);
+ fatal_sigsegv();
+ }
regs->is_user = 1;
- save_registers(pid, regs);
+ if (ptrace(PTRACE_GETREGS, pid, 0, regs->gp)) {
+ printk(UM_KERN_ERR "userspace - PTRACE_GETREGS failed, "
+ "errno = %d\n", errno);
+ fatal_sigsegv();
+ }
+
UPT_SYSCALL_NR(regs) = -1; /* Assume: It's not a syscall */
if (WIFSTOPPED(status)) {
@@ -345,7 +406,7 @@ void userspace(struct uml_pt_regs *regs)
break;
case SIGVTALRM:
now = os_nsecs();
- if(now < nsecs)
+ if (now < nsecs)
break;
block_signals();
(*sig_info[sig])(sig, regs);
@@ -368,6 +429,7 @@ void userspace(struct uml_pt_regs *regs)
default:
printk(UM_KERN_ERR "userspace - child stopped "
"with signal %d\n", sig);
+ fatal_sigsegv();
}
pid = userspace_pid[0];
interrupt_end();
@@ -419,9 +481,12 @@ int copy_context_skas0(unsigned long new_stack, int pid)
.it_interval = tv }) });
err = ptrace_setregs(pid, thread_regs);
- if (err < 0)
- panic("copy_context_skas0 : PTRACE_SETREGS failed, "
- "pid = %d, errno = %d\n", pid, -err);
+ if (err < 0) {
+ err = -errno;
+ printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_SETREGS "
+ "failed, pid = %d, errno = %d\n", pid, -err);
+ return err;
+ }
/* set a well known return code for detection of child write failure */
child_data->err = 12345678;
@@ -431,31 +496,47 @@ int copy_context_skas0(unsigned long new_stack, int pid)
* parent's stack, and check, if bad result.
*/
err = ptrace(PTRACE_CONT, pid, 0, 0);
- if (err)
- panic("Failed to continue new process, pid = %d, "
- "errno = %d\n", pid, errno);
+ if (err) {
+ err = -errno;
+ printk(UM_KERN_ERR "Failed to continue new process, pid = %d, "
+ "errno = %d\n", pid, errno);
+ return err;
+ }
+
wait_stub_done(pid);
pid = data->err;
- if (pid < 0)
- panic("copy_context_skas0 - stub-parent reports error %d\n",
- -pid);
+ if (pid < 0) {
+ printk(UM_KERN_ERR "copy_context_skas0 - stub-parent reports "
+ "error %d\n", -pid);
+ return pid;
+ }
/*
* Wait, until child has finished too: read child's result from
* child's stack and check it.
*/
wait_stub_done(pid);
- if (child_data->err != STUB_DATA)
- panic("copy_context_skas0 - stub-child reports error %ld\n",
- child_data->err);
+ if (child_data->err != STUB_DATA) {
+ printk(UM_KERN_ERR "copy_context_skas0 - stub-child reports "
+ "error %ld\n", child_data->err);
+ err = child_data->err;
+ goto out_kill;
+ }
if (ptrace(PTRACE_OLDSETOPTIONS, pid, NULL,
- (void *)PTRACE_O_TRACESYSGOOD) < 0)
- panic("copy_context_skas0 : PTRACE_OLDSETOPTIONS failed, "
- "errno = %d\n", errno);
+ (void *)PTRACE_O_TRACESYSGOOD) < 0) {
+ err = -errno;
+ printk(UM_KERN_ERR "copy_context_skas0 : PTRACE_OLDSETOPTIONS "
+ "failed, errno = %d\n", errno);
+ goto out_kill;
+ }
return pid;
+
+ out_kill:
+ os_kill_ptraced_process(pid, 1);
+ return err;
}
/*
@@ -463,8 +544,8 @@ int copy_context_skas0(unsigned long new_stack, int pid)
* available. Opening /proc/mm creates a new mm_context, which lacks
* the stub-pages. Thus, we map them using /proc/mm-fd
*/
-void map_stub_pages(int fd, unsigned long code,
- unsigned long data, unsigned long stack)
+int map_stub_pages(int fd, unsigned long code, unsigned long data,
+ unsigned long stack)
{
struct proc_mm_op mmop;
int n;
@@ -488,8 +569,9 @@ void map_stub_pages(int fd, unsigned long code,
printk(UM_KERN_ERR "mmap args - addr = 0x%lx, fd = %d, "
"offset = %llx\n", code, code_fd,
(unsigned long long) code_offset);
- panic("map_stub_pages : /proc/mm map for code failed, "
- "err = %d\n", n);
+ printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for code "
+ "failed, err = %d\n", n);
+ return -n;
}
if (stack) {
@@ -507,10 +589,15 @@ void map_stub_pages(int fd, unsigned long code,
.offset = map_offset
} } });
CATCH_EINTR(n = write(fd, &mmop, sizeof(mmop)));
- if (n != sizeof(mmop))
- panic("map_stub_pages : /proc/mm map for data failed, "
- "err = %d\n", errno);
+ if (n != sizeof(mmop)) {
+ n = errno;
+ printk(UM_KERN_ERR "map_stub_pages : /proc/mm map for "
+ "data failed, err = %d\n", n);
+ return -n;
+ }
}
+
+ return 0;
}
void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
@@ -571,7 +658,9 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf)
kmalloc_ok = 0;
return 1;
default:
- panic("Bad sigsetjmp return in start_idle_thread - %d\n", n);
+ printk(UM_KERN_ERR "Bad sigsetjmp return in "
+ "start_idle_thread - %d\n", n);
+ fatal_sigsegv();
}
longjmp(*switch_buf, 1);
}
@@ -614,9 +703,11 @@ void __switch_mm(struct mm_id *mm_idp)
if (proc_mm) {
err = ptrace(PTRACE_SWITCH_MM, userspace_pid[0], 0,
mm_idp->u.mm_fd);
- if (err)
- panic("__switch_mm - PTRACE_SWITCH_MM failed, "
- "errno = %d\n", errno);
+ if (err) {
+ printk(UM_KERN_ERR "__switch_mm - PTRACE_SWITCH_MM "
+ "failed, errno = %d\n", errno);
+ fatal_sigsegv();
+ }
}
else userspace_pid[0] = mm_idp->u.pid;
}
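
Most of the changes above replace panics in the userspace() ptrace loop with printk() plus fatal_sigsegv(). For readers unfamiliar with that loop, here is a heavily reduced sketch of the control flow it is built on - resume the child, wait, read its registers, dispatch on the stop signal. It assumes an ordinary fork()ed child already stopped under PTRACE_TRACEME and x86-style PTRACE_GETREGS; the real code uses clone(), __WALL and PTRACE_SYSEMU where available:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/user.h>
#include <sys/wait.h>

static int trace_loop(pid_t pid)
{
        struct user_regs_struct regs;
        int status, sig;

        for (;;) {
                if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0)
                        return -errno;
                if (waitpid(pid, &status, 0) < 0)
                        return -errno;
                if (WIFEXITED(status))
                        return WEXITSTATUS(status);
                if (!WIFSTOPPED(status))
                        continue;

                if (ptrace(PTRACE_GETREGS, pid, 0, &regs) < 0)
                        return -errno;

                sig = WSTOPSIG(status);
                switch (sig) {
                case SIGTRAP:
                        /* syscall entry/exit - UML emulates it here */
                        break;
                case SIGSEGV:
                case SIGIO:
                case SIGVTALRM:
                        /* relayed to UML's own handlers */
                        break;
                default:
                        fprintf(stderr, "unexpected stop signal %d\n", sig);
                        return -EINVAL;
                }
        }
}
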
diff --git a/arch/um/os-Linux/skas/trap.c b/arch/um/os-Linux/skas/trap.c
deleted file mode 100644
index 3b1b9244f468..000000000000
--- a/arch/um/os-Linux/skas/trap.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#if 0
-#include "kern_util.h"
-#include "skas.h"
-#include "ptrace_user.h"
-#include "sysdep/ptrace_user.h"
-#endif
-
-#include <errno.h>
-#include <signal.h>
-#include "sysdep/ptrace.h"
-#include "kern_constants.h"
-#include "as-layout.h"
-#include "os.h"
-#include "sigcontext.h"
-#include "task.h"
-
-static struct uml_pt_regs ksig_regs[UM_NR_CPUS];
-
-void sig_handler_common_skas(int sig, void *sc_ptr)
-{
- struct sigcontext *sc = sc_ptr;
- struct uml_pt_regs *r;
- void (*handler)(int, struct uml_pt_regs *);
- int save_user, save_errno = errno;
-
- /*
- * This is done because to allow SIGSEGV to be delivered inside a SEGV
- * handler. This can happen in copy_user, and if SEGV is disabled,
- * the process will die.
- * XXX Figure out why this is better than SA_NODEFER
- */
- if (sig == SIGSEGV) {
- change_sig(SIGSEGV, 1);
- /*
- * For segfaults, we want the data from the
- * sigcontext. In this case, we don't want to mangle
- * the process registers, so use a static set of
- * registers. For other signals, the process
- * registers are OK.
- */
- r = &ksig_regs[cpu()];
- copy_sc(r, sc_ptr);
- }
- else r = TASK_REGS(get_current());
-
- save_user = r->is_user;
- r->is_user = 0;
- if ((sig == SIGFPE) || (sig == SIGSEGV) || (sig == SIGBUS) ||
- (sig == SIGILL) || (sig == SIGTRAP))
- GET_FAULTINFO_FROM_SC(r->faultinfo, sc);
-
- change_sig(SIGUSR1, 1);
-
- handler = sig_info[sig];
-
- /* unblock SIGVTALRM, SIGIO if sig isn't IRQ signal */
- if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGVTALRM))
- unblock_signals();
-
- handler(sig, r);
-
- errno = save_errno;
- r->is_user = save_user;
-}
diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
index 7b81f6c08a5e..b616e15638fb 100644
--- a/arch/um/os-Linux/start_up.c
+++ b/arch/um/os-Linux/start_up.c
@@ -60,10 +60,11 @@ static int ptrace_child(void)
* the UML code itself.
*/
ret = 2;
- _exit(ret);
+
+ exit(ret);
}
-static void fatal_perror(char *str)
+static void fatal_perror(const char *str)
{
perror(str);
exit(1);
@@ -341,6 +342,8 @@ static void __init check_coredump_limit(void)
void __init os_early_checks(void)
{
+ int pid;
+
/* Print out the core dump limits early */
check_coredump_limit();
@@ -350,6 +353,11 @@ void __init os_early_checks(void)
* kernel is running.
*/
check_tmpexec();
+
+ pid = start_ptraced_child();
+ if (init_registers(pid))
+ fatal("Failed to initialize default registers");
+ stop_ptraced_child(pid, 1, 1);
}
static int __init noprocmm_cmd_param(char *str, int* add)
@@ -411,7 +419,6 @@ static inline void check_skas3_ptrace_faultinfo(void)
non_fatal("found\n");
}
- init_registers(pid);
stop_ptraced_child(pid, 1, 1);
}
@@ -466,7 +473,7 @@ static inline void check_skas3_proc_mm(void)
else non_fatal("found\n");
}
-int can_do_skas(void)
+void can_do_skas(void)
{
non_fatal("Checking for the skas3 patch in the host:\n");
@@ -476,8 +483,6 @@ int can_do_skas(void)
if (!proc_mm || !ptrace_faultinfo || !ptrace_ldt)
skas_needs_stub = 1;
-
- return 1;
}
int __init parse_iomem(char *str, int *add)
diff --git a/arch/um/os-Linux/trap.c b/arch/um/os-Linux/trap.c
deleted file mode 100644
index 2a1c9843e32e..000000000000
--- a/arch/um/os-Linux/trap.c
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- * Licensed under the GPL
- */
-
-#include <signal.h>
-#include "os.h"
-#include "sysdep/ptrace.h"
-
-/* Initialized from linux_main() */
-void (*sig_info[NSIG])(int, struct uml_pt_regs *);
-
-void os_fill_handlinfo(struct kern_handlers h)
-{
- sig_info[SIGTRAP] = h.relay_signal;
- sig_info[SIGFPE] = h.relay_signal;
- sig_info[SIGILL] = h.relay_signal;
- sig_info[SIGWINCH] = h.winch;
- sig_info[SIGBUS] = h.bus_handler;
- sig_info[SIGSEGV] = h.page_fault;
- sig_info[SIGIO] = h.sigio_handler;
- sig_info[SIGVTALRM] = h.timer_handler;
-}
diff --git a/arch/um/os-Linux/tty.c b/arch/um/os-Linux/tty.c
index 4cfdd18ea1ef..b09ff66a77ee 100644
--- a/arch/um/os-Linux/tty.c
+++ b/arch/um/os-Linux/tty.c
@@ -1,13 +1,16 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <stdlib.h>
+#include <unistd.h>
#include <errno.h>
+#include <fcntl.h>
+#include "kern_constants.h"
+#include "kern_util.h"
#include "os.h"
#include "user.h"
-#include "kern_util.h"
struct grantpt_info {
int fd;
@@ -26,36 +29,34 @@ static void grantpt_cb(void *arg)
int get_pty(void)
{
struct grantpt_info info;
- int fd;
-
- fd = os_open_file("/dev/ptmx", of_rdwr(OPENFLAGS()), 0);
- if(fd < 0){
- printk("get_pty : Couldn't open /dev/ptmx - err = %d\n", -fd);
- return(fd);
+ int fd, err;
+
+ fd = open("/dev/ptmx", O_RDWR);
+ if (fd < 0) {
+ err = -errno;
+ printk(UM_KERN_ERR "get_pty : Couldn't open /dev/ptmx - "
+ "err = %d\n", errno);
+ return err;
}
info.fd = fd;
initial_thread_cb(grantpt_cb, &info);
- if(info.res < 0){
- printk("get_pty : Couldn't grant pty - errno = %d\n",
- -info.err);
- return(-1);
+ if (info.res < 0) {
+ err = -info.err;
+ printk(UM_KERN_ERR "get_pty : Couldn't grant pty - "
+ "errno = %d\n", -info.err);
+ goto out;
}
- if(unlockpt(fd) < 0){
- printk("get_pty : Couldn't unlock pty - errno = %d\n", errno);
- return(-1);
+
+ if (unlockpt(fd) < 0) {
+ err = -errno;
+ printk(UM_KERN_ERR "get_pty : Couldn't unlock pty - "
+ "errno = %d\n", errno);
+ goto out;
}
- return(fd);
+ return fd;
+out:
+ close(fd);
+ return err;
}
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
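get_pty() now opens /dev/ptmx directly and unlocks the pty itself, cleaning up the descriptor on failure. A minimal standalone sketch of the same open/grant/unlock sequence, using the portable libc wrappers rather than the UML printk and initial_thread_cb machinery:

/* Standalone sketch of the ptmx open/grant/unlock sequence, using the
 * portable wrappers; the UML-specific pieces are not reproduced here. */
#define _XOPEN_SOURCE 600
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = posix_openpt(O_RDWR);		/* like open("/dev/ptmx", O_RDWR) */

	if (fd < 0) {
		perror("posix_openpt");
		return 1;
	}
	if (grantpt(fd) < 0 || unlockpt(fd) < 0) {
		perror("grantpt/unlockpt");
		close(fd);			/* mirror the new "goto out" cleanup */
		return 1;
	}
	printf("slave side: %s\n", ptsname(fd));
	close(fd);
	return 0;
}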
diff --git a/arch/um/os-Linux/tty_log.c b/arch/um/os-Linux/tty_log.c
index d11a55baa6bd..cc648e6fd3a2 100644
--- a/arch/um/os-Linux/tty_log.c
+++ b/arch/um/os-Linux/tty_log.c
@@ -12,7 +12,6 @@
#include <sys/time.h>
#include "init.h"
#include "user.h"
-#include "kern_util.h"
#include "os.h"
#define TTY_LOG_DIR "./"
diff --git a/arch/um/os-Linux/util.c b/arch/um/os-Linux/util.c
index 3e058ce9ffb6..a6f31d476993 100644
--- a/arch/um/os-Linux/util.c
+++ b/arch/um/os-Linux/util.c
@@ -88,21 +88,6 @@ void setup_hostinfo(char *buf, int len)
host.release, host.version, host.machine);
}
-int setjmp_wrapper(void (*proc)(void *, void *), ...)
-{
- va_list args;
- jmp_buf buf;
- int n;
-
- n = UML_SETJMP(&buf);
- if(n == 0){
- va_start(args, proc);
- (*proc)(&buf, &args);
- }
- va_end(args);
- return n;
-}
-
void os_dump_core(void)
{
int pid;
diff --git a/arch/um/sys-i386/bug.c b/arch/um/sys-i386/bug.c
index a4360b5207db..8d4f273f1219 100644
--- a/arch/um/sys-i386/bug.c
+++ b/arch/um/sys-i386/bug.c
@@ -4,6 +4,7 @@
*/
#include <linux/uaccess.h>
+#include <asm/errno.h>
/* Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because
* that's not relevant in skas mode.
diff --git a/arch/um/sys-i386/bugs.c b/arch/um/sys-i386/bugs.c
index 806895d73bcc..a74442d13762 100644
--- a/arch/um/sys-i386/bugs.c
+++ b/arch/um/sys-i386/bugs.c
@@ -3,171 +3,47 @@
* Licensed under the GPL
*/
-#include <errno.h>
#include <signal.h>
-#include <string.h>
#include "kern_constants.h"
-#include "os.h"
+#include "kern_util.h"
+#include "longjmp.h"
#include "task.h"
#include "user.h"
-
-#define MAXTOKEN 64
+#include "sysdep/ptrace.h"
/* Set during early boot */
int host_has_cmov = 1;
-int host_has_xmm = 0;
+static jmp_buf cmov_test_return;
-static char token(int fd, char *buf, int len, char stop)
+static void cmov_sigill_test_handler(int sig)
{
- int n;
- char *ptr, *end, c;
-
- ptr = buf;
- end = &buf[len];
- do {
- n = os_read_file(fd, ptr, sizeof(*ptr));
- c = *ptr++;
- if (n != sizeof(*ptr)) {
- if (n == 0)
- return 0;
- printk(UM_KERN_ERR "Reading /proc/cpuinfo failed, "
- "err = %d\n", -n);
- if (n < 0)
- return n;
- else return -EIO;
- }
- } while ((c != '\n') && (c != stop) && (ptr < end));
-
- if (ptr == end) {
- printk(UM_KERN_ERR "Failed to find '%c' in /proc/cpuinfo\n",
- stop);
- return -1;
- }
- *(ptr - 1) = '\0';
- return c;
-}
-
-static int find_cpuinfo_line(int fd, char *key, char *scratch, int len)
-{
- int n;
- char c;
-
- scratch[len - 1] = '\0';
- while (1) {
- c = token(fd, scratch, len - 1, ':');
- if (c <= 0)
- return 0;
- else if (c != ':') {
- printk(UM_KERN_ERR "Failed to find ':' in "
- "/proc/cpuinfo\n");
- return 0;
- }
-
- if (!strncmp(scratch, key, strlen(key)))
- return 1;
-
- do {
- n = os_read_file(fd, &c, sizeof(c));
- if (n != sizeof(c)) {
- printk(UM_KERN_ERR "Failed to find newline in "
- "/proc/cpuinfo, err = %d\n", -n);
- return 0;
- }
- } while (c != '\n');
- }
- return 0;
+ host_has_cmov = 0;
+ longjmp(cmov_test_return, 1);
}
-static int check_cpu_flag(char *feature, int *have_it)
-{
- char buf[MAXTOKEN], c;
- int fd, len = ARRAY_SIZE(buf);
-
- printk(UM_KERN_INFO "Checking for host processor %s support...",
- feature);
- fd = os_open_file("/proc/cpuinfo", of_read(OPENFLAGS()), 0);
- if (fd < 0) {
- printk(UM_KERN_ERR "Couldn't open /proc/cpuinfo, err = %d\n",
- -fd);
- return 0;
- }
-
- *have_it = 0;
- if (!find_cpuinfo_line(fd, "flags", buf, ARRAY_SIZE(buf)))
- goto out;
-
- c = token(fd, buf, len - 1, ' ');
- if (c < 0)
- goto out;
- else if (c != ' ') {
- printk(UM_KERN_ERR "Failed to find ' ' in /proc/cpuinfo\n");
- goto out;
- }
-
- while (1) {
- c = token(fd, buf, len - 1, ' ');
- if (c < 0)
- goto out;
- else if (c == '\n')
- break;
-
- if (!strcmp(buf, feature)) {
- *have_it = 1;
- goto out;
- }
- }
- out:
- if (*have_it == 0)
- printk("No\n");
- else if (*have_it == 1)
- printk("Yes\n");
- os_close_file(fd);
- return 1;
-}
-
-#if 0 /*
- * This doesn't work in tt mode, plus it's causing compilation problems
- * for some people.
- */
-static void disable_lcall(void)
+void arch_check_bugs(void)
{
- struct modify_ldt_ldt_s ldt;
- int err;
+ struct sigaction old, new;
- bzero(&ldt, sizeof(ldt));
- ldt.entry_number = 7;
- ldt.base_addr = 0;
- ldt.limit = 0;
- err = modify_ldt(1, &ldt, sizeof(ldt));
- if (err)
- printk(UM_KERN_ERR "Failed to disable lcall7 - errno = %d\n",
- errno);
-}
-#endif
+ printk(UM_KERN_INFO "Checking for host processor cmov support...");
+ new.sa_handler = cmov_sigill_test_handler;
-void arch_init_thread(void)
-{
-#if 0
- disable_lcall();
-#endif
-}
+ /* Make sure that SIGILL is enabled after the handler longjmps back */
+ new.sa_flags = SA_NODEFER;
+ sigemptyset(&new.sa_mask);
+ sigaction(SIGILL, &new, &old);
-void arch_check_bugs(void)
-{
- int have_it;
+ if (setjmp(cmov_test_return) == 0) {
+ unsigned long foo = 0;
+ __asm__ __volatile__("cmovz %0, %1" : "=r" (foo) : "0" (foo));
+ printk(UM_KERN_CONT "Yes\n");
+ } else
+ printk(UM_KERN_CONT "No\n");
- if (os_access("/proc/cpuinfo", OS_ACC_R_OK) < 0) {
- printk(UM_KERN_ERR "/proc/cpuinfo not available - skipping CPU "
- "capability checks\n");
- return;
- }
- if (check_cpu_flag("cmov", &have_it))
- host_has_cmov = have_it;
- if (check_cpu_flag("xmm", &have_it))
- host_has_xmm = have_it;
+ sigaction(SIGILL, &old, &new);
}
-int arch_handle_signal(int sig, struct uml_pt_regs *regs)
+void arch_examine_signal(int sig, struct uml_pt_regs *regs)
{
unsigned char tmp[2];
@@ -176,24 +52,25 @@ int arch_handle_signal(int sig, struct uml_pt_regs *regs)
* SIGILL in init.
*/
if ((sig != SIGILL) || (TASK_PID(get_current()) != 1))
- return 0;
+ return;
+
+ if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2)) {
+ printk(UM_KERN_ERR "SIGILL in init, could not read "
+ "instructions!\n");
+ return;
+ }
- if (copy_from_user_proc(tmp, (void *) UPT_IP(regs), 2))
- panic("SIGILL in init, could not read instructions!\n");
if ((tmp[0] != 0x0f) || ((tmp[1] & 0xf0) != 0x40))
- return 0;
+ return;
if (host_has_cmov == 0)
- panic("SIGILL caused by cmov, which this processor doesn't "
- "implement, boot a filesystem compiled for older "
- "processors");
+ printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
+ "processor doesn't implement. Boot a filesystem "
+ "compiled for older processors");
else if (host_has_cmov == 1)
- panic("SIGILL caused by cmov, which this processor claims to "
- "implement");
- else if (host_has_cmov == -1)
- panic("SIGILL caused by cmov, couldn't tell if this processor "
- "implements it, boot a filesystem compiled for older "
- "processors");
- else panic("Bad value for host_has_cmov (%d)", host_has_cmov);
- return 0;
+ printk(UM_KERN_ERR "SIGILL caused by cmov, which this "
+ "processor claims to implement");
+ else
+ printk(UM_KERN_ERR "Bad value for host_has_cmov (%d)",
+ host_has_cmov);
}
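The rewritten arch_check_bugs() probes cmov by executing the instruction and catching the resulting SIGILL instead of parsing /proc/cpuinfo. A standalone userspace sketch of that probe technique (x86 only; it uses sigsetjmp/siglongjmp where the kernel code uses setjmp with SA_NODEFER):

/* Standalone userspace sketch of the SIGILL probe (x86 only). */
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <setjmp.h>

static sigjmp_buf probe_return;

static void sigill_handler(int sig)
{
	siglongjmp(probe_return, 1);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = sigill_handler;
	sa.sa_flags = SA_NODEFER;	/* keep SIGILL deliverable after the jump */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGILL, &sa, NULL);

	if (sigsetjmp(probe_return, 1) == 0) {
		unsigned long tmp = 0;
		/* same instruction the patch executes */
		__asm__ __volatile__("cmovz %0, %1" : "=r" (tmp) : "0" (tmp));
		printf("cmov: yes\n");
	} else {
		printf("cmov: no\n");
	}
	return 0;
}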
diff --git a/arch/um/sys-i386/ldt.c b/arch/um/sys-i386/ldt.c
index 67c0958eb984..a34263e6b08d 100644
--- a/arch/um/sys-i386/ldt.c
+++ b/arch/um/sys-i386/ldt.c
@@ -3,8 +3,9 @@
* Licensed under the GPL
*/
-#include "linux/mm.h"
-#include "asm/unistd.h"
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/unistd.h>
#include "os.h"
#include "proc_mm.h"
#include "skas.h"
@@ -146,7 +147,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
if (ptrace_ldt)
return read_ldt_from_host(ptr, bytecount);
- down(&ldt->semaphore);
+ mutex_lock(&ldt->lock);
if (ldt->entry_count <= LDT_DIRECT_ENTRIES) {
size = LDT_ENTRY_SIZE*LDT_DIRECT_ENTRIES;
if (size > bytecount)
@@ -170,7 +171,7 @@ static int read_ldt(void __user * ptr, unsigned long bytecount)
ptr += size;
}
}
- up(&ldt->semaphore);
+ mutex_unlock(&ldt->lock);
if (bytecount == 0 || err == -EFAULT)
goto out;
@@ -228,7 +229,7 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
}
if (!ptrace_ldt)
- down(&ldt->semaphore);
+ mutex_lock(&ldt->lock);
err = write_ldt_entry(mm_idp, func, &ldt_info, &addr, 1);
if (err)
@@ -288,7 +289,7 @@ static int write_ldt(void __user * ptr, unsigned long bytecount, int func)
err = 0;
out_unlock:
- up(&ldt->semaphore);
+ mutex_unlock(&ldt->lock);
out:
return err;
}
@@ -395,7 +396,7 @@ long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
if (!ptrace_ldt)
- init_MUTEX(&new_mm->ldt.semaphore);
+ mutex_init(&new_mm->ldt.lock);
if (!from_mm) {
memset(&desc, 0, sizeof(desc));
@@ -455,7 +456,7 @@ long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
* i.e., we have to use the stub for modify_ldt, which
* can't handle the big read buffer of up to 64kB.
*/
- down(&from_mm->ldt.semaphore);
+ mutex_lock(&from_mm->ldt.lock);
if (from_mm->ldt.entry_count <= LDT_DIRECT_ENTRIES)
memcpy(new_mm->ldt.u.entries, from_mm->ldt.u.entries,
sizeof(new_mm->ldt.u.entries));
@@ -474,7 +475,7 @@ long init_new_ldt(struct mm_context *new_mm, struct mm_context *from_mm)
}
}
new_mm->ldt.entry_count = from_mm->ldt.entry_count;
- up(&from_mm->ldt.semaphore);
+ mutex_unlock(&from_mm->ldt.lock);
}
out:
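The ldt.c hunks convert the old semaphore into a struct mutex. A minimal kernel-style sketch of the resulting pattern, not the UML code itself, just the init/lock/unlock shape it now follows:

/* Minimal kernel-style sketch of the conversion pattern; the structure
 * and functions are hypothetical. */
#include <linux/mutex.h>

struct example_ctx {
	struct mutex lock;	/* was: struct semaphore + init_MUTEX() */
	int entry_count;
};

static void example_init(struct example_ctx *ctx)
{
	mutex_init(&ctx->lock);
	ctx->entry_count = 0;
}

static void example_update(struct example_ctx *ctx)
{
	mutex_lock(&ctx->lock);		/* was: down(&sem) */
	ctx->entry_count++;
	mutex_unlock(&ctx->lock);	/* was: up(&sem) */
}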
diff --git a/arch/um/sys-i386/ptrace.c b/arch/um/sys-i386/ptrace.c
index bd3da8a61f64..6b4499906a6c 100644
--- a/arch/um/sys-i386/ptrace.c
+++ b/arch/um/sys-i386/ptrace.c
@@ -8,11 +8,11 @@
#include "asm/uaccess.h"
#include "skas.h"
-extern int arch_switch_tls(struct task_struct *from, struct task_struct *to);
+extern int arch_switch_tls(struct task_struct *to);
-void arch_switch_to(struct task_struct *from, struct task_struct *to)
+void arch_switch_to(struct task_struct *to)
{
- int err = arch_switch_tls(from, to);
+ int err = arch_switch_tls(to);
if (!err)
return;
diff --git a/arch/um/sys-i386/ptrace_user.c b/arch/um/sys-i386/ptrace_user.c
index 5cf97bc229b9..0b10c3e74028 100644
--- a/arch/um/sys-i386/ptrace_user.c
+++ b/arch/um/sys-i386/ptrace_user.c
@@ -19,17 +19,3 @@ int ptrace_setregs(long pid, unsigned long *regs)
return -errno;
return 0;
}
-
-int ptrace_getfpregs(long pid, unsigned long *regs)
-{
- if (ptrace(PTRACE_GETFPREGS, pid, 0, regs) < 0)
- return -errno;
- return 0;
-}
-
-int ptrace_setfpregs(long pid, unsigned long *regs)
-{
- if (ptrace(PTRACE_SETFPREGS, pid, 0, regs) < 0)
- return -errno;
- return 0;
-}
diff --git a/arch/um/sys-i386/signal.c b/arch/um/sys-i386/signal.c
index 19053d46cb60..fd0c25ad6af3 100644
--- a/arch/um/sys-i386/signal.c
+++ b/arch/um/sys-i386/signal.c
@@ -168,12 +168,13 @@ static int copy_sc_from_user(struct pt_regs *regs,
struct sigcontext __user *from)
{
struct sigcontext sc;
- int err;
+ int err, pid;
err = copy_from_user(&sc, from, sizeof(sc));
if (err)
return err;
+ pid = userspace_pid[current_thread_info()->cpu];
copy_sc(&regs->regs, &sc);
if (have_fpx_regs) {
struct user_fxsr_struct fpx;
@@ -187,8 +188,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
if (err)
return 1;
- err = restore_fpx_registers(userspace_pid[current_thread->cpu],
- (unsigned long *) &fpx);
+ err = restore_fpx_registers(pid, (unsigned long *) &fpx);
if (err < 0) {
printk(KERN_ERR "copy_sc_from_user - "
"restore_fpx_registers failed, errno = %d\n",
@@ -204,8 +204,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
if (err)
return 1;
- err = restore_fp_registers(userspace_pid[current_thread->cpu],
- (unsigned long *) &fp);
+ err = restore_fp_registers(pid, (unsigned long *) &fp);
if (err < 0) {
printk(KERN_ERR "copy_sc_from_user - "
"restore_fp_registers failed, errno = %d\n",
@@ -223,7 +222,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
{
struct sigcontext sc;
struct faultinfo * fi = &current->thread.arch.faultinfo;
- int err;
+ int err, pid;
sc.gs = REGS_GS(regs->regs.gp);
sc.fs = REGS_FS(regs->regs.gp);
@@ -249,11 +248,11 @@ static int copy_sc_to_user(struct sigcontext __user *to,
to_fp = (to_fp ? to_fp : (struct _fpstate __user *) (to + 1));
sc.fpstate = to_fp;
+ pid = userspace_pid[current_thread_info()->cpu];
if (have_fpx_regs) {
struct user_fxsr_struct fpx;
- err = save_fpx_registers(userspace_pid[current_thread->cpu],
- (unsigned long *) &fpx);
+ err = save_fpx_registers(pid, (unsigned long *) &fpx);
if (err < 0){
printk(KERN_ERR "copy_sc_to_user - save_fpx_registers "
"failed, errno = %d\n", err);
@@ -276,8 +275,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
else {
struct user_i387_struct fp;
- err = save_fp_registers(userspace_pid[current_thread->cpu],
- (unsigned long *) &fp);
+ err = save_fp_registers(pid, (unsigned long *) &fp);
if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
return 1;
}
diff --git a/arch/um/sys-i386/stub.S b/arch/um/sys-i386/stub.S
index e730772c401b..7699e89f660f 100644
--- a/arch/um/sys-i386/stub.S
+++ b/arch/um/sys-i386/stub.S
@@ -7,7 +7,7 @@
.globl batch_syscall_stub
batch_syscall_stub:
/* load pointer to first operation */
- mov $(ASM_STUB_DATA+8), %esp
+ mov $(STUB_DATA+8), %esp
again:
/* load length of additional data */
@@ -15,12 +15,12 @@ again:
/* if(length == 0) : end of list */
/* write possible 0 to header */
- mov %eax, ASM_STUB_DATA+4
+ mov %eax, STUB_DATA+4
cmpl $0, %eax
jz done
/* save current pointer */
- mov %esp, ASM_STUB_DATA+4
+ mov %esp, STUB_DATA+4
/* skip additional data */
add %eax, %esp
@@ -46,7 +46,7 @@ again:
done:
/* save return value */
- mov %eax, ASM_STUB_DATA
+ mov %eax, STUB_DATA
/* stop */
int3
diff --git a/arch/um/sys-i386/stub_segv.c b/arch/um/sys-i386/stub_segv.c
index b3999cb76bfd..28ccf737a79f 100644
--- a/arch/um/sys-i386/stub_segv.c
+++ b/arch/um/sys-i386/stub_segv.c
@@ -1,32 +1,17 @@
/*
- * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
+ * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
-#include <signal.h>
-#include <sys/select.h> /* The only way I can see to get sigset_t */
-#include <asm/unistd.h>
-#include "as-layout.h"
-#include "uml-config.h"
#include "sysdep/stub.h"
#include "sysdep/sigcontext.h"
-#include "sysdep/faultinfo.h"
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig)
{
struct sigcontext *sc = (struct sigcontext *) (&sig + 1);
- int pid;
GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA), sc);
- pid = stub_syscall0(__NR_getpid);
- stub_syscall2(__NR_kill, pid, SIGUSR1);
-
- /* Load pointer to sigcontext into esp, since we need to leave
- * the stack in its original form when we do the sigreturn here, by
- * hand.
- */
- __asm__ __volatile__("mov %0,%%esp ; movl %1, %%eax ; "
- "int $0x80" : : "a" (sc), "g" (__NR_sigreturn));
+ trap_myself();
}
diff --git a/arch/um/sys-i386/sys_call_table.S b/arch/um/sys-i386/sys_call_table.S
index 12d4148dba39..00e5f5203eea 100644
--- a/arch/um/sys-i386/sys_call_table.S
+++ b/arch/um/sys-i386/sys_call_table.S
@@ -9,4 +9,9 @@
#define old_mmap old_mmap_i386
+.section .rodata,"a"
+
#include "../../x86/kernel/syscall_table_32.S"
+
+ENTRY(syscall_table_size)
+.long .-sys_call_table
diff --git a/arch/um/sys-i386/tls.c b/arch/um/sys-i386/tls.c
index fcaff86b000c..c6c7131e563b 100644
--- a/arch/um/sys-i386/tls.c
+++ b/arch/um/sys-i386/tls.c
@@ -26,6 +26,11 @@ int do_set_thread_area(struct user_desc *info)
cpu = get_cpu();
ret = os_set_thread_area(info, userspace_pid[cpu]);
put_cpu();
+
+ if (ret)
+ printk(KERN_ERR "PTRACE_SET_THREAD_AREA failed, err = %d, "
+ "index = %d\n", ret, info->entry_number);
+
return ret;
}
@@ -37,6 +42,11 @@ int do_get_thread_area(struct user_desc *info)
cpu = get_cpu();
ret = os_get_thread_area(info, userspace_pid[cpu]);
put_cpu();
+
+ if (ret)
+ printk(KERN_ERR "PTRACE_GET_THREAD_AREA failed, err = %d, "
+ "index = %d\n", ret, info->entry_number);
+
return ret;
}
@@ -172,7 +182,7 @@ void clear_flushed_tls(struct task_struct *task)
* SKAS patch.
*/
-int arch_switch_tls(struct task_struct *from, struct task_struct *to)
+int arch_switch_tls(struct task_struct *to)
{
if (!host_supports_tls)
return 0;
@@ -225,7 +235,8 @@ out:
}
/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
-static int get_tls_entry(struct task_struct* task, struct user_desc *info, int idx)
+static int get_tls_entry(struct task_struct *task, struct user_desc *info,
+ int idx)
{
struct thread_struct *t = &task->thread;
@@ -263,7 +274,7 @@ clear:
goto out;
}
-asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc)
+int sys_set_thread_area(struct user_desc __user *user_desc)
{
struct user_desc info;
int idx, ret;
@@ -298,7 +309,7 @@ asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc)
* i386. However the only possible error are caused by bugs.
*/
int ptrace_set_thread_area(struct task_struct *child, int idx,
- struct user_desc __user *user_desc)
+ struct user_desc __user *user_desc)
{
struct user_desc info;
@@ -311,7 +322,7 @@ int ptrace_set_thread_area(struct task_struct *child, int idx,
return set_tls_entry(child, &info, idx, 0);
}
-asmlinkage int sys_get_thread_area(struct user_desc __user *user_desc)
+int sys_get_thread_area(struct user_desc __user *user_desc)
{
struct user_desc info;
int idx, ret;
@@ -355,10 +366,9 @@ out:
return ret;
}
-
/*
- * XXX: This part is probably common to i386 and x86-64. Don't create a common
- * file for now, do that when implementing x86-64 support.
+ * This code is really i386-only, but it detects and logs x86_64 GDT indexes
+ * if a 32-bit UML is running on a 64-bit host.
*/
static int __init __setup_host_supports_tls(void)
{
@@ -367,13 +377,16 @@ static int __init __setup_host_supports_tls(void)
printk(KERN_INFO "Host TLS support detected\n");
printk(KERN_INFO "Detected host type: ");
switch (host_gdt_entry_tls_min) {
- case GDT_ENTRY_TLS_MIN_I386:
- printk("i386\n");
- break;
- case GDT_ENTRY_TLS_MIN_X86_64:
- printk("x86_64\n");
- break;
+ case GDT_ENTRY_TLS_MIN_I386:
+ printk(KERN_CONT "i386");
+ break;
+ case GDT_ENTRY_TLS_MIN_X86_64:
+ printk(KERN_CONT "x86_64");
+ break;
}
+ printk(KERN_CONT " (GDT indexes %d to %d)\n",
+ host_gdt_entry_tls_min,
+ host_gdt_entry_tls_min + GDT_ENTRY_TLS_ENTRIES);
} else
printk(KERN_ERR " Host TLS support NOT detected! "
"TLS support inside UML will not work\n");
diff --git a/arch/um/sys-ppc/Makefile b/arch/um/sys-ppc/Makefile
index a9814a7ae60e..08901526e893 100644
--- a/arch/um/sys-ppc/Makefile
+++ b/arch/um/sys-ppc/Makefile
@@ -6,7 +6,7 @@ OBJ = built-in.o
OBJS = ptrace.o sigcontext.o semaphore.o checksum.o miscthings.o misc.o \
ptrace_user.o sysrq.o
-EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(TOPDIR)/arch/ppc/kernel
+EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel
all: $(OBJ)
@@ -22,25 +22,25 @@ sigcontext.o: sigcontext.c
semaphore.c:
rm -f $@
- ln -s $(TOPDIR)/arch/ppc/kernel/$@ $@
+ ln -s $(srctree)/arch/ppc/kernel/$@ $@
checksum.S:
rm -f $@
- ln -s $(TOPDIR)/arch/ppc/lib/$@ $@
+ ln -s $(srctree)/arch/ppc/lib/$@ $@
mk_defs.c:
rm -f $@
- ln -s $(TOPDIR)/arch/ppc/kernel/$@ $@
+ ln -s $(srctree)/arch/ppc/kernel/$@ $@
ppc_defs.head:
rm -f $@
- ln -s $(TOPDIR)/arch/ppc/kernel/$@ $@
+ ln -s $(srctree)/arch/ppc/kernel/$@ $@
ppc_defs.h: mk_defs.c ppc_defs.head \
- $(TOPDIR)/include/asm-ppc/mmu.h \
- $(TOPDIR)/include/asm-ppc/processor.h \
- $(TOPDIR)/include/asm-ppc/pgtable.h \
- $(TOPDIR)/include/asm-ppc/ptrace.h
+ $(srctree)/include/asm-ppc/mmu.h \
+ $(srctree)/include/asm-ppc/processor.h \
+ $(srctree)/include/asm-ppc/pgtable.h \
+ $(srctree)/include/asm-ppc/ptrace.h
# $(CC) $(CFLAGS) -S mk_defs.c
cp ppc_defs.head ppc_defs.h
# for bk, this way we can write to the file even if it's not checked out
@@ -56,13 +56,13 @@ ppc_defs.h: mk_defs.c ppc_defs.head \
checksum.o: checksum.S
rm -f asm
- ln -s $(TOPDIR)/include/asm-ppc asm
+ ln -s $(srctree)/include/asm-ppc asm
$(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
rm -f asm
misc.o: misc.S ppc_defs.h
rm -f asm
- ln -s $(TOPDIR)/include/asm-ppc asm
+ ln -s $(srctree)/include/asm-ppc asm
$(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
rm -f asm
diff --git a/arch/um/sys-x86_64/bug.c b/arch/um/sys-x86_64/bug.c
index a4360b5207db..e8034e363d83 100644
--- a/arch/um/sys-x86_64/bug.c
+++ b/arch/um/sys-x86_64/bug.c
@@ -5,7 +5,8 @@
#include <linux/uaccess.h>
-/* Mostly copied from i386/x86_86 - eliminated the eip < PAGE_OFFSET because
+/*
+ * Mostly copied from i386/x86_64 - eliminated the eip < PAGE_OFFSET because
* that's not relevant in skas mode.
*/
diff --git a/arch/um/sys-x86_64/bugs.c b/arch/um/sys-x86_64/bugs.c
index 506b6765bbcb..44e02ba2a265 100644
--- a/arch/um/sys-x86_64/bugs.c
+++ b/arch/um/sys-x86_64/bugs.c
@@ -6,15 +6,10 @@
#include "sysdep/ptrace.h"
-void arch_init_thread(void)
-{
-}
-
void arch_check_bugs(void)
{
}
-int arch_handle_signal(int sig, struct uml_pt_regs *regs)
+void arch_examine_signal(int sig, struct uml_pt_regs *regs)
{
- return 0;
}
diff --git a/arch/um/sys-x86_64/ptrace.c b/arch/um/sys-x86_64/ptrace.c
index b7631b0e9ddc..f3458d7d1c5a 100644
--- a/arch/um/sys-x86_64/ptrace.c
+++ b/arch/um/sys-x86_64/ptrace.c
@@ -5,13 +5,12 @@
* Licensed under the GPL
*/
-#define __FRAME_OFFSETS
-#include <asm/ptrace.h>
+#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/mm.h>
+#define __FRAME_OFFSETS
+#include <asm/ptrace.h>
#include <asm/uaccess.h>
-#include <asm/elf.h>
/*
* determines which flags the user has access to.
@@ -24,12 +23,14 @@ int putreg(struct task_struct *child, int regno, unsigned long value)
unsigned long tmp;
#ifdef TIF_IA32
- /* Some code in the 64bit emulation may not be 64bit clean.
- Don't take any chances. */
+ /*
+ * Some code in the 64bit emulation may not be 64bit clean.
+ * Don't take any chances.
+ */
if (test_tsk_thread_flag(child, TIF_IA32))
value &= 0xffffffff;
#endif
- switch (regno){
+ switch (regno) {
case FS:
case GS:
case DS:
@@ -66,7 +67,7 @@ int poke_user(struct task_struct *child, long addr, long data)
if (addr < MAX_REG_OFFSET)
return putreg(child, addr, data);
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
- (addr <= offsetof(struct user, u_debugreg[7]))){
+ (addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
addr = addr >> 2;
if ((addr == 4) || (addr == 5))
@@ -108,11 +109,10 @@ int peek_user(struct task_struct *child, long addr, long data)
return -EIO;
tmp = 0; /* Default return condition */
- if (addr < MAX_REG_OFFSET){
+ if (addr < MAX_REG_OFFSET)
tmp = getreg(child, addr);
- }
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
- (addr <= offsetof(struct user, u_debugreg[7]))){
+ (addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
addr = addr >> 2;
tmp = child->thread.arch.debugregs[addr];
@@ -127,8 +127,9 @@ int is_syscall(unsigned long addr)
int n;
n = copy_from_user(&instr, (void __user *) addr, sizeof(instr));
- if (n){
- /* access_process_vm() grants access to vsyscall and stub,
+ if (n) {
+ /*
+ * access_process_vm() grants access to vsyscall and stub,
* while copy_from_user doesn't. Maybe access_process_vm is
* slow, but that doesn't matter, since it will be called only
* in case of singlestepping, if copy_from_user failed.
@@ -155,7 +156,7 @@ int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
return err;
n = copy_to_user(buf, fpregs, sizeof(fpregs));
- if(n > 0)
+ if (n > 0)
return -EFAULT;
return n;
diff --git a/arch/um/sys-x86_64/ptrace_user.c b/arch/um/sys-x86_64/ptrace_user.c
index b5f9c33e311e..c57a496d3f5b 100644
--- a/arch/um/sys-x86_64/ptrace_user.c
+++ b/arch/um/sys-x86_64/ptrace_user.c
@@ -4,55 +4,19 @@
* Licensed under the GPL
*/
-#include <stddef.h>
#include <errno.h>
#include "ptrace_user.h"
-#include "user.h"
-#include "kern_constants.h"
int ptrace_getregs(long pid, unsigned long *regs_out)
{
- if(ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0)
- return(-errno);
- return(0);
-}
-
-int ptrace_setregs(long pid, unsigned long *regs)
-{
- if(ptrace(PTRACE_SETREGS, pid, 0, regs) < 0)
- return(-errno);
+ if (ptrace(PTRACE_GETREGS, pid, 0, regs_out) < 0)
+ return -errno;
return(0);
}
-int ptrace_setfpregs(long pid, unsigned long *regs)
+int ptrace_setregs(long pid, unsigned long *regs_out)
{
- if (ptrace(PTRACE_SETFPREGS, pid, 0, regs) < 0)
+ if (ptrace(PTRACE_SETREGS, pid, 0, regs_out) < 0)
return -errno;
- return 0;
-}
-
-void ptrace_pokeuser(unsigned long addr, unsigned long data)
-{
- panic("ptrace_pokeuser");
-}
-
-#define DS 184
-#define ES 192
-#define __USER_DS 0x2b
-
-void arch_enter_kernel(void *task, int pid)
-{
-}
-
-void arch_leave_kernel(void *task, int pid)
-{
-#ifdef UM_USER_CS
- if(ptrace(PTRACE_POKEUSR, pid, CS, UM_USER_CS) < 0)
- printk("POKEUSR CS failed");
-#endif
-
- if(ptrace(PTRACE_POKEUSR, pid, DS, __USER_DS) < 0)
- printk("POKEUSR DS failed");
- if(ptrace(PTRACE_POKEUSR, pid, ES, __USER_DS) < 0)
- printk("POKEUSR ES failed");
+ return(0);
}
diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c
index 14070181407b..1a899a7ed7a6 100644
--- a/arch/um/sys-x86_64/signal.c
+++ b/arch/um/sys-x86_64/signal.c
@@ -81,7 +81,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
if (err)
return 1;
- err = restore_fp_registers(userspace_pid[current_thread->cpu],
+ err = restore_fp_registers(userspace_pid[current_thread_info()->cpu],
(unsigned long *) &fp);
if (err < 0) {
printk(KERN_ERR "copy_sc_from_user - "
@@ -143,7 +143,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
if (err)
return 1;
- err = save_fp_registers(userspace_pid[current_thread->cpu],
+ err = save_fp_registers(userspace_pid[current_thread_info()->cpu],
(unsigned long *) &fp);
if (err < 0) {
printk(KERN_ERR "copy_sc_from_user - restore_fp_registers "
diff --git a/arch/um/sys-x86_64/stub.S b/arch/um/sys-x86_64/stub.S
index 4afe204a6af7..568768763155 100644
--- a/arch/um/sys-x86_64/stub.S
+++ b/arch/um/sys-x86_64/stub.S
@@ -8,18 +8,18 @@ syscall_stub:
/* We don't have 64-bit constants, so this constructs the address
* we need.
*/
- movq $(ASM_STUB_DATA >> 32), %rbx
+ movq $(STUB_DATA >> 32), %rbx
salq $32, %rbx
- movq $(ASM_STUB_DATA & 0xffffffff), %rcx
+ movq $(STUB_DATA & 0xffffffff), %rcx
or %rcx, %rbx
movq %rax, (%rbx)
int3
.globl batch_syscall_stub
batch_syscall_stub:
- mov $(ASM_STUB_DATA >> 32), %rbx
+ mov $(STUB_DATA >> 32), %rbx
sal $32, %rbx
- mov $(ASM_STUB_DATA & 0xffffffff), %rax
+ mov $(STUB_DATA & 0xffffffff), %rax
or %rax, %rbx
/* load pointer to first operation */
mov %rbx, %rsp
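The stub assembly builds the 64-bit STUB_DATA address out of two 32-bit halves because it cannot embed a full 64-bit immediate there. The same arithmetic in standalone C, with a made-up address standing in for STUB_DATA:

/* Standalone C version of the address construction; STUB_ADDR is a
 * hypothetical value standing in for STUB_DATA. */
#include <stdio.h>
#include <stdint.h>

#define STUB_ADDR 0x7fbfffe000ULL	/* hypothetical stub data address */

int main(void)
{
	uint64_t hi = STUB_ADDR >> 32;		/* movq $(STUB_DATA >> 32), %rbx */
	uint64_t lo = STUB_ADDR & 0xffffffff;	/* movq $(STUB_DATA & 0xffffffff), %rcx */
	uint64_t addr = (hi << 32) | lo;	/* salq $32, %rbx ; or %rcx, %rbx */

	printf("reconstructed %#llx, matches: %d\n",
	       (unsigned long long) addr, addr == STUB_ADDR);
	return 0;
}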
diff --git a/arch/um/sys-x86_64/stub_segv.c b/arch/um/sys-x86_64/stub_segv.c
index 3afb590f0072..ced051afc705 100644
--- a/arch/um/sys-x86_64/stub_segv.c
+++ b/arch/um/sys-x86_64/stub_segv.c
@@ -1,51 +1,22 @@
/*
- * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com)
+ * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
-#include <stddef.h>
#include <signal.h>
-#include <asm/unistd.h>
#include "as-layout.h"
-#include "uml-config.h"
-#include "sysdep/sigcontext.h"
-#include "sysdep/faultinfo.h"
#include "sysdep/stub.h"
-
-/* Copied from sys-x86_64/signal.c - Can't find an equivalent definition
- * in the libc headers anywhere.
- */
-struct rt_sigframe
-{
- char *pretcode;
- struct ucontext uc;
- struct siginfo info;
-};
-
-/* Copied here from <linux/kernel.h> - we're userspace. */
-#define container_of(ptr, type, member) ({ \
- const typeof( ((type *)0)->member ) *__mptr = (ptr); \
- (type *)( (char *)__mptr - offsetof(type,member) );})
+#include "sysdep/faultinfo.h"
+#include "sysdep/sigcontext.h"
void __attribute__ ((__section__ (".__syscall_stub")))
stub_segv_handler(int sig)
{
struct ucontext *uc;
- int pid;
__asm__ __volatile__("movq %%rdx, %0" : "=g" (uc) :);
GET_FAULTINFO_FROM_SC(*((struct faultinfo *) STUB_DATA),
&uc->uc_mcontext);
-
- pid = stub_syscall0(__NR_getpid);
- stub_syscall2(__NR_kill, pid, SIGUSR1);
-
- /* sys_sigreturn expects that the stack pointer will be 8 bytes into
- * the signal frame. So, we use the ucontext pointer, which we know
- * already, to get the signal frame pointer, and add 8 to that.
- */
- __asm__ __volatile__("movq %0, %%rsp; movq %1, %%rax ; syscall": :
- "g" ((unsigned long)
- container_of(uc, struct rt_sigframe, uc) + 8),
- "g" (__NR_rt_sigreturn));
+ trap_myself();
}
+
diff --git a/arch/um/sys-x86_64/syscall_table.c b/arch/um/sys-x86_64/syscall_table.c
index 71b2ae4ad5de..c128eb897008 100644
--- a/arch/um/sys-x86_64/syscall_table.c
+++ b/arch/um/sys-x86_64/syscall_table.c
@@ -1,5 +1,7 @@
-/* System call table for UML/x86-64, copied from arch/x86_64/kernel/syscall.c
- * with some changes for UML. */
+/*
+ * System call table for UML/x86-64, copied from arch/x86_64/kernel/syscall.c
+ * with some changes for UML.
+ */
#include <linux/linkage.h>
#include <linux/sys.h>
@@ -8,22 +10,26 @@
#define __NO_STUBS
-/* Below you can see, in terms of #define's, the differences between the x86-64
- * and the UML syscall table. */
+/*
+ * Below you can see, in terms of #define's, the differences between the x86-64
+ * and the UML syscall table.
+ */
/* Not going to be implemented by UML, since we have no hardware. */
#define stub_iopl sys_ni_syscall
#define sys_ioperm sys_ni_syscall
-/* The UML TLS problem. Note that x86_64 does not implement this, so the below
- * is needed only for the ia32 compatibility. */
-/*#define sys_set_thread_area sys_ni_syscall
-#define sys_get_thread_area sys_ni_syscall*/
+/*
+ * The UML TLS problem. Note that x86_64 does not implement this, so the below
+ * is needed only for the ia32 compatibility.
+ */
/* On UML we call it this way ("old" means it's not mmap2) */
#define sys_mmap old_mmap
-/* On x86-64 sys_uname is actually sys_newuname plus a compatibility trick.
- * See arch/x86_64/kernel/sys_x86_64.c */
+/*
+ * On x86-64 sys_uname is actually sys_newuname plus a compatibility trick.
+ * See arch/x86_64/kernel/sys_x86_64.c
+ */
#define sys_uname sys_uname64
#define stub_clone sys_clone
@@ -46,8 +52,19 @@ typedef void (*sys_call_ptr_t)(void);
extern void sys_ni_syscall(void);
-sys_call_ptr_t sys_call_table[UM_NR_syscall_max+1] __cacheline_aligned = {
- /* Smells like a like a compiler bug -- it doesn't work when the & below is removed. */
- [0 ... UM_NR_syscall_max] = &sys_ni_syscall,
+/*
+ * We used to have a trick here which made sure that holes in the
+ * x86_64 table were filled in with sys_ni_syscall, but a comment in
+ * unistd_64.h says that holes aren't allowed, so the trick was
+ * removed.
+ * The trick looked like this
+ * [0 ... UM_NR_syscall_max] = &sys_ni_syscall
+ * before including unistd_64.h - the later initializations overwrote
+ * the sys_ni_syscall filler.
+ */
+
+sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
#include <asm-x86/unistd_64.h>
};
+
+int syscall_table_size = sizeof(sys_call_table);
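The removed filler relied on GNU C range designators, where a later designated entry overrides a slot that the range already filled. A standalone illustration of that behaviour, with a hypothetical table size and handlers (NR_MAX is not a real UML constant):

/* Standalone illustration of the removed trick: a GNU C range designator
 * fills every slot, and later designated entries override individual ones. */
#include <stdio.h>

typedef void (*sys_call_ptr_t)(void);

static void sys_ni_syscall(void) { puts("ENOSYS"); }
static void sys_example(void)    { puts("example syscall"); }

#define NR_MAX 7

static sys_call_ptr_t table[NR_MAX + 1] = {
	[0 ... NR_MAX] = sys_ni_syscall,	/* the filler */
	[3] = sys_example,			/* overrides slot 3 */
};

int main(void)
{
	table[2]();	/* prints "ENOSYS" */
	table[3]();	/* prints "example syscall" */
	return 0;
}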
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c
index 86f6b18410ee..f1199fd34d38 100644
--- a/arch/um/sys-x86_64/syscalls.c
+++ b/arch/um/sys-x86_64/syscalls.c
@@ -48,7 +48,9 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
switch (code) {
case ARCH_SET_FS:
case ARCH_SET_GS:
- restore_registers(pid, &current->thread.regs.regs);
+ ret = restore_registers(pid, &current->thread.regs.regs);
+ if (ret)
+ return ret;
break;
case ARCH_GET_FS:
case ARCH_GET_GS:
@@ -70,10 +72,10 @@ long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
switch (code) {
case ARCH_SET_FS:
current->thread.arch.fs = (unsigned long) ptr;
- save_registers(pid, &current->thread.regs.regs);
+ ret = save_registers(pid, &current->thread.regs.regs);
break;
case ARCH_SET_GS:
- save_registers(pid, &current->thread.regs.regs);
+ ret = save_registers(pid, &current->thread.regs.regs);
break;
case ARCH_GET_FS:
ret = put_user(tmp, addr);
@@ -105,7 +107,7 @@ long sys_clone(unsigned long clone_flags, unsigned long newsp,
return ret;
}
-void arch_switch_to(struct task_struct *from, struct task_struct *to)
+void arch_switch_to(struct task_struct *to)
{
if ((to->thread.arch.fs == 0) || (to->mm == NULL))
return;
diff --git a/arch/um/sys-x86_64/sysrq.c b/arch/um/sys-x86_64/sysrq.c
index 765444031819..f4f82beb3508 100644
--- a/arch/um/sys-x86_64/sysrq.c
+++ b/arch/um/sys-x86_64/sysrq.c
@@ -4,32 +4,33 @@
* Licensed under the GPL
*/
-#include "linux/kernel.h"
-#include "linux/utsname.h"
-#include "linux/module.h"
-#include "asm/current.h"
-#include "asm/ptrace.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/utsname.h>
+#include <asm/current.h>
+#include <asm/ptrace.h>
#include "sysrq.h"
-void __show_regs(struct pt_regs * regs)
+void __show_regs(struct pt_regs *regs)
{
printk("\n");
print_modules();
- printk("Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current),
+ printk(KERN_INFO "Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current),
current->comm, print_tainted(), init_utsname()->release);
- printk("RIP: %04lx:[<%016lx>] ", PT_REGS_CS(regs) & 0xffff,
+ printk(KERN_INFO "RIP: %04lx:[<%016lx>]\n", PT_REGS_CS(regs) & 0xffff,
PT_REGS_RIP(regs));
- printk("\nRSP: %016lx EFLAGS: %08lx\n", PT_REGS_RSP(regs),
+ printk(KERN_INFO "RSP: %016lx EFLAGS: %08lx\n", PT_REGS_RSP(regs),
PT_REGS_EFLAGS(regs));
- printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
+ printk(KERN_INFO "RAX: %016lx RBX: %016lx RCX: %016lx\n",
PT_REGS_RAX(regs), PT_REGS_RBX(regs), PT_REGS_RCX(regs));
- printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
+ printk(KERN_INFO "RDX: %016lx RSI: %016lx RDI: %016lx\n",
PT_REGS_RDX(regs), PT_REGS_RSI(regs), PT_REGS_RDI(regs));
- printk("RBP: %016lx R08: %016lx R09: %016lx\n",
+ printk(KERN_INFO "RBP: %016lx R08: %016lx R09: %016lx\n",
PT_REGS_RBP(regs), PT_REGS_R8(regs), PT_REGS_R9(regs));
- printk("R10: %016lx R11: %016lx R12: %016lx\n",
+ printk(KERN_INFO "R10: %016lx R11: %016lx R12: %016lx\n",
PT_REGS_R10(regs), PT_REGS_R11(regs), PT_REGS_R12(regs));
- printk("R13: %016lx R14: %016lx R15: %016lx\n",
+ printk(KERN_INFO "R13: %016lx R14: %016lx R15: %016lx\n",
PT_REGS_R13(regs), PT_REGS_R14(regs), PT_REGS_R15(regs));
}
diff --git a/arch/um/sys-x86_64/um_module.c b/arch/um/sys-x86_64/um_module.c
index 8b8eff1bd977..3dead392a415 100644
--- a/arch/um/sys-x86_64/um_module.c
+++ b/arch/um/sys-x86_64/um_module.c
@@ -1,7 +1,7 @@
#include <linux/vmalloc.h>
#include <linux/moduleloader.h>
-/*Copied from i386 arch/i386/kernel/module.c */
+/* Copied from i386 arch/i386/kernel/module.c */
void *module_alloc(unsigned long size)
{
if (size == 0)
@@ -13,7 +13,9 @@ void *module_alloc(unsigned long size)
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
+ /*
+ * FIXME: If module_region == mod->init_region, trim exception
+ * table entries.
+ */
}
diff --git a/arch/v850/kernel/anna.c b/arch/v850/kernel/anna.c
index 0e429041a117..5978a25170fb 100644
--- a/arch/v850/kernel/anna.c
+++ b/arch/v850/kernel/anna.c
@@ -85,7 +85,8 @@ void __init mach_reserve_bootmem ()
/* The space between SRAM and SDRAM is filled with duplicate
images of SRAM. Prevent the kernel from using them. */
reserve_bootmem (SRAM_ADDR + SRAM_SIZE,
- SDRAM_ADDR - (SRAM_ADDR + SRAM_SIZE));
+ SDRAM_ADDR - (SRAM_ADDR + SRAM_SIZE),
+ BOOTMEM_DEFAULT);
}
void mach_gettimeofday (struct timespec *tv)
diff --git a/arch/v850/kernel/as85ep1.c b/arch/v850/kernel/as85ep1.c
index 18437bc5c3ad..b525ecf3aea4 100644
--- a/arch/v850/kernel/as85ep1.c
+++ b/arch/v850/kernel/as85ep1.c
@@ -116,7 +116,8 @@ void __init mach_reserve_bootmem ()
if (SDRAM_ADDR < RAM_END && SDRAM_ADDR > RAM_START)
/* We can't use the space between SRAM and SDRAM, so
prevent the kernel from trying. */
- reserve_bootmem (SRAM_END, SDRAM_ADDR - SRAM_END);
+ reserve_bootmem(SRAM_END, SDRAM_ADDR - SRAM_END,
+ BOOTMEM_DEFAULT);
}
void mach_gettimeofday (struct timespec *tv)
diff --git a/arch/v850/kernel/rte_ma1_cb.c b/arch/v850/kernel/rte_ma1_cb.c
index 9a716f946421..08abf3d5f8df 100644
--- a/arch/v850/kernel/rte_ma1_cb.c
+++ b/arch/v850/kernel/rte_ma1_cb.c
@@ -46,13 +46,15 @@ void __init mach_reserve_bootmem ()
{
#ifdef CONFIG_RTE_CB_MULTI
/* Prevent the kernel from touching the monitor's scratch RAM. */
- reserve_bootmem (MON_SCRATCH_ADDR, MON_SCRATCH_SIZE);
+ reserve_bootmem(MON_SCRATCH_ADDR, MON_SCRATCH_SIZE,
+ BOOTMEM_DEFAULT);
#endif
/* The space between SRAM and SDRAM is filled with duplicate
images of SRAM. Prevent the kernel from using them. */
reserve_bootmem (SRAM_ADDR + SRAM_SIZE,
- SDRAM_ADDR - (SRAM_ADDR + SRAM_SIZE));
+ SDRAM_ADDR - (SRAM_ADDR + SRAM_SIZE),
+ BOOTMEM_DEFAULT);
}
void mach_gettimeofday (struct timespec *tv)
diff --git a/arch/v850/kernel/setup.c b/arch/v850/kernel/setup.c
index a914f244f494..a0a8456a8430 100644
--- a/arch/v850/kernel/setup.c
+++ b/arch/v850/kernel/setup.c
@@ -241,15 +241,18 @@ init_bootmem_alloc (unsigned long ram_start, unsigned long ram_len)
if (kram_end > kram_start)
/* Reserve the RAM part of the kernel's address space, so it
doesn't get allocated. */
- reserve_bootmem (kram_start, kram_end - kram_start);
+ reserve_bootmem(kram_start, kram_end - kram_start,
+ BOOTMEM_DEFAULT);
if (intv_in_ram && !intv_in_kram)
/* Reserve the interrupt vector space. */
- reserve_bootmem (intv_start, intv_end - intv_start);
+ reserve_bootmem(intv_start, intv_end - intv_start,
+ BOOTMEM_DEFAULT);
if (bootmap >= ram_start && bootmap < ram_end)
/* Reserve the bootmap space. */
- reserve_bootmem (bootmap, bootmap_len);
+ reserve_bootmem(bootmap, bootmap_len,
+ BOOTMEM_DEFAULT);
/* Reserve the memory used by the root filesystem image if it's
in RAM. */
@@ -257,7 +260,8 @@ init_bootmem_alloc (unsigned long ram_start, unsigned long ram_len)
&& (unsigned long)&_root_fs_image_start >= ram_start
&& (unsigned long)&_root_fs_image_start < ram_end)
reserve_bootmem ((unsigned long)&_root_fs_image_start,
- &_root_fs_image_end - &_root_fs_image_start);
+ &_root_fs_image_end - &_root_fs_image_start,
+ BOOTMEM_DEFAULT);
/* Let the platform-dependent code reserve some too. */
if (mrb)
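All of the v850 call sites above gain the new third argument to reserve_bootmem(). A hypothetical kernel-style caller, shown only to make the changed signature explicit; as far as this series goes, BOOTMEM_DEFAULT keeps the previous reservation behaviour:

/* Hypothetical kernel-style caller, only to show the new signature. */
#include <linux/init.h>
#include <linux/bootmem.h>

static void __init example_reserve(unsigned long start, unsigned long size)
{
	reserve_bootmem(start, size, BOOTMEM_DEFAULT);
}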
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 59eef1c7fdaa..9d0acedf5f3f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -52,6 +52,10 @@ config HAVE_LATENCYTOP_SUPPORT
config SEMAPHORE_SLEEPERS
def_bool y
+config FAST_CMPXCHG_LOCAL
+ bool
+ default y
+
config MMU
def_bool y
@@ -105,6 +109,9 @@ config GENERIC_TIME_VSYSCALL
bool
default X86_64
+config ARCH_HAS_CPU_RELAX
+ def_bool y
+
config HAVE_SETUP_PER_CPU_AREA
def_bool X86_64
@@ -415,7 +422,7 @@ config HPET_TIMER
config HPET_EMULATE_RTC
def_bool y
- depends on HPET_TIMER && (RTC=y || RTC=m)
+ depends on HPET_TIMER && (RTC=y || RTC=m || RTC_DRV_CMOS=m || RTC_DRV_CMOS=y)
# Mark as embedded because too many people got it wrong.
# The code disables itself when not needed.
@@ -465,6 +472,9 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
Calgary anyway, pass 'iommu=calgary' on the kernel command line.
If unsure, say Y.
+config IOMMU_HELPER
+ def_bool (CALGARY_IOMMU || GART_IOMMU)
+
# need this always selected by IOMMU for the VIA workaround
config SWIOTLB
bool
@@ -628,7 +638,6 @@ config TOSHIBA
config I8K
tristate "Dell laptop support"
- depends on X86_32
---help---
This adds a driver to safely access the System Management Mode
of the CPU on the Dell Inspiron 8000. The System Management Mode
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 2e1e3af28c3a..fa555148823d 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -220,9 +220,9 @@ config DEBUG_BOOT_PARAMS
This option will cause struct boot_params to be exported via debugfs.
config CPA_DEBUG
- bool "CPA self test code"
+ bool "CPA self-test code"
depends on DEBUG_KERNEL
help
- Do change_page_attr self tests at boot.
+ Do change_page_attr() self-tests every 30 seconds.
endmenu
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index e4c12079171b..58cccb6483b0 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -172,8 +172,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
has_dumped = 1;
current->flags |= PF_DUMPCORE;
strncpy(dump.u_comm, current->comm, sizeof(current->comm));
- dump.u_ar0 = (u32)(((unsigned long)(&dump.regs)) -
- ((unsigned long)(&dump)));
+ dump.u_ar0 = offsetof(struct user32, regs);
dump.signal = signr;
dump_thread32(regs, &dump);
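The aout_core_dump() change replaces manual pointer arithmetic with offsetof(). A standalone comparison on a hypothetical structure shows the two compute the same value:

/* Standalone comparison; the structure is made up, not struct user32. */
#include <stdio.h>
#include <stddef.h>

struct dump_example {
	long magic;
	long signal;
	long regs[17];	/* stand-in for the register block */
};

int main(void)
{
	struct dump_example dump;
	unsigned long manual = (unsigned long) &dump.regs -
			       (unsigned long) &dump;

	printf("manual   = %lu\n", manual);
	printf("offsetof = %zu\n", offsetof(struct dump_example, regs));
	return 0;
}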
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 0db0a6291bbd..8022d3c695c0 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -722,7 +722,9 @@ ia32_sys_call_table:
.quad sys_epoll_pwait
.quad compat_sys_utimensat /* 320 */
.quad compat_sys_signalfd
- .quad compat_sys_timerfd
+ .quad sys_timerfd_create
.quad sys_eventfd
.quad sys32_fallocate
+ .quad compat_sys_timerfd_settime /* 325 */
+ .quad compat_sys_timerfd_gettime
ia32_syscall_end:
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d2a58431a074..680b7300a489 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -78,7 +78,6 @@ int acpi_ht __initdata = 1; /* enable HT */
int acpi_lapic;
int acpi_ioapic;
int acpi_strict;
-EXPORT_SYMBOL(acpi_strict);
u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
@@ -106,7 +105,7 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
#ifdef CONFIG_X86_64
/* rely on all ACPI tables being in the direct mapping */
-char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
+char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
{
if (!phys_addr || !size)
return NULL;
@@ -131,7 +130,7 @@ char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
* from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
* count idx down while incrementing the phys address.
*/
-char *__acpi_map_table(unsigned long phys, unsigned long size)
+char *__init __acpi_map_table(unsigned long phys, unsigned long size)
{
unsigned long base, offset, mapped_size;
int idx;
@@ -490,8 +489,6 @@ int acpi_register_gsi(u32 gsi, int triggering, int polarity)
return irq;
}
-EXPORT_SYMBOL(acpi_register_gsi);
-
/*
* ACPI based hotplug support for CPU
*/
@@ -587,25 +584,6 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
EXPORT_SYMBOL(acpi_unregister_ioapic);
-static unsigned long __init
-acpi_scan_rsdp(unsigned long start, unsigned long length)
-{
- unsigned long offset = 0;
- unsigned long sig_len = sizeof("RSD PTR ") - 1;
-
- /*
- * Scan all 16-byte boundaries of the physical memory region for the
- * RSDP signature.
- */
- for (offset = 0; offset < length; offset += 16) {
- if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
- continue;
- return (start + offset);
- }
-
- return 0;
-}
-
static int __init acpi_parse_sbf(struct acpi_table_header *table)
{
struct acpi_table_boot *sb;
@@ -748,27 +726,6 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
return 0;
}
-unsigned long __init acpi_find_rsdp(void)
-{
- unsigned long rsdp_phys = 0;
-
- if (efi_enabled) {
- if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
- return efi.acpi20;
- else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
- return efi.acpi;
- }
- /*
- * Scan memory looking for the RSDP signature. First search EBDA (low
- * memory) paragraphs and then search upper memory (E0000-FFFFF).
- */
- rsdp_phys = acpi_scan_rsdp(0, 0x400);
- if (!rsdp_phys)
- rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
-
- return rsdp_phys;
-}
-
#ifdef CONFIG_X86_LOCAL_APIC
/*
* Parse LAPIC entries in MADT
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index a25db514c719..324eb0cab19c 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -46,6 +46,12 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
buf[1] = 1;
buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
+ /*
+ * The PDC_SMP_T_SWCOORD bit is set by default for Intel x86 CPUs so
+ * that OSPM is capable of native ACPI throttling software
+ * coordination using BIOS-supplied _TSD info.
+ */
+ buf[2] |= ACPI_PDC_SMP_T_SWCOORD;
if (cpu_has(c, X86_FEATURE_EST))
buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d9313d9adced..f86a3c4a2669 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -637,7 +637,7 @@ void __init early_cpu_init(void)
}
/* Make sure %fs is initialized properly in idle threads */
-struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
regs->fs = __KERNEL_PERCPU;
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
index 151eda0a23fc..cb7a5715596d 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
@@ -29,7 +29,7 @@ config X86_ACPI_CPUFREQ
config ELAN_CPUFREQ
tristate "AMD Elan SC400 and SC410"
select CPU_FREQ_TABLE
- depends on X86_32 && X86_ELAN
+ depends on X86_ELAN
---help---
This adds the CPUFreq driver for AMD Elan SC400 and SC410
processors.
@@ -45,7 +45,7 @@ config ELAN_CPUFREQ
config SC520_CPUFREQ
tristate "AMD Elan SC520"
select CPU_FREQ_TABLE
- depends on X86_32 && X86_ELAN
+ depends on X86_ELAN
---help---
This adds the CPUFreq driver for AMD Elan SC520 processor.
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
index 326a4c81f684..39f8cb18296c 100644
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -23,6 +23,7 @@
#define EPS_BRAND_C7 1
#define EPS_BRAND_EDEN 2
#define EPS_BRAND_C3 3
+#define EPS_BRAND_C7D 4
struct eps_cpu_data {
u32 fsb;
@@ -54,6 +55,7 @@ static int eps_set_state(struct eps_cpu_data *centaur,
{
struct cpufreq_freqs freqs;
u32 lo, hi;
+ u8 current_multiplier, current_voltage;
int err = 0;
int i;
@@ -93,6 +95,15 @@ postchange:
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
freqs.new = centaur->fsb * ((lo >> 8) & 0xff);
+ /* Print voltage and multiplier */
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ current_voltage = lo & 0xff;
+ printk(KERN_INFO "eps: Current voltage = %dmV\n",
+ current_voltage * 16 + 700);
+ current_multiplier = (lo >> 8) & 0xff;
+ printk(KERN_INFO "eps: Current multiplier = %d\n",
+ current_multiplier);
+
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return err;
}
@@ -141,9 +152,10 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
u8 current_multiplier, current_voltage;
u8 max_multiplier, max_voltage;
u8 min_multiplier, min_voltage;
- u8 brand;
+ u8 brand = 0;
u32 fsb;
struct eps_cpu_data *centaur;
+ struct cpuinfo_x86 *c = &cpu_data(0);
struct cpufreq_frequency_table *f_table;
int k, step, voltage;
int ret;
@@ -153,21 +165,36 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
/* Check brand */
- printk("eps: Detected VIA ");
- rdmsr(0x1153, lo, hi);
- brand = (((lo >> 2) ^ lo) >> 18) & 3;
+ printk(KERN_INFO "eps: Detected VIA ");
+
+ switch (c->x86_model) {
+ case 10:
+ rdmsr(0x1153, lo, hi);
+ brand = (((lo >> 2) ^ lo) >> 18) & 3;
+ printk(KERN_CONT "Model A ");
+ break;
+ case 13:
+ rdmsr(0x1154, lo, hi);
+ brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
+ printk(KERN_CONT "Model D ");
+ break;
+ }
+
switch(brand) {
case EPS_BRAND_C7M:
- printk("C7-M\n");
+ printk(KERN_CONT "C7-M\n");
break;
case EPS_BRAND_C7:
- printk("C7\n");
+ printk(KERN_CONT "C7\n");
break;
case EPS_BRAND_EDEN:
- printk("Eden\n");
+ printk(KERN_CONT "Eden\n");
+ break;
+ case EPS_BRAND_C7D:
+ printk(KERN_CONT "C7-D\n");
break;
case EPS_BRAND_C3:
- printk("C3\n");
+ printk(KERN_CONT "C3\n");
return -ENODEV;
break;
}
@@ -179,7 +206,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Can be locked at 0 */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & 1 << 16)) {
- printk("eps: Can't enable Enhanced PowerSaver\n");
+ printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
return -ENODEV;
}
}
@@ -187,19 +214,19 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
- printk("eps: Current voltage = %dmV\n", current_voltage * 16 + 700);
+ printk(KERN_INFO "eps: Current voltage = %dmV\n", current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
- printk("eps: Current multiplier = %d\n", current_multiplier);
+ printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
/* Print limits */
max_voltage = hi & 0xff;
- printk("eps: Highest voltage = %dmV\n", max_voltage * 16 + 700);
+ printk(KERN_INFO "eps: Highest voltage = %dmV\n", max_voltage * 16 + 700);
max_multiplier = (hi >> 8) & 0xff;
- printk("eps: Highest multiplier = %d\n", max_multiplier);
+ printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
min_voltage = (hi >> 16) & 0xff;
- printk("eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700);
+ printk(KERN_INFO "eps: Lowest voltage = %dmV\n", min_voltage * 16 + 700);
min_multiplier = (hi >> 24) & 0xff;
- printk("eps: Lowest multiplier = %d\n", min_multiplier);
+ printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
/* Sanity checks */
if (current_multiplier == 0 || max_multiplier == 0
@@ -208,7 +235,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
if (current_multiplier > max_multiplier
|| max_multiplier <= min_multiplier)
return -EINVAL;
- if (current_voltage > 0x1c || max_voltage > 0x1c)
+ if (current_voltage > 0x1f || max_voltage > 0x1f)
return -EINVAL;
if (max_voltage < min_voltage)
return -EINVAL;
@@ -310,7 +337,7 @@ static int __init eps_init(void)
/* This driver will work only on Centaur C7 processors with
* Enhanced SpeedStep/PowerSaver registers */
if (c->x86_vendor != X86_VENDOR_CENTAUR
- || c->x86 != 6 || c->x86_model != 10)
+ || c->x86 != 6 || c->x86_model < 10)
return -ENODEV;
if (!cpu_has(c, X86_FEATURE_EST))
return -ENODEV;
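The driver above decodes MSR_IA32_PERF_STATUS as a voltage in 16 mV steps above 700 mV in the low byte and the multiplier in the next byte. A standalone decode with a made-up MSR value (reading the real MSR needs ring 0 or /dev/cpu/N/msr):

/* Standalone decode of the fields the eps printks report. */
#include <stdio.h>

int main(void)
{
	unsigned int lo = 0x0000080a;	/* hypothetical low word of the MSR */
	unsigned int voltage_mv = (lo & 0xff) * 16 + 700;
	unsigned int multiplier = (lo >> 8) & 0xff;

	printf("eps: Current voltage = %umV\n", voltage_mv);
	printf("eps: Current multiplier = %u\n", multiplier);
	return 0;
}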
diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
index 2ed7db2fd257..9d9eae82e60f 100644
--- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
@@ -181,8 +181,8 @@ static __init struct pci_dev *gx_detect_chipset(void)
struct pci_dev *gx_pci = NULL;
/* check if CPU is a MediaGX or a Geode. */
- if ((current_cpu_data.x86_vendor != X86_VENDOR_NSC) &&
- (current_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) {
+ if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) &&
+ (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) {
dprintk("error: no MediaGX/Geode processor found!\n");
return NULL;
}
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index b5a9863d6cdc..0a61159d7b71 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -460,7 +460,7 @@ static int powernow_decode_bios (int maxfid, int startvid)
latency = psb->settlingtime;
if (latency < 100) {
- printk (KERN_INFO PFX "BIOS set settling time to %d microseconds."
+ printk(KERN_INFO PFX "BIOS set settling time to %d microseconds. "
"Should be at least 100. Correcting.\n", latency);
latency = 100;
}
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index a0522735dd9d..c99d59d8ef2e 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -578,10 +578,9 @@ static void print_basics(struct powernow_k8_data *data)
for (j = 0; j < data->numps; j++) {
if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) {
if (cpu_family == CPU_HW_PSTATE) {
- printk(KERN_INFO PFX " %d : fid 0x%x did 0x%x (%d MHz)\n",
+ printk(KERN_INFO PFX " %d : pstate %d (%d MHz)\n",
j,
- (data->powernow_table[j].index & 0xff00) >> 8,
- (data->powernow_table[j].index & 0xff0000) >> 16,
+ data->powernow_table[j].index,
data->powernow_table[j].frequency/1000);
} else {
printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x\n",
@@ -827,7 +826,6 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf
for (i = 0; i < data->acpi_data.state_count; i++) {
u32 index;
- u32 hi = 0, lo = 0;
index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
if (index > data->max_hw_pstate) {
@@ -1236,8 +1234,10 @@ static unsigned int powernowk8_get (unsigned int cpu)
struct powernow_k8_data *data;
cpumask_t oldmask = current->cpus_allowed;
unsigned int khz = 0;
+ unsigned int first;
- data = per_cpu(powernow_data, first_cpu(per_cpu(cpu_core_map, cpu)));
+ first = first_cpu(per_cpu(cpu_core_map, cpu));
+ data = per_cpu(powernow_data, first);
if (!data)
return -EINVAL;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index afd2b520d35c..ab48cfed4d96 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -47,7 +47,7 @@ struct powernow_k8_data {
#define CPUID_XFAM 0x0ff00000 /* extended family */
#define CPUID_XFAM_K8 0
#define CPUID_XMOD 0x000f0000 /* extended model */
-#define CPUID_XMOD_REV_MASK 0x00080000
+#define CPUID_XMOD_REV_MASK 0x000c0000
#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */
#define CPUID_USE_XFAM_XMOD 0x00000f00
#define CPUID_GET_MAX_CAPABILITIES 0x80000000
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index 76c3ab0da468..98d4fdb7dc04 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -189,10 +189,7 @@ static unsigned int pentium4_get_frequency(void)
printk(KERN_DEBUG "speedstep-lib: couldn't detect FSB speed. Please send an e-mail to <linux@brodo.de>\n");
/* Multiplier. */
- if (c->x86_model < 2)
- mult = msr_lo >> 27;
- else
- mult = msr_lo >> 24;
+ mult = msr_lo >> 24;
dprintk("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", fsb, mult, (fsb * mult));
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 404a6a2d4016..7139b0262703 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -83,8 +83,6 @@ static char cyrix_model_mult2[] __cpuinitdata = "12233445";
* FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
*/
-extern void calibrate_delay(void) __init;
-
static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
{
unsigned long flags;
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 1e27b69a7a0e..b6e136f23d3d 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -659,7 +659,7 @@ static __init int amd_special_default_mtrr(void)
*/
int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
{
- unsigned long i, base, size, highest_addr = 0, def, dummy;
+ unsigned long i, base, size, highest_pfn = 0, def, dummy;
mtrr_type type;
u64 trim_start, trim_size;
@@ -682,28 +682,27 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
mtrr_if->get(i, &base, &size, &type);
if (type != MTRR_TYPE_WRBACK)
continue;
- base <<= PAGE_SHIFT;
- size <<= PAGE_SHIFT;
- if (highest_addr < base + size)
- highest_addr = base + size;
+ if (highest_pfn < base + size)
+ highest_pfn = base + size;
}
/* kvm/qemu doesn't have mtrr set right, don't trim them all */
- if (!highest_addr) {
+ if (!highest_pfn) {
printk(KERN_WARNING "WARNING: strange, CPU MTRRs all blank?\n");
WARN_ON(1);
return 0;
}
- if ((highest_addr >> PAGE_SHIFT) < end_pfn) {
+ if (highest_pfn < end_pfn) {
printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover"
- " all of memory, losing %LdMB of RAM.\n",
- (((u64)end_pfn << PAGE_SHIFT) - highest_addr) >> 20);
+ " all of memory, losing %luMB of RAM.\n",
+ (end_pfn - highest_pfn) >> (20 - PAGE_SHIFT));
WARN_ON(1);
printk(KERN_INFO "update e820 for mtrr\n");
- trim_start = highest_addr;
+ trim_start = highest_pfn;
+ trim_start <<= PAGE_SHIFT;
trim_size = end_pfn;
trim_size <<= PAGE_SHIFT;
trim_size -= trim_start;
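The MTRR trimming now keeps its bookkeeping in page frame numbers and prints the lost amount as (end_pfn - highest_pfn) >> (20 - PAGE_SHIFT). A standalone check that this matches the old address-based arithmetic, assuming 4 KiB pages and made-up pfn values:

/* Standalone arithmetic check, assuming PAGE_SHIFT == 12. */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long end_pfn = 0x100000;	/* hypothetical: 4 GiB of RAM */
	unsigned long highest_pfn = 0xf0000;	/* hypothetical MTRR coverage */

	unsigned long mb_from_pfns = (end_pfn - highest_pfn) >> (20 - PAGE_SHIFT);
	unsigned long long mb_from_addrs =
		(((unsigned long long) end_pfn << PAGE_SHIFT) -
		 ((unsigned long long) highest_pfn << PAGE_SHIFT)) >> 20;

	printf("losing %luMB (pfn math) vs %lluMB (address math)\n",
	       mb_from_pfns, mb_from_addrs);
	return 0;
}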
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index bea8474744ff..c7341e81941c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -582,7 +582,6 @@ retint_restore_args: /* return to kernel space */
TRACE_IRQS_IRETQ
restore_args:
RESTORE_ARGS 0,8,0
-iret_label:
#ifdef CONFIG_PARAVIRT
INTERRUPT_RETURN
#endif
@@ -593,13 +592,22 @@ ENTRY(native_iret)
.quad native_iret, bad_iret
.previous
.section .fixup,"ax"
- /* force a signal here? this matches i386 behaviour */
- /* running with kernel gs */
bad_iret:
- movq $11,%rdi /* SIGSEGV */
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
- jmp do_exit
+ /*
+ * The iret traps when the %cs or %ss being restored is bogus.
+ * We've lost the original trap vector and error code.
+ * #GPF is the most likely one to get for an invalid selector.
+ * So pretend we completed the iret and took the #GPF in user mode.
+ *
+ * We are now running with the kernel GS after exception recovery.
+ * But error_entry expects us to have user GS to match the user %cs,
+ * so swap back.
+ */
+ pushq $0
+
+ SWAPGS
+ jmp general_protection
+
.previous
/* edi: workmask, edx: work */
@@ -911,7 +919,7 @@ error_kernelspace:
iret run with kernel gs again, so don't set the user space flag.
B stepping K8s sometimes report an truncated RIP for IRET
exceptions returning to compat mode. Check for these here too. */
- leaq iret_label(%rip),%rbp
+ leaq native_iret(%rip),%rbp
cmpq %rbp,RIP(%rsp)
je error_swapgs
movl %ebp,%ebp /* zero extend */
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 4f283ad215ec..09b38d539b09 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -250,18 +250,13 @@ ENTRY(secondary_startup_64)
lretq
/* SMP bootup changes these two */
-#ifndef CONFIG_HOTPLUG_CPU
- .pushsection .init.data
-#endif
+ __CPUINITDATA
.align 8
- .globl initial_code
-initial_code:
+ ENTRY(initial_code)
.quad x86_64_start_kernel
-#ifndef CONFIG_HOTPLUG_CPU
- .popsection
-#endif
- .globl init_rsp
-init_rsp:
+ __FINITDATA
+
+ ENTRY(init_rsp)
.quad init_thread_union+THREAD_SIZE-8
bad_address:
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index c1cfd60639d4..d0b234c9fc31 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -151,7 +151,7 @@ NORET_TYPE void machine_kexec(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
-#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
+#ifdef CONFIG_NUMA
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index a1fef42f8cdb..236d2f8f7ddc 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -234,5 +234,10 @@ NORET_TYPE void machine_kexec(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
VMCOREINFO_SYMBOL(init_level4_pgt);
+
+#ifdef CONFIG_NUMA
+ VMCOREINFO_SYMBOL(node_data);
+ VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
+#endif
}
diff --git a/arch/x86/kernel/mpparse_32.c b/arch/x86/kernel/mpparse_32.c
index 67009cdd5eca..f349e68e45a0 100644
--- a/arch/x86/kernel/mpparse_32.c
+++ b/arch/x86/kernel/mpparse_32.c
@@ -736,7 +736,8 @@ static int __init smp_scan_config (unsigned long base, unsigned long length)
smp_found_config = 1;
printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
mpf, virt_to_phys(mpf));
- reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
+ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE,
+ BOOTMEM_DEFAULT);
if (mpf->mpf_physptr) {
/*
* We cannot access to MPC table to compute
@@ -751,7 +752,8 @@ static int __init smp_scan_config (unsigned long base, unsigned long length)
unsigned long end = max_low_pfn * PAGE_SIZE;
if (mpf->mpf_physptr + size > end)
size = end - mpf->mpf_physptr;
- reserve_bootmem(mpf->mpf_physptr, size);
+ reserve_bootmem(mpf->mpf_physptr, size,
+ BOOTMEM_DEFAULT);
}
mpf_found = mpf;
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 1fe7f043ebde..1b5464c2434f 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -35,6 +35,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/tce.h>
@@ -260,22 +261,28 @@ static void iommu_range_reserve(struct iommu_table *tbl,
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
-static unsigned long iommu_range_alloc(struct iommu_table *tbl,
- unsigned int npages)
+static unsigned long iommu_range_alloc(struct device *dev,
+ struct iommu_table *tbl,
+ unsigned int npages)
{
unsigned long flags;
unsigned long offset;
+ unsigned long boundary_size;
+
+ boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ PAGE_SIZE) >> PAGE_SHIFT;
BUG_ON(npages == 0);
spin_lock_irqsave(&tbl->it_lock, flags);
- offset = find_next_zero_string(tbl->it_map, tbl->it_hint,
- tbl->it_size, npages);
+ offset = iommu_area_alloc(tbl->it_map, tbl->it_size, tbl->it_hint,
+ npages, 0, boundary_size, 0);
if (offset == ~0UL) {
tbl->chip_ops->tce_cache_blast(tbl);
- offset = find_next_zero_string(tbl->it_map, 0,
- tbl->it_size, npages);
+
+ offset = iommu_area_alloc(tbl->it_map, tbl->it_size, 0,
+ npages, 0, boundary_size, 0);
if (offset == ~0UL) {
printk(KERN_WARNING "Calgary: IOMMU full.\n");
spin_unlock_irqrestore(&tbl->it_lock, flags);
@@ -286,7 +293,6 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
}
}
- set_bit_string(tbl->it_map, offset, npages);
tbl->it_hint = offset + npages;
BUG_ON(tbl->it_hint > tbl->it_size);
@@ -295,13 +301,13 @@ static unsigned long iommu_range_alloc(struct iommu_table *tbl,
return offset;
}
-static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *vaddr,
- unsigned int npages, int direction)
+static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
+ void *vaddr, unsigned int npages, int direction)
{
unsigned long entry;
dma_addr_t ret = bad_dma_address;
- entry = iommu_range_alloc(tbl, npages);
+ entry = iommu_range_alloc(dev, tbl, npages);
if (unlikely(entry == bad_dma_address))
goto error;
@@ -354,7 +360,7 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
badbit, tbl, dma_addr, entry, npages);
}
- __clear_bit_string(tbl->it_map, entry, npages);
+ iommu_area_free(tbl->it_map, entry, npages);
spin_unlock_irqrestore(&tbl->it_lock, flags);
}
@@ -438,7 +444,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
vaddr = (unsigned long) sg_virt(s);
npages = num_dma_pages(vaddr, s->length);
- entry = iommu_range_alloc(tbl, npages);
+ entry = iommu_range_alloc(dev, tbl, npages);
if (entry == bad_dma_address) {
/* makes sure unmap knows to stop */
s->dma_length = 0;
@@ -476,7 +482,7 @@ static dma_addr_t calgary_map_single(struct device *dev, void *vaddr,
npages = num_dma_pages(uaddr, size);
if (translation_enabled(tbl))
- dma_handle = iommu_alloc(tbl, vaddr, npages, direction);
+ dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction);
else
dma_handle = virt_to_bus(vaddr);
@@ -516,7 +522,7 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
if (translation_enabled(tbl)) {
/* set up tces to cover the allocated range */
- mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL);
+ mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
if (mapping == bad_dma_address)
goto free;
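
Both the hint-based pass and the retry from offset 0 now go through iommu_area_alloc(), which additionally guarantees that an allocation never straddles the device's DMA segment boundary. How the byte-granular boundary becomes the page count passed in (a minimal standalone sketch; the 64 KiB boundary is an assumed example value, not something the driver mandates):

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            /* e.g. a device whose DMA segments may not cross 64 KiB */
            unsigned long seg_boundary = 0xffffUL;  /* dma_get_seg_boundary(dev) */
            unsigned long boundary_size =
                    ALIGN(seg_boundary + 1, PAGE_SIZE) >> PAGE_SHIFT;

            /* 16 pages: no TCE range handed out may cross a 64 KiB line */
            printf("boundary_size = %lu pages\n", boundary_size);
            return 0;
    }
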
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 845cbecd68e9..65f6acb025c8 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -25,6 +25,7 @@
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
+#include <linux/iommu-helper.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
@@ -82,17 +83,24 @@ AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit; /* protected by iommu_bitmap_lock */
static int need_flush; /* global flush state. set for each gart wrap */
-static unsigned long alloc_iommu(int size)
+static unsigned long alloc_iommu(struct device *dev, int size)
{
unsigned long offset, flags;
+ unsigned long boundary_size;
+ unsigned long base_index;
+
+ base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
+ PAGE_SIZE) >> PAGE_SHIFT;
+ boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+ PAGE_SIZE) >> PAGE_SHIFT;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
- offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
- iommu_pages, size);
+ offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
+ size, base_index, boundary_size, 0);
if (offset == -1) {
need_flush = 1;
- offset = find_next_zero_string(iommu_gart_bitmap, 0,
- iommu_pages, size);
+ offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
+ size, base_index, boundary_size, 0);
}
if (offset != -1) {
set_bit_string(iommu_gart_bitmap, offset, size);
@@ -114,7 +122,7 @@ static void free_iommu(unsigned long offset, int size)
unsigned long flags;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
- __clear_bit_string(iommu_gart_bitmap, offset, size);
+ iommu_area_free(iommu_gart_bitmap, offset, size);
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
@@ -235,7 +243,7 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
size_t size, int dir)
{
unsigned long npages = to_pages(phys_mem, size);
- unsigned long iommu_page = alloc_iommu(npages);
+ unsigned long iommu_page = alloc_iommu(dev, npages);
int i;
if (iommu_page == -1) {
@@ -355,10 +363,11 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
}
/* Map multiple scatterlist entries continuous into the first. */
-static int __dma_map_cont(struct scatterlist *start, int nelems,
- struct scatterlist *sout, unsigned long pages)
+static int __dma_map_cont(struct device *dev, struct scatterlist *start,
+ int nelems, struct scatterlist *sout,
+ unsigned long pages)
{
- unsigned long iommu_start = alloc_iommu(pages);
+ unsigned long iommu_start = alloc_iommu(dev, pages);
unsigned long iommu_page = iommu_start;
struct scatterlist *s;
int i;
@@ -394,8 +403,8 @@ static int __dma_map_cont(struct scatterlist *start, int nelems,
}
static inline int
-dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
- unsigned long pages, int need)
+dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
+ struct scatterlist *sout, unsigned long pages, int need)
{
if (!need) {
BUG_ON(nelems != 1);
@@ -403,7 +412,7 @@ dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
sout->dma_length = start->length;
return 0;
}
- return __dma_map_cont(start, nelems, sout, pages);
+ return __dma_map_cont(dev, start, nelems, sout, pages);
}
/*
@@ -416,6 +425,8 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
struct scatterlist *s, *ps, *start_sg, *sgmap;
int need = 0, nextneed, i, out, start;
unsigned long pages = 0;
+ unsigned int seg_size;
+ unsigned int max_seg_size;
if (nents == 0)
return 0;
@@ -426,6 +437,8 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
out = 0;
start = 0;
start_sg = sgmap = sg;
+ seg_size = 0;
+ max_seg_size = dma_get_max_seg_size(dev);
ps = NULL; /* shut up gcc */
for_each_sg(sg, s, nents, i) {
dma_addr_t addr = sg_phys(s);
@@ -443,11 +456,13 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
* offset.
*/
if (!iommu_merge || !nextneed || !need || s->offset ||
+ (s->length + seg_size > max_seg_size) ||
(ps->offset + ps->length) % PAGE_SIZE) {
- if (dma_map_cont(start_sg, i - start, sgmap,
- pages, need) < 0)
+ if (dma_map_cont(dev, start_sg, i - start,
+ sgmap, pages, need) < 0)
goto error;
out++;
+ seg_size = 0;
sgmap = sg_next(sgmap);
pages = 0;
start = i;
@@ -455,11 +470,12 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
}
}
+ seg_size += s->length;
need = nextneed;
pages += to_pages(s->offset, s->length);
ps = s;
}
- if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
+ if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
goto error;
out++;
flush_gart();
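
The other addition to gart_map_sg() is the seg_size accounting: a merge is broken not only on offset or alignment mismatches but also when the coalesced segment would exceed dma_get_max_seg_size(dev). A toy model of that accounting (standalone C with made-up lengths and limit, purely illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned int lengths[] = { 4096, 8192, 61440, 4096 };
            unsigned int max_seg_size = 65536;      /* dma_get_max_seg_size(dev) */
            unsigned int seg_size = 0, segments = 0;
            int i;

            for (i = 0; i < 4; i++) {
                    if (seg_size && lengths[i] + seg_size > max_seg_size) {
                            segments++;             /* emit the merged segment */
                            seg_size = 0;           /* start a fresh one */
                    }
                    seg_size += lengths[i];
            }
            if (seg_size)
                    segments++;
            printf("%u merged segments\n", segments);       /* prints 2 */
            return 0;
    }
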
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 96286df1bb81..702c33efea84 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -103,9 +103,26 @@ static int set_segment_reg(struct task_struct *task,
if (invalid_selector(value))
return -EIO;
- if (offset != offsetof(struct user_regs_struct, gs))
+ /*
+ * For %cs and %ss we cannot permit a null selector.
+ * We can permit a bogus selector as long as it has USER_RPL.
+ * Null selectors are fine for other segment registers, but
+ * we will never get back to user mode with invalid %cs or %ss
+ * and will take the trap in iret instead. Much code relies
+ * on user_mode() to distinguish a user trap frame (which can
+ * safely use invalid selectors) from a kernel trap frame.
+ */
+ switch (offset) {
+ case offsetof(struct user_regs_struct, cs):
+ case offsetof(struct user_regs_struct, ss):
+ if (unlikely(value == 0))
+ return -EIO;
+
+ default:
*pt_regs_access(task_pt_regs(task), offset) = value;
- else {
+ break;
+
+ case offsetof(struct user_regs_struct, gs):
task->thread.gs = value;
if (task == current)
/*
@@ -227,12 +244,16 @@ static int set_segment_reg(struct task_struct *task,
* Can't actually change these in 64-bit mode.
*/
case offsetof(struct user_regs_struct,cs):
+ if (unlikely(value == 0))
+ return -EIO;
#ifdef CONFIG_IA32_EMULATION
if (test_tsk_thread_flag(task, TIF_IA32))
task_pt_regs(task)->cs = value;
#endif
break;
case offsetof(struct user_regs_struct,ss):
+ if (unlikely(value == 0))
+ return -EIO;
#ifdef CONFIG_IA32_EMULATION
if (test_tsk_thread_flag(task, TIF_IA32))
task_pt_regs(task)->ss = value;
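
From userspace, the effect of the new checks is that writing a null selector into the tracee's %cs or %ss now fails up front instead of being stored and blowing up later in iret. A minimal sketch of how that would be observed (x86-64 userspace assumed; pid is an already-attached, stopped tracee, and the function is illustrative, not part of any real test suite):

    #include <stdio.h>
    #include <errno.h>
    #include <string.h>
    #include <stddef.h>
    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <sys/user.h>

    /* Poking 0 into %cs is now rejected with -EIO by set_segment_reg(). */
    static long poke_null_cs(pid_t pid)
    {
            long ret = ptrace(PTRACE_POKEUSER, pid,
                              offsetof(struct user_regs_struct, cs), 0L);

            if (ret == -1 && errno == EIO)
                    printf("null %%cs refused: %s\n", strerror(errno));
            return ret;
    }
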
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 3cd7a2dcd4fe..6ba33ca8715a 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -380,19 +380,19 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
void force_hpet_resume(void)
{
switch (force_hpet_resume_type) {
- case ICH_FORCE_HPET_RESUME:
- return ich_force_hpet_resume();
-
- case OLD_ICH_FORCE_HPET_RESUME:
- return old_ich_force_hpet_resume();
-
- case VT8237_FORCE_HPET_RESUME:
- return vt8237_force_hpet_resume();
-
- case NVIDIA_FORCE_HPET_RESUME:
- return nvidia_force_hpet_resume();
-
- default:
+ case ICH_FORCE_HPET_RESUME:
+ ich_force_hpet_resume();
+ return;
+ case OLD_ICH_FORCE_HPET_RESUME:
+ old_ich_force_hpet_resume();
+ return;
+ case VT8237_FORCE_HPET_RESUME:
+ vt8237_force_hpet_resume();
+ return;
+ case NVIDIA_FORCE_HPET_RESUME:
+ nvidia_force_hpet_resume();
+ return;
+ default:
break;
}
}
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 62adc5f20be5..d1d8c347cc0b 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -390,7 +390,7 @@ static void __init reserve_ebda_region(void)
unsigned int addr;
addr = get_bios_ebda();
if (addr)
- reserve_bootmem(addr, PAGE_SIZE);
+ reserve_bootmem(addr, PAGE_SIZE, BOOTMEM_DEFAULT);
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -484,7 +484,8 @@ static void __init reserve_crashkernel(void)
(unsigned long)(total_mem >> 20));
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
- reserve_bootmem(crash_base, crash_size);
+ reserve_bootmem(crash_base, crash_size,
+ BOOTMEM_DEFAULT);
} else
printk(KERN_INFO "crashkernel reservation failed - "
"you have to specify a base address\n");
@@ -525,7 +526,7 @@ static void __init reserve_initrd(void)
}
if (ramdisk_end <= end_of_lowmem) {
/* All in lowmem, easy case */
- reserve_bootmem(ramdisk_image, ramdisk_size);
+ reserve_bootmem(ramdisk_image, ramdisk_size, BOOTMEM_DEFAULT);
initrd_start = ramdisk_image + PAGE_OFFSET;
initrd_end = initrd_start+ramdisk_size;
return;
@@ -536,7 +537,7 @@ static void __init reserve_initrd(void)
/* Note: this includes all the lowmem currently occupied by
the initrd, we rely on that fact to keep the data intact. */
- reserve_bootmem(ramdisk_here, ramdisk_size);
+ reserve_bootmem(ramdisk_here, ramdisk_size, BOOTMEM_DEFAULT);
initrd_start = ramdisk_here + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
@@ -606,13 +607,14 @@ void __init setup_bootmem_allocator(void)
* bootmem allocator with an invalid RAM area.
*/
reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
- bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text));
+ bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text),
+ BOOTMEM_DEFAULT);
/*
* reserve physical page 0 - it's a special BIOS page on many boxes,
* enabling clean reboots, SMP operation, laptop functions.
*/
- reserve_bootmem(0, PAGE_SIZE);
+ reserve_bootmem(0, PAGE_SIZE, BOOTMEM_DEFAULT);
/* reserve EBDA region, it's a 4K region */
reserve_ebda_region();
@@ -622,7 +624,7 @@ void __init setup_bootmem_allocator(void)
unless you have no PS/2 mouse plugged in. */
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86 == 6)
- reserve_bootmem(0xa0000 - 4096, 4096);
+ reserve_bootmem(0xa0000 - 4096, 4096, BOOTMEM_DEFAULT);
#ifdef CONFIG_SMP
/*
@@ -630,7 +632,7 @@ void __init setup_bootmem_allocator(void)
* FIXME: Don't need the extra page at 4K, but need to fix
* trampoline before removing it. (see the GDT stuff)
*/
- reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
+ reserve_bootmem(PAGE_SIZE, PAGE_SIZE, BOOTMEM_DEFAULT);
#endif
#ifdef CONFIG_ACPI_SLEEP
/*
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index c8939dfddfba..a49f5f734a5e 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -189,7 +189,7 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
e820_register_active_regions(0, start_pfn, end_pfn);
free_bootmem_with_active_regions(0, end_pfn);
- reserve_bootmem(bootmap, bootmap_size);
+ reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
}
#endif
@@ -220,28 +220,35 @@ static inline void copy_edd(void)
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
- unsigned long long free_mem;
+ unsigned long long total_mem;
unsigned long long crash_size, crash_base;
int ret;
- free_mem =
- ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
+ total_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
- ret = parse_crashkernel(boot_command_line, free_mem,
+ ret = parse_crashkernel(boot_command_line, total_mem,
&crash_size, &crash_base);
if (ret == 0 && crash_size) {
- if (crash_base > 0) {
- printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
- "for crashkernel (System RAM: %ldMB)\n",
- (unsigned long)(crash_size >> 20),
- (unsigned long)(crash_base >> 20),
- (unsigned long)(free_mem >> 20));
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
- reserve_bootmem(crash_base, crash_size);
- } else
+ if (crash_base <= 0) {
printk(KERN_INFO "crashkernel reservation failed - "
"you have to specify a base address\n");
+ return;
+ }
+
+ if (reserve_bootmem(crash_base, crash_size,
+ BOOTMEM_EXCLUSIVE) < 0) {
+ printk(KERN_INFO "crashkernel reservation failed - "
+ "memory is in use\n");
+ return;
+ }
+
+ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
+ "for crashkernel (System RAM: %ldMB)\n",
+ (unsigned long)(crash_size >> 20),
+ (unsigned long)(crash_base >> 20),
+ (unsigned long)(total_mem >> 20));
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
}
}
#else
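
All of the reserve_bootmem()/reserve_bootmem_node() call sites in this series grow a third argument because the interface now takes a flags word: BOOTMEM_DEFAULT keeps the old fire-and-forget behaviour, while BOOTMEM_EXCLUSIVE (used for the crashkernel window above) makes the call return an error if any part of the range is already reserved. A minimal sketch of a caller under the new convention (kernel-flavoured, written against this tree's API; not an actual driver):

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/errno.h>
    #include <linux/bootmem.h>

    static int __init reserve_example_region(unsigned long base, unsigned long size)
    {
            /* Fail instead of silently double-reserving the range. */
            if (reserve_bootmem(base, size, BOOTMEM_EXCLUSIVE) < 0) {
                    printk(KERN_INFO "region at %#lx already in use\n", base);
                    return -EBUSY;
            }
            return 0;
    }
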
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 5787a0c3e296..579b9b740c7c 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -202,8 +202,6 @@ valid_k7:
;
}
-extern void calibrate_delay(void);
-
static atomic_t init_deasserted;
static void __cpuinit smp_callin(void)
diff --git a/arch/x86/kernel/srat_32.c b/arch/x86/kernel/srat_32.c
index 2bf6903cb444..b72e61359c36 100644
--- a/arch/x86/kernel/srat_32.c
+++ b/arch/x86/kernel/srat_32.c
@@ -274,7 +274,7 @@ int __init get_memcfg_from_srat(void)
int tables = 0;
int i = 0;
- rsdp_address = acpi_find_rsdp();
+ rsdp_address = acpi_os_get_root_pointer();
if (!rsdp_address) {
printk("%s: System description tables not found\n",
__FUNCTION__);
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 8344c70adf61..adff5562f5fd 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -321,6 +321,8 @@ ENTRY(sys_call_table)
.long sys_epoll_pwait
.long sys_utimensat /* 320 */
.long sys_signalfd
- .long sys_timerfd
+ .long sys_timerfd_create
.long sys_eventfd
.long sys_fallocate
+ .long sys_timerfd_settime /* 325 */
+ .long sys_timerfd_gettime
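
With this change the single sys_timerfd slot becomes the three-call interface: timerfd_create plus timerfd_settime/timerfd_gettime at entries 325 and 326. From userspace the split looks like this (a minimal sketch assuming a libc that wraps the new syscalls; otherwise syscall(2) with the numbers above would be used directly):

    #include <sys/timerfd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
            struct itimerspec its = { .it_value = { .tv_sec = 1 } };
            uint64_t expirations;
            int fd = timerfd_create(CLOCK_MONOTONIC, 0);

            if (fd < 0)
                    return 1;
            timerfd_settime(fd, 0, &its, NULL);             /* arm: one-shot, 1 s */
            read(fd, &expirations, sizeof(expirations));    /* blocks until expiry */
            printf("expired %llu time(s)\n", (unsigned long long)expirations);
            close(fd);
            return 0;
    }
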
diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c
index 36c100c323aa..10b8a6f69f84 100644
--- a/arch/x86/kernel/test_nx.c
+++ b/arch/x86/kernel/test_nx.c
@@ -139,7 +139,6 @@ static int test_NX(void)
* Until then, don't run them to avoid too many people getting scared
* by the error message
*/
-#if 0
#ifdef CONFIG_DEBUG_RODATA
/* Test 3: Check if the .rodata section is executable */
@@ -152,6 +151,7 @@ static int test_NX(void)
}
#endif
+#if 0
/* Test 4: Check if the .data section of a module is executable */
if (test_address(&test_data)) {
printk(KERN_ERR "test_nx: .data section is executable\n");
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 3cf72977d012..b22c01e05a18 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -1176,17 +1176,12 @@ void __init trap_init(void)
#endif
set_trap_gate(19,&simd_coprocessor_error);
+ /*
+ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
+ * Generate a build-time error if the alignment is wrong.
+ */
+ BUILD_BUG_ON(offsetof(struct task_struct, thread.i387.fxsave) & 15);
if (cpu_has_fxsr) {
- /*
- * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
- * Generates a compile-time "error: zero width for bit-field" if
- * the alignment is wrong.
- */
- struct fxsrAlignAssert {
- int _:!(offsetof(struct task_struct,
- thread.i387.fxsave) & 15);
- };
-
printk(KERN_INFO "Enabling fast FPU save and restore... ");
set_in_cr4(X86_CR4_OSFXSR);
printk("done.\n");
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8f94a0b89dff..cf5308148689 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1739,7 +1739,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
if (bytes == 8) {
gpa_t gpa;
struct page *page;
- char *addr;
+ char *kaddr;
u64 val;
down_read(&current->mm->mmap_sem);
@@ -1754,9 +1754,9 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
val = *(u64 *)new;
page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
- addr = kmap_atomic(page, KM_USER0);
- set_64bit((u64 *)(addr + offset_in_page(gpa)), val);
- kunmap_atomic(addr, KM_USER0);
+ kaddr = kmap_atomic(page, KM_USER0);
+ set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
+ kunmap_atomic(kaddr, KM_USER0);
kvm_release_page_dirty(page);
emul_write:
up_read(&current->mm->mmap_sem);
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 4876182daf8a..25df1c1989fe 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -21,7 +21,7 @@ else
lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
lib-y += thunk_64.o clear_page_64.o copy_page_64.o
- lib-y += bitstr_64.o bitops_64.o
+ lib-y += bitops_64.o
lib-y += memmove_64.o memset_64.o
lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
endif
diff --git a/arch/x86/lib/bitstr_64.c b/arch/x86/lib/bitstr_64.c
deleted file mode 100644
index 7445caf1b5de..000000000000
--- a/arch/x86/lib/bitstr_64.c
+++ /dev/null
@@ -1,28 +0,0 @@
-#include <linux/module.h>
-#include <linux/bitops.h>
-
-/* Find string of zero bits in a bitmap */
-unsigned long
-find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len)
-{
- unsigned long n, end, i;
-
- again:
- n = find_next_zero_bit(bitmap, nbits, start);
- if (n == -1)
- return -1;
-
- /* could test bitsliced, but it's hardly worth it */
- end = n+len;
- if (end > nbits)
- return -1;
- for (i = n+1; i < end; i++) {
- if (test_bit(i, bitmap)) {
- start = i+1;
- goto again;
- }
- }
- return n;
-}
-
-EXPORT_SYMBOL(find_next_zero_string);
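
find_next_zero_string() can go because the Calgary and GART changes above now use the generic iommu_area_alloc() from lib/iommu-helper.c, which performs the same run-of-clear-bits search but additionally refuses runs that cross a segment-boundary multiple. A toy model of that extra constraint (standalone C, illustration only; the real helper works on word-sized bitmaps, not byte arrays):

    #include <stdio.h>

    static long find_zero_run(const unsigned char *map, long nbits, int len,
                              long boundary)
    {
            long n;
            int i;

            for (n = 0; n + len <= nbits; n++) {
                    if (boundary && n / boundary != (n + len - 1) / boundary)
                            continue;       /* run would straddle a boundary */
                    for (i = 0; i < len; i++)
                            if (map[n + i])
                                    break;
                    if (i == len)
                            return n;
            }
            return -1;
    }

    int main(void)
    {
            unsigned char map[32] = { 0 };

            map[2] = 1;     /* bit 2 already allocated */
            /* A 6-bit run that may not cross an 8-bit boundary lands at
             * bit 8, not at bit 3 where a plain zero-string search stops. */
            printf("offset %ld\n", find_zero_run(map, 32, 6, 8));
            return 0;
    }
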
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
index aad9d95469dc..4535e6d147ad 100644
--- a/arch/x86/lib/delay_32.c
+++ b/arch/x86/lib/delay_32.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
+#include <linux/init.h>
#include <asm/processor.h>
#include <asm/delay.h>
@@ -63,7 +65,7 @@ void use_tsc_delay(void)
delay_fn = delay_tsc;
}
-int read_current_timer(unsigned long *timer_val)
+int __devinit read_current_timer(unsigned long *timer_val)
{
if (delay_fn == delay_tsc) {
rdtscl(*timer_val);
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
index 45cdd3fbd91c..bbc610518516 100644
--- a/arch/x86/lib/delay_64.c
+++ b/arch/x86/lib/delay_64.c
@@ -10,8 +10,10 @@
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
+#include <linux/init.h>
#include <asm/delay.h>
#include <asm/msr.h>
@@ -20,7 +22,7 @@
#include <asm/smp.h>
#endif
-int read_current_timer(unsigned long *timer_value)
+int __devinit read_current_timer(unsigned long *timer_value)
{
rdtscll(*timer_value);
return 0;
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index dffa786f61fe..3cc8eb2f36a9 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -444,8 +444,6 @@ static __u32 __init setup_trampoline(void)
static void __init start_secondary(void *unused)
{
__u8 cpuid = hard_smp_processor_id();
- /* external functions not defined in the headers */
- extern void calibrate_delay(void);
cpu_init();
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 04b1d20e2613..c394ca0720b8 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -391,7 +391,8 @@ unsigned long __init setup_memory(void)
void __init numa_kva_reserve(void)
{
if (kva_pages)
- reserve_bootmem(PFN_PHYS(kva_start_pfn), PFN_PHYS(kva_pages));
+ reserve_bootmem(PFN_PHYS(kva_start_pfn), PFN_PHYS(kva_pages),
+ BOOTMEM_DEFAULT);
}
void __init zone_sizes_init(void)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index ad8b9733d6b3..621afb6343dc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -428,6 +428,16 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
}
#endif
+static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+{
+ if ((error_code & PF_WRITE) && !pte_write(*pte))
+ return 0;
+ if ((error_code & PF_INSTR) && !pte_exec(*pte))
+ return 0;
+
+ return 1;
+}
+
/*
* Handle a spurious fault caused by a stale TLB entry. This allows
* us to lazily refresh the TLB when increasing the permissions of a
@@ -457,20 +467,21 @@ static int spurious_fault(unsigned long address,
if (!pud_present(*pud))
return 0;
+ if (pud_large(*pud))
+ return spurious_fault_check(error_code, (pte_t *) pud);
+
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return 0;
+ if (pmd_large(*pmd))
+ return spurious_fault_check(error_code, (pte_t *) pmd);
+
pte = pte_offset_kernel(pmd, address);
if (!pte_present(*pte))
return 0;
- if ((error_code & PF_WRITE) && !pte_write(*pte))
- return 0;
- if ((error_code & PF_INSTR) && !pte_exec(*pte))
- return 0;
-
- return 1;
+ return spurious_fault_check(error_code, pte);
}
/*
@@ -947,11 +958,12 @@ void vmalloc_sync_all(void)
for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
if (!test_bit(pgd_index(address), insync)) {
const pgd_t *pgd_ref = pgd_offset_k(address);
+ unsigned long flags;
struct page *page;
if (pgd_none(*pgd_ref))
continue;
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
list_for_each_entry(page, &pgd_list, lru) {
pgd_t *pgd;
pgd = (pgd_t *)page_address(page) + pgd_index(address);
@@ -960,7 +972,7 @@ void vmalloc_sync_all(void)
else
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
}
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
set_bit(pgd_index(address), insync);
}
if (address == start)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3a98d6f724ab..5fe880fc305d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -591,10 +591,17 @@ void mark_rodata_ro(void)
if (end <= start)
return;
- set_memory_ro(start, (end - start) >> PAGE_SHIFT);
printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
(end - start) >> 10);
+ set_memory_ro(start, (end - start) >> PAGE_SHIFT);
+
+ /*
+ * The rodata section (but not the kernel text!) should also be
+ * not-executable.
+ */
+ start = ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+ set_memory_nx(start, (end - start) >> PAGE_SHIFT);
rodata_test();
@@ -637,9 +644,9 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
/* Should check here against the e820 map to avoid double free */
#ifdef CONFIG_NUMA
- reserve_bootmem_node(NODE_DATA(nid), phys, len);
+ reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT);
#else
- reserve_bootmem(phys, len);
+ reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
#endif
if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
dma_reserve += len / PAGE_SIZE;
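
mark_rodata_ro() now also clears the execute permission, but only from the first page boundary at or above __start_rodata, so kernel text sharing that page keeps its executable mapping. The rounding itself (a standalone sketch; the address is invented purely to show the arithmetic):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long start_rodata = 0xc0512345UL;      /* made-up address */
            unsigned long start = (start_rodata + PAGE_SIZE - 1) & PAGE_MASK;

            printf("NX applied from %#lx\n", start);        /* 0xc0513000 */
            return 0;
    }
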
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 5a02bf4c91ec..1aecc658cd7d 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -238,9 +238,10 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
free_bootmem_with_active_regions(nodeid, end);
- reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
+ reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size,
+ BOOTMEM_DEFAULT);
reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
- bootmap_pages<<PAGE_SHIFT);
+ bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
#ifdef CONFIG_ACPI_NUMA
srat_reserve_add_area(nodeid);
#endif
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 398f3a578dde..ed8201600354 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -5,6 +5,7 @@
* and compares page tables forwards and afterwards.
*/
#include <linux/bootmem.h>
+#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -14,8 +15,13 @@
#include <asm/pgtable.h>
#include <asm/kdebug.h>
+/*
+ * Only print the results of the first pass:
+ */
+static __read_mostly int print = 1;
+
enum {
- NTEST = 4000,
+ NTEST = 400,
#ifdef CONFIG_X86_64
LPS = (1 << PMD_SHIFT),
#elif defined(CONFIG_X86_PAE)
@@ -31,7 +37,7 @@ struct split_state {
long min_exec, max_exec;
};
-static __init int print_split(struct split_state *s)
+static int print_split(struct split_state *s)
{
long i, expected, missed = 0;
int printed = 0;
@@ -82,10 +88,13 @@ static __init int print_split(struct split_state *s)
s->max_exec = addr;
}
}
- printk(KERN_INFO
- "CPA mapping 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
- s->spg, s->lpg, s->gpg, s->exec,
- s->min_exec != ~0UL ? s->min_exec : 0, s->max_exec, missed);
+ if (print) {
+ printk(KERN_INFO
+ " 4k %lu large %lu gb %lu x %lu[%lx-%lx] miss %lu\n",
+ s->spg, s->lpg, s->gpg, s->exec,
+ s->min_exec != ~0UL ? s->min_exec : 0,
+ s->max_exec, missed);
+ }
expected = (s->gpg*GPS + s->lpg*LPS)/PAGE_SIZE + s->spg + missed;
if (expected != i) {
@@ -96,11 +105,11 @@ static __init int print_split(struct split_state *s)
return err;
}
-static unsigned long __initdata addr[NTEST];
-static unsigned int __initdata len[NTEST];
+static unsigned long addr[NTEST];
+static unsigned int len[NTEST];
/* Change the global bit on random pages in the direct mapping */
-static __init int exercise_pageattr(void)
+static int pageattr_test(void)
{
struct split_state sa, sb, sc;
unsigned long *bm;
@@ -110,7 +119,8 @@ static __init int exercise_pageattr(void)
int i, k;
int err;
- printk(KERN_INFO "CPA exercising pageattr\n");
+ if (print)
+ printk(KERN_INFO "CPA self-test:\n");
bm = vmalloc((max_pfn_mapped + 7) / 8);
if (!bm) {
@@ -186,7 +196,6 @@ static __init int exercise_pageattr(void)
failed += print_split(&sb);
- printk(KERN_INFO "CPA reverting everything\n");
for (i = 0; i < NTEST; i++) {
if (!addr[i])
continue;
@@ -214,12 +223,40 @@ static __init int exercise_pageattr(void)
failed += print_split(&sc);
if (failed) {
- printk(KERN_ERR "CPA selftests NOT PASSED. Please report.\n");
+ printk(KERN_ERR "NOT PASSED. Please report.\n");
WARN_ON(1);
+ return -EINVAL;
} else {
- printk(KERN_INFO "CPA selftests PASSED\n");
+ if (print)
+ printk(KERN_INFO "ok.\n");
}
return 0;
}
-module_init(exercise_pageattr);
+
+static int do_pageattr_test(void *__unused)
+{
+ while (!kthread_should_stop()) {
+ schedule_timeout_interruptible(HZ*30);
+ if (pageattr_test() < 0)
+ break;
+ if (print)
+ print--;
+ }
+ return 0;
+}
+
+static int start_pageattr_test(void)
+{
+ struct task_struct *p;
+
+ p = kthread_create(do_pageattr_test, NULL, "pageattr-test");
+ if (!IS_ERR(p))
+ wake_up_process(p);
+ else
+ WARN_ON(1);
+
+ return 0;
+}
+
+module_init(start_pageattr_test);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bb55a78dcd62..8493c855582b 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -167,8 +167,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
if (within(address, virt_to_highmap(_text), virt_to_highmap(_etext)))
pgprot_val(forbidden) |= _PAGE_NX;
-
-#ifdef CONFIG_DEBUG_RODATA
/* The .rodata section needs to be read-only */
if (within(address, (unsigned long)__start_rodata,
(unsigned long)__end_rodata))
@@ -179,7 +177,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
if (within(address, virt_to_highmap(__start_rodata),
virt_to_highmap(__end_rodata)))
pgprot_val(forbidden) |= _PAGE_RW;
-#endif
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
@@ -237,7 +234,6 @@ static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
if (!SHARED_KERNEL_PMD) {
struct page *page;
- address = __pa(address);
list_for_each_entry(page, &pgd_list, lru) {
pgd_t *pgd;
pud_t *pud;
@@ -261,17 +257,6 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
pgprot_t old_prot, new_prot;
int level, do_split = 1;
- /*
- * An Athlon 64 X2 showed hard hangs if we tried to preserve
- * largepages and changed the PSE entry from RW to RO.
- *
- * As AMD CPUs have a long series of erratas in this area,
- * (and none of the known ones seem to explain this hang),
- * disable this code until the hang can be debugged:
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
- return 1;
-
spin_lock_irqsave(&pgd_lock, flags);
/*
* Check for races, another CPU might have split this page
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index c7db504be1ea..6c1914622a88 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -272,7 +272,7 @@ static void pgd_dtor(void *pgd)
* preallocate which never got a corresponding vma will need to be
* freed manually.
*/
-static void pgd_mop_up_pmds(pgd_t *pgdp)
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
int i;
@@ -285,7 +285,7 @@ static void pgd_mop_up_pmds(pgd_t *pgdp)
pgdp[i] = native_make_pgd(0);
paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
- pmd_free(pmd);
+ pmd_free(mm, pmd);
}
}
}
@@ -313,7 +313,7 @@ static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
pmd_t *pmd = pmd_alloc_one(mm, addr);
if (!pmd) {
- pgd_mop_up_pmds(pgd);
+ pgd_mop_up_pmds(mm, pgd);
return 0;
}
@@ -333,7 +333,7 @@ static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
return 1;
}
-static void pgd_mop_up_pmds(pgd_t *pgd)
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
}
#endif /* CONFIG_X86_PAE */
@@ -352,9 +352,9 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
return pgd;
}
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- pgd_mop_up_pmds(pgd);
+ pgd_mop_up_pmds(mm, pgd);
quicklist_free(0, pgd_dtor, pgd);
}
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 65416f843e59..ecd91ea8a8ae 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -488,7 +488,8 @@ void __init srat_reserve_add_area(int nodeid)
printk(KERN_INFO "SRAT: This will cost you %Lu MB of "
"pre-allocated memory.\n", (unsigned long long)total_mb);
reserve_bootmem_node(NODE_DATA(nodeid), nodes_add[nodeid].start,
- nodes_add[nodeid].end - nodes_add[nodeid].start);
+ nodes_add[nodeid].end - nodes_add[nodeid].start,
+ BOOTMEM_DEFAULT);
}
}
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 42ba0e2da1a0..103b9dff1213 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -72,7 +72,7 @@ pcibios_align_resource(void *data, struct resource *res,
}
}
}
-
+EXPORT_SYMBOL(pcibios_align_resource);
/*
* Handle resources of PCI devices. If the world were perfect, we could
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 60d29fe0b1bd..8df1e842f6d4 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -204,7 +204,7 @@ again:
}
#ifndef CONFIG_GENERIC_CALIBRATE_DELAY
-void __devinit calibrate_delay(void)
+void __cpuinit calibrate_delay(void)
{
loops_per_jiffy = CCOUNT_PER_JIFFY;
printk("Calibrating delay loop (skipped)... "
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 047e533fcc5b..0f6282207b32 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -35,7 +35,7 @@
* @src: src page
* @offset: offset in pages to start transaction
* @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
* @depend_tx: memcpy depends on the result of this transaction
* @cb_fn: function to call when the memcpy completes
* @cb_param: parameter to pass to the callback routine
@@ -46,33 +46,29 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
+ struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
+ &dest, 1, &src, 1, len);
struct dma_device *device = chan ? chan->device : NULL;
- int int_en = cb_fn ? 1 : 0;
- struct dma_async_tx_descriptor *tx = device ?
- device->device_prep_dma_memcpy(chan, len,
- int_en) : NULL;
+ struct dma_async_tx_descriptor *tx = NULL;
- if (tx) { /* run the memcpy asynchronously */
- dma_addr_t addr;
- enum dma_data_direction dir;
+ if (device) {
+ dma_addr_t dma_dest, dma_src;
+ unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
- pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
-
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_FROM_DEVICE;
-
- addr = dma_map_page(device->dev, dest, dest_offset, len, dir);
- tx->tx_set_dest(addr, tx, 0);
+ dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
+ DMA_FROM_DEVICE);
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_TO_DEVICE;
+ dma_src = dma_map_page(device->dev, src, src_offset, len,
+ DMA_TO_DEVICE);
- addr = dma_map_page(device->dev, src, src_offset, len, dir);
- tx->tx_set_src(addr, tx, 0);
+ tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
+ len, dma_prep_flags);
+ }
+ if (tx) {
+ pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
- } else { /* run the memcpy synchronously */
+ } else {
void *dest_buf, *src_buf;
pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 66ef6351202e..09c0e83664bc 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,7 +35,7 @@
* @val: fill value
* @offset: offset in pages to start transaction
* @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: memset depends on the result of this transaction
* @cb_fn: function to call when the memcpy completes
* @cb_param: parameter to pass to the callback routine
@@ -46,24 +46,24 @@ async_memset(struct page *dest, int val, unsigned int offset,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET);
+ struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
+ &dest, 1, NULL, 0, len);
struct dma_device *device = chan ? chan->device : NULL;
- int int_en = cb_fn ? 1 : 0;
- struct dma_async_tx_descriptor *tx = device ?
- device->device_prep_dma_memset(chan, val, len,
- int_en) : NULL;
+ struct dma_async_tx_descriptor *tx = NULL;
- if (tx) { /* run the memset asynchronously */
- dma_addr_t dma_addr;
- enum dma_data_direction dir;
+ if (device) {
+ dma_addr_t dma_dest;
+ unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
- pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_FROM_DEVICE;
+ dma_dest = dma_map_page(device->dev, dest, offset, len,
+ DMA_FROM_DEVICE);
- dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
- tx->tx_set_dest(dma_addr, tx, 0);
+ tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
+ dma_prep_flags);
+ }
+ if (tx) {
+ pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
} else { /* run the memset synchronously */
void *dest_buf;
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index bc18cbb8ea79..562882189de5 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -57,8 +57,7 @@ static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
*/
static spinlock_t async_tx_lock;
-static struct list_head
-async_tx_master_list = LIST_HEAD_INIT(async_tx_master_list);
+static LIST_HEAD(async_tx_master_list);
/* async_tx_issue_pending_all - start all transactions on all channels */
void async_tx_issue_pending_all(void)
@@ -362,13 +361,13 @@ static void __exit async_tx_exit(void)
}
/**
- * async_tx_find_channel - find a channel to carry out the operation or let
+ * __async_tx_find_channel - find a channel to carry out the operation or let
* the transaction execute synchronously
* @depend_tx: transaction dependency
* @tx_type: transaction type
*/
struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
enum dma_transaction_type tx_type)
{
/* see if we can keep the chain on one channel */
@@ -384,7 +383,7 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
} else
return NULL;
}
-EXPORT_SYMBOL_GPL(async_tx_find_channel);
+EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#else
static int __init async_tx_init(void)
{
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 2575f674dcd5..2259a4ff15cb 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -30,35 +30,51 @@
#include <linux/raid/xor.h>
#include <linux/async_tx.h>
-static void
-do_async_xor(struct dma_async_tx_descriptor *tx, struct dma_device *device,
+/* do_async_xor - dma map the pages and perform the xor with an engine.
+ * This routine is marked __always_inline so it can be compiled away
+ * when CONFIG_DMA_ENGINE=n
+ */
+static __always_inline struct dma_async_tx_descriptor *
+do_async_xor(struct dma_device *device,
struct dma_chan *chan, struct page *dest, struct page **src_list,
unsigned int offset, unsigned int src_cnt, size_t len,
enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- dma_addr_t dma_addr;
- enum dma_data_direction dir;
+ dma_addr_t dma_dest;
+ dma_addr_t *dma_src = (dma_addr_t *) src_list;
+ struct dma_async_tx_descriptor *tx;
int i;
+ unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
pr_debug("%s: len: %zu\n", __FUNCTION__, len);
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_FROM_DEVICE;
-
- dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
- tx->tx_set_dest(dma_addr, tx, 0);
-
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_TO_DEVICE;
+ dma_dest = dma_map_page(device->dev, dest, offset, len,
+ DMA_FROM_DEVICE);
- for (i = 0; i < src_cnt; i++) {
- dma_addr = dma_map_page(device->dev, src_list[i],
- offset, len, dir);
- tx->tx_set_src(dma_addr, tx, i);
+ for (i = 0; i < src_cnt; i++)
+ dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
+ len, DMA_TO_DEVICE);
+
+ /* Since we have clobbered the src_list we are committed
+ * to doing this asynchronously. Drivers force forward progress
+ * in case they can not provide a descriptor
+ */
+ tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
+ dma_prep_flags);
+ if (!tx) {
+ if (depend_tx)
+ dma_wait_for_async_tx(depend_tx);
+
+ while (!tx)
+ tx = device->device_prep_dma_xor(chan, dma_dest,
+ dma_src, src_cnt, len,
+ dma_prep_flags);
}
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+
+ return tx;
}
static void
@@ -102,7 +118,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
* @src_cnt: number of source pages
* @len: length in bytes
* @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST,
- * ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: xor depends on the result of this transaction.
* @cb_fn: function to call when the xor completes
* @cb_param: parameter to pass to the callback routine
@@ -113,14 +129,16 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR);
+ struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
+ &dest, 1, src_list,
+ src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
dma_async_tx_callback _cb_fn;
void *_cb_param;
unsigned long local_flags;
int xor_src_cnt;
- int i = 0, src_off = 0, int_en;
+ int i = 0, src_off = 0;
BUG_ON(src_cnt <= 1);
@@ -140,20 +158,11 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
_cb_param = cb_param;
}
- int_en = _cb_fn ? 1 : 0;
-
- tx = device->device_prep_dma_xor(
- chan, xor_src_cnt, len, int_en);
-
- if (tx) {
- do_async_xor(tx, device, chan, dest,
- &src_list[src_off], offset, xor_src_cnt, len,
- local_flags, depend_tx, _cb_fn,
- _cb_param);
- } else /* fall through */
- goto xor_sync;
+ tx = do_async_xor(device, chan, dest,
+ &src_list[src_off], offset,
+ xor_src_cnt, len, local_flags,
+ depend_tx, _cb_fn, _cb_param);
} else { /* run the xor synchronously */
-xor_sync:
/* in the sync case the dest is an implied source
* (assumes the dest is at the src_off index)
*/
@@ -242,7 +251,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
* @src_cnt: number of source pages
* @len: length in bytes
* @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: xor depends on the result of this transaction.
* @cb_fn: function to call when the xor completes
* @cb_param: parameter to pass to the callback routine
@@ -254,29 +263,36 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM);
+ struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
+ &dest, 1, src_list,
+ src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
- int int_en = cb_fn ? 1 : 0;
- struct dma_async_tx_descriptor *tx = device ?
- device->device_prep_dma_zero_sum(chan, src_cnt, len, result,
- int_en) : NULL;
- int i;
+ struct dma_async_tx_descriptor *tx = NULL;
BUG_ON(src_cnt <= 1);
- if (tx) {
- dma_addr_t dma_addr;
- enum dma_data_direction dir;
+ if (device) {
+ dma_addr_t *dma_src = (dma_addr_t *) src_list;
+ unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+ int i;
pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_TO_DEVICE;
-
- for (i = 0; i < src_cnt; i++) {
- dma_addr = dma_map_page(device->dev, src_list[i],
- offset, len, dir);
- tx->tx_set_src(dma_addr, tx, i);
+ for (i = 0; i < src_cnt; i++)
+ dma_src[i] = dma_map_page(device->dev, src_list[i],
+ offset, len, DMA_TO_DEVICE);
+
+ tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
+ len, result,
+ dma_prep_flags);
+ if (!tx) {
+ if (depend_tx)
+ dma_wait_for_async_tx(depend_tx);
+
+ while (!tx)
+ tx = device->device_prep_dma_zero_sum(chan,
+ dma_src, src_cnt, len, result,
+ dma_prep_flags);
}
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
@@ -311,6 +327,16 @@ EXPORT_SYMBOL_GPL(async_xor_zero_sum);
static int __init async_xor_init(void)
{
+ #ifdef CONFIG_DMA_ENGINE
+ /* To conserve stack space the input src_list (array of page pointers)
+ * is reused to hold the array of dma addresses passed to the driver.
+ * This conversion is only possible when dma_addr_t is less than the
+ * the size of a pointer. HIGHMEM64G is known to violate this
+ * assumption.
+ */
+ BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
+ #endif
+
return 0;
}
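
The new BUILD_BUG_ON documents the trick the mapping loops above rely on: the caller's array of page pointers is overwritten in place with bus addresses, which is only safe while a dma_addr_t fits in a pointer slot. A userspace model of that constraint (illustration only; the typedef is an assumption standing in for the non-HIGHMEM64G configuration):

    #include <stdio.h>

    typedef unsigned long dma_addr_t;       /* assumed: fits in a pointer */
    struct page;

    int main(void)
    {
            struct page *src_list[4] = { 0 };
            dma_addr_t *dma_src = (dma_addr_t *)src_list;   /* in-place reuse */

            printf("pointer: %zu bytes, dma_addr_t: %zu bytes -> reuse %s\n",
                   sizeof(struct page *), sizeof(dma_addr_t),
                   sizeof(dma_addr_t) <= sizeof(struct page *) ? "ok" : "broken");
            (void)dma_src;
            return 0;
    }
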
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 6affff882cf8..61ac42e1e32b 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -224,7 +224,7 @@ static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = ERR_PTR(-EINVAL);
if (!is_power_of_2(alg->cra_blocksize))
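
This ERR_PTR(PTR_ERR(alg)) to ERR_CAST(alg) conversion repeats in cryptd, ecb, hmac, lrw, pcbc and xcbc below; the helper simply re-types an error-encoded pointer without decoding and re-encoding the errno by hand. A userspace model of the idiom (the macros below only approximate the kernel's err.h, they are not the real definitions):

    #include <stdio.h>

    #define MAX_ERRNO     4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define PTR_ERR(ptr)  ((long)(ptr))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
    #define ERR_CAST(ptr) ((void *)(ptr))

    struct crypto_alg;              /* stand-ins for the real types */
    struct crypto_instance;

    int main(void)
    {
            struct crypto_alg *alg = ERR_PTR(-2);           /* failed lookup: -ENOENT */
            struct crypto_instance *inst = ERR_CAST(alg);   /* re-type, keep the errno */

            if (IS_ERR(inst))
                    printf("propagated error %ld\n", PTR_ERR(inst));  /* -2 */
            return 0;
    }
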
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 074298f2f8e3..250425263e00 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -230,7 +230,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = cryptd_alloc_instance(alg, state);
if (IS_ERR(inst))
@@ -267,7 +267,7 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_PTR(PTR_ERR(algt));
+ return ERR_CAST(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 6310387a872c..a46838e98a71 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -128,7 +128,7 @@ static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = crypto_alloc_instance("ecb", alg);
if (IS_ERR(inst))
diff --git a/crypto/hmac.c b/crypto/hmac.c
index a1d016a50e7d..b60c3c7aa320 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -213,7 +213,7 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_HASH_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = crypto_alloc_instance("hmac", alg);
if (IS_ERR(inst))
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 621095db28b3..9d52e580d10a 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -241,7 +241,7 @@ static struct crypto_instance *alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = crypto_alloc_instance("lrw", alg);
if (IS_ERR(inst))
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index fe704775f88f..d1b8bdfb5855 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -234,7 +234,7 @@ static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = crypto_alloc_instance("pcbc", alg);
if (IS_ERR(inst))
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index a82959df678c..86727403e5ab 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -301,7 +301,7 @@ static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
switch(alg->cra_blocksize) {
case 16:
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3f8a231fe754..b86877bdc7ac 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -52,12 +52,16 @@ source "drivers/i2c/Kconfig"
source "drivers/spi/Kconfig"
+source "drivers/gpio/Kconfig"
+
source "drivers/w1/Kconfig"
source "drivers/power/Kconfig"
source "drivers/hwmon/Kconfig"
+source "drivers/thermal/Kconfig"
+
source "drivers/watchdog/Kconfig"
source "drivers/ssb/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 0ee9a8a4095e..30ba97ec5eb5 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -5,6 +5,7 @@
# Rewritten to use lists instead of if-statements.
#
+obj-$(CONFIG_HAVE_GPIO_LIB) += gpio/
obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_PARISC) += parisc/
obj-$(CONFIG_RAPIDIO) += rapidio/
@@ -64,6 +65,7 @@ obj-y += i2c/
obj-$(CONFIG_W1) += w1/
obj-$(CONFIG_POWER_SUPPLY) += power/
obj-$(CONFIG_HWMON) += hwmon/
+obj-$(CONFIG_THERMAL) += thermal/
obj-$(CONFIG_WATCHDOG) += watchdog/
obj-$(CONFIG_PHONE) += telephony/
obj-$(CONFIG_MD) += md/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index ccf6ea95f68c..7ef172c2a1d6 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -68,26 +68,28 @@ config ACPI_PROCFS
Say N to delete /proc/acpi/ files that have moved to /sys/
config ACPI_PROCFS_POWER
- bool "Deprecated power /proc/acpi folders"
+ bool "Deprecated power /proc/acpi directories"
depends on PROC_FS
default y
---help---
For backwards compatibility, this option allows
- deprecated power /proc/acpi/ folders to exist, even when
+ deprecated power /proc/acpi/ directories to exist, even when
they have been replaced by functions in /sys.
- The deprecated folders (and their replacements) include:
+ The deprecated directories (and their replacements) include:
/proc/acpi/battery/* (/sys/class/power_supply/*)
/proc/acpi/ac_adapter/* (sys/class/power_supply/*)
- This option has no effect on /proc/acpi/ folders
+ This option has no effect on /proc/acpi/ directories
and functions, which do not yet exist in /sys
- Say N to delete power /proc/acpi/ folders that have moved to /sys/
+ Say N to delete power /proc/acpi/ directories that have moved to /sys/
+
config ACPI_SYSFS_POWER
bool "Future power /sys interface"
select POWER_SUPPLY
default y
---help---
Say N to disable power /sys interface
+
config ACPI_PROC_EVENT
bool "Deprecated /proc/acpi/event support"
depends on PROC_FS
@@ -186,6 +188,7 @@ config ACPI_HOTPLUG_CPU
config ACPI_THERMAL
tristate "Thermal Zone"
depends on ACPI_PROCESSOR
+ select THERMAL
default y
help
This driver adds support for ACPI thermal zones. Most mobile and
@@ -199,6 +202,16 @@ config ACPI_NUMA
depends on (X86 || IA64)
default y if IA64_GENERIC || IA64_SGI_SN2
+config ACPI_WMI
+ tristate "WMI (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ This driver adds support for the ACPI-WMI mapper device (PNP0C14)
+ found on some systems.
+
+ NOTE: You will need another driver or userspace application on top of
+ this to actually use anything defined in the ACPI-WMI mapper.
+
config ACPI_ASUS
tristate "ASUS/Medion Laptop Extras"
depends on X86
@@ -263,8 +276,10 @@ config ACPI_CUSTOM_DSDT
depends on !STANDALONE
default n
help
- This option is to load a custom ACPI DSDT
- If you don't know what that is, say N.
+ This option supports a custom DSDT by linking it into the kernel.
+ See Documentation/acpi/dsdt-override.txt
+
+ If unsure, say N.
config ACPI_CUSTOM_DSDT_FILE
string "Custom DSDT Table file to include"
@@ -274,6 +289,17 @@ config ACPI_CUSTOM_DSDT_FILE
Enter the full path name to the file which includes the AmlCode
declaration.
+config ACPI_CUSTOM_DSDT_INITRD
+ bool "Read Custom DSDT from initramfs"
+ depends on BLK_DEV_INITRD
+ default n
+ help
+ This option supports a custom DSDT by optionally loading it from initrd.
+ See Documentation/acpi/dsdt-override.txt
+
+ If you are not using this feature now, but may use it later,
+ it is safe to say Y here.
+
config ACPI_BLACKLIST_YEAR
int "Disable ACPI for systems before Jan 1st this year" if X86_32
default 0
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 456446f90077..f29812a86533 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_ACPI_THERMAL) += thermal.o
obj-$(CONFIG_ACPI_SYSTEM) += system.o event.o
obj-$(CONFIG_ACPI_DEBUG) += debug.o
obj-$(CONFIG_ACPI_NUMA) += numa.o
+obj-$(CONFIG_ACPI_WMI) += wmi.o
obj-$(CONFIG_ACPI_ASUS) += asus_acpi.o
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
diff --git a/drivers/acpi/asus_acpi.c b/drivers/acpi/asus_acpi.c
index d915fec9bf63..d25ef961415c 100644
--- a/drivers/acpi/asus_acpi.c
+++ b/drivers/acpi/asus_acpi.c
@@ -142,6 +142,7 @@ struct asus_hotk {
xxN, //M2400N, M3700N, M5200N, M6800N, S1300N, S5200N
A4S, //Z81sp
//(Centrino)
+ F3Sa,
END_MODEL
} model; //Models currently supported
u16 event_count[128]; //count for each event TODO make this better
@@ -405,7 +406,20 @@ static struct model_data model_conf[END_MODEL] = {
.brightness_get = "GPLV",
.mt_bt_switch = "BLED",
.mt_wled = "WLED"
- }
+ },
+
+ {
+ .name = "F3Sa",
+ .mt_bt_switch = "BLED",
+ .mt_wled = "WLED",
+ .mt_mled = "MLED",
+ .brightness_get = "GPLV",
+ .brightness_set = "SPLV",
+ .mt_lcd_switch = "\\_SB.PCI0.SBRG.EC0._Q10",
+ .lcd_status = "\\_SB.PCI0.SBRG.EC0.RPIN",
+ .display_get = "\\ADVG",
+ .display_set = "SDSP",
+ },
};
@@ -710,15 +724,8 @@ static int get_lcd_state(void)
{
int lcd = 0;
- if (hotk->model != L3H) {
- /* We don't have to check anything if we are here */
- if (!read_acpi_int(NULL, hotk->methods->lcd_status, &lcd))
- printk(KERN_WARNING
- "Asus ACPI: Error reading LCD status\n");
-
- if (hotk->model == L2D)
- lcd = ~lcd;
- } else { /* L3H and the like have to be handled differently */
+ if (hotk->model == L3H) {
+ /* L3H and the like have to be handled differently */
acpi_status status = 0;
struct acpi_object_list input;
union acpi_object mt_params[2];
@@ -745,6 +752,32 @@ static int get_lcd_state(void)
if (out_obj.type == ACPI_TYPE_INTEGER)
/* That's what the AML code does */
lcd = out_obj.integer.value >> 8;
+ } else if (hotk->model == F3Sa) {
+ unsigned long tmp;
+ union acpi_object param;
+ struct acpi_object_list input;
+ acpi_status status;
+
+ /* Read pin 11 */
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = 0x11;
+ input.count = 1;
+ input.pointer = &param;
+
+ status = acpi_evaluate_integer(NULL, hotk->methods->lcd_status,
+ &input, &tmp);
+ if (status != AE_OK)
+ return -1;
+
+ lcd = tmp;
+ } else {
+ /* We don't have to check anything if we are here */
+ if (!read_acpi_int(NULL, hotk->methods->lcd_status, &lcd))
+ printk(KERN_WARNING
+ "Asus ACPI: Error reading LCD status\n");
+
+ if (hotk->model == L2D)
+ lcd = ~lcd;
}
return (lcd & 1);
@@ -1134,6 +1167,8 @@ static int asus_model_match(char *model)
return W5A;
else if (strncmp(model, "A4S", 3) == 0)
return A4S;
+ else if (strncmp(model, "F3Sa", 4) == 0)
+ return F3Sa;
else
return END_MODEL;
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index c4a769d1ba85..f6215e809808 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -194,6 +194,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_MANUFACTURER:
val->strval = battery->oem_info;
break;
+ case POWER_SUPPLY_PROP_SERIAL_NUMBER:
+ val->strval = battery->serial_number;
+ break;
default:
return -EINVAL;
}
@@ -212,6 +215,7 @@ static enum power_supply_property charge_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
static enum power_supply_property energy_battery_props[] = {
@@ -226,6 +230,7 @@ static enum power_supply_property energy_battery_props[] = {
POWER_SUPPLY_PROP_ENERGY_NOW,
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
#endif
diff --git a/drivers/acpi/bay.c b/drivers/acpi/bay.c
index 6daf6088ac88..1fa86811b8ee 100644
--- a/drivers/acpi/bay.c
+++ b/drivers/acpi/bay.c
@@ -46,6 +46,12 @@ MODULE_LICENSE("GPL");
printk(KERN_DEBUG PREFIX "%s: %s\n", prefix, s); }
static void bay_notify(acpi_handle handle, u32 event, void *data);
+static const struct acpi_device_id bay_device_ids[] = {
+ {"LNXIOBAY", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, bay_device_ids);
+
struct bay {
acpi_handle handle;
char *name;
@@ -128,7 +134,7 @@ static ssize_t show_present(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", bay_present(bay));
}
-DEVICE_ATTR(present, S_IRUGO, show_present, NULL);
+static DEVICE_ATTR(present, S_IRUGO, show_present, NULL);
/*
* write_eject - write method for "eject" file in sysfs
@@ -144,7 +150,7 @@ static ssize_t write_eject(struct device *dev, struct device_attribute *attr,
eject_device(bay->handle);
return count;
}
-DEVICE_ATTR(eject, S_IWUSR, NULL, write_eject);
+static DEVICE_ATTR(eject, S_IWUSR, NULL, write_eject);
/**
* is_ata - see if a device is an ata device
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 8809654d6cc9..6dbaa2d15fe0 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -70,8 +70,6 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
/* IBM 600E - _ADR should return 7, but it returns 1 */
{"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
"Incorrect _ADR", 1},
- {"ASUS\0\0", "P2B-S ", 0, ACPI_SIG_DSDT, all_versions,
- "Bogus PCI routing", 1},
{""}
};
@@ -208,33 +206,35 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
* Disable OSI(Linux) warnings on all "Acer, inc."
*
* _OSI(Linux) disables the latest Windows BIOS code:
+ * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"),
* DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5050"),
+ * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
* DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5580"),
* DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 3010"),
* _OSI(Linux) effect unknown:
* DMI_MATCH(DMI_PRODUCT_NAME, "Ferrari 5000"),
*/
- {
- .callback = dmi_disable_osi_linux,
- .ident = "Acer, inc.",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer, inc."),
- },
- },
+ /*
+ * note that dmi_check_system() uses strstr()
+ * to match sub-strings rather than !strcmp(),
+ * so "Acer" below matches "Acer, inc." above.
+ */
/*
* Disable OSI(Linux) warnings on all "Acer"
*
* _OSI(Linux) effect unknown:
- * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
* DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
* DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720Z"),
* DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5520"),
* DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 6460"),
* DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 7510"),
* DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5220"),
+ *
+ * _OSI(Linux) is a NOP:
+ * DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5315"),
*/
{
- .callback = dmi_unknown_osi_linux,
+ .callback = dmi_disable_osi_linux,
.ident = "Acer",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -242,21 +242,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
/*
* Disable OSI(Linux) warnings on all "Apple Computer, Inc."
+ * Disable OSI(Linux) warnings on all "Apple Inc."
*
* _OSI(Linux) confirmed to be a NOP:
* DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
* DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
* DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro2,2"),
+ * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,1"),
* _OSI(Linux) effect unknown:
* DMI_MATCH(DMI_PRODUCT_NAME, "MacPro2,1"),
* DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro1,1"),
- * DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro3,1"),
*/
{
.callback = dmi_disable_osi_linux,
.ident = "Apple",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple"),
},
},
/*
@@ -294,13 +295,13 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
* DMI_MATCH(DMI_BOARD_NAME, "IFL91"),
*/
{
- .callback = dmi_unknown_osi_linux,
+ .callback = dmi_disable_osi_linux,
.ident = "Compal",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
},
},
- { /* OSI(Linux) touches USB, breaks suspend to disk */
+ { /* OSI(Linux) touches USB, unknown side-effect */
.callback = dmi_disable_osi_linux,
.ident = "Dell Dimension 5150",
.matches = {
@@ -310,7 +311,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{ /* OSI(Linux) is a NOP */
.callback = dmi_disable_osi_linux,
- .ident = "Dell",
+ .ident = "Dell i1501",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1501"),
@@ -318,7 +319,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{ /* OSI(Linux) effect unknown */
.callback = dmi_unknown_osi_linux,
- .ident = "Dell",
+ .ident = "Dell Latitude D830",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D830"),
@@ -326,7 +327,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{ /* OSI(Linux) effect unknown */
.callback = dmi_unknown_osi_linux,
- .ident = "Dell",
+ .ident = "Dell OP GX620",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex GX620"),
@@ -334,15 +335,23 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{ /* OSI(Linux) effect unknown */
.callback = dmi_unknown_osi_linux,
- .ident = "Dell",
+ .ident = "Dell PE 1900",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1900"),
},
},
+ { /* OSI(Linux) is a NOP */
+ .callback = dmi_disable_osi_linux,
+ .ident = "Dell PE R200",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R200"),
+ },
+ },
{ /* OSI(Linux) touches USB */
.callback = dmi_disable_osi_linux,
- .ident = "Dell",
+ .ident = "Dell PR 390",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation 390"),
@@ -358,7 +367,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{ /* OSI(Linux) effect unknown */
.callback = dmi_unknown_osi_linux,
- .ident = "Dell",
+ .ident = "Dell PE SC440",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge SC440"),
@@ -474,6 +483,11 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
*
* _OSI(Linux) confirmed to be a NOP:
* DMI_MATCH(DMI_PRODUCT_NAME, "P1-J150B"),
+ * with DMI_MATCH(DMI_BOARD_NAME, "ROCKY"),
+ *
+ * unknown:
+ * DMI_MATCH(DMI_PRODUCT_NAME, "S1-MDGDG"),
+ * with DMI_MATCH(DMI_BOARD_NAME, "ROCKY"),
*/
{
.callback = dmi_disable_osi_linux,
@@ -516,7 +530,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
* DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ11M"),
*/
{
- .callback = dmi_unknown_osi_linux,
+ .callback = dmi_disable_osi_linux,
.ident = "Sony",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 1b4cf984b081..8b0d4b7d188a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -122,6 +122,31 @@ int acpi_bus_get_status(struct acpi_device *device)
EXPORT_SYMBOL(acpi_bus_get_status);
+void acpi_bus_private_data_handler(acpi_handle handle,
+ u32 function, void *context)
+{
+ return;
+}
+EXPORT_SYMBOL(acpi_bus_private_data_handler);
+
+int acpi_bus_get_private_data(acpi_handle handle, void **data)
+{
+ acpi_status status = AE_OK;
+
+ if (!*data)
+ return -EINVAL;
+
+ status = acpi_get_data(handle, acpi_bus_private_data_handler, data);
+ if (ACPI_FAILURE(status) || !*data) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
+ handle));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(acpi_bus_get_private_data);
+
/* --------------------------------------------------------------------------
Power Management
-------------------------------------------------------------------------- */
@@ -366,7 +391,6 @@ int acpi_bus_receive_event(struct acpi_bus_event *event)
return 0;
}
-EXPORT_SYMBOL(acpi_bus_receive_event);
#endif /* CONFIG_ACPI_PROC_EVENT */
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c
index bf513e07b773..6df564f4ca6e 100644
--- a/drivers/acpi/debug.c
+++ b/drivers/acpi/debug.c
@@ -130,6 +130,63 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp) {
module_param_call(debug_layer, param_set_uint, param_get_debug_layer, &acpi_dbg_layer, 0644);
module_param_call(debug_level, param_set_uint, param_get_debug_level, &acpi_dbg_level, 0644);
+static char trace_method_name[6];
+module_param_string(trace_method_name, trace_method_name, 6, 0644);
+static unsigned int trace_debug_layer;
+module_param(trace_debug_layer, uint, 0644);
+static unsigned int trace_debug_level;
+module_param(trace_debug_level, uint, 0644);
+
+static int param_set_trace_state(const char *val, struct kernel_param *kp)
+{
+ int result = 0;
+
+ if (!strncmp(val, "enable", strlen("enable") - 1)) {
+ result = acpi_debug_trace(trace_method_name, trace_debug_level,
+ trace_debug_layer, 0);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ if (!strncmp(val, "disable", strlen("disable") - 1)) {
+ int name = 0;
+ result = acpi_debug_trace((char *)&name, trace_debug_level,
+ trace_debug_layer, 0);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ if (!strncmp(val, "1", 1)) {
+ result = acpi_debug_trace(trace_method_name, trace_debug_level,
+ trace_debug_layer, 1);
+ if (result)
+ result = -EBUSY;
+ goto exit;
+ }
+
+ result = -EINVAL;
+exit:
+ return result;
+}
+
+static int param_get_trace_state(char *buffer, struct kernel_param *kp)
+{
+ if (!acpi_gbl_trace_method_name)
+ return sprintf(buffer, "disable");
+ else {
+ if (acpi_gbl_trace_flags & 1)
+ return sprintf(buffer, "1");
+ else
+ return sprintf(buffer, "enable");
+ }
+ return 0;
+}
+
+module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
+ NULL, 0644);
+
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/dispatcher/dsopcode.c b/drivers/acpi/dispatcher/dsopcode.c
index fc9da4879cbf..f501e083aac7 100644
--- a/drivers/acpi/dispatcher/dsopcode.c
+++ b/drivers/acpi/dispatcher/dsopcode.c
@@ -359,7 +359,9 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
status = acpi_os_validate_address(obj_desc->region.space_id,
obj_desc->region.address,
- (acpi_size) obj_desc->region.length);
+ (acpi_size) obj_desc->region.length,
+ acpi_ut_get_node_name(node));
+
if (ACPI_FAILURE(status)) {
/*
* Invalid address/length. We will emit an error message and mark
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 1dabdf4c07b3..307cef65c247 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -51,6 +51,12 @@ static struct atomic_notifier_head dock_notifier_list;
static struct platform_device *dock_device;
static char dock_device_name[] = "dock";
+static const struct acpi_device_id dock_device_ids[] = {
+ {"LNXDOCK", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, dock_device_ids);
+
struct dock_station {
acpi_handle handle;
unsigned long last_dock_time;
@@ -680,7 +686,7 @@ static ssize_t show_docked(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", dock_present(dock_station));
}
-DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
+static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
/*
* show_flags - read method for flags file in sysfs
@@ -691,7 +697,7 @@ static ssize_t show_flags(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
}
-DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
+static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
/*
* write_undock - write method for "undock" file in sysfs
@@ -707,7 +713,7 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
return ret ? ret: count;
}
-DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
+static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
/*
* show_dock_uid - read method for "uid" file in sysfs
@@ -723,7 +729,7 @@ static ssize_t show_dock_uid(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%lx\n", lbuf);
}
-DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
+static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
/**
* dock_add - add a new dock station
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 987b967c7467..7222a18a0319 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -573,7 +573,7 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
void *handler_context, void *region_context)
{
struct acpi_ec *ec = handler_context;
- int result = 0, i = 0;
+ int result = 0, i;
u8 temp = 0;
if ((address > 0xFF) || !value || !handler_context)
@@ -585,7 +585,18 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
if (bits != 8 && acpi_strict)
return AE_BAD_PARAMETER;
- while (bits - i > 0) {
+ acpi_ec_burst_enable(ec);
+
+ if (function == ACPI_READ) {
+ result = acpi_ec_read(ec, address, &temp);
+ *value = temp;
+ } else {
+ temp = 0xff & (*value);
+ result = acpi_ec_write(ec, address, temp);
+ }
+
+ for (i = 8; unlikely(bits - i > 0); i += 8) {
+ ++address;
if (function == ACPI_READ) {
result = acpi_ec_read(ec, address, &temp);
(*value) |= ((acpi_integer)temp) << i;
@@ -593,10 +604,10 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
temp = 0xff & ((*value) >> i);
result = acpi_ec_write(ec, address, temp);
}
- i += 8;
- ++address;
}
+ acpi_ec_burst_disable(ec);
+
switch (result) {
case -EINVAL:
return AE_BAD_PARAMETER;
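A worked trace of the reworked multi-byte EC access above (the offset is illustrative):

/* A 32-bit (bits == 32) ACPI_READ at EC offset 0x40 now runs inside one
 * burst-enable/disable pair:
 *   acpi_ec_read(ec, 0x40, &temp);  *value  = temp;          (before the loop)
 *   i = 8:  acpi_ec_read(ec, 0x41, &temp);  *value |= temp << 8;
 *   i = 16: acpi_ec_read(ec, 0x42, &temp);  *value |= temp << 16;
 *   i = 24: acpi_ec_read(ec, 0x43, &temp);  *value |= temp << 24;
 *   i = 32: bits - i == 0, loop ends, burst mode is disabled.
 */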
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 5c95863f8fa9..5479dc0eeeec 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -109,6 +109,34 @@ static const struct file_operations acpi_system_event_ops = {
};
#endif /* CONFIG_ACPI_PROC_EVENT */
+/* ACPI notifier chain */
+BLOCKING_NOTIFIER_HEAD(acpi_chain_head);
+
+int acpi_notifier_call_chain(struct acpi_device *dev, u32 type, u32 data)
+{
+ struct acpi_bus_event event;
+
+ strcpy(event.device_class, dev->pnp.device_class);
+ strcpy(event.bus_id, dev->pnp.bus_id);
+ event.type = type;
+ event.data = data;
+ return (blocking_notifier_call_chain(&acpi_chain_head, 0, (void *)&event)
+ == NOTIFY_BAD) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(acpi_notifier_call_chain);
+
+int register_acpi_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_chain_head, nb);
+}
+EXPORT_SYMBOL(register_acpi_notifier);
+
+int unregister_acpi_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&acpi_chain_head, nb);
+}
+EXPORT_SYMBOL(unregister_acpi_notifier);
+
#ifdef CONFIG_NET
static unsigned int acpi_event_seqnum;
struct acpi_genl_event {
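As a sketch of how the new notifier chain is meant to be consumed (the callback and variable names below are hypothetical, not part of this patch):

/* Hypothetical consumer of the new ACPI notifier chain. */
static int my_acpi_event(struct notifier_block *nb, unsigned long val,
                         void *data)
{
        struct acpi_bus_event *event = data;

        printk(KERN_INFO "acpi event %s %s: type 0x%x data 0x%x\n",
               event->device_class, event->bus_id, event->type, event->data);
        return NOTIFY_OK;
}

static struct notifier_block my_acpi_nb = {
        .notifier_call = my_acpi_event,
};

/* register_acpi_notifier(&my_acpi_nb) at init,
 * unregister_acpi_notifier(&my_acpi_nb) at exit. */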
diff --git a/drivers/acpi/events/evevent.c b/drivers/acpi/events/evevent.c
index e41287815ea1..3048801a37b5 100644
--- a/drivers/acpi/events/evevent.c
+++ b/drivers/acpi/events/evevent.c
@@ -259,7 +259,7 @@ u32 acpi_ev_fixed_event_detect(void)
enable_bit_mask)) {
/* Found an active (signalled) event */
-
+ acpi_os_fixed_event_count(i);
int_status |= acpi_ev_fixed_event_dispatch((u32) i);
}
}
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index e22f4a973c0f..0dadd2adc800 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -270,18 +270,18 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
case ACPI_GPE_TYPE_WAKE_RUN:
ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
- /*lint -fallthrough */
+ /* fallthrough */
case ACPI_GPE_TYPE_RUNTIME:
/* Disable the requested runtime GPE */
ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
- status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
- break;
+
+ /* fallthrough */
default:
- return_ACPI_STATUS(AE_BAD_PARAMETER);
+ acpi_hw_write_gpe_enable_reg(gpe_event_info);
}
return_ACPI_STATUS(AE_OK);
@@ -501,6 +501,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
* an interrupt handler.
*
******************************************************************************/
+static void acpi_ev_asynch_enable_gpe(void *context);
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
@@ -576,22 +577,30 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
method_node)));
}
}
+ /* Defer enabling of GPE until all notify handlers are done */
+ acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
+ gpe_event_info);
+ return_VOID;
+}
- if ((local_gpe_event_info.flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+static void acpi_ev_asynch_enable_gpe(void *context)
+{
+ struct acpi_gpe_event_info *gpe_event_info = context;
+ acpi_status status;
+ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
ACPI_GPE_LEVEL_TRIGGERED) {
/*
* GPE is level-triggered, we clear the GPE status bit after
* handling the event.
*/
- status = acpi_hw_clear_gpe(&local_gpe_event_info);
+ status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
return_VOID;
}
}
/* Enable this GPE */
-
- (void)acpi_hw_write_gpe_enable_reg(&local_gpe_event_info);
+ (void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
return_VOID;
}
@@ -618,7 +627,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
- acpi_gpe_count++;
+ acpi_os_gpe_count(gpe_number);
/*
* If edge-triggered, clear the GPE status bit now. Note that
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index a6e149d692cb..48cb705b274a 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -30,7 +30,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
-
+#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -68,9 +68,55 @@ static struct acpi_driver acpi_fan_driver = {
},
};
+/* thermal cooling device callbacks */
+static int fan_get_max_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ /* ACPI fan device only supports two states: ON/OFF */
+ return sprintf(buf, "1\n");
+}
+
+static int fan_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ int state;
+ int result;
+
+ if (!device)
+ return -EINVAL;
+
+ result = acpi_bus_get_power(device->handle, &state);
+ if (result)
+ return result;
+
+ return sprintf(buf, "%s\n", state == ACPI_STATE_D3 ? "0" :
+ (state == ACPI_STATE_D0 ? "1" : "unknown"));
+}
+
+static int
+fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned int state)
+{
+ struct acpi_device *device = cdev->devdata;
+ int result;
+
+ if (!device || (state != 0 && state != 1))
+ return -EINVAL;
+
+ result = acpi_bus_set_power(device->handle,
+ state ? ACPI_STATE_D0 : ACPI_STATE_D3);
+
+ return result;
+}
+
+static struct thermal_cooling_device_ops fan_cooling_ops = {
+ .get_max_state = fan_get_max_state,
+ .get_cur_state = fan_get_cur_state,
+ .set_cur_state = fan_set_cur_state,
+};
+
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
+#ifdef CONFIG_ACPI_PROCFS
static struct proc_dir_entry *acpi_fan_dir;
@@ -171,7 +217,17 @@ static int acpi_fan_remove_fs(struct acpi_device *device)
return 0;
}
+#else
+static int acpi_fan_add_fs(struct acpi_device *device)
+{
+ return 0;
+}
+static int acpi_fan_remove_fs(struct acpi_device *device)
+{
+ return 0;
+}
+#endif
/* --------------------------------------------------------------------------
Driver Interface
-------------------------------------------------------------------------- */
@@ -179,9 +235,8 @@ static int acpi_fan_remove_fs(struct acpi_device *device)
static int acpi_fan_add(struct acpi_device *device)
{
int result = 0;
- struct acpi_fan *fan = NULL;
int state = 0;
-
+ struct thermal_cooling_device *cdev;
if (!device)
return -EINVAL;
@@ -199,6 +254,25 @@ static int acpi_fan_add(struct acpi_device *device)
acpi_bus_set_power(device->handle, state);
device->flags.force_power_state = 0;
+ cdev = thermal_cooling_device_register("Fan", device,
+ &fan_cooling_ops);
+ if (cdev)
+ printk(KERN_INFO PREFIX
+ "%s is registered as cooling_device%d\n",
+ device->dev.bus_id, cdev->id);
+ else
+ goto end;
+ acpi_driver_data(device) = cdev;
+ result = sysfs_create_link(&device->dev.kobj, &cdev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ return result;
+
+ result = sysfs_create_link(&cdev->device.kobj, &device->dev.kobj,
+ "device");
+ if (result)
+ return result;
+
result = acpi_fan_add_fs(device);
if (result)
goto end;
@@ -208,18 +282,20 @@ static int acpi_fan_add(struct acpi_device *device)
!device->power.state ? "on" : "off");
end:
- if (result)
- kfree(fan);
-
return result;
}
static int acpi_fan_remove(struct acpi_device *device, int type)
{
- if (!device || !acpi_driver_data(device))
+ struct thermal_cooling_device *cdev = acpi_driver_data(device);
+
+ if (!device || !cdev)
return -EINVAL;
acpi_fan_remove_fs(device);
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&cdev->device.kobj, "device");
+ thermal_cooling_device_unregister(cdev);
return 0;
}
@@ -261,10 +337,12 @@ static int __init acpi_fan_init(void)
int result = 0;
+#ifdef CONFIG_ACPI_PROCFS
acpi_fan_dir = proc_mkdir(ACPI_FAN_CLASS, acpi_root_dir);
if (!acpi_fan_dir)
return -ENODEV;
acpi_fan_dir->owner = THIS_MODULE;
+#endif
result = acpi_bus_register_driver(&acpi_fan_driver);
if (result < 0) {
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 4893e256e399..eda0978b57c6 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -36,8 +36,6 @@ int register_acpi_bus_type(struct acpi_bus_type *type)
return -ENODEV;
}
-EXPORT_SYMBOL(register_acpi_bus_type);
-
int unregister_acpi_bus_type(struct acpi_bus_type *type)
{
if (acpi_disabled)
@@ -53,8 +51,6 @@ int unregister_acpi_bus_type(struct acpi_bus_type *type)
return -ENODEV;
}
-EXPORT_SYMBOL(unregister_acpi_bus_type);
-
static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
{
struct acpi_bus_type *tmp, *ret = NULL;
diff --git a/drivers/acpi/hardware/hwsleep.c b/drivers/acpi/hardware/hwsleep.c
index fd1c4ba63367..058d0be5cbe2 100644
--- a/drivers/acpi/hardware/hwsleep.c
+++ b/drivers/acpi/hardware/hwsleep.c
@@ -286,13 +286,13 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
}
/*
+ * 1) Disable/Clear all GPEs
* 2) Enable all wakeup GPEs
*/
status = acpi_hw_disable_all_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
-
acpi_gbl_system_awake_and_running = FALSE;
status = acpi_hw_enable_all_wakeup_gpes();
diff --git a/drivers/acpi/namespace/nsxfeval.c b/drivers/acpi/namespace/nsxfeval.c
index f39fbc6b9237..b92133faf5b7 100644
--- a/drivers/acpi/namespace/nsxfeval.c
+++ b/drivers/acpi/namespace/nsxfeval.c
@@ -443,6 +443,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
struct acpica_device_id hid;
struct acpi_compatible_id_list *cid;
acpi_native_uint i;
+ int found;
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
@@ -496,16 +497,19 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
/* Walk the CID list */
+ found = 0;
for (i = 0; i < cid->count; i++) {
if (ACPI_STRNCMP(cid->id[i].value, info->hid,
sizeof(struct
- acpi_compatible_id)) !=
+ acpi_compatible_id)) ==
0) {
- ACPI_FREE(cid);
- return (AE_OK);
+ found = 1;
+ break;
}
}
ACPI_FREE(cid);
+ if (!found)
+ return (AE_OK);
}
}
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 0822d9fc1cb4..5d59cb33b1a5 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -78,6 +78,7 @@ int acpi_map_pxm_to_node(int pxm)
return node;
}
+#if 0
void __cpuinit acpi_unmap_pxm_to_node(int node)
{
int pxm = node_to_pxm_map[node];
@@ -85,6 +86,7 @@ void __cpuinit acpi_unmap_pxm_to_node(int node)
node_to_pxm_map[node] = PXM_INVAL;
node_clear(node, nodes_found_map);
}
+#endif /* 0 */
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
@@ -247,7 +249,6 @@ int acpi_get_pxm(acpi_handle h)
} while (ACPI_SUCCESS(status));
return -1;
}
-EXPORT_SYMBOL(acpi_get_pxm);
int acpi_get_node(acpi_handle *handle)
{
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index e53fb516f9d4..27ccd68b8f46 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -44,6 +44,8 @@
#include <asm/uaccess.h>
#include <linux/efi.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
@@ -74,9 +76,25 @@ static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
+struct acpi_res_list {
+ resource_size_t start;
+ resource_size_t end;
+ acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
+ char name[5]; /* can only have a length of 4 chars, make use of this
+ one instead of res->name, no need to kmalloc then */
+ struct list_head resource_list;
+};
+
+static LIST_HEAD(resource_list_head);
+static DEFINE_SPINLOCK(acpi_res_lock);
+
#define OSI_STRING_LENGTH_MAX 64 /* arbitrary */
static char osi_additional_string[OSI_STRING_LENGTH_MAX];
+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
+static int acpi_no_initrd_override;
+#endif
+
/*
* "Ode to _OSI(Linux)"
*
@@ -120,7 +138,7 @@ static char osi_additional_string[OSI_STRING_LENGTH_MAX];
*/
#define OSI_LINUX_ENABLE 0
-struct osi_linux {
+static struct osi_linux {
unsigned int enable:1;
unsigned int dmi:1;
unsigned int cmdline:1;
@@ -219,8 +237,6 @@ void acpi_os_printf(const char *fmt, ...)
va_end(args);
}
-EXPORT_SYMBOL(acpi_os_printf);
-
void acpi_os_vprintf(const char *fmt, va_list args)
{
static char buffer[512];
@@ -250,11 +266,16 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
"System description tables not found\n");
return 0;
}
- } else
- return acpi_find_rsdp();
+ } else {
+ acpi_physical_address pa = 0;
+
+ acpi_find_root_pointer(&pa);
+ return pa;
+ }
}
-void __iomem *acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
+void __iomem *__init_refok
+acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
if (phys > ULONG_MAX) {
printk(KERN_ERR PREFIX "Cannot map memory that high\n");
@@ -312,6 +333,67 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
return AE_OK;
}
+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
+struct acpi_table_header *acpi_find_dsdt_initrd(void)
+{
+ struct file *firmware_file;
+ mm_segment_t oldfs;
+ unsigned long len, len2;
+ struct acpi_table_header *dsdt_buffer, *ret = NULL;
+ struct kstat stat;
+ char *ramfs_dsdt_name = "/DSDT.aml";
+
+ printk(KERN_INFO PREFIX "Checking initramfs for custom DSDT");
+
+ /*
+ * Never do this at home; only user space is supposed to open files.
+ * The clean way would be to use the firmware loader,
+ * but this code must run before any userspace is available,
+ * and a static/init firmware infrastructure doesn't exist yet...
+ */
+ if (vfs_stat(ramfs_dsdt_name, &stat) < 0)
+ return ret;
+
+ len = stat.size;
+ /* check especially against empty files */
+ if (len <= 4) {
+ printk(KERN_ERR PREFIX "Failed: DSDT only %lu bytes.\n", len);
+ return ret;
+ }
+
+ firmware_file = filp_open(ramfs_dsdt_name, O_RDONLY, 0);
+ if (IS_ERR(firmware_file)) {
+ printk(KERN_ERR PREFIX "Failed to open %s.\n", ramfs_dsdt_name);
+ return ret;
+ }
+
+ dsdt_buffer = kmalloc(len, GFP_ATOMIC);
+ if (!dsdt_buffer) {
+ printk(KERN_ERR PREFIX "Failed to allocate %lu bytes.\n", len);
+ goto err;
+ }
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ len2 = vfs_read(firmware_file, (char __user *)dsdt_buffer, len,
+ &firmware_file->f_pos);
+ set_fs(oldfs);
+ if (len2 < len) {
+ printk(KERN_ERR PREFIX "Failed to read %lu bytes from %s.\n",
+ len, ramfs_dsdt_name);
+ ACPI_FREE(dsdt_buffer);
+ goto err;
+ }
+
+ printk(KERN_INFO PREFIX "Found %lu byte DSDT in %s.\n",
+ len, ramfs_dsdt_name);
+ ret = dsdt_buffer;
+err:
+ filp_close(firmware_file, NULL);
+ return ret;
+}
+#endif
+
acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
struct acpi_table_header ** new_table)
@@ -319,20 +401,52 @@ acpi_os_table_override(struct acpi_table_header * existing_table,
if (!existing_table || !new_table)
return AE_BAD_PARAMETER;
+ *new_table = NULL;
+
#ifdef CONFIG_ACPI_CUSTOM_DSDT
if (strncmp(existing_table->signature, "DSDT", 4) == 0)
*new_table = (struct acpi_table_header *)AmlCode;
- else
- *new_table = NULL;
-#else
- *new_table = NULL;
#endif
+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
+ if ((strncmp(existing_table->signature, "DSDT", 4) == 0) &&
+ !acpi_no_initrd_override) {
+ struct acpi_table_header *initrd_table;
+
+ initrd_table = acpi_find_dsdt_initrd();
+ if (initrd_table)
+ *new_table = initrd_table;
+ }
+#endif
+ if (*new_table != NULL) {
+ printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
+ "this is unsafe: tainting kernel\n",
+ existing_table->signature,
+ existing_table->oem_table_id);
+ add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
+ }
return AE_OK;
}
+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
+int __init acpi_no_initrd_override_setup(char *s)
+{
+ acpi_no_initrd_override = 1;
+ return 1;
+}
+__setup("acpi_no_initrd_override", acpi_no_initrd_override_setup);
+#endif
+
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
- return (*acpi_irq_handler) (acpi_irq_context) ? IRQ_HANDLED : IRQ_NONE;
+ u32 handled;
+
+ handled = (*acpi_irq_handler) (acpi_irq_context);
+
+ if (handled) {
+ acpi_irq_handled++;
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
}
acpi_status
@@ -341,6 +455,8 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
{
unsigned int irq;
+ acpi_irq_stats_init();
+
/*
* Ignore the GSI from the core, and use the value in our copy of the
* FADT. It may not be the same if an interrupt source override exists
@@ -384,8 +500,6 @@ void acpi_os_sleep(acpi_integer ms)
schedule_timeout_interruptible(msecs_to_jiffies(ms));
}
-EXPORT_SYMBOL(acpi_os_sleep);
-
void acpi_os_stall(u32 us)
{
while (us) {
@@ -399,8 +513,6 @@ void acpi_os_stall(u32 us)
}
}
-EXPORT_SYMBOL(acpi_os_stall);
-
/*
* Support ACPI 3.0 AML Timer operand
* Returns 64-bit free-running, monotonically increasing timer
@@ -550,8 +662,6 @@ acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
return (result ? AE_ERROR : AE_OK);
}
-EXPORT_SYMBOL(acpi_os_read_pci_configuration);
-
acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
acpi_integer value, u32 width)
@@ -661,25 +771,6 @@ static void acpi_os_execute_deferred(struct work_struct *work)
dpc->function(dpc->context);
kfree(dpc);
- /* Yield cpu to notify thread */
- cond_resched();
-
- return;
-}
-
-static void acpi_os_execute_notify(struct work_struct *work)
-{
- struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
-
- if (!dpc) {
- printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
- return;
- }
-
- dpc->function(dpc->context);
-
- kfree(dpc);
-
return;
}
@@ -703,7 +794,7 @@ acpi_status acpi_os_execute(acpi_execute_type type,
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
-
+ struct workqueue_struct *queue;
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Scheduling function [%p(%p)] for deferred execution.\n",
function, context));
@@ -727,20 +818,13 @@ acpi_status acpi_os_execute(acpi_execute_type type,
dpc->function = function;
dpc->context = context;
- if (type == OSL_NOTIFY_HANDLER) {
- INIT_WORK(&dpc->work, acpi_os_execute_notify);
- if (!queue_work(kacpi_notify_wq, &dpc->work)) {
- status = AE_ERROR;
- kfree(dpc);
- }
- } else {
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- if (!queue_work(kacpid_wq, &dpc->work)) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
- "Call to queue_work() failed.\n"));
- status = AE_ERROR;
- kfree(dpc);
- }
+ INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+ queue = (type == OSL_NOTIFY_HANDLER) ? kacpi_notify_wq : kacpid_wq;
+ if (!queue_work(queue, &dpc->work)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Call to queue_work() failed.\n"));
+ status = AE_ERROR;
+ kfree(dpc);
}
return_ACPI_STATUS(status);
}
@@ -793,8 +877,6 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
return AE_OK;
}
-EXPORT_SYMBOL(acpi_os_create_semaphore);
-
/*
* TODO: A better way to delete semaphores? Linux doesn't have a
* 'delete_semaphore()' function -- may result in an invalid
@@ -818,8 +900,6 @@ acpi_status acpi_os_delete_semaphore(acpi_handle handle)
return AE_OK;
}
-EXPORT_SYMBOL(acpi_os_delete_semaphore);
-
/*
* TODO: The kernel doesn't have a 'down_timeout' function -- had to
* improvise. The process is to sleep for one scheduler quantum
@@ -912,8 +992,6 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
return status;
}
-EXPORT_SYMBOL(acpi_os_wait_semaphore);
-
/*
* TODO: Support for units > 1?
*/
@@ -936,8 +1014,6 @@ acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
return AE_OK;
}
-EXPORT_SYMBOL(acpi_os_signal_semaphore);
-
#ifdef ACPI_FUTURE_USAGE
u32 acpi_os_get_line(char *buffer)
{
@@ -981,8 +1057,6 @@ acpi_status acpi_os_signal(u32 function, void *info)
return AE_OK;
}
-EXPORT_SYMBOL(acpi_os_signal);
-
static int __init acpi_os_name_setup(char *str)
{
char *p = acpi_os_name;
@@ -1102,6 +1176,128 @@ static int __init acpi_wake_gpes_always_on_setup(char *str)
__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
+/* Check for resource interference between native drivers and ACPI
+ * OperationRegions (SystemIO and System Memory only).
+ * IO ports and memory declared in ACPI might be used by the ACPI subsystem
+ * in arbitrary AML code and can interfere with legacy drivers.
+ * acpi_enforce_resources= can be set to:
+ *
+ * - strict (2)
+ * -> a driver trying to access the resources will not load
+ * - lax (default) (1)
+ * -> a driver trying to access the resources will load, but you
+ * get a system message that something might go wrong...
+ *
+ * - no (0)
+ * -> ACPI Operation Region resources will not be registered
+ *
+ */
+#define ENFORCE_RESOURCES_STRICT 2
+#define ENFORCE_RESOURCES_LAX 1
+#define ENFORCE_RESOURCES_NO 0
+
+static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
+
+static int __init acpi_enforce_resources_setup(char *str)
+{
+ if (str == NULL || *str == '\0')
+ return 0;
+
+ if (!strcmp("strict", str))
+ acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
+ else if (!strcmp("lax", str))
+ acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
+ else if (!strcmp("no", str))
+ acpi_enforce_resources = ENFORCE_RESOURCES_NO;
+
+ return 1;
+}
+
+__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
+
+/* Check for resource conflicts between ACPI OperationRegions and native
+ * drivers */
+int acpi_check_resource_conflict(struct resource *res)
+{
+ struct acpi_res_list *res_list_elem;
+ int ioport;
+ int clash = 0;
+
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
+ return 0;
+ if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
+ return 0;
+
+ ioport = res->flags & IORESOURCE_IO;
+
+ spin_lock(&acpi_res_lock);
+ list_for_each_entry(res_list_elem, &resource_list_head,
+ resource_list) {
+ if (ioport && (res_list_elem->resource_type
+ != ACPI_ADR_SPACE_SYSTEM_IO))
+ continue;
+ if (!ioport && (res_list_elem->resource_type
+ != ACPI_ADR_SPACE_SYSTEM_MEMORY))
+ continue;
+
+ if (res->end < res_list_elem->start
+ || res_list_elem->end < res->start)
+ continue;
+ clash = 1;
+ break;
+ }
+ spin_unlock(&acpi_res_lock);
+
+ if (clash) {
+ if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
+ printk(KERN_INFO "%sACPI: %s resource %s [0x%llx-0x%llx]"
+ " conflicts with ACPI region %s"
+ " [0x%llx-0x%llx]\n",
+ acpi_enforce_resources == ENFORCE_RESOURCES_LAX
+ ? KERN_WARNING : KERN_ERR,
+ ioport ? "I/O" : "Memory", res->name,
+ (long long) res->start, (long long) res->end,
+ res_list_elem->name,
+ (long long) res_list_elem->start,
+ (long long) res_list_elem->end);
+ printk(KERN_INFO "ACPI: Device needs an ACPI driver\n");
+ }
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
+ return -EBUSY;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(acpi_check_resource_conflict);
+
+int acpi_check_region(resource_size_t start, resource_size_t n,
+ const char *name)
+{
+ struct resource res = {
+ .start = start,
+ .end = start + n - 1,
+ .name = name,
+ .flags = IORESOURCE_IO,
+ };
+
+ return acpi_check_resource_conflict(&res);
+}
+EXPORT_SYMBOL(acpi_check_region);
+
+int acpi_check_mem_region(resource_size_t start, resource_size_t n,
+ const char *name)
+{
+ struct resource res = {
+ .start = start,
+ .end = start + n - 1,
+ .name = name,
+ .flags = IORESOURCE_MEM,
+ };
+
+ return acpi_check_resource_conflict(&res);
+
+}
+EXPORT_SYMBOL(acpi_check_mem_region);
+
/*
* Acquire a spinlock.
*
@@ -1213,24 +1409,24 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
*
* Returns 0 on success
*/
-int acpi_dmi_dump(void)
+static int acpi_dmi_dump(void)
{
if (!dmi_available)
return -1;
printk(KERN_NOTICE PREFIX "DMI System Vendor: %s\n",
- dmi_get_slot(DMI_SYS_VENDOR));
+ dmi_get_system_info(DMI_SYS_VENDOR));
printk(KERN_NOTICE PREFIX "DMI Product Name: %s\n",
- dmi_get_slot(DMI_PRODUCT_NAME));
+ dmi_get_system_info(DMI_PRODUCT_NAME));
printk(KERN_NOTICE PREFIX "DMI Product Version: %s\n",
- dmi_get_slot(DMI_PRODUCT_VERSION));
+ dmi_get_system_info(DMI_PRODUCT_VERSION));
printk(KERN_NOTICE PREFIX "DMI Board Name: %s\n",
- dmi_get_slot(DMI_BOARD_NAME));
+ dmi_get_system_info(DMI_BOARD_NAME));
printk(KERN_NOTICE PREFIX "DMI BIOS Vendor: %s\n",
- dmi_get_slot(DMI_BIOS_VENDOR));
+ dmi_get_system_info(DMI_BIOS_VENDOR));
printk(KERN_NOTICE PREFIX "DMI BIOS Date: %s\n",
- dmi_get_slot(DMI_BIOS_DATE));
+ dmi_get_system_info(DMI_BIOS_DATE));
return 0;
}
@@ -1303,10 +1499,46 @@ acpi_status
acpi_os_validate_address (
u8 space_id,
acpi_physical_address address,
- acpi_size length)
+ acpi_size length,
+ char *name)
{
+ struct acpi_res_list *res;
+ if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
+ return AE_OK;
- return AE_OK;
+ switch (space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ /* Only interference checks against SystemIO and SystemMemory
+ are needed */
+ res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
+ if (!res)
+ return AE_OK;
+ /* ACPI names are fixed to 4 bytes, still better use strlcpy */
+ strlcpy(res->name, name, 5);
+ res->start = address;
+ res->end = address + length - 1;
+ res->resource_type = space_id;
+ spin_lock(&acpi_res_lock);
+ list_add(&res->resource_list, &resource_list_head);
+ spin_unlock(&acpi_res_lock);
+ pr_debug("Added %s resource: start: 0x%llx, end: 0x%llx, "
+ "name: %s\n", (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ ? "SystemIO" : "System Memory",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end,
+ res->name);
+ break;
+ case ACPI_ADR_SPACE_PCI_CONFIG:
+ case ACPI_ADR_SPACE_EC:
+ case ACPI_ADR_SPACE_SMBUS:
+ case ACPI_ADR_SPACE_CMOS:
+ case ACPI_ADR_SPACE_PCI_BAR_TARGET:
+ case ACPI_ADR_SPACE_DATA_TABLE:
+ case ACPI_ADR_SPACE_FIXED_HARDWARE:
+ break;
+ }
+ return AE_OK;
}
#endif
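A minimal sketch of how a native driver would use the new conflict-check helper; the driver name and port range are invented for the example:

/* Hypothetical legacy-I/O driver probing a fixed port range. */
static int __init mydrv_init(void)
{
        int err;

        /* New: ask ACPI whether AML also claims this range.  Returns
         * -EBUSY only with acpi_enforce_resources=strict; the default
         * "lax" policy merely logs a warning. */
        err = acpi_check_region(0x0295, 0x02, "mydrv");
        if (err)
                return err;

        if (!request_region(0x0295, 0x02, "mydrv"))
                return -EBUSY;

        /* ... rest of the probe ... */
        return 0;
}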
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 388300de005d..4b252ea0e952 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -44,6 +44,8 @@ struct acpi_pci_data {
struct pci_dev *dev;
};
+static int acpi_pci_unbind(struct acpi_device *device);
+
static void acpi_pci_data_handler(acpi_handle handle, u32 function,
void *context)
{
@@ -267,7 +269,7 @@ int acpi_pci_bind(struct acpi_device *device)
return result;
}
-int acpi_pci_unbind(struct acpi_device *device)
+static int acpi_pci_unbind(struct acpi_device *device)
{
int result = 0;
acpi_status status = AE_OK;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 62010c2481b3..7f19859580c7 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -51,10 +51,8 @@ static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment,
int bus,
int device, int pin)
{
- struct list_head *node = NULL;
struct acpi_prt_entry *entry = NULL;
-
if (!acpi_prt.count)
return NULL;
@@ -64,8 +62,7 @@ static struct acpi_prt_entry *acpi_pci_irq_find_prt_entry(int segment,
*
*/
spin_lock(&acpi_prt_lock);
- list_for_each(node, &acpi_prt.entries) {
- entry = list_entry(node, struct acpi_prt_entry, node);
+ list_for_each_entry(entry, &acpi_prt.entries, node) {
if ((segment == entry->id.segment)
&& (bus == entry->id.bus)
&& (device == entry->id.device)
@@ -478,8 +475,6 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
return 0;
}
-EXPORT_SYMBOL(acpi_pci_irq_enable);
-
/* FIXME: implement x86/x86_64 version */
void __attribute__ ((weak)) acpi_unregister_gsi(u32 i)
{
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 5400ea173f6f..233c40c51684 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -95,7 +95,7 @@ static struct {
int count;
struct list_head entries;
} acpi_link;
-DEFINE_MUTEX(acpi_link_lock);
+static DEFINE_MUTEX(acpi_link_lock);
/* --------------------------------------------------------------------------
PCI Link Device Management
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index af1769a20c7a..76bf6d90c700 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -458,11 +458,9 @@ int acpi_power_transition(struct acpi_device *device, int state)
}
end:
- if (result) {
+ if (result)
device->power.state = ACPI_STATE_UNKNOWN;
- printk(KERN_WARNING PREFIX "Transitioning device [%s] to D%d\n",
- device->pnp.bus_id, state);
- } else {
+ else {
/* We shouldn't change the state till all above operations succeed */
device->power.state = state;
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index e48ee4f8749f..75ccf5d18bf4 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -668,6 +668,24 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
acpi_processor_power_init(pr, device);
+ pr->cdev = thermal_cooling_device_register("Processor", device,
+ &processor_cooling_ops);
+ if (pr->cdev)
+ printk(KERN_INFO PREFIX
+ "%s is registered as cooling_device%d\n",
+ device->dev.bus_id, pr->cdev->id);
+ else
+ goto end;
+
+ result = sysfs_create_link(&device->dev.kobj, &pr->cdev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ return result;
+ result = sysfs_create_link(&pr->cdev->device.kobj, &device->dev.kobj,
+ "device");
+ if (result)
+ return result;
+
if (pr->flags.throttling) {
printk(KERN_INFO PREFIX "%s [%s] (supports",
acpi_device_name(device), acpi_device_bid(device));
@@ -791,6 +809,11 @@ static int acpi_processor_remove(struct acpi_device *device, int type)
acpi_processor_remove_fs(device);
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&pr->cdev->device.kobj, "device");
+ thermal_cooling_device_unregister(pr->cdev);
+ pr->cdev = NULL;
+
processors[pr->id] = NULL;
kfree(pr);
@@ -812,11 +835,18 @@ static int is_processor_present(acpi_handle handle)
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) {
- ACPI_EXCEPTION((AE_INFO, status, "Processor Device is not present"));
- return 0;
- }
- return 1;
+ /*
+ * if a processor object does not have an _STA object,
+ * OSPM assumes that the processor is present.
+ */
+ if (status == AE_NOT_FOUND)
+ return 1;
+
+ if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT))
+ return 1;
+
+ ACPI_EXCEPTION((AE_INFO, status, "Processor Device is not present"));
+ return 0;
}
static
@@ -1061,6 +1091,8 @@ static int __init acpi_processor_init(void)
acpi_processor_ppc_init();
+ acpi_processor_throttling_init();
+
return 0;
out_cpuidle:
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index eb1f82f79153..32003fdc91e8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -38,7 +38,7 @@
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h> /* need_resched() */
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
@@ -98,6 +98,9 @@ module_param(bm_history, uint, 0644);
static int acpi_processor_set_power_policy(struct acpi_processor *pr);
+#else /* CONFIG_CPU_IDLE */
+static unsigned int latency_factor __read_mostly = 2;
+module_param(latency_factor, uint, 0644);
#endif
/*
@@ -201,6 +204,10 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
+/*
+ * Callers should disable interrupts before the call and enable
+ * interrupts after return.
+ */
static void acpi_safe_halt(void)
{
current_thread_info()->status &= ~TS_POLLING;
@@ -261,7 +268,7 @@ static atomic_t c3_cpu_count;
/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
- if (cstate->space_id == ACPI_CSTATE_FFH) {
+ if (cstate->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cstate);
} else {
@@ -413,6 +420,8 @@ static void acpi_processor_idle(void)
pm_idle_save();
else
acpi_safe_halt();
+
+ local_irq_enable();
return;
}
@@ -521,6 +530,7 @@ static void acpi_processor_idle(void)
* skew otherwise.
*/
sleep_ticks = 0xFFFFFFFF;
+ local_irq_enable();
break;
case ACPI_STATE_C2:
@@ -648,7 +658,8 @@ static void acpi_processor_idle(void)
if (cx->promotion.state &&
((cx->promotion.state - pr->power.states) <= max_cstate)) {
if (sleep_ticks > cx->promotion.threshold.ticks &&
- cx->promotion.state->latency <= system_latency_constraint()) {
+ cx->promotion.state->latency <=
+ pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
cx->promotion.count++;
cx->demotion.count = 0;
if (cx->promotion.count >=
@@ -692,7 +703,8 @@ static void acpi_processor_idle(void)
* or if the latency of the current state is unacceptable
*/
if ((pr->power.state - pr->power.states) > max_cstate ||
- pr->power.state->latency > system_latency_constraint()) {
+ pr->power.state->latency >
+ pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
if (cx->demotion.state)
next_state = cx->demotion.state;
}
@@ -920,20 +932,20 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
cx.address = reg->address;
cx.index = current_count + 1;
- cx.space_id = ACPI_CSTATE_SYSTEMIO;
+ cx.entry_method = ACPI_CSTATE_SYSTEMIO;
if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
if (acpi_processor_ffh_cstate_probe
(pr->id, &cx, reg) == 0) {
- cx.space_id = ACPI_CSTATE_FFH;
- } else if (cx.type != ACPI_STATE_C1) {
+ cx.entry_method = ACPI_CSTATE_FFH;
+ } else if (cx.type == ACPI_STATE_C1) {
/*
* C1 is a special case where FIXED_HARDWARE
* can be handled in non-MWAIT way as well.
* In that case, save this _CST entry info.
- * That is, we retain space_id of SYSTEM_IO for
- * halt based C1.
* Otherwise, ignore this info and continue.
*/
+ cx.entry_method = ACPI_CSTATE_HALT;
+ } else {
continue;
}
}
@@ -1200,7 +1212,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
"maximum allowed latency: %d usec\n",
pr->power.state ? pr->power.state - pr->power.states : 0,
max_cstate, (unsigned)pr->power.bm_activity,
- system_latency_constraint());
+ pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));
seq_puts(seq, "states:\n");
@@ -1367,12 +1379,16 @@ static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
/**
* acpi_idle_do_entry - a helper function that does C2 and C3 type entry
* @cx: cstate data
+ *
+ * Caller disables interrupts before the call and enables them after return.
*/
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
- if (cx->space_id == ACPI_CSTATE_FFH) {
+ if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */
acpi_processor_ffh_cstate_enter(cx);
+ } else if (cx->entry_method == ACPI_CSTATE_HALT) {
+ acpi_safe_halt();
} else {
int unused;
/* IO port based C-state */
@@ -1394,21 +1410,27 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
struct cpuidle_state *state)
{
+ u32 t1, t2;
struct acpi_processor *pr;
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+
pr = processors[smp_processor_id()];
if (unlikely(!pr))
return 0;
+ local_irq_disable();
if (pr->flags.bm_check)
acpi_idle_update_bm_rld(pr, cx);
- acpi_safe_halt();
+ t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ acpi_idle_do_entry(cx);
+ t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+ local_irq_enable();
cx->usage++;
- return 0;
+ return ticks_elapsed_in_us(t1, t2);
}
/**
@@ -1515,7 +1537,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (dev->safe_state) {
return dev->safe_state->enter(dev, dev->safe_state);
} else {
+ local_irq_disable();
acpi_safe_halt();
+ local_irq_enable();
return 0;
}
}
@@ -1607,7 +1631,7 @@ struct cpuidle_driver acpi_idle_driver = {
*/
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
- int i, count = 0;
+ int i, count = CPUIDLE_DRIVER_STATE_START;
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
struct cpuidle_device *dev = &pr->power.dev;
@@ -1636,13 +1660,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
state->exit_latency = cx->latency;
- state->target_residency = cx->latency * 6;
+ state->target_residency = cx->latency * latency_factor;
state->power_usage = cx->power;
state->flags = 0;
switch (cx->type) {
case ACPI_STATE_C1:
state->flags |= CPUIDLE_FLAG_SHALLOW;
+ state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = acpi_idle_enter_c1;
dev->safe_state = state;
break;
@@ -1665,6 +1690,8 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
}
count++;
+ if (count == CPUIDLE_STATE_MAX)
+ break;
}
dev->state_count = count;
@@ -1718,8 +1745,9 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
"ACPI: processor limited to max C-state %d\n",
max_cstate);
first_run++;
-#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
- register_latency_notifier(&acpi_processor_latency_notifier);
+#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
+ pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
+ &acpi_processor_latency_notifier);
#endif
}
@@ -1806,7 +1834,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
*/
cpu_idle_wait();
#ifdef CONFIG_SMP
- unregister_latency_notifier(&acpi_processor_latency_notifier);
+ pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
+ &acpi_processor_latency_notifier);
#endif
}
#endif
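A short worked example of the new latency_factor tunable (numbers are illustrative):

/* With a C3 state advertising cx->latency = 100 us:
 *   latency_factor = 2 (new default)            -> target_residency = 200 us
 *   latency_factor = 6 (the old fixed multiplier) -> target_residency = 600 us
 * i.e. the cpuidle governors will now consider C3 for much shorter expected
 * idle periods than before, unless latency_factor is raised at module load.
 */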
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 463b0247cbc5..f32010bee4d5 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -60,6 +60,11 @@ static DEFINE_MUTEX(performance_mutex);
* policy is adjusted accordingly.
*/
+static unsigned int ignore_ppc = 0;
+module_param(ignore_ppc, uint, 0644);
+MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly " \
+ "limited by BIOS, this should help");
+
#define PPC_REGISTERED 1
#define PPC_IN_USE 2
@@ -72,6 +77,9 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
struct acpi_processor *pr;
unsigned int ppc = 0;
+ if (ignore_ppc)
+ return 0;
+
mutex_lock(&performance_mutex);
if (event != CPUFREQ_INCOMPATIBLE)
@@ -130,7 +138,13 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
{
- int ret = acpi_processor_get_platform_limit(pr);
+ int ret;
+
+ if (ignore_ppc)
+ return 0;
+
+ ret = acpi_processor_get_platform_limit(pr);
+
if (ret < 0)
return (ret);
else
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 06e6f3fb8825..9cb43f52f7b6 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -32,6 +32,7 @@
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/sysdev.h>
#include <asm/uaccess.h>
@@ -93,6 +94,9 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
* _any_ cpufreq driver and not only the acpi-cpufreq driver.
*/
+#define CPUFREQ_THERMAL_MIN_STEP 0
+#define CPUFREQ_THERMAL_MAX_STEP 3
+
static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS];
static unsigned int acpi_thermal_cpufreq_is_init = 0;
@@ -109,8 +113,9 @@ static int acpi_thermal_cpufreq_increase(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return -ENODEV;
- if (cpufreq_thermal_reduction_pctg[cpu] < 60) {
- cpufreq_thermal_reduction_pctg[cpu] += 20;
+ if (cpufreq_thermal_reduction_pctg[cpu] <
+ CPUFREQ_THERMAL_MAX_STEP) {
+ cpufreq_thermal_reduction_pctg[cpu]++;
cpufreq_update_policy(cpu);
return 0;
}
@@ -123,8 +128,9 @@ static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
if (!cpu_has_cpufreq(cpu))
return -ENODEV;
- if (cpufreq_thermal_reduction_pctg[cpu] > 20)
- cpufreq_thermal_reduction_pctg[cpu] -= 20;
+ if (cpufreq_thermal_reduction_pctg[cpu] >
+ (CPUFREQ_THERMAL_MIN_STEP + 1))
+ cpufreq_thermal_reduction_pctg[cpu]--;
else
cpufreq_thermal_reduction_pctg[cpu] = 0;
cpufreq_update_policy(cpu);
@@ -143,7 +149,7 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
max_freq =
(policy->cpuinfo.max_freq *
- (100 - cpufreq_thermal_reduction_pctg[policy->cpu])) / 100;
+ (100 - cpufreq_thermal_reduction_pctg[policy->cpu] * 20)) / 100;
cpufreq_verify_within_limits(policy, 0, max_freq);
@@ -155,6 +161,32 @@ static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
.notifier_call = acpi_thermal_cpufreq_notifier,
};
+static int cpufreq_get_max_state(unsigned int cpu)
+{
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+ return CPUFREQ_THERMAL_MAX_STEP;
+}
+
+static int cpufreq_get_cur_state(unsigned int cpu)
+{
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+ return cpufreq_thermal_reduction_pctg[cpu];
+}
+
+static int cpufreq_set_cur_state(unsigned int cpu, int state)
+{
+ if (!cpu_has_cpufreq(cpu))
+ return 0;
+
+ cpufreq_thermal_reduction_pctg[cpu] = state;
+ cpufreq_update_policy(cpu);
+ return 0;
+}
+
void acpi_thermal_cpufreq_init(void)
{
int i;
@@ -179,6 +211,20 @@ void acpi_thermal_cpufreq_exit(void)
}
#else /* ! CONFIG_CPU_FREQ */
+static int cpufreq_get_max_state(unsigned int cpu)
+{
+ return 0;
+}
+
+static int cpufreq_get_cur_state(unsigned int cpu)
+{
+ return 0;
+}
+
+static int cpufreq_set_cur_state(unsigned int cpu, int state)
+{
+ return 0;
+}
static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
@@ -310,6 +356,84 @@ int acpi_processor_get_limit_info(struct acpi_processor *pr)
return 0;
}
+/* thermal cooling device callbacks */
+static int acpi_processor_max_state(struct acpi_processor *pr)
+{
+ int max_state = 0;
+
+ /*
+ * There are four states according to
+ * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3
+ */
+ max_state += cpufreq_get_max_state(pr->id);
+ if (pr->flags.throttling)
+ max_state += (pr->throttling.state_count - 1);
+
+ return max_state;
+}
+static int
+processor_get_max_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_processor *pr = acpi_driver_data(device);
+
+ if (!device || !pr)
+ return -EINVAL;
+
+ return sprintf(buf, "%d\n", acpi_processor_max_state(pr));
+}
+
+static int
+processor_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_processor *pr = acpi_driver_data(device);
+ int cur_state;
+
+ if (!device || !pr)
+ return -EINVAL;
+
+ cur_state = cpufreq_get_cur_state(pr->id);
+ if (pr->flags.throttling)
+ cur_state += pr->throttling.state;
+
+ return sprintf(buf, "%d\n", cur_state);
+}
+
+static int
+processor_set_cur_state(struct thermal_cooling_device *cdev, unsigned int state)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_processor *pr = acpi_driver_data(device);
+ int result = 0;
+ int max_pstate;
+
+ if (!device || !pr)
+ return -EINVAL;
+
+ max_pstate = cpufreq_get_max_state(pr->id);
+
+ if (state > acpi_processor_max_state(pr))
+ return -EINVAL;
+
+ if (state <= max_pstate) {
+ if (pr->flags.throttling && pr->throttling.state)
+ result = acpi_processor_set_throttling(pr, 0);
+ cpufreq_set_cur_state(pr->id, state);
+ } else {
+ cpufreq_set_cur_state(pr->id, max_pstate);
+ result = acpi_processor_set_throttling(pr,
+ state - max_pstate);
+ }
+ return result;
+}
+
+struct thermal_cooling_device_ops processor_cooling_ops = {
+ .get_max_state = processor_get_max_state,
+ .get_cur_state = processor_get_cur_state,
+ .set_cur_state = processor_set_cur_state,
+};
+
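The cooling-device callbacks above expose one combined state index: cpufreq reduction steps come first, and throttling T-states are stacked on top. The sketch below walks that split the same way processor_set_cur_state() does; the step and T-state counts are example values, not data from any platform.

/* Sketch only: splitting a combined cooling state into a cpufreq step and a
 * throttling T-state. max_pstate and t_state_count are assumed example values.
 */
#include <stdio.h>

int main(void)
{
        const int max_pstate = 3;      /* cpufreq steps 0..3 */
        const int t_state_count = 8;   /* T0..T7 */
        const int max_state = max_pstate + (t_state_count - 1);
        int state;

        for (state = 0; state <= max_state; state++) {
                if (state <= max_pstate)
                        printf("state %2d -> cpufreq step %d, T0\n",
                               state, state);
                else
                        printf("state %2d -> cpufreq step %d, T%d\n",
                               state, max_pstate, state - max_pstate);
        }
        return 0;
}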
/* /proc interface */
static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 1685b40abda7..1b8e592a8241 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -45,9 +45,229 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_throttling");
+struct throttling_tstate {
+ unsigned int cpu; /* cpu nr */
+ int target_state; /* target T-state */
+};
+
+#define THROTTLING_PRECHANGE (1)
+#define THROTTLING_POSTCHANGE (2)
+
static int acpi_processor_get_throttling(struct acpi_processor *pr);
int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
+static int acpi_processor_update_tsd_coord(void)
+{
+ int count, count_target;
+ int retval = 0;
+ unsigned int i, j;
+ cpumask_t covered_cpus;
+ struct acpi_processor *pr, *match_pr;
+ struct acpi_tsd_package *pdomain, *match_pdomain;
+ struct acpi_processor_throttling *pthrottling, *match_pthrottling;
+
+ /*
+ * Now that we have _TSD data from all CPUs, let's set up T-state
+ * coordination among all CPUs.
+ */
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr)
+ continue;
+
+ /* Basic validity check for domain info */
+ pthrottling = &(pr->throttling);
+
+ /*
+ * If the _TSD package for any CPU is invalid, the coordination
+ * among all CPUs is treated as invalid.
+ * This may be overly conservative.
+ */
+ if (!pthrottling->tsd_valid_flag) {
+ retval = -EINVAL;
+ break;
+ }
+ }
+ if (retval)
+ goto err_ret;
+
+ cpus_clear(covered_cpus);
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr)
+ continue;
+
+ if (cpu_isset(i, covered_cpus))
+ continue;
+ pthrottling = &pr->throttling;
+
+ pdomain = &(pthrottling->domain_info);
+ cpu_set(i, pthrottling->shared_cpu_map);
+ cpu_set(i, covered_cpus);
+ /*
+ * If the number of processors in the _TSD domain is 1, it is
+ * unnecessary to parse the coordination for this CPU.
+ */
+ if (pdomain->num_processors <= 1)
+ continue;
+
+ /* Validate the Domain info */
+ count_target = pdomain->num_processors;
+ count = 1;
+
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = processors[j];
+ if (!match_pr)
+ continue;
+
+ match_pthrottling = &(match_pr->throttling);
+ match_pdomain = &(match_pthrottling->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ /* Here i and j are in the same domain.
+ * If two _TSD packages have the same domain, they
+ * should have the same num_processors and
+ * coordination type. Otherwise the data is regarded
+ * as invalid.
+ */
+ if (match_pdomain->num_processors != count_target) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ if (pdomain->coord_type != match_pdomain->coord_type) {
+ retval = -EINVAL;
+ goto err_ret;
+ }
+
+ cpu_set(j, covered_cpus);
+ cpu_set(j, pthrottling->shared_cpu_map);
+ count++;
+ }
+ for_each_possible_cpu(j) {
+ if (i == j)
+ continue;
+
+ match_pr = processors[j];
+ if (!match_pr)
+ continue;
+
+ match_pthrottling = &(match_pr->throttling);
+ match_pdomain = &(match_pthrottling->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
+
+ /*
+ * If some CPUs have the same domain, they
+ * will have the same shared_cpu_map.
+ */
+ match_pthrottling->shared_cpu_map =
+ pthrottling->shared_cpu_map;
+ }
+ }
+
+err_ret:
+ for_each_possible_cpu(i) {
+ pr = processors[i];
+ if (!pr)
+ continue;
+
+ /*
+ * Assume no coordination on any error parsing domain info.
+ * The coordination type will be forced as SW_ALL.
+ */
+ if (retval) {
+ pthrottling = &(pr->throttling);
+ cpus_clear(pthrottling->shared_cpu_map);
+ cpu_set(i, pthrottling->shared_cpu_map);
+ pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+ }
+ }
+
+ return retval;
+}
+
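acpi_processor_update_tsd_coord() above groups CPUs whose _TSD domain ids match and gives each group an identical shared_cpu_map. Here is a toy sketch of that grouping with plain bitmasks standing in for cpumask_t; the domain table is invented example data.

/* Illustrative sketch, not kernel code. */
#include <stdio.h>

int main(void)
{
        const int ncpus = 4;
        const int domain[4] = { 0, 0, 1, 1 };   /* _TSD domain per CPU */
        unsigned int shared[4] = { 0 };
        int i, j;

        for (i = 0; i < ncpus; i++)
                for (j = 0; j < ncpus; j++)
                        if (domain[j] == domain[i])
                                shared[i] |= 1u << j; /* cpu_set(j, map) */

        for (i = 0; i < ncpus; i++)
                printf("cpu%d shared_cpu_map = 0x%x\n", i, shared[i]);
        return 0;
}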
+/*
+ * Update the T-state coordination after the _TSD
+ * data for all CPUs has been obtained.
+ */
+void acpi_processor_throttling_init(void)
+{
+ if (acpi_processor_update_tsd_coord())
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Assume no T-state coordination\n"));
+
+ return;
+}
+
+static int acpi_processor_throttling_notifier(unsigned long event, void *data)
+{
+ struct throttling_tstate *p_tstate = data;
+ struct acpi_processor *pr;
+ unsigned int cpu;
+ int target_state;
+ struct acpi_processor_limit *p_limit;
+ struct acpi_processor_throttling *p_throttling;
+
+ cpu = p_tstate->cpu;
+ pr = processors[cpu];
+ if (!pr) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid pr pointer\n"));
+ return 0;
+ }
+ if (!pr->flags.throttling) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Throttling control is "
+ "unsupported on CPU %d\n", cpu));
+ return 0;
+ }
+ target_state = p_tstate->target_state;
+ p_throttling = &(pr->throttling);
+ switch (event) {
+ case THROTTLING_PRECHANGE:
+ /*
+ * The prechange event is used to choose a proper T-state,
+ * one that meets the thermal, user and _TPC limits.
+ */
+ p_limit = &pr->limit;
+ if (p_limit->thermal.tx > target_state)
+ target_state = p_limit->thermal.tx;
+ if (p_limit->user.tx > target_state)
+ target_state = p_limit->user.tx;
+ if (pr->throttling_platform_limit > target_state)
+ target_state = pr->throttling_platform_limit;
+ if (target_state >= p_throttling->state_count) {
+ printk(KERN_WARNING
+ "Exceed the limit of T-state \n");
+ target_state = p_throttling->state_count - 1;
+ }
+ p_tstate->target_state = target_state;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PreChange Event:"
+ "target T-state of CPU %d is T%d\n",
+ cpu, target_state));
+ break;
+ case THROTTLING_POSTCHANGE:
+ /*
+ * The postchange event is only used to update the
+ * T-state flag of acpi_processor_throttling.
+ */
+ p_throttling->state = target_state;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "PostChange Event:"
+ "CPU %d is switched to T%d\n",
+ cpu, target_state));
+ break;
+ default:
+ printk(KERN_WARNING
+ "Unsupported Throttling notifier event\n");
+ break;
+ }
+
+ return 0;
+}
+
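The PRECHANGE branch above raises the requested T-state to the strictest of the thermal, user and _TPC limits, then clamps it to the number of available states. A standalone sketch of that selection follows; the helper name pick_target_tstate and the limit values are example assumptions.

/* Sketch of the PRECHANGE policy, not the driver code. */
#include <stdio.h>

static int pick_target_tstate(int requested, int thermal_tx, int user_tx,
                              int platform_limit, int state_count)
{
        int target = requested;

        if (thermal_tx > target)
                target = thermal_tx;
        if (user_tx > target)
                target = user_tx;
        if (platform_limit > target)
                target = platform_limit;
        if (target >= state_count)          /* clamp to the last T-state */
                target = state_count - 1;
        return target;
}

int main(void)
{
        printf("target = T%d\n", pick_target_tstate(1, 3, 0, 2, 8));
        return 0;
}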
/*
* _TPC - Throttling Present Capabilities
*/
@@ -293,6 +513,10 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
struct acpi_buffer state = { 0, NULL };
union acpi_object *tsd = NULL;
struct acpi_tsd_package *pdomain;
+ struct acpi_processor_throttling *pthrottling;
+
+ pthrottling = &pr->throttling;
+ pthrottling->tsd_valid_flag = 0;
status = acpi_evaluate_object(pr->handle, "_TSD", NULL, &buffer);
if (ACPI_FAILURE(status)) {
@@ -340,6 +564,22 @@ static int acpi_processor_get_tsd(struct acpi_processor *pr)
goto end;
}
+ pthrottling = &pr->throttling;
+ pthrottling->tsd_valid_flag = 1;
+ pthrottling->shared_type = pdomain->coord_type;
+ cpu_set(pr->id, pthrottling->shared_cpu_map);
+ /*
+ * If the coordination type is not defined in the ACPI spec,
+ * the tsd_valid_flag is cleared and the coordination type
+ * is forced to DOMAIN_COORD_TYPE_SW_ALL.
+ */
+ if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
+ pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
+ pthrottling->tsd_valid_flag = 0;
+ pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+ }
+
end:
kfree(buffer.pointer);
return result;
@@ -589,6 +829,11 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
cpumask_t saved_mask;
int ret;
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
/*
* Migrate task to the cpu pointed by pr.
*/
@@ -742,13 +987,92 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
{
cpumask_t saved_mask;
- int ret;
+ int ret = 0;
+ unsigned int i;
+ struct acpi_processor *match_pr;
+ struct acpi_processor_throttling *p_throttling;
+ struct throttling_tstate t_state;
+ cpumask_t online_throttling_cpus;
+
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+ if ((state < 0) || (state > (pr->throttling.state_count - 1)))
+ return -EINVAL;
+
+ saved_mask = current->cpus_allowed;
+ t_state.target_state = state;
+ p_throttling = &(pr->throttling);
+ cpus_and(online_throttling_cpus, cpu_online_map,
+ p_throttling->shared_cpu_map);
/*
- * Migrate task to the cpu pointed by pr.
+ * The throttling notifier will be called for every
+ * affected CPU in order to pick a proper T-state.
+ * The notifier event is THROTTLING_PRECHANGE.
*/
- saved_mask = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(pr->id));
- ret = pr->throttling.acpi_processor_set_throttling(pr, state);
+ for_each_cpu_mask(i, online_throttling_cpus) {
+ t_state.cpu = i;
+ acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
+ &t_state);
+ }
+ /*
+ * The acpi_processor_set_throttling callback is invoked to switch
+ * the T-state. If the coordination type is SW_ALL or HW_ALL, it
+ * must be called for every affected CPU; otherwise it only needs
+ * to be called for the CPU pointed to by pr.
+ */
+ if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
+ set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+ ret = p_throttling->acpi_processor_set_throttling(pr,
+ t_state.target_state);
+ } else {
+ /*
+ * When the T-state coordination is SW_ALL or HW_ALL,
+ * it is necessary to set the T-state for every
+ * affected CPU.
+ */
+ for_each_cpu_mask(i, online_throttling_cpus) {
+ match_pr = processors[i];
+ /*
+ * If the pointer is invalid, we will report the
+ * error message and continue.
+ */
+ if (!match_pr) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Invalid Pointer for CPU %d\n", i));
+ continue;
+ }
+ /*
+ * If the throttling control is unsupported on CPU i,
+ * we will report the error message and continue.
+ */
+ if (!match_pr->flags.throttling) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Throttling Controll is unsupported "
+ "on CPU %d\n", i));
+ continue;
+ }
+ t_state.cpu = i;
+ set_cpus_allowed(current, cpumask_of_cpu(i));
+ ret = match_pr->throttling.
+ acpi_processor_set_throttling(
+ match_pr, t_state.target_state);
+ }
+ }
+ /*
+ * After the set_throttling is called, the
+ * throttling notifier is called for every
+ * affected CPU to update its T-state flag.
+ * The notifier event is THROTTLING_POSTCHANGE.
+ */
+ for_each_cpu_mask(i, online_throttling_cpus) {
+ t_state.cpu = i;
+ acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
+ &t_state);
+ }
/* restore the previous state */
set_cpus_allowed(current, saved_mask);
return ret;
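The rewritten acpi_processor_set_throttling() above dispatches on the coordination type: for SW_ANY a single write on the owning CPU is enough, while SW_ALL and HW_ALL are handled by writing every CPU that shares the domain. A simplified sketch of that dispatch, with a plain bitmask in place of the online/shared cpumask and invented constant and helper names:

/* Sketch only; constants and helpers below are stand-ins. */
#include <stdio.h>

enum coord { SW_ANY, SW_ALL, HW_ALL };

static void set_tstate(int cpu, int state)
{
        printf("cpu%d -> T%d\n", cpu, state);
}

static void set_throttling(enum coord type, unsigned int shared_mask,
                           int owner_cpu, int state)
{
        int cpu;

        if (type == SW_ANY) {
                /* one write covers the whole domain */
                set_tstate(owner_cpu, state);
                return;
        }
        /* SW_ALL / HW_ALL: write every CPU in the shared mask */
        for (cpu = 0; cpu < 32; cpu++)
                if (shared_mask & (1u << cpu))
                        set_tstate(cpu, state);
}

int main(void)
{
        set_throttling(SW_ALL, 0x3, 0, 2);
        return 0;
}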
@@ -757,6 +1081,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
int acpi_processor_get_throttling_info(struct acpi_processor *pr)
{
int result = 0;
+ struct acpi_processor_throttling *pthrottling;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n",
@@ -788,7 +1113,16 @@ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
&acpi_processor_set_throttling_ptc;
}
- acpi_processor_get_tsd(pr);
+ /*
+ * If the _TSD package for a CPU can't be parsed successfully, it means
+ * that this CPU will have no coordination with other CPUs.
+ */
+ if (acpi_processor_get_tsd(pr)) {
+ pthrottling = &pr->throttling;
+ pthrottling->tsd_valid_flag = 0;
+ cpu_set(pr->id, pthrottling->shared_cpu_map);
+ pthrottling->shared_type = DOMAIN_COORD_TYPE_SW_ALL;
+ }
/*
* PIIX4 Errata: We don't support throttling on the original PIIX4.
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index f136c7d3b3c2..1194105cc3ca 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -888,7 +888,7 @@ static void acpi_charger_remove(struct acpi_sbs *sbs)
#endif
}
-void acpi_sbs_callback(void *context)
+static void acpi_sbs_callback(void *context)
{
int id;
struct acpi_sbs *sbs = context;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index fd40b6a1d639..ae9a90438e2f 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -111,8 +111,8 @@ static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
return -ETIME;
}
-int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, u8 address,
- u8 command, u8 *data, u8 length)
+static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
+ u8 address, u8 command, u8 *data, u8 length)
{
int ret = -EFAULT, i;
u8 temp, sz = 0;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index d9d531cce27f..3fac011f9cf9 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -59,7 +59,7 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
count = snprintf(&modalias[len], size, "%s:",
cid_list->id[i].value);
if (count < 0 || count >= size) {
- printk(KERN_ERR "acpi: %s cid[%i] exceeds event buffer size",
+ printk(KERN_ERR PREFIX "%s cid[%i] exceeds event buffer size",
acpi_dev->pnp.device_name, i);
break;
}
@@ -453,7 +453,7 @@ static int acpi_device_register(struct acpi_device *device,
device->dev.release = &acpi_device_release;
result = device_add(&device->dev);
if(result) {
- printk("Error adding device %s", device->dev.bus_id);
+ printk(KERN_ERR PREFIX "Error adding device %s", device->dev.bus_id);
goto end;
}
@@ -941,6 +941,15 @@ static int acpi_bay_match(struct acpi_device *device){
return -ENODEV;
}
+/*
+ * acpi_dock_match - see if a device has a _DCK method
+ */
+static int acpi_dock_match(struct acpi_device *device)
+{
+ acpi_handle tmp;
+ return acpi_get_handle(device->handle, "_DCK", &tmp);
+}
+
static void acpi_device_set_id(struct acpi_device *device,
struct acpi_device *parent, acpi_handle handle,
int type)
@@ -950,13 +959,14 @@ static void acpi_device_set_id(struct acpi_device *device,
char *hid = NULL;
char *uid = NULL;
struct acpi_compatible_id_list *cid_list = NULL;
+ const char *cid_add = NULL;
acpi_status status;
switch (type) {
case ACPI_BUS_TYPE_DEVICE:
status = acpi_get_object_info(handle, &buffer);
if (ACPI_FAILURE(status)) {
- printk("%s: Error reading device info\n", __FUNCTION__);
+ printk(KERN_ERR PREFIX "%s: Error reading device info\n", __FUNCTION__);
return;
}
@@ -972,15 +982,18 @@ static void acpi_device_set_id(struct acpi_device *device,
device->flags.bus_address = 1;
}
- if(!(info->valid & (ACPI_VALID_HID | ACPI_VALID_CID))){
- status = acpi_video_bus_match(device);
- if(ACPI_SUCCESS(status))
- hid = ACPI_VIDEO_HID;
+ /* If we have a video/bay/dock device, add our self-defined
+ HID to the CID list. That way the video/bay/dock drivers
+ will get autoloaded and the device might still match
+ against another driver.
+ */
+ if (ACPI_SUCCESS(acpi_video_bus_match(device)))
+ cid_add = ACPI_VIDEO_HID;
+ else if (ACPI_SUCCESS(acpi_bay_match(device)))
+ cid_add = ACPI_BAY_HID;
+ else if (ACPI_SUCCESS(acpi_dock_match(device)))
+ cid_add = ACPI_DOCK_HID;
- status = acpi_bay_match(device);
- if (ACPI_SUCCESS(status))
- hid = ACPI_BAY_HID;
- }
break;
case ACPI_BUS_TYPE_POWER:
hid = ACPI_POWER_HID;
@@ -1021,12 +1034,45 @@ static void acpi_device_set_id(struct acpi_device *device,
strcpy(device->pnp.unique_id, uid);
device->flags.unique_id = 1;
}
- if (cid_list) {
- device->pnp.cid_list = kmalloc(cid_list->size, GFP_KERNEL);
- if (device->pnp.cid_list)
- memcpy(device->pnp.cid_list, cid_list, cid_list->size);
- else
- printk(KERN_ERR "Memory allocation error\n");
+ if (cid_list || cid_add) {
+ struct acpi_compatible_id_list *list;
+ int size = 0;
+ int count = 0;
+
+ if (cid_list) {
+ size = cid_list->size;
+ } else if (cid_add) {
+ size = sizeof(struct acpi_compatible_id_list);
+ cid_list = ACPI_ALLOCATE_ZEROED((acpi_size) size);
+ if (!cid_list) {
+ printk(KERN_ERR "Memory allocation error\n");
+ kfree(buffer.pointer);
+ return;
+ } else {
+ cid_list->count = 0;
+ cid_list->size = size;
+ }
+ }
+ if (cid_add)
+ size += sizeof(struct acpi_compatible_id);
+ list = kmalloc(size, GFP_KERNEL);
+
+ if (list) {
+ if (cid_list) {
+ memcpy(list, cid_list, cid_list->size);
+ count = cid_list->count;
+ }
+ if (cid_add) {
+ strncpy(list->id[count].value, cid_add,
+ ACPI_MAX_CID_LENGTH);
+ count++;
+ device->flags.compatible_ids = 1;
+ }
+ list->size = size;
+ list->count = count;
+ device->pnp.cid_list = list;
+ } else
+ printk(KERN_ERR PREFIX "Memory allocation error\n");
}
kfree(buffer.pointer);
@@ -1050,7 +1096,7 @@ static int acpi_device_set_context(struct acpi_device *device, int type)
acpi_bus_data_handler, device);
if (ACPI_FAILURE(status)) {
- printk("Error attaching device data\n");
+ printk(KERN_ERR PREFIX "Error attaching device data\n");
result = -ENODEV;
}
}
@@ -1081,6 +1127,20 @@ static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
}
static int
+acpi_is_child_device(struct acpi_device *device,
+ int (*matcher)(struct acpi_device *))
+{
+ int result = -ENODEV;
+
+ do {
+ if (ACPI_SUCCESS(matcher(device)))
+ return AE_OK;
+ } while ((device = device->parent));
+
+ return result;
+}
+
+static int
acpi_add_single_object(struct acpi_device **child,
struct acpi_device *parent, acpi_handle handle, int type,
struct acpi_bus_ops *ops)
@@ -1131,10 +1191,20 @@ acpi_add_single_object(struct acpi_device **child,
case ACPI_BUS_TYPE_PROCESSOR:
case ACPI_BUS_TYPE_DEVICE:
result = acpi_bus_get_status(device);
- if (ACPI_FAILURE(result) || !device->status.present) {
- result = -ENOENT;
+ if (ACPI_FAILURE(result)) {
+ result = -ENODEV;
goto end;
}
+ if (!device->status.present) {
+ /* Bay and dock should be handled even if absent */
+ if (!ACPI_SUCCESS(
+ acpi_is_child_device(device, acpi_bay_match)) &&
+ !ACPI_SUCCESS(
+ acpi_is_child_device(device, acpi_dock_match))) {
+ result = -ENODEV;
+ goto end;
+ }
+ }
break;
default:
STRUCT_TO_INT(device->status) =
diff --git a/drivers/acpi/sleep/main.c b/drivers/acpi/sleep/main.c
index 485de1347075..293a1cbb47c0 100644
--- a/drivers/acpi/sleep/main.c
+++ b/drivers/acpi/sleep/main.c
@@ -170,7 +170,7 @@ static int acpi_pm_enter(suspend_state_t pm_state)
/* Reprogram control registers and execute _BFS */
acpi_leave_sleep_state_prep(acpi_state);
- /* ACPI 3.0 specs (P62) says that it's the responsabilty
+ /* ACPI 3.0 specs (P62) says that it's the responsibility
* of the OSPM to clear the status bit [ implying that the
* POWER_BUTTON event should not reach userspace ]
*/
@@ -472,11 +472,20 @@ int acpi_pm_device_sleep_state(struct device *dev, int wake, int *d_min_p)
if (acpi_target_sleep_state == ACPI_STATE_S0 ||
(wake && adev->wakeup.state.enabled &&
adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+ acpi_status status;
+
acpi_method[3] = 'W';
- acpi_evaluate_integer(handle, acpi_method, NULL, &d_max);
- /* Sanity check */
- if (d_max < d_min)
+ status = acpi_evaluate_integer(handle, acpi_method, NULL,
+ &d_max);
+ if (ACPI_FAILURE(status)) {
+ d_max = d_min;
+ } else if (d_max < d_min) {
+ /* Warn the user of the broken DSDT */
+ printk(KERN_WARNING "ACPI: Wrong value from %s\n",
+ acpi_method);
+ /* Sanitize it */
d_min = d_max;
+ }
}
if (d_min_p)
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c
index 1538355c266b..f8df5217d477 100644
--- a/drivers/acpi/sleep/proc.c
+++ b/drivers/acpi/sleep/proc.c
@@ -178,6 +178,9 @@ static int get_date_field(char **p, u32 * value)
* Try to find delimeter, only to insert null. The end of the
* string won't have one, but is still valid.
*/
+ if (*p == NULL)
+ return result;
+
next = strpbrk(*p, "- :");
if (next)
*next++ = '\0';
@@ -190,6 +193,8 @@ static int get_date_field(char **p, u32 * value)
if (next)
*p = next;
+ else
+ *p = NULL;
return result;
}
@@ -251,27 +256,6 @@ acpi_system_write_alarm(struct file *file,
if ((result = get_date_field(&p, &sec)))
goto end;
- if (sec > 59) {
- min += 1;
- sec -= 60;
- }
- if (min > 59) {
- hr += 1;
- min -= 60;
- }
- if (hr > 23) {
- day += 1;
- hr -= 24;
- }
- if (day > 31) {
- mo += 1;
- day -= 31;
- }
- if (mo > 12) {
- yr += 1;
- mo -= 12;
- }
-
spin_lock_irq(&rtc_lock);
rtc_control = CMOS_READ(RTC_CONTROL);
@@ -288,24 +272,24 @@ acpi_system_write_alarm(struct file *file,
spin_unlock_irq(&rtc_lock);
if (sec > 59) {
- min++;
- sec -= 60;
+ min += sec/60;
+ sec = sec%60;
}
if (min > 59) {
- hr++;
- min -= 60;
+ hr += min/60;
+ min = min%60;
}
if (hr > 23) {
- day++;
- hr -= 24;
+ day += hr/24;
+ hr = hr%24;
}
if (day > 31) {
- mo++;
- day -= 31;
+ mo += day/32;
+ day = day%32;
}
if (mo > 12) {
- yr++;
- mo -= 12;
+ yr += mo/13;
+ mo = mo%13;
}
spin_lock_irq(&rtc_lock);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 5ffe0ea18967..55cf4c05bb74 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -40,6 +40,8 @@ ACPI_MODULE_NAME("system");
#define ACPI_SYSTEM_CLASS "system"
#define ACPI_SYSTEM_DEVICE_NAME "System"
+u32 acpi_irq_handled;
+
/*
* Make ACPICA version work as module param
*/
@@ -166,6 +168,212 @@ static int acpi_system_sysfs_init(void)
return 0;
}
+/*
+ * Detailed ACPI IRQ counters in /sys/firmware/acpi/interrupts/
+ * See Documentation/ABI/testing/sysfs-firmware-acpi
+ */
+
+#define COUNT_GPE 0
+#define COUNT_SCI 1 /* acpi_irq_handled */
+#define COUNT_ERROR 2 /* other */
+#define NUM_COUNTERS_EXTRA 3
+
+static u32 *all_counters;
+static u32 num_gpes;
+static u32 num_counters;
+static struct attribute **all_attrs;
+static u32 acpi_gpe_count;
+
+static struct attribute_group interrupt_stats_attr_group = {
+ .name = "interrupts",
+};
+static struct kobj_attribute *counter_attrs;
+
+static int count_num_gpes(void)
+{
+ int count = 0;
+ struct acpi_gpe_xrupt_info *gpe_xrupt_info;
+ struct acpi_gpe_block_info *gpe_block;
+ acpi_cpu_flags flags;
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
+ while (gpe_xrupt_info) {
+ gpe_block = gpe_xrupt_info->gpe_block_list_head;
+ while (gpe_block) {
+ count += gpe_block->register_count *
+ ACPI_GPE_REGISTER_WIDTH;
+ gpe_block = gpe_block->next;
+ }
+ gpe_xrupt_info = gpe_xrupt_info->next;
+ }
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+
+ return count;
+}
+
+static void delete_gpe_attr_array(void)
+{
+ u32 *tmp = all_counters;
+
+ all_counters = NULL;
+ kfree(tmp);
+
+ if (counter_attrs) {
+ int i;
+
+ for (i = 0; i < num_gpes; i++)
+ kfree(counter_attrs[i].attr.name);
+
+ kfree(counter_attrs);
+ }
+ kfree(all_attrs);
+
+ return;
+}
+
+void acpi_os_gpe_count(u32 gpe_number)
+{
+ acpi_gpe_count++;
+
+ if (!all_counters)
+ return;
+
+ if (gpe_number < num_gpes)
+ all_counters[gpe_number]++;
+ else
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++;
+
+ return;
+}
+
+void acpi_os_fixed_event_count(u32 event_number)
+{
+ if (!all_counters)
+ return;
+
+ if (event_number < ACPI_NUM_FIXED_EVENTS)
+ all_counters[num_gpes + event_number]++;
+ else
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR]++;
+
+ return;
+}
+
+static ssize_t counter_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI] =
+ acpi_irq_handled;
+ all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE] =
+ acpi_gpe_count;
+
+ return sprintf(buf, "%d\n", all_counters[attr - counter_attrs]);
+}
+
+/*
+ * counter_set() sets the specified counter.
+ * Setting the total "sci" file to any value clears all counters.
+ */
+static ssize_t counter_set(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t size)
+{
+ int index = attr - counter_attrs;
+
+ if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
+ int i;
+ for (i = 0; i < num_counters; ++i)
+ all_counters[i] = 0;
+ acpi_gpe_count = 0;
+ acpi_irq_handled = 0;
+
+ } else
+ all_counters[index] = strtoul(buf, NULL, 0);
+
+ return size;
+}
+
+void acpi_irq_stats_init(void)
+{
+ int i;
+
+ if (all_counters)
+ return;
+
+ num_gpes = count_num_gpes();
+ num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
+
+ all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
+ GFP_KERNEL);
+ if (all_attrs == NULL)
+ return;
+
+ all_counters = kzalloc(sizeof(u32) * (num_counters), GFP_KERNEL);
+ if (all_counters == NULL)
+ goto fail;
+
+ counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
+ GFP_KERNEL);
+ if (counter_attrs == NULL)
+ goto fail;
+
+ for (i = 0; i < num_counters; ++i) {
+ char buffer[10];
+ char *name;
+
+ if (i < num_gpes)
+ sprintf(buffer, "gpe%02X", i);
+ else if (i == num_gpes + ACPI_EVENT_PMTIMER)
+ sprintf(buffer, "ff_pmtimer");
+ else if (i == num_gpes + ACPI_EVENT_GLOBAL)
+ sprintf(buffer, "ff_gbl_lock");
+ else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
+ sprintf(buffer, "ff_pwr_btn");
+ else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
+ sprintf(buffer, "ff_slp_btn");
+ else if (i == num_gpes + ACPI_EVENT_RTC)
+ sprintf(buffer, "ff_rt_clk");
+ else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
+ sprintf(buffer, "gpe_all");
+ else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
+ sprintf(buffer, "sci");
+ else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
+ sprintf(buffer, "error");
+ else
+ sprintf(buffer, "bug%02X", i);
+
+ name = kzalloc(strlen(buffer) + 1, GFP_KERNEL);
+ if (name == NULL)
+ goto fail;
+ strncpy(name, buffer, strlen(buffer) + 1);
+
+ counter_attrs[i].attr.name = name;
+ counter_attrs[i].attr.mode = 0644;
+ counter_attrs[i].show = counter_show;
+ counter_attrs[i].store = counter_set;
+
+ all_attrs[i] = &counter_attrs[i].attr;
+ }
+
+ interrupt_stats_attr_group.attrs = all_attrs;
+ if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
+ return;
+
+fail:
+ delete_gpe_attr_array();
+ return;
+}
+
+static void __exit interrupt_stats_exit(void)
+{
+ sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
+
+ delete_gpe_attr_array();
+
+ return;
+}
+
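The sysfs counters above share one flat array: GPE counters first, then the ACPI fixed events, then the three extra entries (gpe_all, sci, error). A sketch of that index layout follows; num_gpes and the fixed-event count are example numbers rather than values read from hardware.

/* Sketch of the counter index layout, not kernel code. */
#include <stdio.h>

#define COUNT_GPE   0
#define COUNT_SCI   1
#define COUNT_ERROR 2

int main(void)
{
        const unsigned int num_gpes = 4;
        const unsigned int num_fixed = 5;   /* stands in for ACPI_NUM_FIXED_EVENTS */
        const unsigned int num_counters = num_gpes + num_fixed + 3;
        unsigned int i;

        for (i = 0; i < num_counters; i++) {
                if (i < num_gpes)
                        printf("%2u: gpe%02X\n", i, i);
                else if (i < num_gpes + num_fixed)
                        printf("%2u: fixed event %u\n", i, i - num_gpes);
                else if (i == num_gpes + num_fixed + COUNT_GPE)
                        printf("%2u: gpe_all\n", i);
                else if (i == num_gpes + num_fixed + COUNT_SCI)
                        printf("%2u: sci\n", i);
                else
                        printf("%2u: error\n", i);
        }
        return 0;
}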
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/tables/Makefile b/drivers/acpi/tables/Makefile
index 0a7d7afac255..7385efa61622 100644
--- a/drivers/acpi/tables/Makefile
+++ b/drivers/acpi/tables/Makefile
@@ -2,6 +2,6 @@
# Makefile for all Linux ACPI interpreter subdirectories
#
-obj-y := tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o
+obj-y := tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
EXTRA_CFLAGS += $(ACPI_CFLAGS)
diff --git a/drivers/acpi/tables/tbxfroot.c b/drivers/acpi/tables/tbxfroot.c
index cf8fa514189f..9ecb4b6c1e7d 100644
--- a/drivers/acpi/tables/tbxfroot.c
+++ b/drivers/acpi/tables/tbxfroot.c
@@ -100,7 +100,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
/*******************************************************************************
*
- * FUNCTION: acpi_tb_find_rsdp
+ * FUNCTION: acpi_find_root_pointer
*
* PARAMETERS: table_address - Where the table pointer is returned
*
@@ -219,8 +219,6 @@ acpi_status acpi_find_root_pointer(acpi_native_uint * table_address)
return_ACPI_STATUS(AE_NOT_FOUND);
}
-ACPI_EXPORT_SYMBOL(acpi_find_root_pointer)
-
/*******************************************************************************
*
* FUNCTION: acpi_tb_scan_memory_for_rsdp
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 5f79b4451212..8d4b79b4f933 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -43,7 +43,7 @@
#include <linux/seq_file.h>
#include <linux/reboot.h>
#include <asm/uaccess.h>
-
+#include <linux/thermal.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
@@ -65,9 +65,6 @@
#define ACPI_THERMAL_MAX_ACTIVE 10
#define ACPI_THERMAL_MAX_LIMIT_STR_LEN 65
-#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732>=0) ? ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
-#define CELSIUS_TO_KELVIN(t) ((t+273)*10)
-
#define _COMPONENT ACPI_THERMAL_COMPONENT
ACPI_MODULE_NAME("thermal");
@@ -195,6 +192,8 @@ struct acpi_thermal {
struct acpi_thermal_trips trips;
struct acpi_handle_list devices;
struct timer_list timer;
+ struct thermal_zone_device *thermal_zone;
+ int tz_enabled;
struct mutex lock;
};
@@ -321,178 +320,226 @@ static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
return 0;
}
-static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
-{
- acpi_status status = AE_OK;
- int i = 0;
+#define ACPI_TRIPS_CRITICAL 0x01
+#define ACPI_TRIPS_HOT 0x02
+#define ACPI_TRIPS_PASSIVE 0x04
+#define ACPI_TRIPS_ACTIVE 0x08
+#define ACPI_TRIPS_DEVICES 0x10
+#define ACPI_TRIPS_REFRESH_THRESHOLDS (ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE)
+#define ACPI_TRIPS_REFRESH_DEVICES ACPI_TRIPS_DEVICES
- if (!tz)
- return -EINVAL;
+#define ACPI_TRIPS_INIT (ACPI_TRIPS_CRITICAL | ACPI_TRIPS_HOT | \
+ ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE | \
+ ACPI_TRIPS_DEVICES)
- /* Critical Shutdown (required) */
-
- status = acpi_evaluate_integer(tz->device->handle, "_CRT", NULL,
- &tz->trips.critical.temperature);
- if (ACPI_FAILURE(status)) {
- tz->trips.critical.flags.valid = 0;
- ACPI_EXCEPTION((AE_INFO, status, "No critical threshold"));
- return -ENODEV;
- } else {
- tz->trips.critical.flags.valid = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found critical threshold [%lu]\n",
- tz->trips.critical.temperature));
- }
+/*
+ * This exception is reported in two cases:
+ * 1. An invalid trip point becomes valid, or a valid trip point becomes
+ * invalid, when the AML code is re-evaluated.
+ * 2. TODO: Devices listed in _PSL, _ALx, _TZD may change.
+ * We need to re-bind the cooling devices of a thermal zone when this occurs.
+ */
+#define ACPI_THERMAL_TRIPS_EXCEPTION(flags, str) \
+do { \
+ if (flags != ACPI_TRIPS_INIT) \
+ ACPI_EXCEPTION((AE_INFO, AE_ERROR, \
+ "ACPI thermal trip point %s changed\n" \
+ "Please send acpidump to linux-acpi@vger.kernel.org\n", str)); \
+} while (0)
+
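The flag bits above let acpi_thermal_trips_update() re-evaluate only the trip data named by a notify event, while driver init passes the full mask. A stand-in sketch of that flag-driven selection (the handler body is illustrative, not the driver code):

/* Sketch only; flag values mirror the #defines above. */
#include <stdio.h>

#define ACPI_TRIPS_CRITICAL 0x01
#define ACPI_TRIPS_HOT      0x02
#define ACPI_TRIPS_PASSIVE  0x04
#define ACPI_TRIPS_ACTIVE   0x08
#define ACPI_TRIPS_DEVICES  0x10
#define ACPI_TRIPS_INIT     (ACPI_TRIPS_CRITICAL | ACPI_TRIPS_HOT | \
                             ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE | \
                             ACPI_TRIPS_DEVICES)

static void trips_update(int flag)
{
        if (flag & ACPI_TRIPS_CRITICAL)
                printf("re-evaluate _CRT\n");
        if (flag & ACPI_TRIPS_HOT)
                printf("re-evaluate _HOT\n");
        if (flag & ACPI_TRIPS_PASSIVE)
                printf("re-evaluate _PSV (+_TC1/_TC2/_TSP on init)\n");
        if (flag & ACPI_TRIPS_ACTIVE)
                printf("re-evaluate _ACx\n");
        if (flag & ACPI_TRIPS_DEVICES)
                printf("re-evaluate _PSL/_ALx/_TZD device lists\n");
}

int main(void)
{
        trips_update(ACPI_TRIPS_INIT);                        /* driver load */
        trips_update(ACPI_TRIPS_PASSIVE | ACPI_TRIPS_ACTIVE); /* threshold notify */
        return 0;
}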
+static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
+{
+ acpi_status status = AE_OK;
+ struct acpi_handle_list devices;
+ int valid = 0;
+ int i;
- if (tz->trips.critical.flags.valid == 1) {
- if (crt == -1) {
+ /* Critical Shutdown (required) */
+ if (flag & ACPI_TRIPS_CRITICAL) {
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_CRT", NULL, &tz->trips.critical.temperature);
+ if (ACPI_FAILURE(status)) {
tz->trips.critical.flags.valid = 0;
- } else if (crt > 0) {
- unsigned long crt_k = CELSIUS_TO_KELVIN(crt);
-
- /*
- * Allow override to lower critical threshold
- */
- if (crt_k < tz->trips.critical.temperature)
- tz->trips.critical.temperature = crt_k;
+ ACPI_EXCEPTION((AE_INFO, status,
+ "No critical threshold"));
+ return -ENODEV;
+ } else {
+ tz->trips.critical.flags.valid = 1;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found critical threshold [%lu]\n",
+ tz->trips.critical.temperature));
+ }
+ if (tz->trips.critical.flags.valid == 1) {
+ if (crt == -1) {
+ tz->trips.critical.flags.valid = 0;
+ } else if (crt > 0) {
+ unsigned long crt_k = CELSIUS_TO_KELVIN(crt);
+ /*
+ * Allow override to lower critical threshold
+ */
+ if (crt_k < tz->trips.critical.temperature)
+ tz->trips.critical.temperature = crt_k;
+ }
}
}
/* Critical Sleep (optional) */
-
- status =
- acpi_evaluate_integer(tz->device->handle, "_HOT", NULL,
- &tz->trips.hot.temperature);
- if (ACPI_FAILURE(status)) {
- tz->trips.hot.flags.valid = 0;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No hot threshold\n"));
- } else {
- tz->trips.hot.flags.valid = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found hot threshold [%lu]\n",
- tz->trips.hot.temperature));
- }
-
- /* Passive: Processors (optional) */
-
- if (psv == -1) {
- status = AE_SUPPORT;
- } else if (psv > 0) {
- tz->trips.passive.temperature = CELSIUS_TO_KELVIN(psv);
- status = AE_OK;
- } else {
+ if (flag & ACPI_TRIPS_HOT) {
status = acpi_evaluate_integer(tz->device->handle,
- "_PSV", NULL, &tz->trips.passive.temperature);
+ "_HOT", NULL, &tz->trips.hot.temperature);
+ if (ACPI_FAILURE(status)) {
+ tz->trips.hot.flags.valid = 0;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "No hot threshold\n"));
+ } else {
+ tz->trips.hot.flags.valid = 1;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Found hot threshold [%lu]\n",
+ tz->trips.hot.temperature));
+ }
}
- if (ACPI_FAILURE(status)) {
- tz->trips.passive.flags.valid = 0;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No passive threshold\n"));
- } else {
- tz->trips.passive.flags.valid = 1;
-
- status =
- acpi_evaluate_integer(tz->device->handle, "_TC1", NULL,
- &tz->trips.passive.tc1);
- if (ACPI_FAILURE(status))
- tz->trips.passive.flags.valid = 0;
-
- status =
- acpi_evaluate_integer(tz->device->handle, "_TC2", NULL,
- &tz->trips.passive.tc2);
- if (ACPI_FAILURE(status))
- tz->trips.passive.flags.valid = 0;
+ /* Passive (optional) */
+ if (flag & ACPI_TRIPS_PASSIVE) {
+ valid = tz->trips.passive.flags.valid;
+ if (psv == -1) {
+ status = AE_SUPPORT;
+ } else if (psv > 0) {
+ tz->trips.passive.temperature = CELSIUS_TO_KELVIN(psv);
+ status = AE_OK;
+ } else {
+ status = acpi_evaluate_integer(tz->device->handle,
+ "_PSV", NULL, &tz->trips.passive.temperature);
+ }
- status =
- acpi_evaluate_integer(tz->device->handle, "_TSP", NULL,
- &tz->trips.passive.tsp);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
-
- status =
- acpi_evaluate_reference(tz->device->handle, "_PSL", NULL,
- &tz->trips.passive.devices);
+ else {
+ tz->trips.passive.flags.valid = 1;
+ if (flag == ACPI_TRIPS_INIT) {
+ status = acpi_evaluate_integer(
+ tz->device->handle, "_TC1",
+ NULL, &tz->trips.passive.tc1);
+ if (ACPI_FAILURE(status))
+ tz->trips.passive.flags.valid = 0;
+ status = acpi_evaluate_integer(
+ tz->device->handle, "_TC2",
+ NULL, &tz->trips.passive.tc2);
+ if (ACPI_FAILURE(status))
+ tz->trips.passive.flags.valid = 0;
+ status = acpi_evaluate_integer(
+ tz->device->handle, "_TSP",
+ NULL, &tz->trips.passive.tsp);
+ if (ACPI_FAILURE(status))
+ tz->trips.passive.flags.valid = 0;
+ }
+ }
+ }
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.passive.flags.valid) {
+ memset(&devices, 0, sizeof(struct acpi_handle_list));
+ status = acpi_evaluate_reference(tz->device->handle, "_PSL",
+ NULL, &devices);
if (ACPI_FAILURE(status))
tz->trips.passive.flags.valid = 0;
-
- if (!tz->trips.passive.flags.valid)
- printk(KERN_WARNING PREFIX "Invalid passive threshold\n");
else
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found passive threshold [%lu]\n",
- tz->trips.passive.temperature));
- }
+ tz->trips.passive.flags.valid = 1;
- /* Active: Fans, etc. (optional) */
+ if (memcmp(&tz->trips.passive.devices, &devices,
+ sizeof(struct acpi_handle_list))) {
+ memcpy(&tz->trips.passive.devices, &devices,
+ sizeof(struct acpi_handle_list));
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ }
+ }
+ if ((flag & ACPI_TRIPS_PASSIVE) || (flag & ACPI_TRIPS_DEVICES)) {
+ if (valid != tz->trips.passive.flags.valid)
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state");
+ }
+ /* Active (optional) */
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
-
char name[5] = { '_', 'A', 'C', ('0' + i), '\0' };
+ valid = tz->trips.active[i].flags.valid;
if (act == -1)
- break; /* disable all active trip points */
-
- status = acpi_evaluate_integer(tz->device->handle,
- name, NULL, &tz->trips.active[i].temperature);
-
- if (ACPI_FAILURE(status)) {
- if (i == 0) /* no active trip points */
+ break; /* disable all active trip points */
+
+ if (flag & ACPI_TRIPS_ACTIVE) {
+ status = acpi_evaluate_integer(tz->device->handle,
+ name, NULL, &tz->trips.active[i].temperature);
+ if (ACPI_FAILURE(status)) {
+ tz->trips.active[i].flags.valid = 0;
+ if (i == 0)
+ break;
+ if (act <= 0)
+ break;
+ if (i == 1)
+ tz->trips.active[0].temperature =
+ CELSIUS_TO_KELVIN(act);
+ else
+ /*
+ * Don't allow override higher than
+ * the next higher trip point
+ */
+ tz->trips.active[i - 1].temperature =
+ (tz->trips.active[i - 2].temperature <
+ CELSIUS_TO_KELVIN(act) ?
+ tz->trips.active[i - 2].temperature :
+ CELSIUS_TO_KELVIN(act));
break;
- if (act <= 0) /* no override requested */
- break;
- if (i == 1) { /* 1 trip point */
- tz->trips.active[0].temperature =
- CELSIUS_TO_KELVIN(act);
- } else { /* multiple trips */
- /*
- * Don't allow override higher than
- * the next higher trip point
- */
- tz->trips.active[i - 1].temperature =
- (tz->trips.active[i - 2].temperature <
- CELSIUS_TO_KELVIN(act) ?
- tz->trips.active[i - 2].temperature :
- CELSIUS_TO_KELVIN(act));
- }
- break;
+ } else
+ tz->trips.active[i].flags.valid = 1;
}
name[2] = 'L';
- status =
- acpi_evaluate_reference(tz->device->handle, name, NULL,
- &tz->trips.active[i].devices);
- if (ACPI_SUCCESS(status)) {
- tz->trips.active[i].flags.valid = 1;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Found active threshold [%d]:[%lu]\n",
- i, tz->trips.active[i].temperature));
- } else
- ACPI_EXCEPTION((AE_INFO, status,
- "Invalid active threshold [%d]", i));
+ if ((flag & ACPI_TRIPS_DEVICES) && tz->trips.active[i].flags.valid) {
+ memset(&devices, 0, sizeof(struct acpi_handle_list));
+ status = acpi_evaluate_reference(tz->device->handle,
+ name, NULL, &devices);
+ if (ACPI_FAILURE(status))
+ tz->trips.active[i].flags.valid = 0;
+ else
+ tz->trips.active[i].flags.valid = 1;
+
+ if (memcmp(&tz->trips.active[i].devices, &devices,
+ sizeof(struct acpi_handle_list))) {
+ memcpy(&tz->trips.active[i].devices, &devices,
+ sizeof(struct acpi_handle_list));
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ }
+ }
+ if ((flag & ACPI_TRIPS_ACTIVE) || (flag & ACPI_TRIPS_DEVICES))
+ if (valid != tz->trips.active[i].flags.valid)
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "state");
+
+ if (!tz->trips.active[i].flags.valid)
+ break;
+ }
+
+ if (flag & ACPI_TRIPS_DEVICES) {
+ memset(&devices, 0, sizeof(struct acpi_handle_list));
+ status = acpi_evaluate_reference(tz->device->handle, "_TZD",
+ NULL, &devices);
+ if (memcmp(&tz->devices, &devices,
+ sizeof(struct acpi_handle_list))) {
+ memcpy(&tz->devices, &devices,
+ sizeof(struct acpi_handle_list));
+ ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
+ }
}
return 0;
}
-static int acpi_thermal_get_devices(struct acpi_thermal *tz)
+static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
{
- acpi_status status = AE_OK;
-
-
- if (!tz)
- return -EINVAL;
-
- status =
- acpi_evaluate_reference(tz->device->handle, "_TZD", NULL, &tz->devices);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- return 0;
+ return acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT);
}
static int acpi_thermal_critical(struct acpi_thermal *tz)
{
- if (!tz || !tz->trips.critical.flags.valid || nocrt)
+ if (!tz || !tz->trips.critical.flags.valid)
return -EINVAL;
if (tz->temperature >= tz->trips.critical.temperature) {
@@ -501,9 +548,6 @@ static int acpi_thermal_critical(struct acpi_thermal *tz)
} else if (tz->trips.critical.flags.enabled)
tz->trips.critical.flags.enabled = 0;
- printk(KERN_EMERG
- "Critical temperature reached (%ld C), shutting down.\n",
- KELVIN_TO_CELSIUS(tz->temperature));
acpi_bus_generate_proc_event(tz->device, ACPI_THERMAL_NOTIFY_CRITICAL,
tz->trips.critical.flags.enabled);
acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
@@ -511,14 +555,20 @@ static int acpi_thermal_critical(struct acpi_thermal *tz)
ACPI_THERMAL_NOTIFY_CRITICAL,
tz->trips.critical.flags.enabled);
- orderly_poweroff(true);
+ /* take no action if nocrt is set */
+ if(!nocrt) {
+ printk(KERN_EMERG
+ "Critical temperature reached (%ld C), shutting down.\n",
+ KELVIN_TO_CELSIUS(tz->temperature));
+ orderly_poweroff(true);
+ }
return 0;
}
static int acpi_thermal_hot(struct acpi_thermal *tz)
{
- if (!tz || !tz->trips.hot.flags.valid || nocrt)
+ if (!tz || !tz->trips.hot.flags.valid)
return -EINVAL;
if (tz->temperature >= tz->trips.hot.temperature) {
@@ -534,7 +584,7 @@ static int acpi_thermal_hot(struct acpi_thermal *tz)
ACPI_THERMAL_NOTIFY_HOT,
tz->trips.hot.flags.enabled);
- /* TBD: Call user-mode "sleep(S4)" function */
+ /* TBD: Call user-mode "sleep(S4)" function if nocrt is cleared */
return 0;
}
@@ -732,6 +782,9 @@ static void acpi_thermal_check(void *data)
if (result)
goto unlock;
+ if (!tz->tz_enabled)
+ goto unlock;
+
memset(&tz->state, 0, sizeof(tz->state));
/*
@@ -825,6 +878,290 @@ static void acpi_thermal_check(void *data)
mutex_unlock(&tz->lock);
}
+/* sys I/F for generic thermal sysfs support */
+static int thermal_get_temp(struct thermal_zone_device *thermal, char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+
+ if (!tz)
+ return -EINVAL;
+
+ return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS(tz->temperature));
+}
+
+static const char enabled[] = "kernel";
+static const char disabled[] = "user";
+static int thermal_get_mode(struct thermal_zone_device *thermal,
+ char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+
+ if (!tz)
+ return -EINVAL;
+
+ return sprintf(buf, "%s\n", tz->tz_enabled ?
+ enabled : disabled);
+}
+
+static int thermal_set_mode(struct thermal_zone_device *thermal,
+ const char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+ int enable;
+
+ if (!tz)
+ return -EINVAL;
+
+ /*
+ * enable/disable thermal management from ACPI thermal driver
+ */
+ if (!strncmp(buf, enabled, sizeof enabled - 1))
+ enable = 1;
+ else if (!strncmp(buf, disabled, sizeof disabled - 1))
+ enable = 0;
+ else
+ return -EINVAL;
+
+ if (enable != tz->tz_enabled) {
+ tz->tz_enabled = enable;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "%s ACPI thermal control\n",
+ tz->tz_enabled ? enabled : disabled));
+ acpi_thermal_check(tz);
+ }
+ return 0;
+}
+
+static int thermal_get_trip_type(struct thermal_zone_device *thermal,
+ int trip, char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+ int i;
+
+ if (!tz || trip < 0)
+ return -EINVAL;
+
+ if (tz->trips.critical.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "critical\n");
+ trip--;
+ }
+
+ if (tz->trips.hot.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "hot\n");
+ trip--;
+ }
+
+ if (tz->trips.passive.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "passive\n");
+ trip--;
+ }
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
+ tz->trips.active[i].flags.valid; i++) {
+ if (!trip)
+ return sprintf(buf, "active%d\n", i);
+ trip--;
+ }
+
+ return -EINVAL;
+}
+
+static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
+ int trip, char *buf)
+{
+ struct acpi_thermal *tz = thermal->devdata;
+ int i;
+
+ if (!tz || trip < 0)
+ return -EINVAL;
+
+ if (tz->trips.critical.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS(
+ tz->trips.critical.temperature));
+ trip--;
+ }
+
+ if (tz->trips.hot.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS(
+ tz->trips.hot.temperature));
+ trip--;
+ }
+
+ if (tz->trips.passive.flags.valid) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS(
+ tz->trips.passive.temperature));
+ trip--;
+ }
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
+ tz->trips.active[i].flags.valid; i++) {
+ if (!trip)
+ return sprintf(buf, "%ld\n", KELVIN_TO_CELSIUS(
+ tz->trips.active[i].temperature));
+ trip--;
+ }
+
+ return -EINVAL;
+}
+
+typedef int (*cb)(struct thermal_zone_device *, int,
+ struct thermal_cooling_device *);
+static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev,
+ cb action)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_thermal *tz = thermal->devdata;
+ struct acpi_device *dev;
+ acpi_status status;
+ acpi_handle handle;
+ int i;
+ int j;
+ int trip = -1;
+ int result = 0;
+
+ if (tz->trips.critical.flags.valid)
+ trip++;
+
+ if (tz->trips.hot.flags.valid)
+ trip++;
+
+ if (tz->trips.passive.flags.valid) {
+ trip++;
+ for (i = 0; i < tz->trips.passive.devices.count;
+ i++) {
+ handle = tz->trips.passive.devices.handles[i];
+ status = acpi_bus_get_device(handle, &dev);
+ if (ACPI_SUCCESS(status) && (dev == device)) {
+ result = action(thermal, trip, cdev);
+ if (result)
+ goto failed;
+ }
+ }
+ }
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
+ if (!tz->trips.active[i].flags.valid)
+ break;
+ trip++;
+ for (j = 0;
+ j < tz->trips.active[i].devices.count;
+ j++) {
+ handle = tz->trips.active[i].devices.handles[j];
+ status = acpi_bus_get_device(handle, &dev);
+ if (ACPI_SUCCESS(status) && (dev == device)) {
+ result = action(thermal, trip, cdev);
+ if (result)
+ goto failed;
+ }
+ }
+ }
+
+ for (i = 0; i < tz->devices.count; i++) {
+ handle = tz->devices.handles[i];
+ status = acpi_bus_get_device(handle, &dev);
+ if (ACPI_SUCCESS(status) && (dev == device)) {
+ result = action(thermal, -1, cdev);
+ if (result)
+ goto failed;
+ }
+ }
+
+failed:
+ return result;
+}
+
+static int
+acpi_thermal_bind_cooling_device(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ return acpi_thermal_cooling_device_cb(thermal, cdev,
+ thermal_zone_bind_cooling_device);
+}
+
+static int
+acpi_thermal_unbind_cooling_device(struct thermal_zone_device *thermal,
+ struct thermal_cooling_device *cdev)
+{
+ return acpi_thermal_cooling_device_cb(thermal, cdev,
+ thermal_zone_unbind_cooling_device);
+}
+
+static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
+ .bind = acpi_thermal_bind_cooling_device,
+ .unbind = acpi_thermal_unbind_cooling_device,
+ .get_temp = thermal_get_temp,
+ .get_mode = thermal_get_mode,
+ .set_mode = thermal_set_mode,
+ .get_trip_type = thermal_get_trip_type,
+ .get_trip_temp = thermal_get_trip_temp,
+};
+
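The get_trip_type/get_trip_temp callbacks above enumerate trips in a fixed order: critical (if valid), then hot, then passive, then each valid active point. A small sketch of that indexing; the validity flags are example data.

/* Sketch of the trip index ordering, not driver code. */
#include <stdio.h>

int main(void)
{
        const int crit_valid = 1, hot_valid = 0, passive_valid = 1;
        const int active_valid = 2;   /* number of valid _ACx points */
        int trip = 0, i;

        if (crit_valid)
                printf("trip %d: critical\n", trip++);
        if (hot_valid)
                printf("trip %d: hot\n", trip++);
        if (passive_valid)
                printf("trip %d: passive\n", trip++);
        for (i = 0; i < active_valid; i++)
                printf("trip %d: active%d\n", trip++, i);
        return 0;
}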
+static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
+{
+ int trips = 0;
+ int result;
+ acpi_status status;
+ int i;
+
+ if (tz->trips.critical.flags.valid)
+ trips++;
+
+ if (tz->trips.hot.flags.valid)
+ trips++;
+
+ if (tz->trips.passive.flags.valid)
+ trips++;
+
+ for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
+ tz->trips.active[i].flags.valid; i++, trips++);
+ tz->thermal_zone = thermal_zone_device_register("ACPI thermal zone",
+ trips, tz, &acpi_thermal_zone_ops);
+ if (!tz->thermal_zone)
+ return -ENODEV;
+
+ result = sysfs_create_link(&tz->device->dev.kobj,
+ &tz->thermal_zone->device.kobj, "thermal_zone");
+ if (result)
+ return result;
+
+ result = sysfs_create_link(&tz->thermal_zone->device.kobj,
+ &tz->device->dev.kobj, "device");
+ if (result)
+ return result;
+
+ status = acpi_attach_data(tz->device->handle,
+ acpi_bus_private_data_handler,
+ tz->thermal_zone);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error attaching device data\n"));
+ return -ENODEV;
+ }
+
+ tz->tz_enabled = 1;
+
+ printk(KERN_INFO PREFIX "%s is registered as thermal_zone%d\n",
+ tz->device->dev.bus_id, tz->thermal_zone->id);
+ return 0;
+}
+
+static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
+{
+ sysfs_remove_link(&tz->device->dev.kobj, "thermal_zone");
+ sysfs_remove_link(&tz->thermal_zone->device.kobj, "device");
+ thermal_zone_device_unregister(tz->thermal_zone);
+ tz->thermal_zone = NULL;
+ acpi_detach_data(tz->device->handle, acpi_bus_private_data_handler);
+}
+
+
/* --------------------------------------------------------------------------
FS Interface (/proc)
-------------------------------------------------------------------------- */
@@ -1181,15 +1518,15 @@ static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data)
acpi_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
- acpi_thermal_get_trip_points(tz);
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
acpi_thermal_check(tz);
acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
device->dev.bus_id, event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
- if (tz->flags.devices)
- acpi_thermal_get_devices(tz);
+ acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
+ acpi_thermal_check(tz);
acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
device->dev.bus_id, event, 0);
@@ -1232,11 +1569,6 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
else
acpi_thermal_get_polling_frequency(tz);
- /* Get devices in this thermal zone [_TZD] (optional) */
- result = acpi_thermal_get_devices(tz);
- if (!result)
- tz->flags.devices = 1;
-
return 0;
}
@@ -1260,13 +1592,19 @@ static int acpi_thermal_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
acpi_driver_data(device) = tz;
mutex_init(&tz->lock);
+
+
result = acpi_thermal_get_info(tz);
if (result)
- goto end;
+ goto free_memory;
+
+ result = acpi_thermal_register_thermal_zone(tz);
+ if (result)
+ goto free_memory;
result = acpi_thermal_add_fs(device);
if (result)
- goto end;
+ goto unregister_thermal_zone;
init_timer(&tz->timer);
@@ -1277,19 +1615,21 @@ static int acpi_thermal_add(struct acpi_device *device)
acpi_thermal_notify, tz);
if (ACPI_FAILURE(status)) {
result = -ENODEV;
- goto end;
+ goto remove_fs;
}
printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
acpi_device_name(device), acpi_device_bid(device),
KELVIN_TO_CELSIUS(tz->temperature));
+ goto end;
- end:
- if (result) {
- acpi_thermal_remove_fs(device);
- kfree(tz);
- }
-
+remove_fs:
+ acpi_thermal_remove_fs(device);
+unregister_thermal_zone:
+ thermal_zone_device_unregister(tz->thermal_zone);
+free_memory:
+ kfree(tz);
+end:
return result;
}
@@ -1329,6 +1669,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
}
acpi_thermal_remove_fs(device);
+ acpi_thermal_unregister_thermal_zone(tz);
mutex_destroy(&tz->lock);
kfree(tz);
return 0;
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c
index 93ea8290b4f7..630c9a2c5b7b 100644
--- a/drivers/acpi/utilities/utglobal.c
+++ b/drivers/acpi/utilities/utglobal.c
@@ -671,7 +671,6 @@ void acpi_ut_init_globals(void)
/* GPE support */
- acpi_gpe_count = 0;
acpi_gbl_gpe_xrupt_list_head = NULL;
acpi_gbl_gpe_fadt_blocks[0] = NULL;
acpi_gbl_gpe_fadt_blocks[1] = NULL;
@@ -735,4 +734,3 @@ void acpi_ut_init_globals(void)
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
- ACPI_EXPORT_SYMBOL(acpi_gpe_count)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index bd77e81e81c1..7f714fa2a454 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -34,6 +34,7 @@
#include <linux/seq_file.h>
#include <linux/input.h>
#include <linux/backlight.h>
+#include <linux/thermal.h>
#include <linux/video_output.h>
#include <asm/uaccess.h>
@@ -72,8 +73,12 @@ MODULE_AUTHOR("Bruno Ducrot");
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
+static int brightness_switch_enabled = 1;
+module_param(brightness_switch_enabled, bool, 0644);
+
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device, int type);
+static int acpi_video_resume(struct acpi_device *device);
static const struct acpi_device_id video_device_ids[] = {
{ACPI_VIDEO_HID, 0},
@@ -88,6 +93,7 @@ static struct acpi_driver acpi_video_bus = {
.ops = {
.add = acpi_video_bus_add,
.remove = acpi_video_bus_remove,
+ .resume = acpi_video_resume,
},
};
@@ -179,6 +185,7 @@ struct acpi_video_device {
struct acpi_device *dev;
struct acpi_video_device_brightness *brightness;
struct backlight_device *backlight;
+ struct thermal_cooling_device *cdev;
struct output_device *output_dev;
};
@@ -273,7 +280,6 @@ static void acpi_video_device_rebind(struct acpi_video_bus *video);
static void acpi_video_device_bind(struct acpi_video_bus *video,
struct acpi_video_device *device);
static int acpi_video_device_enumerate(struct acpi_video_bus *video);
-static int acpi_video_switch_output(struct acpi_video_bus *video, int event);
static int acpi_video_device_lcd_set_level(struct acpi_video_device *device,
int level);
static int acpi_video_device_lcd_get_level_current(
@@ -292,18 +298,26 @@ static int acpi_video_device_set_state(struct acpi_video_device *device, int sta
static int acpi_video_get_brightness(struct backlight_device *bd)
{
unsigned long cur_level;
+ int i;
struct acpi_video_device *vd =
(struct acpi_video_device *)bl_get_data(bd);
acpi_video_device_lcd_get_level_current(vd, &cur_level);
- return (int) cur_level;
+ for (i = 2; i < vd->brightness->count; i++) {
+ if (vd->brightness->levels[i] == cur_level)
+ /* The first two entries are special - see page 575
+ of the ACPI spec 3.0 */
+ return i-2;
+ }
+ return 0;
}
static int acpi_video_set_brightness(struct backlight_device *bd)
{
- int request_level = bd->props.brightness;
+ int request_level = bd->props.brightness+2;
struct acpi_video_device *vd =
(struct acpi_video_device *)bl_get_data(bd);
- acpi_video_device_lcd_set_level(vd, request_level);
+ acpi_video_device_lcd_set_level(vd,
+ vd->brightness->levels[request_level]);
return 0;
}
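The backlight mapping above relies on the _BCL level table, whose first two entries are the AC and battery defaults, so user-visible brightness i selects levels[i + 2] and max_brightness becomes count - 3. A sketch with an invented level table:

/* Sketch of the _BCL index mapping; the table is made-up example data. */
#include <stdio.h>

int main(void)
{
        const unsigned long levels[] = { 80, 40, 20, 40, 60, 80, 100 };
        const int count = sizeof(levels) / sizeof(levels[0]);
        int i;

        printf("max_brightness = %d\n", count - 3);
        for (i = 2; i < count; i++)
                printf("brightness %d -> _BCM(%lu)\n", i - 2, levels[i]);
        return 0;
}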
@@ -334,6 +348,54 @@ static struct output_properties acpi_output_properties = {
.set_state = acpi_video_output_set,
.get_status = acpi_video_output_get,
};
+
+
+/* thermal cooling device callbacks */
+static int video_get_max_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_video_device *video = acpi_driver_data(device);
+
+ return sprintf(buf, "%d\n", video->brightness->count - 3);
+}
+
+static int video_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_video_device *video = acpi_driver_data(device);
+ unsigned long level;
+ int state;
+
+ acpi_video_device_lcd_get_level_current(video, &level);
+ for (state = 2; state < video->brightness->count; state++)
+ if (level == video->brightness->levels[state])
+ return sprintf(buf, "%d\n",
+ video->brightness->count - state - 1);
+
+ return -EINVAL;
+}
+
+static int
+video_set_cur_state(struct thermal_cooling_device *cdev, unsigned int state)
+{
+ struct acpi_device *device = cdev->devdata;
+ struct acpi_video_device *video = acpi_driver_data(device);
+ int level;
+
+ if (state >= video->brightness->count - 2)
+ return -EINVAL;
+
+ state = video->brightness->count - state;
+ level = video->brightness->levels[state - 1];
+ return acpi_video_device_lcd_set_level(video, level);
+}
+
+static struct thermal_cooling_device_ops video_cooling_ops = {
+ .get_max_state = video_get_max_state,
+ .get_cur_state = video_get_cur_state,
+ .set_cur_state = video_set_cur_state,
+};
+
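The LCD cooling device above inverts the brightness scale: cooling state 0 is the brightest usable level and higher states select dimmer entries, ending up at levels[count - state - 1]. A sketch with the same invented level table as before:

/* Sketch of the cooling-state to _BCL mapping, example data only. */
#include <stdio.h>

int main(void)
{
        const unsigned long levels[] = { 80, 40, 20, 40, 60, 80, 100 };
        const int count = sizeof(levels) / sizeof(levels[0]);
        int state;

        printf("max_state = %d\n", count - 3);
        for (state = 0; state < count - 2; state++)
                printf("cooling state %d -> _BCM(%lu)\n",
                       state, levels[count - state - 1]);
        return 0;
}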
/* --------------------------------------------------------------------------
Video Management
-------------------------------------------------------------------------- */
@@ -652,7 +714,7 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
kfree(obj);
if (device->cap._BCL && device->cap._BCM && device->cap._BQC && max_level > 0){
- unsigned long tmp;
+ int result;
static int count = 0;
char *name;
name = kzalloc(MAX_NAME_LEN, GFP_KERNEL);
@@ -660,14 +722,30 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
return;
sprintf(name, "acpi_video%d", count++);
- acpi_video_device_lcd_get_level_current(device, &tmp);
device->backlight = backlight_device_register(name,
NULL, device, &acpi_backlight_ops);
- device->backlight->props.max_brightness = max_level;
- device->backlight->props.brightness = (int)tmp;
+ device->backlight->props.max_brightness = device->brightness->count-3;
+ device->backlight->props.brightness = acpi_video_get_brightness(device->backlight);
backlight_update_status(device->backlight);
-
kfree(name);
+
+ device->cdev = thermal_cooling_device_register("LCD",
+ device->dev, &video_cooling_ops);
+ if (device->cdev) {
+ printk(KERN_INFO PREFIX
+ "%s is registered as cooling_device%d\n",
+ device->dev->dev.bus_id, device->cdev->id);
+ result = sysfs_create_link(&device->dev->dev.kobj,
+ &device->cdev->device.kobj,
+ "thermal_cooling");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ result = sysfs_create_link(&device->cdev->device.kobj,
+ &device->dev->dev.kobj,
+ "device");
+ if (result)
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ }
}
if (device->cap._DCS && device->cap._DSS){
static int count = 0;
@@ -726,11 +804,40 @@ static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
static int acpi_video_bus_check(struct acpi_video_bus *video)
{
acpi_status status = -ENOENT;
-
+ long device_id;
+ struct device *dev;
+ struct acpi_device *device;
if (!video)
return -EINVAL;
+ device = video->device;
+
+ status =
+ acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
+
+ if (!ACPI_SUCCESS(status))
+ return -ENODEV;
+
+ /* We need to attempt to determine whether the _ADR refers to a
+ PCI device or not. There's no terribly good way to do this,
+ so the best we can hope for is to assume that there'll never
+ be a video device in the host bridge */
+ if (device_id >= 0x10000) {
+ /* It looks like a PCI device. Does it exist? */
+ dev = acpi_get_physical_device(device->handle);
+ } else {
+ /* It doesn't look like a PCI device. Does its parent
+ exist? */
+ acpi_handle phandle;
+ if (acpi_get_parent(device->handle, &phandle))
+ return -ENODEV;
+ dev = acpi_get_physical_device(phandle);
+ }
+ if (!dev)
+ return -ENODEV;
+ put_device(dev);
+
/* Since there is no HID, CID and so on for VGA driver, we have
* to check well known required nodes.
*/
@@ -1256,8 +1363,37 @@ acpi_video_bus_write_DOS(struct file *file,
static int acpi_video_bus_add_fs(struct acpi_device *device)
{
+ long device_id;
+ int status;
struct proc_dir_entry *entry = NULL;
struct acpi_video_bus *video;
+ struct device *dev;
+
+ status =
+ acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
+
+ if (!ACPI_SUCCESS(status))
+ return -ENODEV;
+
+ /* We need to attempt to determine whether the _ADR refers to a
+ PCI device or not. There's no terribly good way to do this,
+ so the best we can hope for is to assume that there'll never
+ be a video device in the host bridge */
+ if (device_id >= 0x10000) {
+ /* It looks like a PCI device. Does it exist? */
+ dev = acpi_get_physical_device(device->handle);
+ } else {
+ /* It doesn't look like a PCI device. Does its parent
+ exist? */
+ acpi_handle phandle;
+ if (acpi_get_parent(device->handle, &phandle))
+ return -ENODEV;
+ dev = acpi_get_physical_device(phandle);
+ }
+ if (!dev)
+ return -ENODEV;
+ put_device(dev);
+
video = acpi_driver_data(device);
@@ -1580,64 +1716,6 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
return status;
}
-/*
- * Arg:
- * video : video bus device
- * event : notify event
- *
- * Return:
- * < 0 : error
- *
- * 1. Find out the current active output device.
- * 2. Identify the next output device to switch to.
- * 3. call _DSS to do actual switch.
- */
-
-static int acpi_video_switch_output(struct acpi_video_bus *video, int event)
-{
- struct list_head *node;
- struct acpi_video_device *dev = NULL;
- struct acpi_video_device *dev_next = NULL;
- struct acpi_video_device *dev_prev = NULL;
- unsigned long state;
- int status = 0;
-
- mutex_lock(&video->device_list_lock);
-
- list_for_each(node, &video->video_device_list) {
- dev = container_of(node, struct acpi_video_device, entry);
- status = acpi_video_device_get_state(dev, &state);
- if (state & 0x2) {
- dev_next = container_of(node->next,
- struct acpi_video_device, entry);
- dev_prev = container_of(node->prev,
- struct acpi_video_device, entry);
- goto out;
- }
- }
-
- dev_next = container_of(node->next, struct acpi_video_device, entry);
- dev_prev = container_of(node->prev, struct acpi_video_device, entry);
-
- out:
- mutex_unlock(&video->device_list_lock);
-
- switch (event) {
- case ACPI_VIDEO_NOTIFY_CYCLE:
- case ACPI_VIDEO_NOTIFY_NEXT_OUTPUT:
- acpi_video_device_set_state(dev, 0);
- acpi_video_device_set_state(dev_next, 0x80000001);
- break;
- case ACPI_VIDEO_NOTIFY_PREV_OUTPUT:
- acpi_video_device_set_state(dev, 0);
- acpi_video_device_set_state(dev_prev, 0x80000001);
- default:
- break;
- }
-
- return status;
-}
-
static int
acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event)
@@ -1729,6 +1807,14 @@ static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
ACPI_DEVICE_NOTIFY,
acpi_video_device_notify);
backlight_device_unregister(device->backlight);
+ if (device->cdev) {
+ sysfs_remove_link(&device->dev->dev.kobj,
+ "thermal_cooling");
+ sysfs_remove_link(&device->cdev->device.kobj,
+ "device");
+ thermal_cooling_device_unregister(device->cdev);
+ device->cdev = NULL;
+ }
video_output_unregister(device->output_dev);
return 0;
@@ -1797,23 +1883,19 @@ static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data)
* connector. */
acpi_video_device_enumerate(video);
acpi_video_device_rebind(video);
- acpi_video_switch_output(video, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_CYCLE: /* Cycle Display output hotkey pressed. */
- acpi_video_switch_output(video, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_NEXT_OUTPUT: /* Next Display output hotkey pressed. */
- acpi_video_switch_output(video, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_VIDEO_NEXT;
break;
case ACPI_VIDEO_NOTIFY_PREV_OUTPUT: /* previous Display output hotkey pressed. */
- acpi_video_switch_output(video, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_VIDEO_PREV;
break;
@@ -1825,6 +1907,7 @@ static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data)
break;
}
+ acpi_notifier_call_chain(device, event, 0);
input_report_key(input, keycode, 1);
input_sync(input);
input_report_key(input, keycode, 0);
@@ -1850,27 +1933,32 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
switch (event) {
case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS: /* Cycle brightness */
- acpi_video_switch_brightness(video_device, event);
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESS_CYCLE;
break;
case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS: /* Increase brightness */
- acpi_video_switch_brightness(video_device, event);
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESSUP;
break;
case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS: /* Decrease brightness */
- acpi_video_switch_brightness(video_device, event);
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESSDOWN;
break;
case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS: /* zero brightness */
- acpi_video_switch_brightness(video_device, event);
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESS_ZERO;
break;
case ACPI_VIDEO_NOTIFY_DISPLAY_OFF: /* display device off */
- acpi_video_switch_brightness(video_device, event);
+ if (brightness_switch_enabled)
+ acpi_video_switch_brightness(video_device, event);
acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_DISPLAY_OFF;
break;
@@ -1881,6 +1969,7 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
break;
}
+ acpi_notifier_call_chain(device, event, 0);
input_report_key(input, keycode, 1);
input_sync(input);
input_report_key(input, keycode, 0);
@@ -1890,6 +1979,25 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
}
static int instance;
+static int acpi_video_resume(struct acpi_device *device)
+{
+ struct acpi_video_bus *video;
+ struct acpi_video_device *video_device;
+ int i;
+
+ if (!device || !acpi_driver_data(device))
+ return -EINVAL;
+
+ video = acpi_driver_data(device);
+
+ for (i = 0; i < video->attached_count; i++) {
+ video_device = video->attached_array[i].bind_info;
+ if (video_device && video_device->backlight)
+ acpi_video_set_brightness(video_device->backlight);
+ }
+ return AE_OK;
+}
+
static int acpi_video_bus_add(struct acpi_device *device)
{
acpi_status status;
diff --git a/drivers/acpi/wmi.c b/drivers/acpi/wmi.c
new file mode 100644
index 000000000000..36b84ab418dd
--- /dev/null
+++ b/drivers/acpi/wmi.c
@@ -0,0 +1,710 @@
+/*
+ * ACPI-WMI mapping driver
+ *
+ * Copyright (C) 2007-2008 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ *
+ * GUID parsing code from ldm.c is:
+ * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+ * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+ACPI_MODULE_NAME("wmi");
+MODULE_AUTHOR("Carlos Corbacho");
+MODULE_DESCRIPTION("ACPI-WMI Mapping Driver");
+MODULE_LICENSE("GPL");
+
+#define ACPI_WMI_CLASS "wmi"
+
+#undef PREFIX
+#define PREFIX "ACPI: WMI: "
+
+static DEFINE_MUTEX(wmi_data_lock);
+
+struct guid_block {
+ char guid[16];
+ union {
+ char object_id[2];
+ struct {
+ unsigned char notify_id;
+ unsigned char reserved;
+ };
+ };
+ u8 instance_count;
+ u8 flags;
+};
+
+struct wmi_block {
+ struct list_head list;
+ struct guid_block gblock;
+ acpi_handle handle;
+ wmi_notify_handler handler;
+ void *handler_data;
+};
+
+static struct wmi_block wmi_blocks;
+
+/*
+ * If the GUID data block is marked as expensive, we must enable and
+ * explicitily disable data collection.
+ */
+#define ACPI_WMI_EXPENSIVE 0x1
+#define ACPI_WMI_METHOD 0x2 /* GUID is a method */
+#define ACPI_WMI_STRING 0x4 /* GUID takes & returns a string */
+#define ACPI_WMI_EVENT 0x8 /* GUID is an event */
+
+static int acpi_wmi_remove(struct acpi_device *device, int type);
+static int acpi_wmi_add(struct acpi_device *device);
+
+static const struct acpi_device_id wmi_device_ids[] = {
+ {"PNP0C14", 0},
+ {"pnp0c14", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
+
+static struct acpi_driver acpi_wmi_driver = {
+ .name = "wmi",
+ .class = ACPI_WMI_CLASS,
+ .ids = wmi_device_ids,
+ .ops = {
+ .add = acpi_wmi_add,
+ .remove = acpi_wmi_remove,
+ },
+};
+
+/*
+ * GUID parsing functions
+ */
+
+/**
+ * wmi_parse_hexbyte - Convert an ASCII hex number to a byte
+ * @src: Pointer to at least 2 characters to convert.
+ *
+ * Convert a two character ASCII hex string to a number.
+ *
+ * Return: 0-255 Success, the byte was parsed correctly
+ * -1 Error, an invalid character was supplied
+ */
+static int wmi_parse_hexbyte(const u8 *src)
+{
+ unsigned int x; /* For correct wrapping */
+ int h;
+
+ /* high part */
+ x = src[0];
+ if (x - '0' <= '9' - '0') {
+ h = x - '0';
+ } else if (x - 'a' <= 'f' - 'a') {
+ h = x - 'a' + 10;
+ } else if (x - 'A' <= 'F' - 'A') {
+ h = x - 'A' + 10;
+ } else {
+ return -1;
+ }
+ h <<= 4;
+
+ /* low part */
+ x = src[1];
+ if (x - '0' <= '9' - '0')
+ return h | (x - '0');
+ if (x - 'a' <= 'f' - 'a')
+ return h | (x - 'a' + 10);
+ if (x - 'A' <= 'F' - 'A')
+ return h | (x - 'A' + 10);
+ return -1;
+}
+
+/**
+ * wmi_swap_bytes - Rearrange GUID bytes to match GUID binary
+ * @src: Memory block holding binary GUID (16 bytes)
+ * @dest: Memory block to hold byte swapped binary GUID (16 bytes)
+ *
+ * Byte swap a binary GUID to match its real GUID value
+ */
+static void wmi_swap_bytes(u8 *src, u8 *dest)
+{
+ int i;
+
+ for (i = 0; i <= 3; i++)
+ memcpy(dest + i, src + (3 - i), 1);
+
+ for (i = 0; i <= 1; i++)
+ memcpy(dest + 4 + i, src + (5 - i), 1);
+
+ for (i = 0; i <= 1; i++)
+ memcpy(dest + 6 + i, src + (7 - i), 1);
+
+ memcpy(dest + 8, src + 8, 8);
+}
+
+/**
+ * wmi_parse_guid - Convert GUID from ASCII to binary
+ * @src: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @dest: Memory block to hold binary GUID (16 bytes)
+ *
+ * N.B. The GUID need not be NULL terminated.
+ *
+ * Return: 'true' @dest contains binary GUID
+ * 'false' @dest contents are undefined
+ */
+static bool wmi_parse_guid(const u8 *src, u8 *dest)
+{
+ static const int size[] = { 4, 2, 2, 2, 6 };
+ int i, j, v;
+
+ if (src[8] != '-' || src[13] != '-' ||
+ src[18] != '-' || src[23] != '-')
+ return false;
+
+ for (j = 0; j < 5; j++, src++) {
+ for (i = 0; i < size[j]; i++, src += 2, *dest++ = v) {
+ v = wmi_parse_hexbyte(src);
+ if (v < 0)
+ return false;
+ }
+ }
+
+ return true;
+}
+
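Taken together, the helpers above convert a textual GUID into the mixed-endian binary layout stored by _WDG. A minimal sketch of the data flow, using a made-up GUID string:

	const char *text = "00112233-4455-6677-8899-aabbccddeeff";
	u8 raw[16], binary[16];

	if (wmi_parse_guid(text, raw))		/* raw    = 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff */
		wmi_swap_bytes(raw, binary);	/* binary = 33 22 11 00 55 44 77 66 88 99 aa bb cc dd ee ff */

The first three GUID fields are byte-swapped while the trailing eight bytes are copied verbatim, and that swapped form is what find_guid() below compares against each block's guid[] via memcmp().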
+static bool find_guid(const char *guid_string, struct wmi_block **out)
+{
+ char tmp[16], guid_input[16];
+ struct wmi_block *wblock;
+ struct guid_block *block;
+ struct list_head *p;
+
+ wmi_parse_guid(guid_string, tmp);
+ wmi_swap_bytes(tmp, guid_input);
+
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+ block = &wblock->gblock;
+
+ if (memcmp(block->guid, guid_input, 16) == 0) {
+ if (out)
+ *out = wblock;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Exported WMI functions
+ */
+/**
+ * wmi_evaluate_method - Evaluate a WMI method
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * @method_id: Method ID to call
+ * &in: Buffer containing input for the method call
+ * &out: Empty buffer to return the method results
+ *
+ * Call an ACPI-WMI method
+ */
+acpi_status wmi_evaluate_method(const char *guid_string, u8 instance,
+u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
+{
+ struct guid_block *block = NULL;
+ struct wmi_block *wblock = NULL;
+ acpi_handle handle;
+ acpi_status status;
+ struct acpi_object_list input;
+ union acpi_object params[3];
+ char method[4] = "WM";
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_BAD_ADDRESS;
+
+ block = &wblock->gblock;
+ handle = wblock->handle;
+
+ if (!(block->flags & ACPI_WMI_METHOD))
+ return AE_BAD_DATA;
+
+ if (block->instance_count < instance)
+ return AE_BAD_PARAMETER;
+
+ input.count = 2;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = instance;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = method_id;
+
+ if (in) {
+ input.count = 3;
+
+ if (block->flags & ACPI_WMI_STRING) {
+ params[2].type = ACPI_TYPE_STRING;
+ } else {
+ params[2].type = ACPI_TYPE_BUFFER;
+ }
+ params[2].buffer.length = in->length;
+ params[2].buffer.pointer = in->pointer;
+ }
+
+ strncat(method, block->object_id, 2);
+
+ status = acpi_evaluate_object(handle, method, &input, out);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(wmi_evaluate_method);
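A hedged usage sketch for this export (the GUID is only the documentation's format example, and the method ID and argument layout are invented; a real driver uses whatever its firmware's _WDG defines):

	u32 arg = 1;
	struct acpi_buffer in = { sizeof(arg), &arg };
	struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;

	status = wmi_evaluate_method("fa50ff2b-f2e8-45de-83fa-65417f2f49ba",
				     0, 1, &in, &out);
	if (ACPI_SUCCESS(status)) {
		obj = out.pointer;
		if (obj && obj->type == ACPI_TYPE_INTEGER)
			printk(KERN_DEBUG "WMxx returned %llu\n",
			       (unsigned long long)obj->integer.value);
		kfree(out.pointer);	/* caller owns the ACPI_ALLOCATE_BUFFER result */
	}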
+
+/**
+ * wmi_query_block - Return contents of a WMI block
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * &out: Empty buffer to return the contents of the data block to
+ *
+ * Return the contents of an ACPI-WMI data block to a buffer
+ */
+acpi_status wmi_query_block(const char *guid_string, u8 instance,
+struct acpi_buffer *out)
+{
+ struct guid_block *block = NULL;
+ struct wmi_block *wblock = NULL;
+ acpi_handle handle;
+ acpi_status status, wc_status = AE_ERROR;
+ struct acpi_object_list input, wc_input;
+ union acpi_object wc_params[1], wq_params[1];
+ char method[5];
+ char wc_method[5] = "WC";
+
+ if (!guid_string || !out)
+ return AE_BAD_PARAMETER;
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_BAD_ADDRESS;
+
+ block = &wblock->gblock;
+ handle = wblock->handle;
+
+ if (block->instance_count < instance)
+ return AE_BAD_PARAMETER;
+
+ /* Check GUID is a data block */
+ if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
+ return AE_BAD_ADDRESS;
+
+ input.count = 1;
+ input.pointer = wq_params;
+ wq_params[0].type = ACPI_TYPE_INTEGER;
+ wq_params[0].integer.value = instance;
+
+ /*
+ * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method first to
+ * enable collection.
+ */
+ if (block->flags & ACPI_WMI_EXPENSIVE) {
+ wc_input.count = 1;
+ wc_input.pointer = wc_params;
+ wc_params[0].type = ACPI_TYPE_INTEGER;
+ wc_params[0].integer.value = 1;
+
+ strncat(wc_method, block->object_id, 2);
+
+ /*
+ * Some GUIDs break the specification by declaring themselves
+ * expensive, but have no corresponding WCxx method. So we
+ * should not fail if this happens.
+ */
+ wc_status = acpi_evaluate_object(handle, wc_method,
+ &wc_input, NULL);
+ }
+
+ strcpy(method, "WQ");
+ strncat(method, block->object_id, 2);
+
+ status = acpi_evaluate_object(handle, method, NULL, out);
+
+ /*
+ * If ACPI_WMI_EXPENSIVE, call the relevant WCxx method, even if
+ * the WQxx method failed - we should disable collection anyway.
+ */
+ if ((block->flags & ACPI_WMI_EXPENSIVE) && wc_status) {
+ wc_params[0].integer.value = 0;
+ status = acpi_evaluate_object(handle,
+ wc_method, &wc_input, NULL);
+ }
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(wmi_query_block);
+
+/**
+ * wmi_set_block - Write to a WMI block
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @instance: Instance index
+ * &in: Buffer containing new values for the data block
+ *
+ * Write the contents of the input buffer to an ACPI-WMI data block
+ */
+acpi_status wmi_set_block(const char *guid_string, u8 instance,
+const struct acpi_buffer *in)
+{
+ struct guid_block *block = NULL;
+ struct wmi_block *wblock = NULL;
+ acpi_handle handle;
+ struct acpi_object_list input;
+ union acpi_object params[2];
+ char method[4] = "WS";
+
+ if (!guid_string || !in)
+ return AE_BAD_DATA;
+
+ if (!find_guid(guid_string, &wblock))
+ return AE_BAD_ADDRESS;
+
+ block = &wblock->gblock;
+ handle = wblock->handle;
+
+ if (block->instance_count < instance)
+ return AE_BAD_PARAMETER;
+
+ /* Check GUID is a data block */
+ if (block->flags & (ACPI_WMI_EVENT | ACPI_WMI_METHOD))
+ return AE_BAD_ADDRESS;
+
+ input.count = 2;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = instance;
+
+ if (block->flags & ACPI_WMI_STRING) {
+ params[1].type = ACPI_TYPE_STRING;
+ } else {
+ params[1].type = ACPI_TYPE_BUFFER;
+ }
+ params[1].buffer.length = in->length;
+ params[1].buffer.pointer = in->pointer;
+
+ strncat(method, block->object_id, 2);
+
+ return acpi_evaluate_object(handle, method, &input, NULL);
+}
+EXPORT_SYMBOL_GPL(wmi_set_block);
+
+/**
+ * wmi_install_notify_handler - Register handler for WMI events
+ * @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @handler: Function to handle notifications
+ * @data: Data to be returned to handler when event is fired
+ *
+ * Register a handler for events sent to the ACPI-WMI mapper device.
+ */
+acpi_status wmi_install_notify_handler(const char *guid,
+wmi_notify_handler handler, void *data)
+{
+ struct wmi_block *block;
+
+ if (!guid || !handler)
+ return AE_BAD_PARAMETER;
+
+ find_guid(guid, &block);
+ if (!block)
+ return AE_NOT_EXIST;
+
+ if (block->handler)
+ return AE_ALREADY_ACQUIRED;
+
+ block->handler = handler;
+ block->handler_data = data;
+
+ return AE_OK;
+}
+EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
+
+/**
+ * wmi_remove_notify_handler - Unregister handler for WMI events
+ * @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ *
+ * Unregister handler for events sent to the ACPI-WMI mapper device.
+ */
+acpi_status wmi_remove_notify_handler(const char *guid)
+{
+ struct wmi_block *block;
+
+ if (!guid)
+ return AE_BAD_PARAMETER;
+
+ find_guid(guid, &block);
+ if (!block)
+ return AE_NOT_EXIST;
+
+ if (!block->handler)
+ return AE_NULL_ENTRY;
+
+ block->handler = NULL;
+ block->handler_data = NULL;
+
+ return AE_OK;
+}
+EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
+
+/**
+ * wmi_get_event_data - Get WMI data associated with an event
+ *
+ * @event: Event to find
+ * @out: Buffer to hold event data
+ *
+ * Returns extra data associated with an event in WMI.
+ */
+acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
+{
+ struct acpi_object_list input;
+ union acpi_object params[1];
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ struct list_head *p;
+
+ input.count = 1;
+ input.pointer = params;
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = event;
+
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+ gblock = &wblock->gblock;
+
+ if ((gblock->flags & ACPI_WMI_EVENT) &&
+ (gblock->notify_id == event))
+ return acpi_evaluate_object(wblock->handle, "_WED",
+ &input, out);
+ }
+
+ return AE_NOT_FOUND;
+}
+EXPORT_SYMBOL_GPL(wmi_get_event_data);
+
+/**
+ * wmi_has_guid - Check if a GUID is available
+ * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ *
+ * Check if a given GUID is defined by _WDG
+ */
+bool wmi_has_guid(const char *guid_string)
+{
+ return find_guid(guid_string, NULL);
+}
+EXPORT_SYMBOL_GPL(wmi_has_guid);
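As a usage sketch for consumers of the notification exports above (the GUID is again only the documentation's format example, the handler name is a placeholder, and the sketch assumes the wmi_notify_handler typedef passes the event value plus the registered context):

	static void example_wmi_notify(u32 value, void *context)
	{
		/* value matches the block's notify_id; any extra payload can
		   be fetched with wmi_get_event_data() */
	}

	...

	if (!wmi_has_guid("fa50ff2b-f2e8-45de-83fa-65417f2f49ba"))
		return -ENODEV;

	status = wmi_install_notify_handler("fa50ff2b-f2e8-45de-83fa-65417f2f49ba",
					    example_wmi_notify, NULL);
	if (ACPI_FAILURE(status))
		return -ENODEV;

On the corresponding exit path the driver would call wmi_remove_notify_handler() with the same GUID.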
+
+/*
+ * Parse the _WDG method for the GUID data blocks
+ */
+static __init acpi_status parse_wdg(acpi_handle handle)
+{
+ struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obj;
+ struct guid_block *gblock;
+ struct wmi_block *wblock;
+ acpi_status status;
+ u32 i, total;
+
+ status = acpi_evaluate_object(handle, "_WDG", NULL, &out);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+
+ if (obj->type != ACPI_TYPE_BUFFER)
+ return AE_ERROR;
+
+ total = obj->buffer.length / sizeof(struct guid_block);
+
+ gblock = kzalloc(obj->buffer.length, GFP_KERNEL);
+ if (!gblock)
+ return AE_NO_MEMORY;
+
+ memcpy(gblock, obj->buffer.pointer, obj->buffer.length);
+
+ for (i = 0; i < total; i++) {
+ wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
+ if (!wblock)
+ return AE_NO_MEMORY;
+
+ wblock->gblock = gblock[i];
+ wblock->handle = handle;
+ list_add_tail(&wblock->list, &wmi_blocks.list);
+ }
+
+ kfree(out.pointer);
+ kfree(gblock);
+
+ return status;
+}
+
+/*
+ * WMI can have EmbeddedControl access regions, in which case we just want
+ * to hand these off to the EC driver.
+ */
+static acpi_status
+acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
+ u32 bits, acpi_integer * value,
+ void *handler_context, void *region_context)
+{
+ int result = 0, i = 0;
+ u8 temp = 0;
+
+ if ((address > 0xFF) || !value)
+ return AE_BAD_PARAMETER;
+
+ if (function != ACPI_READ && function != ACPI_WRITE)
+ return AE_BAD_PARAMETER;
+
+ if (bits != 8)
+ return AE_BAD_PARAMETER;
+
+ if (function == ACPI_READ) {
+ result = ec_read(address, &temp);
+ (*value) |= ((acpi_integer)temp) << i;
+ } else {
+ temp = 0xff & ((*value) >> i);
+ result = ec_write(address, temp);
+ }
+
+ switch (result) {
+ case -EINVAL:
+ return AE_BAD_PARAMETER;
+ break;
+ case -ENODEV:
+ return AE_NOT_FOUND;
+ break;
+ case -ETIME:
+ return AE_TIME;
+ break;
+ default:
+ return AE_OK;
+ }
+}
+
+static void acpi_wmi_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct guid_block *block;
+ struct wmi_block *wblock;
+ struct list_head *p;
+ struct acpi_device *device = data;
+
+ list_for_each(p, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+ block = &wblock->gblock;
+
+ if ((block->flags & ACPI_WMI_EVENT) &&
+ (block->notify_id == event)) {
+ if (wblock->handler)
+ wblock->handler(event, wblock->handler_data);
+
+ acpi_bus_generate_netlink_event(
+ device->pnp.device_class, device->dev.bus_id,
+ event, 0);
+ break;
+ }
+ }
+}
+
+static int acpi_wmi_remove(struct acpi_device *device, int type)
+{
+ acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_wmi_notify);
+
+ acpi_remove_address_space_handler(device->handle,
+ ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
+
+ return 0;
+}
+
+static int __init acpi_wmi_add(struct acpi_device *device)
+{
+ acpi_status status;
+ int result = 0;
+
+ status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_wmi_notify, device);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Error installing notify handler\n");
+ return -ENODEV;
+ }
+
+ status = acpi_install_address_space_handler(device->handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_wmi_ec_space_handler,
+ NULL, NULL);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Error installing EC region handler\n");
+ return -ENODEV;
+ }
+
+ status = parse_wdg(device->handle);
+ if (ACPI_FAILURE(status)) {
+ printk(KERN_ERR PREFIX "Error installing EC region handler\n");
+ return -ENODEV;
+ }
+
+ return result;
+}
+
+static int __init acpi_wmi_init(void)
+{
+ int result;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&wmi_blocks.list);
+
+ result = acpi_bus_register_driver(&acpi_wmi_driver);
+
+ if (result < 0) {
+ printk(KERN_INFO PREFIX "Error loading mapper\n");
+ } else {
+ printk(KERN_INFO PREFIX "Mapper loaded\n");
+ }
+
+ return result;
+}
+
+static void __exit acpi_wmi_exit(void)
+{
+ struct list_head *p, *tmp;
+ struct wmi_block *wblock;
+
+ acpi_bus_unregister_driver(&acpi_wmi_driver);
+
+ list_for_each_safe(p, tmp, &wmi_blocks.list) {
+ wblock = list_entry(p, struct wmi_block, list);
+
+ list_del(p);
+ kfree(wblock);
+ }
+
+ printk(KERN_INFO PREFIX "Mapper unloaded\n");
+}
+
+subsys_initcall(acpi_wmi_init);
+module_exit(acpi_wmi_exit);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 27c8d56111c2..29e71bddd6ff 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -679,24 +679,20 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
/* cross check port_map and cap.n_ports */
if (port_map) {
- u32 tmp_port_map = port_map;
- int n_ports = ahci_nr_ports(cap);
+ int map_ports = 0;
- for (i = 0; i < AHCI_MAX_PORTS && n_ports; i++) {
- if (tmp_port_map & (1 << i)) {
- n_ports--;
- tmp_port_map &= ~(1 << i);
- }
- }
+ for (i = 0; i < AHCI_MAX_PORTS; i++)
+ if (port_map & (1 << i))
+ map_ports++;
- /* If n_ports and port_map are inconsistent, whine and
- * clear port_map and let it be generated from n_ports.
+ /* If PI has more ports than n_ports, whine, clear
+ * port_map and let it be generated from n_ports.
*/
- if (n_ports || tmp_port_map) {
+ if (map_ports > ahci_nr_ports(cap)) {
dev_printk(KERN_WARNING, &pdev->dev,
- "nr_ports (%u) and implemented port map "
- "(0x%x) don't match, using nr_ports\n",
- ahci_nr_ports(cap), port_map);
+ "implemented port map (0x%x) contains more "
+ "ports than nr_ports (%u), using nr_ports\n",
+ port_map, ahci_nr_ports(cap));
port_map = 0;
}
}
@@ -2201,7 +2197,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
struct ata_host *host;
- int i, rc;
+ int n_ports, i, rc;
VPRINTK("ENTER\n");
@@ -2255,7 +2251,14 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
- host = ata_host_alloc_pinfo(&pdev->dev, ppi, fls(hpriv->port_map));
+ /* CAP.NP sometimes indicates the index of the last enabled
* port, at other times that of the last possible port, so
+ * determining the maximum port number requires looking at
+ * both CAP.NP and port_map.
+ */
+ n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host)
return -ENOMEM;
host->iomap = pcim_iomap_table(pdev);
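A concrete, hypothetical illustration of the CAP.NP/port_map interplay described in the comment above: if CAP.NP encodes two ports but PI reads 0x81 (ports 0 and 7 implemented), fls(0x81) is 8, so n_ports must be max(2, 8) == 8 for port 7 to get a struct ata_port; conversely, with PI == 0x3 on a part whose CAP.NP encodes six ports, ahci_nr_ports() keeps n_ports at 6.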
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 4b99ed0c59bb..9c2515f67de5 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1603,7 +1603,8 @@ static void piix_iocfg_bit18_quirk(struct pci_dev *pdev)
* Zero on success, or -ERRNO value.
*/
-static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit piix_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static int printed_version;
struct device *dev = &pdev->dev;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 361cf50cbdea..3011919f3ec8 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4154,8 +4154,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* NCQ is broken */
{ "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
{ "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
- { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
- { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
{ "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
{ "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 938f48a807eb..408da30594c4 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
static int __devinit pata_of_platform_probe(struct of_device *ofdev,
const struct of_device_id *match)
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 224bb6c2030a..aad7adc6ea56 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -19,7 +19,7 @@
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#define DRV_NAME "pata_platform"
#define DRV_VERSION "1.2"
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 922d7b2efba8..efcb66b6ccef 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -355,8 +355,8 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
ata_port_printk(qc->ap, KERN_ERR,
"s/g len unaligned : 0x%x\n", sg_len);
- if ((num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1)) &&
- (qc->n_iter + 1 != qc->n_elem)) {
+ if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) &&
+ sg_next(sg) != NULL) {
VPRINTK("setting indirect prde\n");
prd_ptr_to_indirect_ext = prd;
prd->dba = cpu_to_le32(indirect_ext_segment_paddr);
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 96e614a1c169..59e65edc5820 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -108,17 +108,6 @@ struct inic_port_priv {
u8 cached_pirq_mask;
};
-static int inic_slave_config(struct scsi_device *sdev)
-{
- /* This controller is braindamaged. dma_boundary is 0xffff
- * like others but it will lock up the whole machine HARD if
- * 65536 byte PRD entry is fed. Reduce maximum segment size.
- */
- blk_queue_max_segment_size(sdev->request_queue, 65536 - 512);
-
- return ata_scsi_slave_config(sdev);
-}
-
static struct scsi_host_template inic_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
@@ -132,7 +121,7 @@ static struct scsi_host_template inic_sht = {
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
- .slave_configure = inic_slave_config,
+ .slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
};
@@ -730,6 +719,18 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
+ /*
+ * This controller is braindamaged. dma_boundary is 0xffff
+ * like others but it will lock up the whole machine HARD if
+ * 65536 byte PRD entry is fed. Reduce maximum segment size.
+ */
+ rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "failed to set the maximum segment size.\n");
+ return rc;
+ }
+
rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 3c1b5c9027db..080b8362f8d6 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -69,8 +69,11 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -179,6 +182,8 @@ enum {
HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
HC_MAIN_IRQ_MASK_OFS = 0x1d64,
+ HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
+ HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
PORT0_ERR = (1 << 0), /* shift by port # */
PORT0_DONE = (1 << 1), /* shift by port # */
HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
@@ -194,11 +199,13 @@ enum {
TWSI_INT = (1 << 24),
HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
+ HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
HC_MAIN_RSVD),
HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
HC_MAIN_RSVD_5),
+ HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
/* SATAHC registers */
HC_CFG_OFS = 0,
@@ -368,6 +375,7 @@ enum chip_type {
chip_608x,
chip_6042,
chip_7042,
+ chip_soc,
};
/* Command ReQuest Block: 32B */
@@ -424,6 +432,10 @@ struct mv_host_priv {
u32 hp_flags;
struct mv_port_signal signal[8];
const struct mv_hw_ops *ops;
+ int n_ports;
+ void __iomem *base;
+ void __iomem *main_cause_reg_addr;
+ void __iomem *main_mask_reg_addr;
u32 irq_cause_ofs;
u32 irq_mask_ofs;
u32 unmask_all_irqs;
@@ -482,6 +494,15 @@ static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
+static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
+ void __iomem *mmio);
+static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio);
+static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int n_hc);
+static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
+ void __iomem *mmio);
+static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no);
@@ -661,6 +682,12 @@ static const struct ata_port_info mv_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
+ { /* chip_soc */
+ .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv_iie_ops,
+ },
};
static const struct pci_device_id mv_pci_tbl[] = {
@@ -711,6 +738,15 @@ static const struct mv_hw_ops mv6xxx_ops = {
.reset_bus = mv_reset_pci_bus,
};
+static const struct mv_hw_ops mv_soc_ops = {
+ .phy_errata = mv6_phy_errata,
+ .enable_leds = mv_soc_enable_leds,
+ .read_preamp = mv_soc_read_preamp,
+ .reset_hc = mv_soc_reset_hc,
+ .reset_flash = mv_soc_reset_flash,
+ .reset_bus = mv_soc_reset_bus,
+};
+
/*
* Functions
*/
@@ -749,9 +785,15 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
+static inline void __iomem *mv_host_base(struct ata_host *host)
+{
+ struct mv_host_priv *hpriv = host->private_data;
+ return hpriv->base;
+}
+
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
- return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
+ return mv_port_base(mv_host_base(ap->host), ap->port_no);
}
static inline int mv_get_hc_count(unsigned long port_flags)
@@ -1649,16 +1691,21 @@ static void mv_intr_edma(struct ata_port *ap)
*/
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
- void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->base;
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
u32 hc_irq_cause;
- int port, port0;
+ int port, port0, last_port;
if (hc == 0)
port0 = 0;
else
port0 = MV_PORTS_PER_HC;
+ if (HAS_PCI(host))
+ last_port = port0 + MV_PORTS_PER_HC;
+ else
+ last_port = port0 + hpriv->n_ports;
/* we'll need the HC success int register in most cases */
hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
if (!hc_irq_cause)
@@ -1669,7 +1716,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
hc, relevant, hc_irq_cause);
- for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
+ for (port = port0; port < port0 + last_port; port++) {
struct ata_port *ap = host->ports[port];
struct mv_port_priv *pp = ap->private_data;
int have_err_bits, hard_port, shift;
@@ -1764,13 +1811,15 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
+ struct mv_host_priv *hpriv = host->private_data;
unsigned int hc, handled = 0, n_hcs;
- void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
+ void __iomem *mmio = hpriv->base;
u32 irq_stat, irq_mask;
spin_lock(&host->lock);
- irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
- irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
+
+ irq_stat = readl(hpriv->main_cause_reg_addr);
+ irq_mask = readl(hpriv->main_mask_reg_addr);
/* check the cases where we either have nothing pending or have read
* a bogus register value which can indicate HW removal or PCI fault
@@ -1827,7 +1876,8 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
@@ -1840,7 +1890,8 @@ static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
@@ -2178,6 +2229,93 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
writel(m2, port_mmio + PHY_MODE2);
}
+/* TODO: use the generic LED interface to configure the SATA Presence */
+/* & Acitivy LEDs on the board */
+static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
+{
+ return;
+}
+
+static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio)
+{
+ void __iomem *port_mmio;
+ u32 tmp;
+
+ port_mmio = mv_port_base(mmio, idx);
+ tmp = readl(port_mmio + PHY_MODE2);
+
+ hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
+ hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
+}
+
+#undef ZERO
+#define ZERO(reg) writel(0, port_mmio + (reg))
+static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int port)
+{
+ void __iomem *port_mmio = mv_port_base(mmio, port);
+
+ writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
+
+ mv_channel_reset(hpriv, mmio, port);
+
+ ZERO(0x028); /* command */
+ writel(0x101f, port_mmio + EDMA_CFG_OFS);
+ ZERO(0x004); /* timer */
+ ZERO(0x008); /* irq err cause */
+ ZERO(0x00c); /* irq err mask */
+ ZERO(0x010); /* rq bah */
+ ZERO(0x014); /* rq inp */
+ ZERO(0x018); /* rq outp */
+ ZERO(0x01c); /* respq bah */
+ ZERO(0x024); /* respq outp */
+ ZERO(0x020); /* respq inp */
+ ZERO(0x02c); /* test control */
+ writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
+}
+
+#undef ZERO
+
+#define ZERO(reg) writel(0, hc_mmio + (reg))
+static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
+{
+ void __iomem *hc_mmio = mv_hc_base(mmio, 0);
+
+ ZERO(0x00c);
+ ZERO(0x010);
+ ZERO(0x014);
+
+}
+
+#undef ZERO
+
+static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int n_hc)
+{
+ unsigned int port;
+
+ for (port = 0; port < hpriv->n_ports; port++)
+ mv_soc_reset_hc_port(hpriv, mmio, port);
+
+ mv_soc_reset_one_hc(hpriv, mmio);
+
+ return 0;
+}
+
+static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
+{
+ return;
+}
+
+static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
+{
+ return;
+}
+
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no)
{
@@ -2342,7 +2480,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
{
struct ata_port *ap = link->ap;
struct mv_host_priv *hpriv = ap->host->private_data;
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ void __iomem *mmio = hpriv->base;
mv_stop_dma(ap);
@@ -2383,7 +2521,7 @@ static void mv_error_handler(struct ata_port *ap)
static void mv_eh_freeze(struct ata_port *ap)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
unsigned int hc = (ap->port_no > 3) ? 1 : 0;
u32 tmp, mask;
unsigned int shift;
@@ -2397,13 +2535,14 @@ static void mv_eh_freeze(struct ata_port *ap)
mask = 0x3 << shift;
/* disable assertion of portN err, done events */
- tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
- writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
+ tmp = readl(hpriv->main_mask_reg_addr);
+ writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
static void mv_eh_thaw(struct ata_port *ap)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->base;
unsigned int hc = (ap->port_no > 3) ? 1 : 0;
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
void __iomem *port_mmio = mv_ap_base(ap);
@@ -2430,8 +2569,8 @@ static void mv_eh_thaw(struct ata_port *ap)
writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
/* enable assertion of portN err, done events */
- tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
- writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
+ tmp = readl(hpriv->main_mask_reg_addr);
+ writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
/**
@@ -2598,9 +2737,13 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
}
break;
+ case chip_soc:
+ hpriv->ops = &mv_soc_ops;
+ hp_flags |= MV_HP_ERRATA_60X1C0;
+ break;
default:
- dev_printk(KERN_ERR, &pdev->dev,
+ dev_printk(KERN_ERR, host->dev,
"BUG: invalid board index %u\n", board_idx);
return 1;
}
@@ -2633,15 +2776,25 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
int rc = 0, n_hc, port, hc;
- void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
struct mv_host_priv *hpriv = host->private_data;
-
- /* global interrupt mask */
- writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
+ void __iomem *mmio = hpriv->base;
rc = mv_chip_id(host, board_idx);
if (rc)
- goto done;
+ goto done;
+
+ if (HAS_PCI(host)) {
+ hpriv->main_cause_reg_addr = hpriv->base +
+ HC_MAIN_IRQ_CAUSE_OFS;
+ hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
+ } else {
+ hpriv->main_cause_reg_addr = hpriv->base +
+ HC_SOC_MAIN_IRQ_CAUSE_OFS;
+ hpriv->main_mask_reg_addr = hpriv->base +
+ HC_SOC_MAIN_IRQ_MASK_OFS;
+ }
+ /* global interrupt mask */
+ writel(0, hpriv->main_mask_reg_addr);
n_hc = mv_get_hc_count(host->ports[0]->flags);
@@ -2672,13 +2825,15 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
for (port = 0; port < host->n_ports; port++) {
struct ata_port *ap = host->ports[port];
void __iomem *port_mmio = mv_port_base(mmio, port);
- unsigned int offset = port_mmio - mmio;
mv_port_init(&ap->ioaddr, port_mmio);
#ifdef CONFIG_PCI
- ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
- ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+ if (HAS_PCI(host)) {
+ unsigned int offset = port_mmio - mmio;
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+ }
#endif
}
@@ -2694,35 +2849,141 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
}
- /* Clear any currently outstanding host interrupt conditions */
- writelfl(0, mmio + hpriv->irq_cause_ofs);
+ if (HAS_PCI(host)) {
+ /* Clear any currently outstanding host interrupt conditions */
+ writelfl(0, mmio + hpriv->irq_cause_ofs);
- /* and unmask interrupt generation for host regs */
- writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
+ /* and unmask interrupt generation for host regs */
+ writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
+ if (IS_GEN_I(hpriv))
+ writelfl(~HC_MAIN_MASKED_IRQS_5,
+ hpriv->main_mask_reg_addr);
+ else
+ writelfl(~HC_MAIN_MASKED_IRQS,
+ hpriv->main_mask_reg_addr);
+
+ VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
+ "PCI int cause/mask=0x%08x/0x%08x\n",
+ readl(hpriv->main_cause_reg_addr),
+ readl(hpriv->main_mask_reg_addr),
+ readl(mmio + hpriv->irq_cause_ofs),
+ readl(mmio + hpriv->irq_mask_ofs));
+ } else {
+ writelfl(~HC_MAIN_MASKED_IRQS_SOC,
+ hpriv->main_mask_reg_addr);
+ VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
+ readl(hpriv->main_cause_reg_addr),
+ readl(hpriv->main_mask_reg_addr));
+ }
+done:
+ return rc;
+}
- if (IS_GEN_I(hpriv))
- writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
- else
- writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
+/**
+ * mv_platform_probe - handle a positive probe of an SoC Marvell
+ * host
+ * @pdev: platform device found
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_platform_probe(struct platform_device *pdev)
+{
+ static int printed_version;
+ const struct mv_sata_platform_data *mv_platform_data;
+ const struct ata_port_info *ppi[] =
+ { &mv_port_info[chip_soc], NULL };
+ struct ata_host *host;
+ struct mv_host_priv *hpriv;
+ struct resource *res;
+ int n_ports, rc;
- VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
- "PCI int cause/mask=0x%08x/0x%08x\n",
- readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
- readl(mmio + HC_MAIN_IRQ_MASK_OFS),
- readl(mmio + hpriv->irq_cause_ofs),
- readl(mmio + hpriv->irq_mask_ofs));
+ if (!printed_version++)
+ dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
-done:
- return rc;
+ /*
+ * Simple resource validation ..
+ */
+ if (unlikely(pdev->num_resources != 2)) {
+ dev_err(&pdev->dev, "invalid number of resources\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the register base first
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -EINVAL;
+
+ /* allocate host */
+ mv_platform_data = pdev->dev.platform_data;
+ n_ports = mv_platform_data->n_ports;
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+
+ if (!host || !hpriv)
+ return -ENOMEM;
+ host->private_data = hpriv;
+ hpriv->n_ports = n_ports;
+
+ host->iomap = NULL;
+ hpriv->base = ioremap(res->start, res->end - res->start + 1);
+ hpriv->base -= MV_SATAHC0_REG_BASE;
+
+ /* initialize adapter */
+ rc = mv_init_host(host, chip_soc);
+ if (rc)
+ return rc;
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
+ host->n_ports);
+
+ return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
+ IRQF_SHARED, &mv6_sht);
+}
+
+/*
+ *
+ * mv_platform_remove - unplug a platform interface
+ * @pdev: platform device
+ *
+ * A platform bus SATA device has been unplugged. Perform the needed
+ * cleanup. Also called on module unload for any active devices.
+ */
+static int __devexit mv_platform_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct mv_host_priv *hpriv = host->private_data;
+ void __iomem *base = hpriv->base;
+
+ ata_host_detach(host);
+ iounmap(base);
+ return 0;
}
+static struct platform_driver mv_platform_driver = {
+ .probe = mv_platform_probe,
+ .remove = __devexit_p(mv_platform_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+
#ifdef CONFIG_PCI
-static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int mv_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
static struct pci_driver mv_pci_driver = {
.name = DRV_NAME,
.id_table = mv_pci_tbl,
- .probe = mv_init_one,
+ .probe = mv_pci_init_one,
.remove = ata_pci_remove_one,
};
@@ -2828,14 +3089,15 @@ static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
}
/**
- * mv_init_one - handle a positive probe of a Marvell host
+ * mv_pci_init_one - handle a positive probe of a PCI Marvell host
* @pdev: PCI device found
* @ent: PCI device ID entry for the matched host
*
* LOCKING:
* Inherited from caller.
*/
-static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int mv_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static int printed_version;
unsigned int board_idx = (unsigned int)ent->driver_data;
@@ -2855,6 +3117,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!host || !hpriv)
return -ENOMEM;
host->private_data = hpriv;
+ hpriv->n_ports = n_ports;
/* acquire resources */
rc = pcim_enable_device(pdev);
@@ -2867,6 +3130,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
+ hpriv->base = host->iomap[MV_PRIMARY_BAR];
rc = pci_go_64(pdev);
if (rc)
@@ -2895,11 +3159,22 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
#endif
+static int mv_platform_probe(struct platform_device *pdev);
+static int __devexit mv_platform_remove(struct platform_device *pdev);
+
static int __init mv_init(void)
{
int rc = -ENODEV;
#ifdef CONFIG_PCI
rc = pci_register_driver(&mv_pci_driver);
+ if (rc < 0)
+ return rc;
+#endif
+ rc = platform_driver_register(&mv_platform_driver);
+
+#ifdef CONFIG_PCI
+ if (rc < 0)
+ pci_unregister_driver(&mv_pci_driver);
#endif
return rc;
}
@@ -2909,6 +3184,7 @@ static void __exit mv_exit(void)
#ifdef CONFIG_PCI
pci_unregister_driver(&mv_pci_driver);
#endif
+ platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index bfe92a43cf89..ed5473bf7a0a 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -247,6 +247,7 @@ struct nv_adma_port_priv {
void __iomem *ctl_block;
void __iomem *gen_block;
void __iomem *notifier_clear_block;
+ u64 adma_dma_mask;
u8 flags;
int last_issue_ncq;
};
@@ -715,9 +716,10 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
+ struct nv_adma_port_priv *port0, *port1;
+ struct scsi_device *sdev0, *sdev1;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u64 bounce_limit;
- unsigned long segment_boundary;
+ unsigned long segment_boundary, flags;
unsigned short sg_tablesize;
int rc;
int adma_enable;
@@ -729,6 +731,8 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
/* Not a proper libata device, ignore */
return rc;
+ spin_lock_irqsave(ap->lock, flags);
+
if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
/*
* NVIDIA reports that ADMA mode does not support ATAPI commands.
@@ -737,7 +741,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
* Restrict DMA parameters as required by the legacy interface
* when an ATAPI device is connected.
*/
- bounce_limit = ATA_DMA_MASK;
segment_boundary = ATA_DMA_BOUNDARY;
/* Subtract 1 since an extra entry may be needed for padding, see
libata-scsi.c */
@@ -748,7 +751,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
adma_enable = 0;
nv_adma_register_mode(ap);
} else {
- bounce_limit = *ap->dev->dma_mask;
segment_boundary = NV_ADMA_DMA_BOUNDARY;
sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
adma_enable = 1;
@@ -774,12 +776,49 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
if (current_reg != new_reg)
pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
- blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
+ port0 = ap->host->ports[0]->private_data;
+ port1 = ap->host->ports[1]->private_data;
+ sdev0 = ap->host->ports[0]->link.device[0].sdev;
+ sdev1 = ap->host->ports[1]->link.device[0].sdev;
+ if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+ (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
+ /** We have to set the DMA mask to 32-bit if either port is in
+ ATAPI mode, since they are on the same PCI device which is
+ used for DMA mapping. If we set the mask we also need to set
+ the bounce limit on both ports to ensure that the block
+ layer doesn't feed addresses that cause DMA mapping to
+ choke. If either SCSI device is not allocated yet, it's OK
+ since that port will discover its correct setting when it
+ does get allocated.
+ Note: Setting 32-bit mask should not fail. */
+ if (sdev0)
+ blk_queue_bounce_limit(sdev0->request_queue,
+ ATA_DMA_MASK);
+ if (sdev1)
+ blk_queue_bounce_limit(sdev1->request_queue,
+ ATA_DMA_MASK);
+
+ pci_set_dma_mask(pdev, ATA_DMA_MASK);
+ } else {
+ /** This shouldn't fail as it was set to this value before */
+ pci_set_dma_mask(pdev, pp->adma_dma_mask);
+ if (sdev0)
+ blk_queue_bounce_limit(sdev0->request_queue,
+ pp->adma_dma_mask);
+ if (sdev1)
+ blk_queue_bounce_limit(sdev1->request_queue,
+ pp->adma_dma_mask);
+ }
+
blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
ata_port_printk(ap, KERN_INFO,
- "bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
- (unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
+ "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
+ (unsigned long long)*ap->host->dev->dma_mask,
+ segment_boundary, sg_tablesize);
+
+ spin_unlock_irqrestore(ap->lock, flags);
+
return rc;
}
@@ -1140,10 +1179,20 @@ static int nv_adma_port_start(struct ata_port *ap)
void *mem;
dma_addr_t mem_dma;
void __iomem *mmio;
+ struct pci_dev *pdev = to_pci_dev(dev);
u16 tmp;
VPRINTK("ENTER\n");
+ /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
+ pad buffers */
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+ rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc)
+ return rc;
+
rc = ata_port_start(ap);
if (rc)
return rc;
@@ -1159,6 +1208,15 @@ static int nv_adma_port_start(struct ata_port *ap)
pp->notifier_clear_block = pp->gen_block +
NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
+ /* Now that the legacy PRD and padding buffer are allocated we can
+ safely raise the DMA mask to allocate the CPB/APRD table.
+ These are allowed to fail since we store the value that ends up
+ being used as the bounce limit in slave_config later if
+ needed. */
+ pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ pp->adma_dma_mask = *dev->dma_mask;
+
mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
&mem_dma, GFP_KERNEL);
if (!mem)
@@ -2417,12 +2475,6 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hpriv->type = type;
host->private_data = hpriv;
- /* set 64bit dma masks, may fail */
- if (type == ADMA) {
- if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
- pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
- }
-
/* request and iomap NV_MMIO_BAR */
rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
if (rc)
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 3ef072ff319d..30caa0337190 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -30,8 +30,6 @@
* Hardware documentation available under NDA.
*
*
- * To-do list:
- * - VT6421 PATA support
*
*/
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 63e09c015ca0..c66637392bbc 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -5,7 +5,7 @@ obj-y := core.o sys.o bus.o dd.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o
obj-y += power/
-obj-$(CONFIG_HAS_DMA) += dma-mapping.o dmapool.o
+obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_ISA) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index c5885f5ce0ac..499b003f9278 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -110,7 +110,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
*
* Initialize and register the CPU device.
*/
-int __devinit register_cpu(struct cpu *cpu, int num)
+int __cpuinit register_cpu(struct cpu *cpu, int num)
{
int error;
cpu->node_id = cpu_to_node(num);
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
deleted file mode 100644
index b5034dc72a05..000000000000
--- a/drivers/base/dmapool.c
+++ /dev/null
@@ -1,481 +0,0 @@
-
-#include <linux/device.h>
-#include <linux/mm.h>
-#include <asm/io.h> /* Needed for i386 to build */
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/poison.h>
-#include <linux/sched.h>
-
-/*
- * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
- * small blocks are easily used by drivers for bus mastering controllers.
- * This should probably be sharing the guts of the slab allocator.
- */
-
-struct dma_pool { /* the pool */
- struct list_head page_list;
- spinlock_t lock;
- size_t blocks_per_page;
- size_t size;
- struct device *dev;
- size_t allocation;
- char name [32];
- wait_queue_head_t waitq;
- struct list_head pools;
-};
-
-struct dma_page { /* cacheable header for 'allocation' bytes */
- struct list_head page_list;
- void *vaddr;
- dma_addr_t dma;
- unsigned in_use;
- unsigned long bitmap [0];
-};
-
-#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
-
-static DEFINE_MUTEX (pools_lock);
-
-static ssize_t
-show_pools (struct device *dev, struct device_attribute *attr, char *buf)
-{
- unsigned temp;
- unsigned size;
- char *next;
- struct dma_page *page;
- struct dma_pool *pool;
-
- next = buf;
- size = PAGE_SIZE;
-
- temp = scnprintf(next, size, "poolinfo - 0.1\n");
- size -= temp;
- next += temp;
-
- mutex_lock(&pools_lock);
- list_for_each_entry(pool, &dev->dma_pools, pools) {
- unsigned pages = 0;
- unsigned blocks = 0;
-
- list_for_each_entry(page, &pool->page_list, page_list) {
- pages++;
- blocks += page->in_use;
- }
-
- /* per-pool info, no real statistics yet */
- temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
- pool->name,
- blocks, pages * pool->blocks_per_page,
- pool->size, pages);
- size -= temp;
- next += temp;
- }
- mutex_unlock(&pools_lock);
-
- return PAGE_SIZE - size;
-}
-static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
-
-/**
- * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @allocation: returned blocks won't cross this boundary (or zero)
- * Context: !in_interrupt()
- *
- * Returns a dma allocation pool with the requested characteristics, or
- * null if one can't be created. Given one of these pools, dma_pool_alloc()
- * may be used to allocate memory. Such memory will all have "consistent"
- * DMA mappings, accessible by the device and its driver without using
- * cache flushing primitives. The actual size of blocks allocated may be
- * larger than requested because of alignment.
- *
- * If allocation is nonzero, objects returned from dma_pool_alloc() won't
- * cross that size boundary. This is useful for devices which have
- * addressing restrictions on individual DMA transfers, such as not crossing
- * boundaries of 4KBytes.
- */
-struct dma_pool *
-dma_pool_create (const char *name, struct device *dev,
- size_t size, size_t align, size_t allocation)
-{
- struct dma_pool *retval;
-
- if (align == 0)
- align = 1;
- if (size == 0)
- return NULL;
- else if (size < align)
- size = align;
- else if ((size % align) != 0) {
- size += align + 1;
- size &= ~(align - 1);
- }
-
- if (allocation == 0) {
- if (PAGE_SIZE < size)
- allocation = size;
- else
- allocation = PAGE_SIZE;
- // FIXME: round up for less fragmentation
- } else if (allocation < size)
- return NULL;
-
- if (!(retval = kmalloc_node (sizeof *retval, GFP_KERNEL, dev_to_node(dev))))
- return retval;
-
- strlcpy (retval->name, name, sizeof retval->name);
-
- retval->dev = dev;
-
- INIT_LIST_HEAD (&retval->page_list);
- spin_lock_init (&retval->lock);
- retval->size = size;
- retval->allocation = allocation;
- retval->blocks_per_page = allocation / size;
- init_waitqueue_head (&retval->waitq);
-
- if (dev) {
- int ret;
-
- mutex_lock(&pools_lock);
- if (list_empty (&dev->dma_pools))
- ret = device_create_file (dev, &dev_attr_pools);
- else
- ret = 0;
- /* note: not currently insisting "name" be unique */
- if (!ret)
- list_add (&retval->pools, &dev->dma_pools);
- else {
- kfree(retval);
- retval = NULL;
- }
- mutex_unlock(&pools_lock);
- } else
- INIT_LIST_HEAD (&retval->pools);
-
- return retval;
-}
-
-
-static struct dma_page *
-pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
-{
- struct dma_page *page;
- int mapsize;
-
- mapsize = pool->blocks_per_page;
- mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
- mapsize *= sizeof (long);
-
- page = kmalloc(mapsize + sizeof *page, mem_flags);
- if (!page)
- return NULL;
- page->vaddr = dma_alloc_coherent (pool->dev,
- pool->allocation,
- &page->dma,
- mem_flags);
- if (page->vaddr) {
- memset (page->bitmap, 0xff, mapsize); // bit set == free
-#ifdef CONFIG_DEBUG_SLAB
- memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
- list_add (&page->page_list, &pool->page_list);
- page->in_use = 0;
- } else {
- kfree (page);
- page = NULL;
- }
- return page;
-}
-
-
-static inline int
-is_page_busy (int blocks, unsigned long *bitmap)
-{
- while (blocks > 0) {
- if (*bitmap++ != ~0UL)
- return 1;
- blocks -= BITS_PER_LONG;
- }
- return 0;
-}
-
-static void
-pool_free_page (struct dma_pool *pool, struct dma_page *page)
-{
- dma_addr_t dma = page->dma;
-
-#ifdef CONFIG_DEBUG_SLAB
- memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
-#endif
- dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma);
- list_del (&page->page_list);
- kfree (page);
-}
-
-
-/**
- * dma_pool_destroy - destroys a pool of dma memory blocks.
- * @pool: dma pool that will be destroyed
- * Context: !in_interrupt()
- *
- * Caller guarantees that no more memory from the pool is in use,
- * and that nothing will try to use the pool after this call.
- */
-void
-dma_pool_destroy (struct dma_pool *pool)
-{
- mutex_lock(&pools_lock);
- list_del (&pool->pools);
- if (pool->dev && list_empty (&pool->dev->dma_pools))
- device_remove_file (pool->dev, &dev_attr_pools);
- mutex_unlock(&pools_lock);
-
- while (!list_empty (&pool->page_list)) {
- struct dma_page *page;
- page = list_entry (pool->page_list.next,
- struct dma_page, page_list);
- if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
- if (pool->dev)
- dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
- pool->name, page->vaddr);
- else
- printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
- pool->name, page->vaddr);
- /* leak the still-in-use consistent memory */
- list_del (&page->page_list);
- kfree (page);
- } else
- pool_free_page (pool, page);
- }
-
- kfree (pool);
-}
-
-
-/**
- * dma_pool_alloc - get a block of consistent memory
- * @pool: dma pool that will produce the block
- * @mem_flags: GFP_* bitmask
- * @handle: pointer to dma address of block
- *
- * This returns the kernel virtual address of a currently unused block,
- * and reports its dma address through the handle.
- * If such a memory block can't be allocated, null is returned.
- */
-void *
-dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
-{
- unsigned long flags;
- struct dma_page *page;
- int map, block;
- size_t offset;
- void *retval;
-
-restart:
- spin_lock_irqsave (&pool->lock, flags);
- list_for_each_entry(page, &pool->page_list, page_list) {
- int i;
- /* only cachable accesses here ... */
- for (map = 0, i = 0;
- i < pool->blocks_per_page;
- i += BITS_PER_LONG, map++) {
- if (page->bitmap [map] == 0)
- continue;
- block = ffz (~ page->bitmap [map]);
- if ((i + block) < pool->blocks_per_page) {
- clear_bit (block, &page->bitmap [map]);
- offset = (BITS_PER_LONG * map) + block;
- offset *= pool->size;
- goto ready;
- }
- }
- }
- if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) {
- if (mem_flags & __GFP_WAIT) {
- DECLARE_WAITQUEUE (wait, current);
-
- __set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue (&pool->waitq, &wait);
- spin_unlock_irqrestore (&pool->lock, flags);
-
- schedule_timeout (POOL_TIMEOUT_JIFFIES);
-
- remove_wait_queue (&pool->waitq, &wait);
- goto restart;
- }
- retval = NULL;
- goto done;
- }
-
- clear_bit (0, &page->bitmap [0]);
- offset = 0;
-ready:
- page->in_use++;
- retval = offset + page->vaddr;
- *handle = offset + page->dma;
-#ifdef CONFIG_DEBUG_SLAB
- memset (retval, POOL_POISON_ALLOCATED, pool->size);
-#endif
-done:
- spin_unlock_irqrestore (&pool->lock, flags);
- return retval;
-}
-
-
-static struct dma_page *
-pool_find_page (struct dma_pool *pool, dma_addr_t dma)
-{
- unsigned long flags;
- struct dma_page *page;
-
- spin_lock_irqsave (&pool->lock, flags);
- list_for_each_entry(page, &pool->page_list, page_list) {
- if (dma < page->dma)
- continue;
- if (dma < (page->dma + pool->allocation))
- goto done;
- }
- page = NULL;
-done:
- spin_unlock_irqrestore (&pool->lock, flags);
- return page;
-}
-
-
-/**
- * dma_pool_free - put block back into dma pool
- * @pool: the dma pool holding the block
- * @vaddr: virtual address of block
- * @dma: dma address of block
- *
- * Caller promises neither device nor driver will again touch this block
- * unless it is first re-allocated.
- */
-void
-dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
-{
- struct dma_page *page;
- unsigned long flags;
- int map, block;
-
- if ((page = pool_find_page(pool, dma)) == NULL) {
- if (pool->dev)
- dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
- pool->name, vaddr, (unsigned long) dma);
- else
- printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
- pool->name, vaddr, (unsigned long) dma);
- return;
- }
-
- block = dma - page->dma;
- block /= pool->size;
- map = block / BITS_PER_LONG;
- block %= BITS_PER_LONG;
-
-#ifdef CONFIG_DEBUG_SLAB
- if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
- if (pool->dev)
- dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
- pool->name, vaddr, (unsigned long long) dma);
- else
- printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
- pool->name, vaddr, (unsigned long long) dma);
- return;
- }
- if (page->bitmap [map] & (1UL << block)) {
- if (pool->dev)
- dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
- pool->name, (unsigned long long)dma);
- else
- printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
- pool->name, (unsigned long long)dma);
- return;
- }
- memset (vaddr, POOL_POISON_FREED, pool->size);
-#endif
-
- spin_lock_irqsave (&pool->lock, flags);
- page->in_use--;
- set_bit (block, &page->bitmap [map]);
- if (waitqueue_active (&pool->waitq))
- wake_up (&pool->waitq);
- /*
- * Resist a temptation to do
- * if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
- * Better have a few empty pages hang around.
- */
- spin_unlock_irqrestore (&pool->lock, flags);
-}
-
-/*
- * Managed DMA pool
- */
-static void dmam_pool_release(struct device *dev, void *res)
-{
- struct dma_pool *pool = *(struct dma_pool **)res;
-
- dma_pool_destroy(pool);
-}
-
-static int dmam_pool_match(struct device *dev, void *res, void *match_data)
-{
- return *(struct dma_pool **)res == match_data;
-}
-
-/**
- * dmam_pool_create - Managed dma_pool_create()
- * @name: name of pool, for diagnostics
- * @dev: device that will be doing the DMA
- * @size: size of the blocks in this pool.
- * @align: alignment requirement for blocks; must be a power of two
- * @allocation: returned blocks won't cross this boundary (or zero)
- *
- * Managed dma_pool_create(). DMA pool created with this function is
- * automatically destroyed on driver detach.
- */
-struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
- size_t size, size_t align, size_t allocation)
-{
- struct dma_pool **ptr, *pool;
-
- ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
- if (pool)
- devres_add(dev, ptr);
- else
- devres_free(ptr);
-
- return pool;
-}
-
-/**
- * dmam_pool_destroy - Managed dma_pool_destroy()
- * @pool: dma pool that will be destroyed
- *
- * Managed dma_pool_destroy().
- */
-void dmam_pool_destroy(struct dma_pool *pool)
-{
- struct device *dev = pool->dev;
-
- dma_pool_destroy(pool);
- WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
-}
-
-EXPORT_SYMBOL (dma_pool_create);
-EXPORT_SYMBOL (dma_pool_destroy);
-EXPORT_SYMBOL (dma_pool_alloc);
-EXPORT_SYMBOL (dma_pool_free);
-EXPORT_SYMBOL (dmam_pool_create);
-EXPORT_SYMBOL (dmam_pool_destroy);
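
The deleted file's kernel-doc above still describes the dma_pool API, which remains available to drivers (the allocator was relocated out of drivers/base around this time rather than dropped). For reference, a bare-bones consumer looks roughly like the sketch below; the pool name, block size, and alignment are placeholders:

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

/* Hypothetical consumer: carve fixed-size, DMA-coherent descriptors out
 * of a pool instead of calling dma_alloc_coherent() per descriptor. */
static int example_use_pool(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t handle;
	void *desc;

	pool = dma_pool_create("example-desc", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;

	desc = dma_pool_alloc(pool, GFP_KERNEL, &handle);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hardware is given 'handle', the CPU touches 'desc' ... */

	dma_pool_free(pool, desc, handle);
	dma_pool_destroy(pool);
	return 0;
}
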
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 0295855a3eef..4a1b9bfc5471 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -292,7 +292,8 @@ firmware_class_timeout(u_long data)
static inline void fw_setup_device_id(struct device *f_dev, struct device *dev)
{
- snprintf(f_dev->bus_id, BUS_ID_SIZE, "firmware-%s", dev->bus_id);
+ /* XXX warning we should watch out for name collisions */
+ strlcpy(f_dev->bus_id, dev->bus_id, BUS_ID_SIZE);
}
static int fw_register_device(struct device **dev_p, const char *fw_name,
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 94268c75d04f..424995073c6b 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -90,7 +90,7 @@ static struct atari_disk_type {
unsigned blocks; /* total number of blocks */
unsigned fdc_speed; /* fdc_speed setting */
unsigned stretch; /* track doubling ? */
-} disk_type[] = {
+} atari_disk_type[] = {
{ "d360", 9, 720, 0, 0}, /* 0: 360kB diskette */
{ "D360", 9, 720, 0, 1}, /* 1: 360kb in 720k or 1.2MB drive */
{ "D720", 9,1440, 0, 0}, /* 2: 720kb in 720k or 1.2MB drive */
@@ -658,7 +658,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
return -EINVAL;
}
type = minor2disktype[type].index;
- UDT = &disk_type[type];
+ UDT = &atari_disk_type[type];
}
if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
@@ -1064,7 +1064,7 @@ static void fd_rwsec_done1(int status)
searched for a non-existent sector! */
!(read_track && FDC_READ(FDCREG_SECTOR) > SUDT->spt)) {
if (Probing) {
- if (SUDT > disk_type) {
+ if (SUDT > atari_disk_type) {
if (SUDT[-1].blocks > ReqBlock) {
/* try another disk type */
SUDT--;
@@ -1082,7 +1082,7 @@ static void fd_rwsec_done1(int status)
} else {
/* record not found, but not probing. Maybe stretch wrong ? Restart probing */
if (SUD.autoprobe) {
- SUDT = disk_type + StartDiskType[DriveType];
+ SUDT = atari_disk_type + StartDiskType[DriveType];
set_capacity(unit[SelectedDrive].disk,
SUDT->blocks);
Probing = 1;
@@ -1421,7 +1421,7 @@ repeat:
if (type == 0) {
if (!UDT) {
Probing = 1;
- UDT = disk_type + StartDiskType[DriveType];
+ UDT = atari_disk_type + StartDiskType[DriveType];
set_capacity(floppy->disk, UDT->blocks);
UD.autoprobe = 1;
}
@@ -1439,7 +1439,7 @@ repeat:
goto repeat;
}
type = minor2disktype[type].index;
- UDT = &disk_type[type];
+ UDT = &atari_disk_type[type];
set_capacity(floppy->disk, UDT->blocks);
UD.autoprobe = 0;
}
@@ -1505,7 +1505,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
if (minor2disktype[type].drive_types > DriveType)
return -ENODEV;
type = minor2disktype[type].index;
- dtp = &disk_type[type];
+ dtp = &atari_disk_type[type];
if (UD.flags & FTD_MSG)
printk (KERN_ERR "floppy%d: found dtp %p name %s!\n",
drive, dtp, dtp->name);
@@ -1576,7 +1576,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
continue;
}
setidx = minor2disktype[settype].index;
- dtp = &disk_type[setidx];
+ dtp = &atari_disk_type[setidx];
/* found matching entry ?? */
if ( dtp->blocks == setprm.size
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 855ce8e5efba..9715be3f2487 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2630,12 +2630,14 @@ static void do_cciss_request(struct request_queue *q)
c->Request.CDB[8] = creq->nr_sectors & 0xff;
c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
} else {
+ u32 upper32 = upper_32_bits(start_blk);
+
c->Request.CDBLen = 16;
c->Request.CDB[1]= 0;
- c->Request.CDB[2]= (start_blk >> 56) & 0xff; //MSB
- c->Request.CDB[3]= (start_blk >> 48) & 0xff;
- c->Request.CDB[4]= (start_blk >> 40) & 0xff;
- c->Request.CDB[5]= (start_blk >> 32) & 0xff;
+ c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB
+ c->Request.CDB[3]= (upper32 >> 16) & 0xff;
+ c->Request.CDB[4]= (upper32 >> 8) & 0xff;
+ c->Request.CDB[5]= upper32 & 0xff;
c->Request.CDB[6]= (start_blk >> 24) & 0xff;
c->Request.CDB[7]= (start_blk >> 16) & 0xff;
c->Request.CDB[8]= (start_blk >> 8) & 0xff;
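
The cciss hunk replaces 64-bit right shifts of start_blk, which is a sector_t and may be only 32 bits wide, with upper_32_bits(), which is well defined either way. The resulting big-endian split of a 64-bit LBA into the address bytes of a 16-byte CDB follows the pattern sketched below (standalone illustration, helper name invented):

#include <linux/kernel.h>	/* upper_32_bits() */
#include <linux/types.h>

/* Fill cdb[2..9] with a 64-bit LBA, most significant byte first. */
static void fill_cdb16_lba(u8 *cdb, u64 lba)
{
	u32 hi = upper_32_bits(lba);
	u32 lo = (u32)lba;

	cdb[2] = (hi >> 24) & 0xff;	/* MSB */
	cdb[3] = (hi >> 16) & 0xff;
	cdb[4] = (hi >> 8) & 0xff;
	cdb[5] = hi & 0xff;
	cdb[6] = (lo >> 24) & 0xff;
	cdb[7] = (lo >> 16) & 0xff;
	cdb[8] = (lo >> 8) & 0xff;
	cdb[9] = lo & 0xff;		/* LSB */
}
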
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index b8af22e610df..91ebb007416c 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -973,6 +973,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
lo->transfer = xfer->transfer;
lo->ioctl = xfer->ioctl;
+ if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) !=
+ (info->lo_flags & LO_FLAGS_AUTOCLEAR))
+ lo->lo_flags ^= LO_FLAGS_AUTOCLEAR;
+
lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
lo->lo_init[0] = info->lo_init[0];
lo->lo_init[1] = info->lo_init[1];
@@ -1331,6 +1335,10 @@ static int lo_release(struct inode *inode, struct file *file)
mutex_lock(&lo->lo_ctl_mutex);
--lo->lo_refcnt;
+
+ if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt)
+ loop_clr_fd(lo, inode->i_bdev);
+
mutex_unlock(&lo->lo_ctl_mutex);
return 0;
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 76096cad798f..8b9549ab4a4e 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -660,7 +660,7 @@ static int pt_open(struct inode *inode, struct file *file)
pt_identify(tape);
err = -ENODEV;
- if (!tape->flags & PT_MEDIA)
+ if (!(tape->flags & PT_MEDIA))
goto out;
err = -EROFS;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index e9de1712e5a0..674cd66dcaba 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2212,11 +2212,11 @@ static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
return ret;
}
- if (!buf[6] & 0x40) {
+ if (!(buf[6] & 0x40)) {
printk(DRIVER_NAME": Disc type is not CD-RW\n");
return 1;
}
- if (!buf[6] & 0x4) {
+ if (!(buf[6] & 0x4)) {
printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
return 1;
}
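
The pt.c hunk above and this pktcdvd.c hunk fix the same class of bug: logical NOT binds tighter than bitwise AND, so "!flags & BIT" negates first and then masks, which for any bit other than bit 0 can never be true. A self-contained illustration with a placeholder flag value:

/* With any bit other than 0x1, "!flags & BIT" is (0 or 1) & BIT == 0, so
 * the buggy test can never fire; the parenthesised form tests the flag. */
#define PT_MEDIA_EXAMPLE 0x04	/* placeholder value, not the driver's */

static int media_missing_buggy(unsigned int flags)
{
	return !flags & PT_MEDIA_EXAMPLE;	/* always 0 */
}

static int media_missing_fixed(unsigned int flags)
{
	return !(flags & PT_MEDIA_EXAMPLE);	/* 1 when the bit is clear */
}
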
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
index 82f4eecc8699..06e23be70904 100644
--- a/drivers/block/rd.c
+++ b/drivers/block/rd.c
@@ -56,6 +56,7 @@
#include <linux/backing-dev.h>
#include <linux/blkpg.h>
#include <linux/writeback.h>
+#include <linux/log2.h>
#include <asm/uaccess.h>
@@ -450,7 +451,7 @@ static int __init rd_init(void)
err = -ENOMEM;
if (rd_blocksize > PAGE_SIZE || rd_blocksize < 512 ||
- (rd_blocksize & (rd_blocksize-1))) {
+ !is_power_of_2(rd_blocksize)) {
printk("RAMDISK: wrong blocksize %d, reverting to defaults\n",
rd_blocksize);
rd_blocksize = BLOCK_SIZE;
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 78ebfffc77e3..4a7a059ebaf7 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1202,8 +1202,10 @@ static int __devexit ace_of_remove(struct of_device *op)
}
/* Match table for of_platform binding */
-static struct of_device_id __devinit ace_of_match[] = {
- { .compatible = "xilinx,xsysace", },
+static struct of_device_id ace_of_match[] __devinitdata = {
+ { .compatible = "xlnx,opb-sysace-1.00.b", },
+ { .compatible = "xlnx,opb-sysace-1.00.c", },
+ { .compatible = "xlnx,xps-sysace-1.00.a", },
{},
};
MODULE_DEVICE_TABLE(of, ace_of_match);
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 1375b5345a0a..3b28658f5a1f 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -423,6 +423,7 @@ static int bpa10x_send_frame(struct sk_buff *skb)
break;
default:
+ usb_free_urb(urb);
return -EILSEQ;
}
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index a18f9b8c9e12..7703d6e06fd9 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -704,7 +704,7 @@ static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *
static int bt3c_config(struct pcmcia_device *link)
{
- static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
+ static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
bt3c_info_t *info = link->priv;
tuple_t tuple;
u_short buf[256];
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index b786f6187902..58630cc1eff2 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -162,10 +162,8 @@ static int btsdio_rx_packet(struct btsdio_data *data)
bt_cb(skb)->pkt_type = hdr[3];
err = hci_recv_frame(skb);
- if (err < 0) {
- kfree(skb);
+ if (err < 0)
return err;
- }
sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index dade1626865b..68d1d258e6a4 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -634,7 +634,7 @@ static int next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *
static int btuart_config(struct pcmcia_device *link)
{
- static kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
+ static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
btuart_info_t *info = link->priv;
tuple_t tuple;
u_short buf[256];
diff --git a/drivers/bluetooth/hci_usb.c b/drivers/bluetooth/hci_usb.c
index 98a9cdeaffb6..372c7ef633da 100644
--- a/drivers/bluetooth/hci_usb.c
+++ b/drivers/bluetooth/hci_usb.c
@@ -111,6 +111,7 @@ static struct usb_device_id blacklist_ids[] = {
{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE },
/* Broadcom BCM2035 */
+ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 },
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 47e5b40510cb..db259e60289b 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1206,25 +1206,26 @@ int check_for_audio_disc(struct cdrom_device_info * cdi,
return 0;
}
-/* Admittedly, the logic below could be performed in a nicer way. */
int cdrom_release(struct cdrom_device_info *cdi, struct file *fp)
{
struct cdrom_device_ops *cdo = cdi->ops;
int opened_for_data;
- cdinfo(CD_CLOSE, "entering cdrom_release\n");
+ cdinfo(CD_CLOSE, "entering cdrom_release\n");
if (cdi->use_count > 0)
cdi->use_count--;
- if (cdi->use_count == 0)
+
+ if (cdi->use_count == 0) {
cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
- if (cdi->use_count == 0)
cdrom_dvd_rw_close_write(cdi);
- if (cdi->use_count == 0 &&
- (cdo->capability & CDC_LOCK) && !keeplocked) {
- cdinfo(CD_CLOSE, "Unlocking door!\n");
- cdo->lock_door(cdi, 0);
+
+ if ((cdo->capability & CDC_LOCK) && !keeplocked) {
+ cdinfo(CD_CLOSE, "Unlocking door!\n");
+ cdo->lock_door(cdi, 0);
+ }
}
+
opened_for_data = !(cdi->options & CDO_USE_FFLAGS) ||
!(fp && fp->f_flags & O_NONBLOCK);
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 8473b9f1da96..cac06bc1754b 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -558,7 +558,7 @@ static struct cdrom_device_ops viocd_dops = {
.capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM
};
-static int __init find_capability(const char *type)
+static int find_capability(const char *type)
{
struct capability_entry *entry;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 466629594776..f01ac9a07bf5 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -194,17 +194,6 @@ config MOXA_INTELLIO
module will be called moxa.
config MOXA_SMARTIO
- tristate "Moxa SmartIO support (OBSOLETE)"
- depends on SERIAL_NONSTANDARD
- help
- Say Y here if you have a Moxa SmartIO multiport serial card.
-
- This driver can also be built as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want).
- The module will be called mxser. If you want to do that, say M
- here.
-
-config MOXA_SMARTIO_NEW
tristate "Moxa SmartIO support v. 2.0"
depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA)
help
@@ -215,7 +204,7 @@ config MOXA_SMARTIO_NEW
changes finally resulting in PCI probing.
This driver can also be built as a module. The module will be called
- mxser_new. If you want to do that, say M here.
+ mxser. If you want to do that, say M here.
config ISI
tristate "Multi-Tech multiport card support (EXPERIMENTAL)"
@@ -276,7 +265,7 @@ config N_HDLC
config RISCOM8
tristate "SDL RISCom/8 card support"
- depends on SERIAL_NONSTANDARD && BROKEN_ON_SMP
+ depends on SERIAL_NONSTANDARD
help
This is a driver for the SDL Communications RISCom/8 multiport card,
which gives you many serial ports. You would need something like
@@ -765,7 +754,7 @@ config JS_RTC
config SGI_DS1286
tristate "SGI DS1286 RTC support"
- depends on SGI_IP22
+ depends on SGI_HAS_DS1286
help
If you say Y here and create a character special file /dev/rtc with
major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -841,6 +830,16 @@ config DTLK
To compile this driver as a module, choose M here: the
module will be called dtlk.
+config XILINX_HWICAP
+ tristate "Xilinx HWICAP Support"
+ depends on XILINX_VIRTEX
+ help
+ This option enables support for the Xilinx Internal Configuration
+ Access Port (ICAP) driver. The ICAP is used on Xilinx Virtex
+ FPGA platforms to partially reconfigure the FPGA at runtime.
+
+ If unsure, say N.
+
config R3964
tristate "Siemens R3964 line discipline"
---help---
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 96fc01eddefe..5407b7615614 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
obj-$(CONFIG_A2232) += ser_a2232.o generic_serial.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
-obj-$(CONFIG_MOXA_SMARTIO_NEW) += mxser_new.o
obj-$(CONFIG_COMPUTONE) += ip2/
obj-$(CONFIG_RISCOM8) += riscom8.o
obj-$(CONFIG_ISI) += isicom.o
@@ -77,6 +76,7 @@ obj-$(CONFIG_EFI_RTC) += efirtc.o
obj-$(CONFIG_SGI_DS1286) += ds1286.o
obj-$(CONFIG_SGI_IP27_RTC) += ip27-rtc.o
obj-$(CONFIG_DS1302) += ds1302.o
+obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap/
ifeq ($(CONFIG_GENERIC_NVRAM),y)
obj-$(CONFIG_NVRAM) += generic_nvram.o
else
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index b83824c41329..c69f79598e47 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -117,7 +117,8 @@ struct agp_bridge_driver {
void (*free_by_type)(struct agp_memory *);
void *(*agp_alloc_page)(struct agp_bridge_data *);
void (*agp_destroy_page)(void *, int flags);
- int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
+ int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
+ void (*chipset_flush)(struct agp_bridge_data *);
};
struct agp_bridge_data {
@@ -235,6 +236,9 @@ struct agp_bridge_data {
#define I965_PGETBL_SIZE_512KB (0 << 1)
#define I965_PGETBL_SIZE_256KB (1 << 1)
#define I965_PGETBL_SIZE_128KB (2 << 1)
+#define I965_PGETBL_SIZE_1MB (3 << 1)
+#define I965_PGETBL_SIZE_2MB (4 << 1)
+#define I965_PGETBL_SIZE_1_5MB (5 << 1)
#define G33_PGETBL_SIZE_MASK (3 << 8)
#define G33_PGETBL_SIZE_1M (1 << 8)
#define G33_PGETBL_SIZE_2M (2 << 8)
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index aa8f3a39a704..e77c17838c8a 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -11,29 +11,28 @@
#include "agp.h"
-static struct page *alpha_core_agp_vm_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int *type)
+static int alpha_core_agp_vm_fault(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
dma_addr_t dma_addr;
unsigned long pa;
struct page *page;
- dma_addr = address - vma->vm_start + agp->aperture.bus_base;
+ dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start
+ + agp->aperture.bus_base;
pa = agp->ops->translate(agp, dma_addr);
if (pa == (unsigned long)-EINVAL)
- return NULL; /* no translation */
+ return VM_FAULT_SIGBUS; /* no translation */
/*
* Get the page, inc the use count, and return it
*/
page = virt_to_page(__va(pa));
get_page(page);
- if (type)
- *type = VM_FAULT_MINOR;
- return page;
+ vmf->page = page;
+ return 0;
}
static struct aper_size_info_fixed alpha_core_agp_sizes[] =
@@ -42,7 +41,7 @@ static struct aper_size_info_fixed alpha_core_agp_sizes[] =
};
struct vm_operations_struct alpha_core_agp_vm_ops = {
- .nopage = alpha_core_agp_vm_nopage,
+ .fault = alpha_core_agp_vm_fault,
};
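
The alpha-agp hunk is one instance of the tree-wide conversion from the old ->nopage handler to ->fault: the faulting address arrives in vmf->virtual_address, the resolved page is returned through vmf->page with an extra reference, and failures are reported as VM_FAULT_* codes instead of NULL. The general shape of a converted handler, with hypothetical names and a stub translation function, is:

#include <linux/mm.h>

/* Placeholder: a real driver would translate the offset into a backing
 * page here (this stub always fails). */
static struct page *my_translate(struct vm_area_struct *vma,
				 unsigned long offset)
{
	return NULL;
}

static int my_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long offset = (unsigned long)vmf->virtual_address -
			       vma->vm_start;
	struct page *page = my_translate(vma, offset);

	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);		/* the caller expects an extra reference */
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct my_vm_ops = {
	.fault = my_vm_fault,
};
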
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 1405a42585e1..87be46406daf 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -436,10 +436,6 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
return -ENODEV;
}
cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
- if (!cap_ptr) {
- pci_dev_put(gfxcard);
- continue;
- }
}
/* With so many variants of NVidia cards, it's simpler just
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 2720882e66fe..b1bdd015165c 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -43,7 +43,7 @@
* fix some real stupidity. It's only by chance we can bump
* past 0.99 at all due to some boolean logic error. */
#define AGPGART_VERSION_MAJOR 0
-#define AGPGART_VERSION_MINOR 102
+#define AGPGART_VERSION_MINOR 103
static const struct agp_version agp_current_version =
{
.major = AGPGART_VERSION_MAJOR,
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index ecd4248861b9..39275794fe63 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -273,6 +273,10 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case AGPIOC_UNBIND32:
ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
break;
+
+ case AGPIOC_CHIPSET_FLUSH32:
+ ret_val = agpioc_chipset_flush_wrap(curr_priv);
+ break;
}
ioctl_out:
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
index 71939d637236..0c9678ac0371 100644
--- a/drivers/char/agp/compat_ioctl.h
+++ b/drivers/char/agp/compat_ioctl.h
@@ -39,6 +39,7 @@
#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t)
#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t)
+#define AGPIOC_CHIPSET_FLUSH32 _IO (AGPIOC_BASE, 10)
struct agp_info32 {
struct agp_version version; /* version of the driver */
@@ -101,5 +102,6 @@ void agp_free_memory_wrap(struct agp_memory *memory);
struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
struct agp_memory *agp_find_mem_by_key(int key);
struct agp_client *agp_find_client_by_pid(pid_t id);
+int agpioc_chipset_flush_wrap(struct agp_file_private *priv);
#endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 7791e98de51c..55d7a82bd071 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -689,7 +689,7 @@ static int agp_open(struct inode *inode, struct file *file)
set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
priv->my_pid = current->pid;
- if ((current->uid == 0) || (current->suid == 0)) {
+ if (capable(CAP_SYS_RAWIO)) {
/* Root priv, can be controller */
set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
}
@@ -960,6 +960,13 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
return agp_unbind_memory(memory);
}
+int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
+{
+ DBG("");
+ agp_flush_chipset(agp_bridge);
+ return 0;
+}
+
static int agp_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -1033,6 +1040,10 @@ static int agp_ioctl(struct inode *inode, struct file *file,
case AGPIOC_UNBIND:
ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
break;
+
+ case AGPIOC_CHIPSET_FLUSH:
+ ret_val = agpioc_chipset_flush_wrap(curr_priv);
+ break;
}
ioctl_out:
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 1a4674ce0c71..7484bc759c4c 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -80,6 +80,13 @@ static int agp_get_key(void)
return -1;
}
+void agp_flush_chipset(struct agp_bridge_data *bridge)
+{
+ if (bridge->driver->chipset_flush)
+ bridge->driver->chipset_flush(bridge);
+}
+EXPORT_SYMBOL(agp_flush_chipset);
+
/*
* Use kmalloc if possible for the page list. Otherwise fall back to
* vmalloc. This speeds things up and also saves memory for small AGP
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 189efb6ef970..eeea50a1d22a 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -14,8 +14,8 @@
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
#define PCI_DEVICE_ID_INTEL_82946GZ_IG 0x2972
-#define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980
-#define PCI_DEVICE_ID_INTEL_82965G_1_IG 0x2982
+#define PCI_DEVICE_ID_INTEL_82G35_HB 0x2980
+#define PCI_DEVICE_ID_INTEL_82G35_IG 0x2982
#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
@@ -32,13 +32,24 @@
#define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2
#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
#define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2
+#define PCI_DEVICE_ID_INTEL_IGD_HB 0x2A40
+#define PCI_DEVICE_ID_INTEL_IGD_IG 0x2A42
+
+/* cover 915 and 945 variants */
+#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB)
#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
@@ -71,9 +82,11 @@ extern int agp_memory_reserved;
#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define I915_IFPADDR 0x60
/* Intel 965G registers */
#define I965_MSAC 0x62
+#define I965_IFPADDR 0x70
/* Intel 7505 registers */
#define INTEL_I7505_APSIZE 0x74
@@ -115,6 +128,13 @@ static struct _intel_private {
* popup and for the GTT.
*/
int gtt_entries; /* i830+ */
+ union {
+ void __iomem *i9xx_flush_page;
+ void *i8xx_flush_page;
+ };
+ struct page *i8xx_page;
+ struct resource ifp_resource;
+ int resource_valid;
} intel_private;
static int intel_i810_fetch_size(void)
@@ -204,7 +224,7 @@ static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
/* Exists to support ARGB cursors */
static void *i8xx_alloc_pages(void)
{
- struct page * page;
+ struct page *page;
page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
if (page == NULL)
@@ -433,7 +453,7 @@ static void intel_i830_init_gtt_entries(void)
static const int ddt[4] = { 0, 16, 32, 64 };
int size; /* reserved space (in kb) at the top of stolen memory */
- pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
if (IS_I965) {
u32 pgetbl_ctl;
@@ -453,6 +473,15 @@ static void intel_i830_init_gtt_entries(void)
case I965_PGETBL_SIZE_512KB:
size = 512;
break;
+ case I965_PGETBL_SIZE_1MB:
+ size = 1024;
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = 2048;
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = 1024 + 512;
+ break;
default:
printk(KERN_INFO PFX "Unknown page table size, "
"assuming 512KB\n");
@@ -523,26 +552,14 @@ static void intel_i830_init_gtt_entries(void)
break;
case I915_GMCH_GMS_STOLEN_48M:
/* Check it's really I915G */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB ||
- IS_I965 || IS_G33)
+ if (IS_I915 || IS_I965 || IS_G33)
gtt_entries = MB(48) - KB(size);
else
gtt_entries = 0;
break;
case I915_GMCH_GMS_STOLEN_64M:
/* Check it's really I915G */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB ||
- IS_I965 || IS_G33)
+ if (IS_I915 || IS_I965 || IS_G33)
gtt_entries = MB(64) - KB(size);
else
gtt_entries = 0;
@@ -575,6 +592,45 @@ static void intel_i830_init_gtt_entries(void)
intel_private.gtt_entries = gtt_entries;
}
+static void intel_i830_fini_flush(void)
+{
+ kunmap(intel_private.i8xx_page);
+ intel_private.i8xx_flush_page = NULL;
+ unmap_page_from_agp(intel_private.i8xx_page);
+
+ __free_page(intel_private.i8xx_page);
+ intel_private.i8xx_page = NULL;
+}
+
+static void intel_i830_setup_flush(void)
+{
+ /* return if we've already set the flush mechanism up */
+ if (intel_private.i8xx_page)
+ return;
+
+ intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ if (!intel_private.i8xx_page)
+ return;
+
+ /* make page uncached */
+ map_page_into_agp(intel_private.i8xx_page);
+
+ intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
+ if (!intel_private.i8xx_flush_page)
+ intel_i830_fini_flush();
+}
+
+static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+{
+ unsigned int *pg = intel_private.i8xx_flush_page;
+ int i;
+
+ for (i = 0; i < 256; i += 2)
+ *(pg + i) = i;
+
+ wmb();
+}
+
/* The intel i830 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for the GTT.
*/
@@ -590,10 +646,10 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
num_entries = size->num_entries;
agp_bridge->gatt_table_real = NULL;
- pci_read_config_dword(intel_private.pcidev,I810_MMADDR,&temp);
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
temp &= 0xfff80000;
- intel_private.registers = ioremap(temp,128 * 4096);
+ intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.registers)
return -ENOMEM;
@@ -633,7 +689,7 @@ static int intel_i830_fetch_size(void)
return values[0].size;
}
- pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
agp_bridge->previous_size = agp_bridge->current_size = (void *) values;
@@ -657,12 +713,12 @@ static int intel_i830_configure(void)
current_size = A_SIZE_FIX(agp_bridge->current_size);
- pci_read_config_dword(intel_private.pcidev,I810_GMADDR,&temp);
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
@@ -675,6 +731,8 @@ static int intel_i830_configure(void)
}
global_cache_flush();
+
+ intel_i830_setup_flush();
return 0;
}
@@ -683,9 +741,10 @@ static void intel_i830_cleanup(void)
iounmap(intel_private.registers);
}
-static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int type)
+static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
{
- int i,j,num_entries;
+ int i, j, num_entries;
void *temp;
int ret = -EINVAL;
int mask_type;
@@ -697,10 +756,10 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int
num_entries = A_SIZE_FIX(temp)->num_entries;
if (pg_start < intel_private.gtt_entries) {
- printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
- pg_start,intel_private.gtt_entries);
+ printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
- printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n");
goto out_err;
}
@@ -738,8 +797,8 @@ out_err:
return ret;
}
-static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
- int type)
+static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
{
int i;
@@ -747,7 +806,7 @@ static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
return 0;
if (pg_start < intel_private.gtt_entries) {
- printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ printk(KERN_INFO PFX "Trying to disable local/stolen memory\n");
return -EINVAL;
}
@@ -760,7 +819,7 @@ static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
return 0;
}
-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
+static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
{
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
@@ -768,6 +827,95 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
return NULL;
}
+static int intel_alloc_chipset_flush_resource(void)
+{
+ int ret;
+ ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+ pcibios_align_resource, agp_bridge->dev);
+
+ return ret;
+}
+
+static void intel_i915_setup_chipset_flush(void)
+{
+ int ret;
+ u32 temp;
+
+ pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
+ if (!(temp & 0x1)) {
+ intel_alloc_chipset_flush_resource();
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ temp &= ~1;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = temp;
+ intel_private.ifp_resource.end = temp + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area in a PNP resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i965_g33_setup_chipset_flush(void)
+{
+ u32 temp_hi, temp_lo;
+ int ret;
+
+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
+ pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
+
+ if (!(temp_lo & 0x1)) {
+
+ intel_alloc_chipset_flush_resource();
+
+ intel_private.resource_valid = 1;
+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
+ upper_32_bits(intel_private.ifp_resource.start));
+ pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ u64 l64;
+
+ temp_lo &= ~0x1;
+ l64 = ((u64)temp_hi << 32) | temp_lo;
+
+ intel_private.resource_valid = 1;
+ intel_private.ifp_resource.start = l64;
+ intel_private.ifp_resource.end = l64 + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
+ /* some BIOSes reserve this area in a PNP resource, some don't */
+ if (ret)
+ intel_private.resource_valid = 0;
+ }
+}
+
+static void intel_i9xx_setup_flush(void)
+{
+ /* return if already configured */
+ if (intel_private.ifp_resource.start)
+ return;
+
+ /* setup a resource for this object */
+ intel_private.ifp_resource.name = "Intel Flush Page";
+ intel_private.ifp_resource.flags = IORESOURCE_MEM;
+
+ /* Set up the chipset flush page (915-class and later) */
+ if (IS_I965 || IS_G33) {
+ intel_i965_g33_setup_chipset_flush();
+ } else {
+ intel_i915_setup_chipset_flush();
+ }
+
+ if (intel_private.ifp_resource.start) {
+ intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
+ if (!intel_private.i9xx_flush_page)
+ printk(KERN_INFO "unable to ioremap flush page - no chipset flushing");
+ }
+}
+
static int intel_i915_configure(void)
{
struct aper_size_info_fixed *current_size;
@@ -781,9 +929,9 @@ static int intel_i915_configure(void)
agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
- pci_read_config_word(agp_bridge->dev,I830_GMCH_CTRL,&gmch_ctrl);
+ pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev,I830_GMCH_CTRL,gmch_ctrl);
+ pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
@@ -796,19 +944,34 @@ static int intel_i915_configure(void)
}
global_cache_flush();
+
+ intel_i9xx_setup_flush();
+
return 0;
}
static void intel_i915_cleanup(void)
{
+ if (intel_private.i9xx_flush_page)
+ iounmap(intel_private.i9xx_flush_page);
+ if (intel_private.resource_valid)
+ release_resource(&intel_private.ifp_resource);
+ intel_private.ifp_resource.start = 0;
+ intel_private.resource_valid = 0;
iounmap(intel_private.gtt);
iounmap(intel_private.registers);
}
-static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
- int type)
+static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
- int i,j,num_entries;
+ if (intel_private.i9xx_flush_page)
+ writel(1, intel_private.i9xx_flush_page);
+}
+
+static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
+{
+ int i, j, num_entries;
void *temp;
int ret = -EINVAL;
int mask_type;
@@ -820,10 +983,10 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
num_entries = A_SIZE_FIX(temp)->num_entries;
if (pg_start < intel_private.gtt_entries) {
- printk (KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
- pg_start,intel_private.gtt_entries);
+ printk(KERN_DEBUG PFX "pg_start == 0x%.8lx,intel_private.gtt_entries == 0x%.8x\n",
+ pg_start, intel_private.gtt_entries);
- printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
+ printk(KERN_INFO PFX "Trying to insert into local/stolen memory\n");
goto out_err;
}
@@ -861,8 +1024,8 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
return ret;
}
-static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
- int type)
+static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
+ int type)
{
int i;
@@ -870,13 +1033,13 @@ static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
return 0;
if (pg_start < intel_private.gtt_entries) {
- printk (KERN_INFO PFX "Trying to disable local/stolen memory\n");
+ printk(KERN_INFO PFX "Trying to disable local/stolen memory\n");
return -EINVAL;
}
- for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+ for (i = pg_start; i < (mem->page_count + pg_start); i++)
writel(agp_bridge->scratch_page, intel_private.gtt+i);
- }
+
readl(intel_private.gtt+i-1);
agp_bridge->driver->tlb_flush(mem);
@@ -923,7 +1086,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
agp_bridge->gatt_table_real = NULL;
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
- pci_read_config_dword(intel_private.pcidev, I915_PTEADDR,&temp2);
+ pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);
if (IS_G33)
gtt_map_size = 1024 * 1024; /* 1M on G33 */
@@ -933,7 +1096,7 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
temp &= 0xfff80000;
- intel_private.registers = ioremap(temp,128 * 4096);
+ intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.registers) {
iounmap(intel_private.gtt);
return -ENOMEM;
@@ -980,6 +1143,7 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
struct aper_size_info_fixed *size;
int num_entries;
u32 temp;
+ int gtt_offset, gtt_size;
size = agp_bridge->current_size;
page_order = size->page_order;
@@ -989,13 +1153,18 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
temp &= 0xfff00000;
- intel_private.gtt = ioremap((temp + (512 * 1024)) , 512 * 1024);
- if (!intel_private.gtt)
- return -ENOMEM;
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_HB)
+ gtt_offset = gtt_size = MB(2);
+ else
+ gtt_offset = gtt_size = KB(512);
+
+ intel_private.gtt = ioremap(temp + gtt_offset, gtt_size);
+ if (!intel_private.gtt)
+ return -ENOMEM;
- intel_private.registers = ioremap(temp,128 * 4096);
+ intel_private.registers = ioremap(temp, 128 * 4096);
if (!intel_private.registers) {
iounmap(intel_private.gtt);
return -ENOMEM;
@@ -1154,7 +1323,7 @@ static int intel_815_configure(void)
/* the Intel 815 chipset spec. says that bits 29-31 in the
* ATTBASE register are reserved -> try not to write them */
if (agp_bridge->gatt_bus_addr & INTEL_815_ATTBASE_MASK) {
- printk (KERN_EMERG PFX "gatt bus addr too high");
+ printk(KERN_EMERG PFX "gatt bus addr too high");
return -EINVAL;
}
@@ -1296,6 +1465,8 @@ static int intel_845_configure(void)
pci_write_config_byte(agp_bridge->dev, INTEL_I845_AGPM, temp2 | (1 << 1));
/* clear any possible error conditions */
pci_write_config_word(agp_bridge->dev, INTEL_I845_ERRSTS, 0x001c);
+
+ intel_i830_setup_flush();
return 0;
}
@@ -1552,6 +1723,7 @@ static const struct agp_bridge_driver intel_830_driver = {
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i830_chipset_flush,
};
static const struct agp_bridge_driver intel_820_driver = {
@@ -1648,6 +1820,7 @@ static const struct agp_bridge_driver intel_845_driver = {
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
+ .chipset_flush = intel_i830_chipset_flush,
};
static const struct agp_bridge_driver intel_850_driver = {
@@ -1721,6 +1894,7 @@ static const struct agp_bridge_driver intel_915_driver = {
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
};
static const struct agp_bridge_driver intel_i965_driver = {
@@ -1746,6 +1920,7 @@ static const struct agp_bridge_driver intel_i965_driver = {
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
};
static const struct agp_bridge_driver intel_7505_driver = {
@@ -1795,6 +1970,7 @@ static const struct agp_bridge_driver intel_g33_driver = {
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
+ .chipset_flush = intel_i915_chipset_flush,
};
static int find_gmch(u16 device)
@@ -1804,7 +1980,7 @@ static int find_gmch(u16 device)
gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
- device, gmch_device);
+ device, gmch_device);
}
if (!gmch_device)
@@ -1867,7 +2043,7 @@ static const struct intel_driver_description {
NULL, &intel_915_driver },
{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, 0, "946GZ",
NULL, &intel_i965_driver },
- { PCI_DEVICE_ID_INTEL_82965G_1_HB, PCI_DEVICE_ID_INTEL_82965G_1_IG, 0, "965G",
+ { PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, 0, "G35",
NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, 0, "965Q",
NULL, &intel_i965_driver },
@@ -1885,6 +2061,8 @@ static const struct intel_driver_description {
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
NULL, &intel_g33_driver },
+ { PCI_DEVICE_ID_INTEL_IGD_HB, PCI_DEVICE_ID_INTEL_IGD_IG, 0,
+ "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
{ 0, 0, 0, NULL, NULL, NULL }
};
@@ -1924,7 +2102,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
if (intel_agp_chipsets[i].name == NULL) {
if (cap_ptr)
printk(KERN_WARNING PFX "Unsupported Intel chipset"
- "(device id: %04x)\n", pdev->device);
+ "(device id: %04x)\n", pdev->device);
agp_put_bridge(bridge);
return -ENODEV;
}
@@ -1937,7 +2115,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
intel_agp_chipsets[i].gmch_chip_id);
agp_put_bridge(bridge);
return -ENODEV;
- }
+ }
bridge->dev = pdev;
bridge->capndx = cap_ptr;
@@ -2067,7 +2245,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
- ID(PCI_DEVICE_ID_INTEL_82965G_1_HB),
+ ID(PCI_DEVICE_ID_INTEL_82G35_HB),
ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
ID(PCI_DEVICE_ID_INTEL_82965G_HB),
ID(PCI_DEVICE_ID_INTEL_82965GM_HB),
@@ -2075,6 +2253,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_G33_HB),
ID(PCI_DEVICE_ID_INTEL_Q35_HB),
ID(PCI_DEVICE_ID_INTEL_Q33_HB),
+ ID(PCI_DEVICE_ID_INTEL_IGD_HB),
{ }
};
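
The intel-agp changes above add a chipset_flush() hook to struct agp_bridge_driver, expose it through agp_flush_chipset() and the new AGPIOC_CHIPSET_FLUSH ioctl, and on 915-class and later hardware implement it by writing to an ioremapped "Intel Flush Page" so the GMCH drains its write buffers before the GPU samples the GTT. A stripped-down hook in that style (the mapping variable is a placeholder for driver-private state, and "agp.h" is the driver-local header):

#include <linux/io.h>
#include "agp.h"

static void __iomem *example_flush_page;	/* set up at configure time */

/* Minimal chipset_flush implementation: any write to the uncached flush
 * page forces the chipset to flush its internal write buffers. */
static void example_chipset_flush(struct agp_bridge_data *bridge)
{
	if (example_flush_page)
		writel(1, example_flush_page);
}
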
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index ba3058dd39a7..610d6fd5bb50 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -38,7 +38,7 @@ config DRM_RADEON
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
run the Radeon in plain VGA mode.
-
+
If M is selected, the module will be called radeon.
config DRM_I810
@@ -71,9 +71,9 @@ config DRM_I915
852GM, 855GM, 865G or 915G integrated graphics. If M is selected, the
module will be called i915. AGP support is required for this driver
to work. This driver is used by the Intel driver in X.org 6.8 and
- XFree86 4.4 and above. If unsure, build this and i830 as modules and
+ XFree86 4.4 and above. If unsure, build this and i830 as modules and
the X server will load the correct one.
-
+
endchoice
config DRM_MGA
@@ -88,7 +88,7 @@ config DRM_SIS
tristate "SiS video cards"
depends on DRM && AGP
help
- Choose this option if you have a SiS 630 or compatible video
+ Choose this option if you have a SiS 630 or compatible video
chipset. If M is selected the module will be called sis. AGP
support is required for this driver to work.
@@ -105,4 +105,3 @@ config DRM_SAVAGE
help
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
-
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index 6915a0599dfb..1283ded88ead 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -38,5 +38,3 @@ obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_SIS) += sis.o
obj-$(CONFIG_DRM_SAVAGE)+= savage.o
obj-$(CONFIG_DRM_VIA) +=via.o
-
-
diff --git a/drivers/char/drm/README.drm b/drivers/char/drm/README.drm
index af74cd79a279..b5b332722581 100644
--- a/drivers/char/drm/README.drm
+++ b/drivers/char/drm/README.drm
@@ -41,4 +41,3 @@ For specific information about kernel-level support, see:
A Security Analysis of the Direct Rendering Infrastructure
http://dri.sourceforge.net/doc/security_low_level.html
-
diff --git a/drivers/char/drm/ati_pcigart.c b/drivers/char/drm/ati_pcigart.c
index 3345641ff904..d352dbb4ccf7 100644
--- a/drivers/char/drm/ati_pcigart.c
+++ b/drivers/char/drm/ati_pcigart.c
@@ -41,7 +41,7 @@ static void *drm_ati_alloc_pcigart_table(int order)
struct page *page;
int i;
- DRM_DEBUG("%s: alloc %d order\n", __FUNCTION__, order);
+ DRM_DEBUG("%d order\n", order);
address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
order);
@@ -54,7 +54,7 @@ static void *drm_ati_alloc_pcigart_table(int order)
for (i = 0; i < order; i++, page++)
SetPageReserved(page);
- DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
+ DRM_DEBUG("returning 0x%08lx\n", address);
return (void *)address;
}
@@ -63,7 +63,7 @@ static void drm_ati_free_pcigart_table(void *address, int order)
struct page *page;
int i;
int num_pages = 1 << order;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
page = virt_to_page((unsigned long)address);
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index 82fb3d0d2785..3a05c6d5ebe1 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -202,7 +202,8 @@ enum drm_map_flags {
_DRM_KERNEL = 0x08, /**< kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
- _DRM_REMOVABLE = 0x40 /**< Removable mapping */
+ _DRM_REMOVABLE = 0x40, /**< Removable mapping */
+ _DRM_DRIVER = 0x80 /**< Managed by driver */
};
struct drm_ctx_priv_map {
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index dde02a15fa59..19d3be5c4b2d 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -292,7 +292,6 @@ struct drm_magic_entry {
struct list_head head;
struct drm_hash_item hash_item;
struct drm_file *priv;
- struct drm_magic_entry *next;
};
struct drm_vma_entry {
@@ -388,8 +387,8 @@ struct drm_file {
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
- void *driver_priv;
struct file *filp;
+ void *driver_priv;
};
/** Wait queue */
@@ -401,11 +400,9 @@ struct drm_queue {
wait_queue_head_t read_queue; /**< Processes waiting on block_read */
atomic_t block_write; /**< Queue blocked for writes */
wait_queue_head_t write_queue; /**< Processes waiting on block_write */
-#if 1
atomic_t total_queued; /**< Total queued statistic */
atomic_t total_flushed; /**< Total flushes statistic */
atomic_t total_locks; /**< Total locks statistics */
-#endif
enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
struct drm_waitlist waitlist; /**< Pending buffers */
wait_queue_head_t flush_queue; /**< Processes waiting until flush */
@@ -416,7 +413,8 @@ struct drm_queue {
*/
struct drm_lock_data {
struct drm_hw_lock *hw_lock; /**< Hardware lock */
- struct drm_file *file_priv; /**< File descr of lock holder (0=kernel) */
+ /** Private of lock holder's file (NULL=kernel) */
+ struct drm_file *file_priv;
wait_queue_head_t lock_queue; /**< Queue of blocked processes */
unsigned long lock_time; /**< Time of last lock in jiffies */
spinlock_t spinlock;
@@ -491,6 +489,27 @@ struct drm_sigdata {
struct drm_hw_lock *lock;
};
+
+/*
+ * Generic memory manager structs
+ */
+
+struct drm_mm_node {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+ int free;
+ unsigned long start;
+ unsigned long size;
+ struct drm_mm *mm;
+ void *private;
+};
+
+struct drm_mm {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+};
+
+
/**
* Mappings list
*/
@@ -498,7 +517,7 @@ struct drm_map_list {
struct list_head head; /**< list head */
struct drm_hash_item hash;
struct drm_map *map; /**< mapping */
- unsigned int user_token;
+ uint64_t user_token;
};
typedef struct drm_map drm_local_map_t;
@@ -536,24 +555,6 @@ struct drm_ati_pcigart_info {
int table_size;
};
-/*
- * Generic memory manager structs
- */
-struct drm_mm_node {
- struct list_head fl_entry;
- struct list_head ml_entry;
- int free;
- unsigned long start;
- unsigned long size;
- struct drm_mm *mm;
- void *private;
-};
-
-struct drm_mm {
- struct list_head fl_entry;
- struct list_head ml_entry;
-};
-
/**
* DRM driver structure. This structure represent the common code for
* a family of cards. There will one drm_device for each card present
@@ -567,6 +568,8 @@ struct drm_driver {
void (*postclose) (struct drm_device *, struct drm_file *);
void (*lastclose) (struct drm_device *);
int (*unload) (struct drm_device *);
+ int (*suspend) (struct drm_device *);
+ int (*resume) (struct drm_device *);
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
void (*dma_ready) (struct drm_device *);
int (*dma_quiescent) (struct drm_device *);
@@ -642,6 +645,7 @@ struct drm_head {
* may contain multiple heads.
*/
struct drm_device {
+ struct device dev; /**< Linux device */
char *unique; /**< Unique identifier: e.g., busid */
int unique_len; /**< Length of unique field */
char *devname; /**< For /proc/interrupts */
@@ -750,7 +754,6 @@ struct drm_device {
struct pci_controller *hose;
#endif
struct drm_sg_mem *sg; /**< Scatter gather memory */
- unsigned long *ctx_bitmap; /**< context bitmap */
void *dev_private; /**< device private data */
struct drm_sigdata sigdata; /**< For block_all_signals */
sigset_t sigmask;
@@ -847,6 +850,8 @@ extern int drm_release(struct inode *inode, struct file *filp);
/* Mapping support (drm_vm.h) */
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
+extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
+extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
/* Memory management support (drm_memory.h) */
@@ -1061,11 +1066,11 @@ extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
/* sysfs support (drm_sysfs.c) */
+struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
-extern void drm_sysfs_destroy(struct class *cs);
-extern struct class_device *drm_sysfs_device_add(struct class *cs,
- struct drm_head *head);
-extern void drm_sysfs_device_remove(struct class_device *class_dev);
+extern void drm_sysfs_destroy(void);
+extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head);
+extern void drm_sysfs_device_remove(struct drm_device *dev);
/*
* Basic memory manager support (drm_mm.c)
@@ -1073,7 +1078,7 @@ extern void drm_sysfs_device_remove(struct class_device *class_dev);
extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
unsigned long size,
unsigned alignment);
-void drm_mm_put_block(struct drm_mm_node * cur);
+extern void drm_mm_put_block(struct drm_mm_node * cur);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
unsigned alignment, int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
@@ -1144,8 +1149,5 @@ extern void *drm_calloc(size_t nmemb, size_t size, int area);
/*@}*/
-extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
-extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
-
#endif /* __KERNEL__ */
#endif
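
The drmP.h hunks move the generic memory manager structs (drm_mm, drm_mm_node with their free-list and block-list heads) ahead of the map list and export drm_mm_put_block. As a rough, userspace-only model of the first-fit split-on-allocate idea behind those structs (not the kernel implementation), assuming made-up helper names:

/* Minimal first-fit range allocator sketch in the spirit of drm_mm. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long start, size;
	int free;
	struct node *next;
};

static struct node *head;

static void mm_init(unsigned long start, unsigned long size)
{
	head = malloc(sizeof(*head));
	head->start = start;
	head->size = size;
	head->free = 1;
	head->next = NULL;
}

static struct node *mm_get_block(unsigned long size)
{
	struct node *n;

	for (n = head; n; n = n->next) {
		if (!n->free || n->size < size)
			continue;
		if (n->size > size) {		/* split: tail stays free */
			struct node *rest = malloc(sizeof(*rest));

			rest->start = n->start + size;
			rest->size = n->size - size;
			rest->free = 1;
			rest->next = n->next;
			n->next = rest;
			n->size = size;
		}
		n->free = 0;
		return n;
	}
	return NULL;
}

int main(void)
{
	struct node *a;

	mm_init(0, 1 << 20);
	a = mm_get_block(4096);
	if (a)
		printf("block at %lu, size %lu\n", a->start, a->size);
	return 0;
}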
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 214f4fbcba73..9468c7889ff1 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -166,7 +166,6 @@ int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
dev->agp->mode = mode.mode;
agp_enable(dev->agp->bridge, mode.mode);
- dev->agp->base = dev->agp->agp_info.aper_base;
dev->agp->enabled = 1;
return 0;
}
@@ -417,7 +416,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
-
+ head->base = head->agp_info.aper_base;
return head;
}
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index d24a6c2c2c24..bde64b84166e 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -184,7 +184,7 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
return -ENOMEM;
}
}
-
+
break;
case _DRM_SHM:
list = drm_find_matching_map(dev, map);
@@ -229,11 +229,17 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
#ifdef __alpha__
map->offset += dev->hose->mem_space->start;
#endif
- /* Note: dev->agp->base may actually be 0 when the DRM
- * is not in control of AGP space. But if user space is
- * it should already have added the AGP base itself.
+ /* In some cases (i810 driver), user space may have already
+ * added the AGP base itself, because dev->agp->base previously
+ * only got set during AGP enable. So, only add the base
+ * address if the map's offset isn't already within the
+ * aperture.
*/
- map->offset += dev->agp->base;
+ if (map->offset < dev->agp->base ||
+ map->offset > dev->agp->base +
+ dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
+ map->offset += dev->agp->base;
+ }
map->mtrr = dev->agp->agp_mtrr; /* for getmap */
/* This assumes the DRM is in total control of AGP space.
@@ -429,6 +435,7 @@ int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
return ret;
}
+EXPORT_SYMBOL(drm_rmmap);
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
@@ -814,9 +821,9 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
page_count = 0;
while (entry->buf_count < count) {
-
+
dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
-
+
if (!dmah) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -1592,5 +1599,3 @@ int drm_order(unsigned long size)
return order;
}
EXPORT_SYMBOL(drm_order);
-
-
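
The drm_addmap_core() hunk above only adds dev->agp->base when the requested offset does not already fall inside the aperture, since user space (the i810 case in the new comment) may have rebased it itself. A standalone model of exactly that check, with aper_size in megabytes as in agp_info:

/* Userspace model of the AGP offset fixup above; names are illustrative. */
#include <stdio.h>

static unsigned long fix_offset(unsigned long offset,
				unsigned long base,
				unsigned long aper_size_mb)
{
	unsigned long end = base + aper_size_mb * 1024 * 1024 - 1;

	if (offset < base || offset > end)
		return offset + base;	/* aperture-relative: rebase it */
	return offset;			/* already inside the aperture */
}

int main(void)
{
	unsigned long base = 0xd0000000UL;

	printf("0x%lx\n", fix_offset(0x100000, base, 256));		/* rebased */
	printf("0x%lx\n", fix_offset(base + 0x100000, base, 256));	/* unchanged */
	return 0;
}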
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index 17fe69e7bfc1..d505f695421f 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -159,7 +159,7 @@ int drm_getsareactx(struct drm_device *dev, void *data,
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
- request->handle =
+ request->handle =
(void *)(unsigned long)_entry->user_token;
break;
}
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 44a46268b02b..0e7af53c87de 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -200,8 +200,10 @@ int drm_lastclose(struct drm_device * dev)
}
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
- drm_rmmap_locked(dev, r_list->map);
- r_list = NULL;
+ if (!(r_list->map->flags & _DRM_DRIVER)) {
+ drm_rmmap_locked(dev, r_list->map);
+ r_list = NULL;
+ }
}
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
@@ -255,8 +257,6 @@ int drm_init(struct drm_driver *driver)
DRM_DEBUG("\n");
- drm_mem_init();
-
for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
@@ -293,10 +293,6 @@ static void drm_cleanup(struct drm_device * dev)
drm_lastclose(dev);
- drm_ht_remove(&dev->map_hash);
-
- drm_ctxbitmap_cleanup(dev);
-
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
dev->agp && dev->agp->agp_mtrr >= 0) {
int retval;
@@ -314,6 +310,9 @@ static void drm_cleanup(struct drm_device * dev)
if (dev->driver->unload)
dev->driver->unload(dev);
+ drm_ht_remove(&dev->map_hash);
+ drm_ctxbitmap_cleanup(dev);
+
drm_put_head(&dev->primary);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
@@ -383,22 +382,24 @@ static int __init drm_core_init(void)
goto err_p3;
}
+ drm_mem_init();
+
DRM_INFO("Initialized %s %d.%d.%d %s\n",
CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
return 0;
- err_p3:
- drm_sysfs_destroy(drm_class);
- err_p2:
+err_p3:
+ drm_sysfs_destroy();
+err_p2:
unregister_chrdev(DRM_MAJOR, "drm");
drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB);
- err_p1:
+err_p1:
return ret;
}
static void __exit drm_core_exit(void)
{
remove_proc_entry("dri", NULL);
- drm_sysfs_destroy(drm_class);
+ drm_sysfs_destroy();
unregister_chrdev(DRM_MAJOR, "drm");
@@ -494,23 +495,25 @@ int drm_ioctl(struct inode *inode, struct file *filp,
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
- if (!kdata)
- return -ENOMEM;
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
+ }
}
if (cmd & IOC_IN) {
if (copy_from_user(kdata, (void __user *)arg,
_IOC_SIZE(cmd)) != 0) {
- retcode = -EACCES;
+ retcode = -EFAULT;
goto err_i1;
}
}
retcode = func(dev, kdata, file_priv);
- if (cmd & IOC_OUT) {
+ if ((retcode == 0) && (cmd & IOC_OUT)) {
if (copy_to_user((void __user *)arg, kdata,
_IOC_SIZE(cmd)) != 0)
- retcode = -EACCES;
+ retcode = -EFAULT;
}
}
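
The drm_ioctl() hunks switch failed user copies from -EACCES to -EFAULT and copy results back only when the handler succeeded. A hedged userspace model of that copy-in/copy-out pattern, with memcpy standing in for copy_from_user()/copy_to_user() and made-up names:

/* Sketch of the corrected ioctl copy-in/copy-out flow. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

typedef int (*handler_t)(void *data);

static int dispatch(handler_t func, void *uarg, size_t size, int in, int out)
{
	long kdata[8];
	int ret;

	if (size > sizeof(kdata))
		return -EINVAL;
	if (in)
		memcpy(kdata, uarg, size);	/* copy_from_user(); failure -> -EFAULT */

	ret = func(kdata);

	if (ret == 0 && out)			/* copy back only on success */
		memcpy(uarg, kdata, size);	/* copy_to_user(); failure -> -EFAULT */

	return ret;
}

static int double_it(void *data)
{
	*(int *)data *= 2;
	return 0;
}

int main(void)
{
	int v = 21;

	dispatch(double_it, &v, sizeof(v), 1, 1);
	printf("%d\n", v);	/* 42 */
	return 0;
}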
diff --git a/drivers/char/drm/drm_hashtab.c b/drivers/char/drm/drm_hashtab.c
index 4b8e7db5a232..33160673a7b7 100644
--- a/drivers/char/drm/drm_hashtab.c
+++ b/drivers/char/drm/drm_hashtab.c
@@ -80,7 +80,7 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
}
}
-static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
@@ -129,7 +129,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
}
/*
- * Just insert an item and return any "bits" bit key that hasn't been
+ * Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
@@ -200,4 +200,3 @@ void drm_ht_remove(struct drm_open_hash *ht)
ht->table = NULL;
}
}
-
diff --git a/drivers/char/drm/drm_hashtab.h b/drivers/char/drm/drm_hashtab.h
index 573e333ac457..cd2b189e1be6 100644
--- a/drivers/char/drm/drm_hashtab.h
+++ b/drivers/char/drm/drm_hashtab.h
@@ -65,4 +65,3 @@ extern void drm_ht_remove(struct drm_open_hash *ht);
#endif
-
diff --git a/drivers/char/drm/drm_ioc32.c b/drivers/char/drm/drm_ioc32.c
index 2286f3312c5c..90f5a8d9bdcb 100644
--- a/drivers/char/drm/drm_ioc32.c
+++ b/drivers/char/drm/drm_ioc32.c
@@ -1051,8 +1051,12 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
drm_ioctl_compat_t *fn;
int ret;
+ /* Assume that ioctls without an explicit compat routine will just
+ * work. This may not always be a good assumption, but it's better
+ * than always failing.
+ */
if (nr >= ARRAY_SIZE(drm_compat_ioctls))
- return -ENOTTY;
+ return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
fn = drm_compat_ioctls[nr];
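
The drm_compat_ioctl() hunk makes out-of-range ioctl numbers fall through to the native handler instead of failing with -ENOTTY, on the stated assumption that untranslated ioctls work unchanged. A toy model of that dispatch pattern (stand-in functions, not the DRM API):

/* Sketch: sparse compat table with fallback to the native handler. */
#include <stdio.h>

#define NR_HANDLERS 4

typedef long (*compat_fn)(unsigned int cmd, unsigned long arg);

static long native_ioctl(unsigned int cmd, unsigned long arg)
{
	return 0;	/* stands in for drm_ioctl() */
}

static compat_fn compat_table[NR_HANDLERS];	/* mostly NULL */

static long compat_dispatch(unsigned int nr, unsigned int cmd, unsigned long arg)
{
	compat_fn fn = (nr < NR_HANDLERS) ? compat_table[nr] : NULL;

	/* assume untranslated ioctls just work rather than returning -ENOTTY */
	return fn ? fn(cmd, arg) : native_ioctl(cmd, arg);
}

int main(void)
{
	printf("%ld\n", compat_dispatch(17, 0x6400, 0));
	return 0;
}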
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index 3cbebf868e68..16829fb3089d 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -234,26 +234,23 @@ int drm_getclient(struct drm_device *dev, void *data,
idx = client->idx;
mutex_lock(&dev->struct_mutex);
-
- if (list_empty(&dev->filelist)) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
i = 0;
list_for_each_entry(pt, &dev->filelist, lhead) {
- if (i++ >= idx)
- break;
+ if (i++ >= idx) {
+ client->auth = pt->authenticated;
+ client->pid = pt->pid;
+ client->uid = pt->uid;
+ client->magic = pt->magic;
+ client->iocs = pt->ioctl_count;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
}
-
- client->auth = pt->authenticated;
- client->pid = pt->pid;
- client->uid = pt->uid;
- client->magic = pt->magic;
- client->iocs = pt->ioctl_count;
mutex_unlock(&dev->struct_mutex);
- return 0;
+ return -EINVAL;
}
/**
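
The drm_getclient() rework above drops the empty-list special case: the loop now fills in the idx-th entry as soon as it reaches it and returns -EINVAL only if the index runs past the end of the list. A small userspace model of that shape:

/* Model of the reworked "return the idx-th list entry" loop. */
#include <errno.h>
#include <stdio.h>

struct client {
	int pid;
	struct client *next;
};

static int get_client(struct client *list, int idx, int *pid_out)
{
	struct client *c;
	int i = 0;

	for (c = list; c; c = c->next) {
		if (i++ >= idx) {
			*pid_out = c->pid;	/* copy fields while "locked" */
			return 0;
		}
	}
	return -EINVAL;				/* idx beyond the end */
}

int main(void)
{
	struct client b = { 200, NULL }, a = { 100, &b };
	int pid, ret;

	ret = get_client(&a, 1, &pid);
	printf("%d %d\n", ret, pid);		/* 0 200 */
	printf("%d\n", get_client(&a, 5, &pid));	/* -22 */
	return 0;
}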
diff --git a/drivers/char/drm/drm_irq.c b/drivers/char/drm/drm_irq.c
index 05eae63f85ba..089c015c01d1 100644
--- a/drivers/char/drm/drm_irq.c
+++ b/drivers/char/drm/drm_irq.c
@@ -107,7 +107,7 @@ static int drm_irq_install(struct drm_device * dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->irq);
if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
init_waitqueue_head(&dev->vbl_queue);
@@ -164,7 +164,7 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
- DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->irq);
dev->driver->irq_uninstall(dev);
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index 93019901bd30..845081b44f63 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -179,4 +179,3 @@ void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
iounmap(map->handle);
}
EXPORT_SYMBOL(drm_core_ioremapfree);
-
diff --git a/drivers/char/drm/drm_mm.c b/drivers/char/drm/drm_mm.c
index 86f4eb61a6a4..dcff9e9b52e3 100644
--- a/drivers/char/drm/drm_mm.c
+++ b/drivers/char/drm/drm_mm.c
@@ -293,4 +293,3 @@ void drm_mm_takedown(struct drm_mm * mm)
drm_free(entry, sizeof(*entry), DRM_MEM_MM);
}
-
diff --git a/drivers/char/drm/drm_os_linux.h b/drivers/char/drm/drm_os_linux.h
index daa69c9d8977..8dbd2572b7c3 100644
--- a/drivers/char/drm/drm_os_linux.h
+++ b/drivers/char/drm/drm_os_linux.h
@@ -69,9 +69,9 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
copy_to_user(arg1, arg2, arg3)
/* Macros for copyfrom user, but checking readability only once */
-#define DRM_VERIFYAREA_READ( uaddr, size ) \
+#define DRM_VERIFYAREA_READ( uaddr, size ) \
(access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
-#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_from_user(arg1, arg2, arg3)
#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_to_user(arg1, arg2, arg3)
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 43d3c42df360..f52468843678 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -139,6 +139,101 @@
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x710F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7140, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7141, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7142, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7143, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x714F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7153, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x715E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x715F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7181, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7183, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7186, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7187, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7188, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x718F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7193, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7196, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x719B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x719F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV515|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71D6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x71DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7244, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7248, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7249, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x724F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7280, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7281, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7283, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7284, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7287, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7289, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x728B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x728C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV570|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7290, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7291, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7293, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x7297, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV560|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0, 0, 0}
@@ -311,5 +406,5 @@
{0x8086, 0x29d2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2a02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x8086, 0x2a12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+ {0x8086, 0x2a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
-
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 12dfea89c7f3..d9b560fe9bbe 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -236,11 +236,11 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
type = "??";
else
type = types[map->type];
- DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08x ",
+ DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size, type, map->flags,
- r_list->user_token);
+ (unsigned long) r_list->user_token);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
} else {
diff --git a/drivers/char/drm/drm_sarea.h b/drivers/char/drm/drm_sarea.h
index e040f47f369f..480037331e4e 100644
--- a/drivers/char/drm/drm_sarea.h
+++ b/drivers/char/drm/drm_sarea.h
@@ -45,7 +45,7 @@
#endif
/** Maximum number of drawables in the SAREA */
-#define SAREA_MAX_DRAWABLES 256
+#define SAREA_MAX_DRAWABLES 256
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index eb7fa437355e..26d8f675ed5d 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -67,7 +67,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
struct drm_sg_mem *entry;
unsigned long pages, i, j;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
@@ -81,7 +81,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
memset(entry, 0, sizeof(*entry));
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages);
+ DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
@@ -122,8 +122,8 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
entry->handle = ScatterHandle((unsigned long)entry->virtual);
- DRM_DEBUG("sg alloc handle = %08lx\n", entry->handle);
- DRM_DEBUG("sg alloc virtual = %p\n", entry->virtual);
+ DRM_DEBUG("handle = %08lx\n", entry->handle);
+ DRM_DEBUG("virtual = %p\n", entry->virtual);
for (i = (unsigned long)entry->virtual, j = 0; j < pages;
i += PAGE_SIZE, j++) {
@@ -210,7 +210,7 @@ int drm_sg_free(struct drm_device *dev, void *data,
if (!entry || entry->handle != request->handle)
return -EINVAL;
- DRM_DEBUG("sg free virtual = %p\n", entry->virtual);
+ DRM_DEBUG("virtual = %p\n", entry->virtual);
drm_sg_cleanup(entry);
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index ee83ff9efed6..d93a217f856a 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -98,10 +98,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
dev->driver = driver;
- if (dev->driver->load)
- if ((retcode = dev->driver->load(dev, ent->driver_data)))
- goto error_out_unreg;
-
if (drm_core_has_AGP(dev)) {
if (drm_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
@@ -120,6 +116,10 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
}
}
+ if (dev->driver->load)
+ if ((retcode = dev->driver->load(dev, ent->driver_data)))
+ goto error_out_unreg;
+
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
@@ -168,11 +168,10 @@ static int drm_get_head(struct drm_device * dev, struct drm_head * head)
goto err_g1;
}
- head->dev_class = drm_sysfs_device_add(drm_class, head);
- if (IS_ERR(head->dev_class)) {
+ ret = drm_sysfs_device_add(dev, head);
+ if (ret) {
printk(KERN_ERR
"DRM: Error sysfs_device_add.\n");
- ret = PTR_ERR(head->dev_class);
goto err_g2;
}
*heads = head;
@@ -218,13 +217,14 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
if (ret)
goto err_g1;
+ pci_set_master(pdev);
if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
goto err_g2;
}
if ((ret = drm_get_head(dev, &dev->primary)))
goto err_g2;
-
+
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
driver->date, dev->primary.minor);
@@ -283,7 +283,7 @@ int drm_put_head(struct drm_head * head)
DRM_DEBUG("release secondary minor %d\n", minor);
drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
- drm_sysfs_device_remove(head->dev_class);
+ drm_sysfs_device_remove(head->dev);
*head = (struct drm_head) {.dev = NULL};
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c
index cf4349b00b07..fa36153619e8 100644
--- a/drivers/char/drm/drm_sysfs.c
+++ b/drivers/char/drm/drm_sysfs.c
@@ -19,6 +19,45 @@
#include "drm_core.h"
#include "drmP.h"
+#define to_drm_device(d) container_of(d, struct drm_device, dev)
+
+/**
+ * drm_sysfs_suspend - DRM class suspend hook
+ * @dev: Linux device to suspend
+ * @state: power state to enter
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its suspend hook, if present.
+ */
+static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
+{
+ struct drm_device *drm_dev = to_drm_device(dev);
+
+ printk(KERN_ERR "%s\n", __FUNCTION__);
+
+ if (drm_dev->driver->suspend)
+ return drm_dev->driver->suspend(drm_dev);
+
+ return 0;
+}
+
+/**
+ * drm_sysfs_resume - DRM class resume hook
+ * @dev: Linux device to resume
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its resume hook, if present.
+ */
+static int drm_sysfs_resume(struct device *dev)
+{
+ struct drm_device *drm_dev = to_drm_device(dev);
+
+ if (drm_dev->driver->resume)
+ return drm_dev->driver->resume(drm_dev);
+
+ return 0;
+}
+
/* Display the version of drm_core. This doesn't work right in current design */
static ssize_t version_show(struct class *dev, char *buf)
{
@@ -33,7 +72,7 @@ static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
* @owner: pointer to the module that is to "own" this struct drm_sysfs_class
* @name: pointer to a string for the name of this class.
*
- * This is used to create a struct drm_sysfs_class pointer that can then be used
+ * This is used to create DRM class pointer that can then be used
* in calls to drm_sysfs_device_add().
*
* Note, the pointer created here is to be destroyed when finished by making a
@@ -50,6 +89,9 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
goto err_out;
}
+ class->suspend = drm_sysfs_suspend;
+ class->resume = drm_sysfs_resume;
+
err = class_create_file(class, &class_attr_version);
if (err)
goto err_out_class;
@@ -63,94 +105,100 @@ err_out:
}
/**
- * drm_sysfs_destroy - destroys a struct drm_sysfs_class structure
- * @cs: pointer to the struct drm_sysfs_class that is to be destroyed
+ * drm_sysfs_destroy - destroys DRM class
*
- * Note, the pointer to be destroyed must have been created with a call to
- * drm_sysfs_create().
+ * Destroy the DRM device class.
*/
-void drm_sysfs_destroy(struct class *class)
+void drm_sysfs_destroy(void)
{
- if ((class == NULL) || (IS_ERR(class)))
+ if ((drm_class == NULL) || (IS_ERR(drm_class)))
return;
-
- class_remove_file(class, &class_attr_version);
- class_destroy(class);
+ class_remove_file(drm_class, &class_attr_version);
+ class_destroy(drm_class);
}
-static ssize_t show_dri(struct class_device *class_device, char *buf)
+static ssize_t show_dri(struct device *device, struct device_attribute *attr,
+ char *buf)
{
- struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev;
+ struct drm_device *dev = to_drm_device(device);
if (dev->driver->dri_library_name)
return dev->driver->dri_library_name(dev, buf);
return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
}
-static struct class_device_attribute class_device_attrs[] = {
+static struct device_attribute device_attrs[] = {
__ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
};
/**
+ * drm_sysfs_device_release - do nothing
+ * @dev: Linux device
+ *
+ * Normally, this would free the DRM device associated with @dev, along
+ * with cleaning up any other stuff. But we do that in the DRM core, so
+ * this function can just return and hope that the core does its job.
+ */
+static void drm_sysfs_device_release(struct device *dev)
+{
+ return;
+}
+
+/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
- * @cs: pointer to the struct class that this device should be registered to.
- * @dev: the dev_t for the device to be added.
- * @device: a pointer to a struct device that is assiociated with this class device.
- * @fmt: string for the class device's name
+ * @dev: DRM device to be added
+ * @head: DRM head in question
*
- * A struct class_device will be created in sysfs, registered to the specified
- * class. A "dev" file will be created, showing the dev_t for the device. The
- * pointer to the struct class_device will be returned from the call. Any further
- * sysfs files that might be required can be created using this pointer.
- * Note: the struct class passed to this function must have previously been
- * created with a call to drm_sysfs_create().
+ * Add a DRM device to the DRM's device model class. We use @dev's PCI device
+ * as the parent for the Linux device, and make sure it has a file containing
+ * the driver we're using (for userspace compatibility).
*/
-struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head *head)
+int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head)
{
- struct class_device *class_dev;
- int i, j, err;
-
- class_dev = class_device_create(cs, NULL,
- MKDEV(DRM_MAJOR, head->minor),
- &(head->dev->pdev)->dev,
- "card%d", head->minor);
- if (IS_ERR(class_dev)) {
- err = PTR_ERR(class_dev);
+ int err;
+ int i, j;
+
+ dev->dev.parent = &dev->pdev->dev;
+ dev->dev.class = drm_class;
+ dev->dev.release = drm_sysfs_device_release;
+ dev->dev.devt = head->device;
+ snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor);
+
+ err = device_register(&dev->dev);
+ if (err) {
+ DRM_ERROR("device add failed: %d\n", err);
goto err_out;
}
- class_set_devdata(class_dev, head);
-
- for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
- err = class_device_create_file(class_dev,
- &class_device_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+ err = device_create_file(&dev->dev, &device_attrs[i]);
if (err)
goto err_out_files;
}
- return class_dev;
+ return 0;
err_out_files:
if (i > 0)
for (j = 0; j < i; j++)
- class_device_remove_file(class_dev,
- &class_device_attrs[i]);
- class_device_unregister(class_dev);
+ device_remove_file(&dev->dev, &device_attrs[i]);
+ device_unregister(&dev->dev);
err_out:
- return ERR_PTR(err);
+
+ return err;
}
/**
- * drm_sysfs_device_remove - removes a class device that was created with drm_sysfs_device_add()
- * @dev: the dev_t of the device that was previously registered.
+ * drm_sysfs_device_remove - remove DRM device
+ * @dev: DRM device to remove
*
* This call unregisters and cleans up a class device that was created with a
* call to drm_sysfs_device_add()
*/
-void drm_sysfs_device_remove(struct class_device *class_dev)
+void drm_sysfs_device_remove(struct drm_device *dev)
{
int i;
- for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
- class_device_remove_file(class_dev, &class_device_attrs[i]);
- class_device_unregister(class_dev);
+ for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+ device_remove_file(&dev->dev, &device_attrs[i]);
+ device_unregister(&dev->dev);
}
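
drm_sysfs_device_add() above follows the usual add-with-unwind shape: create each attribute file in turn and, on failure, remove only the files created so far before unregistering the device. A hedged userspace sketch of that pattern, with create_file()/remove_file() as stand-ins for device_create_file()/device_remove_file():

/* Sketch of the attribute add-with-unwind pattern. */
#include <stdio.h>

#define NATTRS 3

static int create_file(int i)	{ return (i == 2) ? -1 : 0; }	/* 3rd one fails */
static void remove_file(int i)	{ printf("remove %d\n", i); }

static int device_add(void)
{
	int i, j, err = 0;

	for (i = 0; i < NATTRS; i++) {
		err = create_file(i);
		if (err)
			goto err_out_files;
	}
	return 0;

err_out_files:
	for (j = 0; j < i; j++)		/* unwind exactly what was created */
		remove_file(j);
	return err;
}

int main(void)
{
	printf("device_add: %d\n", device_add());
	return 0;
}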
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index ef5e6b130c48..cea4105374b2 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -180,7 +180,7 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
return NOPAGE_SIGBUS;
get_page(page);
- DRM_DEBUG("shm_nopage 0x%lx\n", address);
+ DRM_DEBUG("0x%lx\n", address);
return page;
}
@@ -294,7 +294,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
get_page(page);
- DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
+ DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
return page;
}
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index eb381a7c5bee..8d7ea81c4b66 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -40,7 +40,7 @@
#define I810_BUF_FREE 2
#define I810_BUF_CLIENT 1
-#define I810_BUF_HARDWARE 0
+#define I810_BUF_HARDWARE 0
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1
@@ -570,7 +570,7 @@ static void i810EmitState(struct drm_device * dev)
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
- DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
+ DRM_DEBUG("%x\n", dirty);
if (dirty & I810_UPLOAD_BUFFERS) {
i810EmitDestVerified(dev, sarea_priv->BufferState);
@@ -802,8 +802,7 @@ static void i810_dma_dispatch_flip(struct drm_device * dev)
int pitch = dev_priv->pitch;
RING_LOCALS;
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __FUNCTION__,
+ DRM_DEBUG("page=%d pfCurrentPage=%d\n",
dev_priv->current_page,
dev_priv->sarea_priv->pf_current_page);
@@ -848,8 +847,6 @@ static void i810_dma_quiescent(struct drm_device * dev)
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
-/* printk("%s\n", __FUNCTION__); */
-
i810_kernel_lost_context(dev);
BEGIN_LP_RING(4);
@@ -869,8 +866,6 @@ static int i810_flush_queue(struct drm_device * dev)
int i, ret = 0;
RING_LOCALS;
-/* printk("%s\n", __FUNCTION__); */
-
i810_kernel_lost_context(dev);
BEGIN_LP_RING(2);
@@ -949,7 +944,7 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
+ DRM_DEBUG("idx %d used %d discard %d\n",
vertex->idx, vertex->used, vertex->discard);
if (vertex->idx < 0 || vertex->idx > dma->buf_count)
@@ -987,7 +982,7 @@ static int i810_clear_bufs(struct drm_device *dev, void *data,
static int i810_swap_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- DRM_DEBUG("i810_swap_bufs\n");
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -1068,11 +1063,10 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,
sarea_priv->dirty = 0x7f;
- DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n", address, used);
+ DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
dev_priv->counter++;
DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG("i810_dma_dispatch_mc\n");
DRM_DEBUG("start : %lx\n", start);
DRM_DEBUG("used : %d\n", used);
DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
@@ -1179,7 +1173,7 @@ static void i810_do_init_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
dev_priv->page_flipping = 1;
dev_priv->current_page = 0;
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
@@ -1189,7 +1183,7 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (dev_priv->current_page != 0)
i810_dma_dispatch_flip(dev);
@@ -1202,7 +1196,7 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index 0af45872f67e..0118849a5672 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
*
*/
@@ -134,7 +134,7 @@ extern int i810_max_ioctl;
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg) I810_DEREF(reg)
-#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
+#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
#define I810_READ16(reg) I810_DEREF16(reg)
#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
@@ -145,7 +145,7 @@ extern int i810_max_ioctl;
#define BEGIN_LP_RING(n) do { \
if (I810_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__); \
+ DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
if (dev_priv->ring.space < n*4) \
i810_wait_ring(dev, n*4); \
dev_priv->ring.space -= n*4; \
@@ -155,19 +155,19 @@ extern int i810_max_ioctl;
} while (0)
#define ADVANCE_LP_RING() do { \
- if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
- dev_priv->ring.tail = outring; \
+ if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
+ dev_priv->ring.tail = outring; \
I810_WRITE(LP_RING + RING_TAIL, outring); \
} while(0)
-#define OUT_RING(n) do { \
+#define OUT_RING(n) do { \
if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
outring += 4; \
outring &= ringmask; \
} while (0)
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
@@ -184,28 +184,28 @@ extern int i810_max_ioctl;
#define I810REG_HWSTAM 0x02098
#define I810REG_INT_IDENTITY_R 0x020a4
-#define I810REG_INT_MASK_R 0x020a8
+#define I810REG_INT_MASK_R 0x020a8
#define I810REG_INT_ENABLE_R 0x020a0
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+#define RING_TAIL 0x00
#define TAIL_ADDR 0x000FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0x00FFFFF8
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x000FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START 0x08
+#define START_ADDR 0x00FFFFF8
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x000FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index 69a363edb0d2..379cbdad4921 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -42,7 +42,7 @@
#define I830_BUF_FREE 2
#define I830_BUF_CLIENT 1
-#define I830_BUF_HARDWARE 0
+#define I830_BUF_HARDWARE 0
#define I830_BUF_UNMAPPED 0
#define I830_BUF_MAPPED 1
diff --git a/drivers/char/drm/i830_drm.h b/drivers/char/drm/i830_drm.h
index 968a6d9f9dcb..4b00d2dd4f68 100644
--- a/drivers/char/drm/i830_drm.h
+++ b/drivers/char/drm/i830_drm.h
@@ -12,9 +12,9 @@
#define _I830_DEFINES_
#define I830_DMA_BUF_ORDER 12
-#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
-#define I830_DMA_BUF_NR 256
-#define I830_NR_SAREA_CLIPRECTS 8
+#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
+#define I830_DMA_BUF_NR 256
+#define I830_NR_SAREA_CLIPRECTS 8
/* Each region is a minimum of 64k, and there are at most 64 of them.
*/
@@ -58,7 +58,7 @@
#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
-#define I830_UPLOAD_STIPPLE 0x8000000
+#define I830_UPLOAD_STIPPLE 0x8000000
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index db3a9fa83960..4caba8c54455 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
*
*/
@@ -156,8 +156,7 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
#define BEGIN_LP_RING(n) do { \
if (I830_VERBOSE) \
- printk("BEGIN_LP_RING(%d) in %s\n", \
- n, __FUNCTION__); \
+ printk("BEGIN_LP_RING(%d)\n", (n)); \
if (dev_priv->ring.space < n*4) \
i830_wait_ring(dev, n*4, __FUNCTION__); \
outcount = 0; \
@@ -183,7 +182,7 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
@@ -203,30 +202,30 @@ extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
#define I830REG_HWSTAM 0x02098
#define I830REG_INT_IDENTITY_R 0x020a4
-#define I830REG_INT_MASK_R 0x020a8
+#define I830REG_INT_MASK_R 0x020a8
#define I830REG_INT_ENABLE_R 0x020a0
#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0x0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START 0x08
+#define START_ADDR 0x0xFFFFF000
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
@@ -279,9 +278,9 @@ extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-#define MI_BATCH_BUFFER ((0x30<<23)|1)
-#define MI_BATCH_BUFFER_START (0x31<<23)
-#define MI_BATCH_BUFFER_END (0xA<<23)
+#define MI_BATCH_BUFFER ((0x30<<23)|1)
+#define MI_BATCH_BUFFER_START (0x31<<23)
+#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1)
#define MI_WAIT_FOR_EVENT ((0x3<<23))
diff --git a/drivers/char/drm/i830_irq.c b/drivers/char/drm/i830_irq.c
index 76403f4b6200..a33db5f0967f 100644
--- a/drivers/char/drm/i830_irq.c
+++ b/drivers/char/drm/i830_irq.c
@@ -144,7 +144,7 @@ int i830_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_wait_t *irqwait = data;
+ drm_i830_irq_wait_t *irqwait = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index e61a43e5b3ac..43986d81ae34 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -31,17 +31,6 @@
#include "i915_drm.h"
#include "i915_drv.h"
-#define IS_I965G(dev) (dev->pci_device == 0x2972 || \
- dev->pci_device == 0x2982 || \
- dev->pci_device == 0x2992 || \
- dev->pci_device == 0x29A2 || \
- dev->pci_device == 0x2A02 || \
- dev->pci_device == 0x2A12)
-
-#define IS_G33(dev) (dev->pci_device == 0x29b2 || \
- dev->pci_device == 0x29c2 || \
- dev->pci_device == 0x29d2)
-
/* Really want an OS-independent resettable timer. Would like to have
* this loop run for (eg) 3 sec, but have the timer reset every time
* the head pointer changes, so that EBUSY only happens if the ring
@@ -90,6 +79,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
static int i915_dma_cleanup(struct drm_device * dev)
{
+ drm_i915_private_t *dev_priv = dev->dev_private;
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
@@ -97,52 +87,42 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (dev->irq)
drm_irq_uninstall(dev);
- if (dev->dev_private) {
- drm_i915_private_t *dev_priv =
- (drm_i915_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start) {
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- }
-
- if (dev_priv->status_page_dmah) {
- drm_pci_free(dev, dev_priv->status_page_dmah);
- /* Need to rewrite hardware status page */
- I915_WRITE(0x02080, 0x1ffff000);
- }
-
- if (dev_priv->status_gfx_addr) {
- dev_priv->status_gfx_addr = 0;
- drm_core_ioremapfree(&dev_priv->hws_map, dev);
- I915_WRITE(0x2080, 0x1ffff000);
- }
+ if (dev_priv->ring.virtual_start) {
+ drm_core_ioremapfree(&dev_priv->ring.map, dev);
+ dev_priv->ring.virtual_start = 0;
+ dev_priv->ring.map.handle = 0;
+ dev_priv->ring.map.size = 0;
+ }
- drm_free(dev->dev_private, sizeof(drm_i915_private_t),
- DRM_MEM_DRIVER);
+ if (dev_priv->status_page_dmah) {
+ drm_pci_free(dev, dev_priv->status_page_dmah);
+ dev_priv->status_page_dmah = NULL;
+ /* Need to rewrite hardware status page */
+ I915_WRITE(0x02080, 0x1ffff000);
+ }
- dev->dev_private = NULL;
+ if (dev_priv->status_gfx_addr) {
+ dev_priv->status_gfx_addr = 0;
+ drm_core_ioremapfree(&dev_priv->hws_map, dev);
+ I915_WRITE(0x2080, 0x1ffff000);
}
return 0;
}
-static int i915_initialize(struct drm_device * dev,
- drm_i915_private_t * dev_priv,
- drm_i915_init_t * init)
+static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
- memset(dev_priv, 0, sizeof(drm_i915_private_t));
+ drm_i915_private_t *dev_priv = dev->dev_private;
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
- dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
return -EINVAL;
}
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio_map) {
- dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
@@ -165,7 +145,6 @@ static int i915_initialize(struct drm_device * dev,
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -197,7 +176,6 @@ static int i915_initialize(struct drm_device * dev,
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
if (!dev_priv->status_page_dmah) {
- dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
@@ -209,7 +187,6 @@ static int i915_initialize(struct drm_device * dev,
I915_WRITE(0x02080, dev_priv->dma_status_page);
}
DRM_DEBUG("Enabled hardware status page\n");
- dev->dev_private = (void *)dev_priv;
return 0;
}
@@ -254,17 +231,12 @@ static int i915_dma_resume(struct drm_device * dev)
static int i915_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv;
drm_i915_init_t *init = data;
int retcode = 0;
switch (init->func) {
case I915_INIT_DMA:
- dev_priv = drm_alloc(sizeof(drm_i915_private_t),
- DRM_MEM_DRIVER);
- if (dev_priv == NULL)
- return -ENOMEM;
- retcode = i915_initialize(dev, dev_priv, init);
+ retcode = i915_initialize(dev, init);
break;
case I915_CLEANUP_DMA:
retcode = i915_dma_cleanup(dev);
@@ -351,7 +323,7 @@ static int validate_cmd(int cmd)
{
int ret = do_validate_cmd(cmd);
-/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
+/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
return ret;
}
@@ -685,7 +657,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
int value;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -719,7 +691,7 @@ static int i915_setparam(struct drm_device *dev, void *data,
drm_i915_setparam_t *param = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -749,7 +721,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_i915_hws_addr_t *hws = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -757,7 +729,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
- dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws->addr;
+ dev_priv->hws_map.offset = dev->agp->base + hws->addr;
dev_priv->hws_map.size = 4*1024;
dev_priv->hws_map.type = 0;
dev_priv->hws_map.flags = 0;
@@ -765,7 +737,6 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
drm_core_ioremap(&dev_priv->hws_map, dev);
if (dev_priv->hws_map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
dev_priv->status_gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
@@ -784,6 +755,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long base, size;
+ int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -791,24 +766,51 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
+ dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ memset(dev_priv, 0, sizeof(drm_i915_private_t));
+
+ dev->dev_private = (void *)dev_priv;
+
+ /* Add register map (needed for suspend/resume) */
+ base = drm_get_resource_start(dev, mmio_bar);
+ size = drm_get_resource_len(dev, mmio_bar);
+
+ ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+ _DRM_KERNEL | _DRM_DRIVER,
+ &dev_priv->mmio_map);
+ return ret;
+}
+
+int i915_driver_unload(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->mmio_map)
+ drm_rmmap(dev, dev_priv->mmio_map);
+
+ drm_free(dev->dev_private, sizeof(drm_i915_private_t),
+ DRM_MEM_DRIVER);
+
return 0;
}
void i915_driver_lastclose(struct drm_device * dev)
{
- if (dev->dev_private) {
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->agp_heap)
i915_mem_takedown(&(dev_priv->agp_heap));
- }
+
i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
- if (dev->dev_private) {
- drm_i915_private_t *dev_priv = dev->dev_private;
- i915_mem_release(dev, file_priv, dev_priv->agp_heap);
- }
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
struct drm_ioctl_desc i915_ioctls[] = {
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index 85bcc276f804..52e51033d32c 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -38,6 +38,465 @@ static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
};
+enum pipe {
+ PIPE_A = 0,
+ PIPE_B,
+};
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (pipe == PIPE_A)
+ return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
+ else
+ return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->save_palette_a;
+ else
+ array = dev_priv->save_palette_b;
+
+ for(i = 0; i < 256; i++)
+ array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ u32 *array;
+ int i;
+
+ if (!i915_pipe_enabled(dev, pipe))
+ return;
+
+ if (pipe == PIPE_A)
+ array = dev_priv->save_palette_a;
+ else
+ array = dev_priv->save_palette_b;
+
+ for(i = 0; i < 256; i++)
+ I915_WRITE(reg + (i << 2), array[i]);
+}
+
+static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
+{
+ outb(reg, index_port);
+ return inb(data_port);
+}
+
+static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
+{
+ inb(st01);
+ outb(palette_enable | reg, VGA_AR_INDEX);
+ return inb(VGA_AR_DATA_READ);
+}
+
+static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
+{
+ inb(st01);
+ outb(palette_enable | reg, VGA_AR_INDEX);
+ outb(val, VGA_AR_DATA_WRITE);
+}
+
+static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
+{
+ outb(reg, index_port);
+ outb(val, data_port);
+}
+
+static void i915_save_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ u16 cr_index, cr_data, st01;
+
+ /* VGA color palette registers */
+ dev_priv->saveDACMASK = inb(VGA_DACMASK);
+ /* DACCRX automatically increments during read */
+ outb(0, VGA_DACRX);
+ /* Read 3 bytes of color data from each index */
+ for (i = 0; i < 256 * 3; i++)
+ dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
+
+ /* MSR bits */
+ dev_priv->saveMSR = inb(VGA_MSR_READ);
+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* CRT controller regs */
+ i915_write_indexed(cr_index, cr_data, 0x11,
+ i915_read_indexed(cr_index, cr_data, 0x11) &
+ (~0x80));
+ for (i = 0; i < 0x24; i++)
+ dev_priv->saveCR[i] =
+ i915_read_indexed(cr_index, cr_data, i);
+ /* Make sure we don't turn off CR group 0 writes */
+ dev_priv->saveCR[0x11] &= ~0x80;
+
+ /* Attribute controller registers */
+ inb(st01);
+ dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
+ for (i = 0; i < 20; i++)
+ dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
+ inb(st01);
+ outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
+
+ /* Graphics controller registers */
+ for (i = 0; i < 9; i++)
+ dev_priv->saveGR[i] =
+ i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
+
+ dev_priv->saveGR[0x10] =
+ i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+ dev_priv->saveGR[0x11] =
+ i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+ dev_priv->saveGR[0x18] =
+ i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+ /* Sequencer registers */
+ for (i = 0; i < 8; i++)
+ dev_priv->saveSR[i] =
+ i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+static void i915_restore_vga(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+ u16 cr_index, cr_data, st01;
+
+ /* MSR bits */
+ outb(dev_priv->saveMSR, VGA_MSR_WRITE);
+ if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+ cr_index = VGA_CR_INDEX_CGA;
+ cr_data = VGA_CR_DATA_CGA;
+ st01 = VGA_ST01_CGA;
+ } else {
+ cr_index = VGA_CR_INDEX_MDA;
+ cr_data = VGA_CR_DATA_MDA;
+ st01 = VGA_ST01_MDA;
+ }
+
+ /* Sequencer registers, don't write SR07 */
+ for (i = 0; i < 7; i++)
+ i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
+ dev_priv->saveSR[i]);
+
+ /* CRT controller regs */
+ /* Enable CR group 0 writes */
+ i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+ for (i = 0; i < 0x24; i++)
+ i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
+
+ /* Graphics controller regs */
+ for (i = 0; i < 9; i++)
+ i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
+ dev_priv->saveGR[i]);
+
+ i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+ dev_priv->saveGR[0x10]);
+ i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+ dev_priv->saveGR[0x11]);
+ i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+ dev_priv->saveGR[0x18]);
+
+ /* Attribute controller registers */
+ for (i = 0; i < 20; i++)
+ i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
+ inb(st01); /* switch back to index mode */
+ outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
+
+ /* VGA color palette registers */
+ outb(dev_priv->saveDACMASK, VGA_DACMASK);
+ /* DACWX automatically increments during write */
+ outb(0, VGA_DACWX);
+ /* Write 3 bytes of color data to each index */
+ for (i = 0; i < 256 * 3; i++)
+ outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
+
+}
+
+static int i915_suspend(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (!dev || !dev_priv) {
+ printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
+ printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ pci_save_state(dev->pdev);
+ pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+
+ /* Pipe & plane A info */
+ dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
+ dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+ dev_priv->saveFPA0 = I915_READ(FPA0);
+ dev_priv->saveFPA1 = I915_READ(FPA1);
+ dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+ if (IS_I965G(dev))
+ dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
+ dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
+ dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
+ dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
+ dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
+ dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
+ dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+ dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+
+ dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
+ dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
+ dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
+ dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
+ dev_priv->saveDSPABASE = I915_READ(DSPABASE);
+ if (IS_I965G(dev)) {
+ dev_priv->saveDSPASURF = I915_READ(DSPASURF);
+ dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+ }
+ i915_save_palette(dev, PIPE_A);
+
+ /* Pipe & plane B info */
+ dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
+ dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+ dev_priv->saveFPB0 = I915_READ(FPB0);
+ dev_priv->saveFPB1 = I915_READ(FPB1);
+ dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+ if (IS_I965G(dev))
+ dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
+ dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
+ dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
+ dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
+ dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
+ dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
+ dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+ dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+
+ dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
+ dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
+ dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
+ dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
+ dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
+ if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
+ dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
+ dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+ }
+ i915_save_palette(dev, PIPE_B);
+
+ /* CRT state */
+ dev_priv->saveADPA = I915_READ(ADPA);
+
+ /* LVDS state */
+ dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+ dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+ dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+ if (IS_I965G(dev))
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ dev_priv->saveLVDS = I915_READ(LVDS);
+ if (!IS_I830(dev) && !IS_845G(dev))
+ dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+ dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
+ dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
+ dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
+
+ /* FIXME: save TV & SDVO state */
+
+ /* FBC state */
+ dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+ dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+ dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+ dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+
+ /* VGA state */
+ dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
+ dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
+ dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
+ dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+
+ /* Scratch space */
+ for (i = 0; i < 16; i++) {
+ dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
+ dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+ }
+ for (i = 0; i < 3; i++)
+ dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+ i915_save_vga(dev);
+
+ /* Shut down the device */
+ pci_disable_device(dev->pdev);
+ pci_set_power_state(dev->pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ pci_set_power_state(dev->pdev, PCI_D0);
+ pci_restore_state(dev->pdev);
+ if (pci_enable_device(dev->pdev))
+ return -1;
+
+ pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+
+ /* Pipe & plane A info */
+ /* Prime the clock */
+ if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
+ ~DPLL_VCO_ENABLE);
+ udelay(150);
+ }
+ I915_WRITE(FPA0, dev_priv->saveFPA0);
+ I915_WRITE(FPA1, dev_priv->saveFPA1);
+ /* Actually enable it */
+ I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
+ udelay(150);
+ if (IS_I965G(dev))
+ I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
+ I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
+ I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
+ I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
+ I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
+ I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+ I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+
+ /* Restore plane info */
+ I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
+ I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
+ I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+ I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
+ I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ if (IS_I965G(dev)) {
+ I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+ I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ }
+
+ if ((dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) &&
+ (dev_priv->saveDPLL_A & DPLL_VGA_MODE_DIS))
+ I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+
+ i915_restore_palette(dev, PIPE_A);
+ /* Enable the plane */
+ I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
+ I915_WRITE(DSPABASE, I915_READ(DSPABASE));
+
+ /* Pipe & plane B info */
+ if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
+ ~DPLL_VCO_ENABLE);
+ udelay(150);
+ }
+ I915_WRITE(FPB0, dev_priv->saveFPB0);
+ I915_WRITE(FPB1, dev_priv->saveFPB1);
+ /* Actually enable it */
+ I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
+ udelay(150);
+ if (IS_I965G(dev))
+ I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ udelay(150);
+
+ /* Restore mode */
+ I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
+ I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
+ I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
+ I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
+ I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
+ I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+ I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+
+ /* Restore plane info */
+ I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
+ I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
+ I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+ I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
+ I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ if (IS_I965G(dev)) {
+ I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+ I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ }
+
+ if ((dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) &&
+ (dev_priv->saveDPLL_B & DPLL_VGA_MODE_DIS))
+ I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+ i915_restore_palette(dev, PIPE_B);
+ /* Enable the plane */
+ I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
+
+ /* CRT state */
+ I915_WRITE(ADPA, dev_priv->saveADPA);
+
+ /* LVDS state */
+ if (IS_I965G(dev))
+ I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ I915_WRITE(LVDS, dev_priv->saveLVDS);
+ if (!IS_I830(dev) && !IS_845G(dev))
+ I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+
+ I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+ I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
+ I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
+ I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
+ I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+
+ /* FIXME: restore TV & SDVO state */
+
+ /* FBC info */
+ I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+ I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+ I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+ I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+
+ /* VGA state */
+ I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+ I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
+ I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
+ I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
+ udelay(150);
+
+ for (i = 0; i < 16; i++) {
+ I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
+ I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+ }
+ for (i = 0; i < 3; i++)
+ I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+
+ i915_restore_vga(dev);
+
+ return 0;
+}
+
static struct drm_driver driver = {
/* don't use mtrr's here, the Xserver or user space app should
* deal with them for intel hardware.
@@ -47,8 +506,11 @@ static struct drm_driver driver = {
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
DRIVER_IRQ_VBL2,
.load = i915_driver_load,
+ .unload = i915_driver_unload,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
+ .suspend = i915_suspend,
+ .resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
.vblank_wait = i915_driver_vblank_wait,
.vblank_wait2 = i915_driver_vblank_wait2,
@@ -77,7 +539,7 @@ static struct drm_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
},
-
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index e064292e703a..f8308bfb2613 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -114,6 +114,85 @@ typedef struct drm_i915_private {
spinlock_t swaps_lock;
drm_i915_vbl_swap_t vbl_swaps;
unsigned int swaps_pending;
+
+ /* Register state */
+ u8 saveLBB;
+ u32 saveDSPACNTR;
+ u32 saveDSPBCNTR;
+ u32 savePIPEACONF;
+ u32 savePIPEBCONF;
+ u32 savePIPEASRC;
+ u32 savePIPEBSRC;
+ u32 saveFPA0;
+ u32 saveFPA1;
+ u32 saveDPLL_A;
+ u32 saveDPLL_A_MD;
+ u32 saveHTOTAL_A;
+ u32 saveHBLANK_A;
+ u32 saveHSYNC_A;
+ u32 saveVTOTAL_A;
+ u32 saveVBLANK_A;
+ u32 saveVSYNC_A;
+ u32 saveBCLRPAT_A;
+ u32 saveDSPASTRIDE;
+ u32 saveDSPASIZE;
+ u32 saveDSPAPOS;
+ u32 saveDSPABASE;
+ u32 saveDSPASURF;
+ u32 saveDSPATILEOFF;
+ u32 savePFIT_PGM_RATIOS;
+ u32 saveBLC_PWM_CTL;
+ u32 saveBLC_PWM_CTL2;
+ u32 saveFPB0;
+ u32 saveFPB1;
+ u32 saveDPLL_B;
+ u32 saveDPLL_B_MD;
+ u32 saveHTOTAL_B;
+ u32 saveHBLANK_B;
+ u32 saveHSYNC_B;
+ u32 saveVTOTAL_B;
+ u32 saveVBLANK_B;
+ u32 saveVSYNC_B;
+ u32 saveBCLRPAT_B;
+ u32 saveDSPBSTRIDE;
+ u32 saveDSPBSIZE;
+ u32 saveDSPBPOS;
+ u32 saveDSPBBASE;
+ u32 saveDSPBSURF;
+ u32 saveDSPBTILEOFF;
+ u32 saveVCLK_DIVISOR_VGA0;
+ u32 saveVCLK_DIVISOR_VGA1;
+ u32 saveVCLK_POST_DIV;
+ u32 saveVGACNTRL;
+ u32 saveADPA;
+ u32 saveLVDS;
+ u32 saveLVDSPP_ON;
+ u32 saveLVDSPP_OFF;
+ u32 saveDVOA;
+ u32 saveDVOB;
+ u32 saveDVOC;
+ u32 savePP_ON;
+ u32 savePP_OFF;
+ u32 savePP_CONTROL;
+ u32 savePP_CYCLE;
+ u32 savePFIT_CONTROL;
+ u32 save_palette_a[256];
+ u32 save_palette_b[256];
+ u32 saveFBC_CFB_BASE;
+ u32 saveFBC_LL_BASE;
+ u32 saveFBC_CONTROL;
+ u32 saveFBC_CONTROL2;
+ u32 saveSWF0[16];
+ u32 saveSWF1[16];
+ u32 saveSWF2[3];
+ u8 saveMSR;
+ u8 saveSR[8];
+ u8 saveGR[25];
+ u8 saveAR_INDEX;
+ u8 saveAR[20];
+ u8 saveDACMASK;
+ u8 saveDACDATA[256*3]; /* 256 3-byte colors */
+ u8 saveCR[36];
} drm_i915_private_t;
extern struct drm_ioctl_desc i915_ioctls[];
@@ -122,6 +201,7 @@ extern int i915_max_ioctl;
/* i915_dma.c */
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern int i915_driver_unload(struct drm_device *);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
@@ -163,7 +243,7 @@ extern void i915_mem_release(struct drm_device * dev,
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
-#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
+#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
#define I915_VERBOSE 0
@@ -173,9 +253,8 @@ extern void i915_mem_release(struct drm_device * dev,
#define BEGIN_LP_RING(n) do { \
if (I915_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
- (n), __FUNCTION__); \
- if (dev_priv->ring.space < (n)*4) \
+ DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \
+ if (dev_priv->ring.space < (n)*4) \
i915_wait_ring(dev, (n)*4, __FUNCTION__); \
outcount = 0; \
outring = dev_priv->ring.tail; \
@@ -200,7 +279,51 @@ extern void i915_mem_release(struct drm_device * dev,
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+/* Extended config space */
+#define LBB 0xf4
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define VGA_MSR_MEM_EN (1<<1)
+#define VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define VGA_GR_MEM_READ_MODE_SHIFT 3
+#define VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define VGA_GR_MEM_MODE_MASK 0xc
+#define VGA_GR_MEM_MODE_SHIFT 2
+#define VGA_GR_MEM_A0000_AFFFF 0
+#define VGA_GR_MEM_A0000_BFFFF 1
+#define VGA_GR_MEM_B0000_B7FFF 2
+#define VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
@@ -215,9 +338,47 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define BB1_UNPROTECTED (0<<0)
#define BB2_END_ADDR_MASK (~0x7)
+/* Framebuffer compression */
+#define FBC_CFB_BASE 0x03200 /* 4k page aligned */
+#define FBC_LL_BASE 0x03204 /* 4k page aligned */
+#define FBC_CONTROL 0x03208
+#define FBC_CTL_EN (1<<31)
+#define FBC_CTL_PERIODIC (1<<30)
+#define FBC_CTL_INTERVAL_SHIFT (16)
+#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define FBC_CTL_STRIDE_SHIFT (5)
+#define FBC_CTL_FENCENO (1<<0)
+#define FBC_COMMAND 0x0320c
+#define FBC_CMD_COMPRESS (1<<0)
+#define FBC_STATUS 0x03210
+#define FBC_STAT_COMPRESSING (1<<31)
+#define FBC_STAT_COMPRESSED (1<<30)
+#define FBC_STAT_MODIFIED (1<<29)
+#define FBC_STAT_CURRENT_LINE (1<<0)
+#define FBC_CONTROL2 0x03214
+#define FBC_CTL_FENCE_DBL (0<<4)
+#define FBC_CTL_IDLE_IMM (0<<2)
+#define FBC_CTL_IDLE_FULL (1<<2)
+#define FBC_CTL_IDLE_LINE (2<<2)
+#define FBC_CTL_IDLE_DEBUG (3<<2)
+#define FBC_CTL_CPU_FENCE (1<<1)
+#define FBC_CTL_PLANEA (0<<0)
+#define FBC_CTL_PLANEB (1<<0)
+#define FBC_FENCE_OFF 0x0321b
+
+#define FBC_LL_SIZE (1536)
+#define FBC_LL_PAD (32)
+
+/* Interrupt bits:
+ */
+#define USER_INT_FLAG (1<<1)
+#define VSYNC_PIPEB_FLAG (1<<5)
+#define VSYNC_PIPEA_FLAG (1<<7)
+#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
+
#define I915REG_HWSTAM 0x02098
#define I915REG_INT_IDENTITY_R 0x020a4
-#define I915REG_INT_MASK_R 0x020a8
+#define I915REG_INT_MASK_R 0x020a8
#define I915REG_INT_ENABLE_R 0x020a0
#define I915REG_PIPEASTAT 0x70024
@@ -229,7 +390,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
#define SR01 1
-#define SR01_SCREEN_OFF (1<<5)
+#define SR01_SCREEN_OFF (1<<5)
#define PPCR 0x61204
#define PPCR_ON (1<<0)
@@ -249,31 +410,129 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define ADPA_DPMS_OFF (3<<10)
#define NOPID 0x2094
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+/* The binner has its own ring buffer:
+ */
+#define HWB_RING 0x2400
+
+#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0x0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START 0x08
+#define START_ADDR 0xFFFFF000
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+
+/* Instruction parser error reg:
+ */
+#define IPEIR 0x2088
+
+/* Scratch pad debug 0 reg:
+ */
+#define SCPD0 0x209c
+
+/* Error status reg:
+ */
+#define ESR 0x20b8
+
+/* Secondary DMA fetch address debug reg:
+ */
+#define DMA_FADD_S 0x20d4
+
+/* Cache mode 0 reg.
+ * - Manipulating render cache behaviour is central
+ * to the concept of zone rendering, tuning this reg can help avoid
+ * unnecessary render cache reads and even writes (for z/stencil)
+ * at beginning and end of scene.
+ *
+ * - To change a bit, write to this reg with a mask bit set and the
+ * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set.
+ */
+#define Cache_Mode_0 0x2120
+#define CM0_MASK_SHIFT 16
+#define CM0_IZ_OPT_DISABLE (1<<6)
+#define CM0_ZR_OPT_DISABLE (1<<5)
+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define CM0_COLOR_EVICT_DISABLE (1<<3)
+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
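The Cache_Mode_0 comment above describes a masked-write convention that is easy to get wrong, so here is a minimal sketch of a helper that sets or clears one CM0_* bit; it assumes the Cache_Mode_0 and CM0_* definitions above plus the I915_WRITE macro (and a drm_i915_private_t named dev_priv) are in scope, and the helper name is illustrative.

static void i915_cm0_set_bit(drm_i915_private_t *dev_priv, u32 bit, int enable)
{
	/* High word: unmask only the bit being changed; low word: its new value. */
	u32 val = bit << CM0_MASK_SHIFT;

	if (enable)
		val |= bit;

	I915_WRITE(Cache_Mode_0, val);
}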
+
+
+/* Graphics flush control. A CPU write flushes the GWB of all writes.
+ * The data is discarded.
+ */
+#define GFX_FLSH_CNTL 0x2170
+
+/* Binner control. Defines the location of the bin pointer list:
+ */
+#define BINCTL 0x2420
+#define BC_MASK (1 << 9)
+
+/* Binned scene info.
+ */
+#define BINSCENE 0x2428
+#define BS_OP_LOAD (1 << 8)
+#define BS_MASK (1 << 22)
+
+/* Bin command parser debug reg:
+ */
+#define BCPD 0x2480
+
+/* Bin memory control debug reg:
+ */
+#define BMCD 0x2484
+
+/* Bin data cache debug reg:
+ */
+#define BDCD 0x2488
+
+/* Binner pointer cache debug reg:
+ */
+#define BPCD 0x248c
+
+/* Binner scratch pad debug reg:
+ */
+#define BINSKPD 0x24f0
+
+/* HWB scratch pad debug reg:
+ */
+#define HWBSKPD 0x24f4
+
+/* Binner memory pool reg:
+ */
+#define BMP_BUFFER 0x2430
+#define BMP_PAGE_SIZE_4K (0 << 10)
+#define BMP_BUFFER_SIZE_SHIFT 1
+#define BMP_ENABLE (1 << 0)
+
+/* Get/put memory from the binner memory pool:
+ */
+#define BMP_GET 0x2438
+#define BMP_PUT 0x2440
+#define BMP_OFFSET_SHIFT 5
+
+/* 3D state packets:
+ */
+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+
#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0)
@@ -290,17 +549,19 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4)
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-#define MI_BATCH_BUFFER ((0x30<<23)|1)
-#define MI_BATCH_BUFFER_START (0x31<<23)
-#define MI_BATCH_BUFFER_END (0xA<<23)
+#define MI_BATCH_BUFFER ((0x30<<23)|1)
+#define MI_BATCH_BUFFER_START (0x31<<23)
+#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_WAIT_FOR_EVENT ((0x3<<23))
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
@@ -308,9 +569,538 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
#define ASYNC_FLIP (1<<22)
+#define DISPLAY_PLANE_A (0<<20)
+#define DISPLAY_PLANE_B (1<<20)
+
+/* Display regs */
+#define DSPACNTR 0x70180
+#define DSPBCNTR 0x71180
+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+
+/* Define the region of interest for the binner:
+ */
+#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-#define READ_BREADCRUMB(dev_priv) (((u32 *)(dev_priv->hw_status_page))[5])
+#define CMD_MI_FLUSH (0x04 << 23)
+#define MI_NO_WRITE_FLUSH (1 << 2)
+#define MI_READ_FLUSH (1 << 0)
+#define MI_EXE_FLUSH (1 << 1)
+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
+
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5])
+#define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg])
+
+#define BLC_PWM_CTL 0x61254
+#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
+
+#define BLC_PWM_CTL2 0x61250
+/**
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
+#define BLM_LEGACY_MODE (1 << 16)
+/**
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
+#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
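As the two comments above note, the frequency field holds only the top 15 bits of the cycle count (the real count is twice the field) and the duty-cycle field must not exceed it. A minimal sketch of how those rules combine when picking a brightness level, reading the fields out of BLC_PWM_CTL (whose shift definition sits directly under that register above); the function name and the percent parameter are illustrative.

static void i915_set_backlight_percent(drm_i915_private_t *dev_priv, u32 percent)
{
	u32 ctl = I915_READ(BLC_PWM_CTL);
	u32 field = (ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
		    BACKLIGHT_MODULATION_FREQ_SHIFT;
	u32 max = field * 2;			/* field is the top 15 bits */
	u32 duty = max * percent / 100;		/* must stay <= max */

	ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
	I915_WRITE(BLC_PWM_CTL, ctl | (duty & BACKLIGHT_DUTY_CYCLE_MASK));
}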
+
+#define I915_GCFGC 0xf0
+#define I915_LOW_FREQUENCY_ENABLE (1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define I915_DISPLAY_CLOCK_MASK (7 << 4)
+
+#define I855_HPLLCC 0xc0
+#define I855_CLOCK_CONTROL_MASK (3 << 0)
+#define I855_CLOCK_133_200 (0 << 0)
+#define I855_CLOCK_100_200 (1 << 0)
+#define I855_CLOCK_100_133 (2 << 0)
+#define I855_CLOCK_166_250 (3 << 0)
+
+/* p317, 319
+ */
+#define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */
+#define VCLK2_VCO_N 0x600a
+#define VCLK2_VCO_DIV_SEL 0x6012
+
+#define VCLK_DIVISOR_VGA0 0x6000
+#define VCLK_DIVISOR_VGA1 0x6004
+#define VCLK_POST_DIV 0x6010
+/** Selects a post divisor of 4 instead of 2. */
+# define VGA1_PD_P2_DIV_4 (1 << 15)
+/** Overrides the p2 post divisor field */
+# define VGA1_PD_P1_DIV_2 (1 << 13)
+# define VGA1_PD_P1_SHIFT 8
+/** P1 value is 2 greater than this field */
+# define VGA1_PD_P1_MASK (0x1f << 8)
+/** Selects a post divisor of 4 instead of 2. */
+# define VGA0_PD_P2_DIV_4 (1 << 7)
+/** Overrides the p2 post divisor field */
+# define VGA0_PD_P1_DIV_2 (1 << 5)
+# define VGA0_PD_P1_SHIFT 0
+/** P1 value is 2 greater than this field */
+# define VGA0_PD_P1_MASK (0x1f << 0)
+
+/* I830 CRTC registers */
+#define HTOTAL_A 0x60000
+#define HBLANK_A 0x60004
+#define HSYNC_A 0x60008
+#define VTOTAL_A 0x6000c
+#define VBLANK_A 0x60010
+#define VSYNC_A 0x60014
+#define PIPEASRC 0x6001c
+#define BCLRPAT_A 0x60020
+#define VSYNCSHIFT_A 0x60028
+
+#define HTOTAL_B 0x61000
+#define HBLANK_B 0x61004
+#define HSYNC_B 0x61008
+#define VTOTAL_B 0x6100c
+#define VBLANK_B 0x61010
+#define VSYNC_B 0x61014
+#define PIPEBSRC 0x6101c
+#define BCLRPAT_B 0x61020
+#define VSYNCSHIFT_B 0x61028
+
+#define PP_STATUS 0x61200
+# define PP_ON (1 << 31)
+/**
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+# define PP_READY (1 << 30)
+# define PP_SEQUENCE_NONE (0 << 28)
+# define PP_SEQUENCE_ON (1 << 28)
+# define PP_SEQUENCE_OFF (2 << 28)
+# define PP_SEQUENCE_MASK 0x30000000
+#define PP_CONTROL 0x61204
+# define POWER_TARGET_ON (1 << 0)
+
+#define LVDSPP_ON 0x61208
+#define LVDSPP_OFF 0x6120c
+#define PP_CYCLE 0x61210
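The PP_READY comment above says the bit only reports ready once the PLL, pipe and LVDS/DVO outputs the panel depends on are up, so switching the panel on is typically a write followed by a wait. A rough sketch under that reading; the helper name, poll count and delay interval are illustrative assumptions.

static int i915_panel_power_on(drm_i915_private_t *dev_priv)
{
	int i;

	I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | POWER_TARGET_ON);

	/* Wait for the power sequencer to report the panel fully up. */
	for (i = 0; i < 1000; i++) {
		if (I915_READ(PP_STATUS) & PP_READY)
			return 0;
		udelay(100);
	}

	return -EBUSY;
}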
+
+#define PFIT_CONTROL 0x61230
+# define PFIT_ENABLE (1 << 31)
+# define PFIT_PIPE_MASK (3 << 29)
+# define PFIT_PIPE_SHIFT 29
+# define VERT_INTERP_DISABLE (0 << 10)
+# define VERT_INTERP_BILINEAR (1 << 10)
+# define VERT_INTERP_MASK (3 << 10)
+# define VERT_AUTO_SCALE (1 << 9)
+# define HORIZ_INTERP_DISABLE (0 << 6)
+# define HORIZ_INTERP_BILINEAR (1 << 6)
+# define HORIZ_INTERP_MASK (3 << 6)
+# define HORIZ_AUTO_SCALE (1 << 5)
+# define PANEL_8TO6_DITHER_ENABLE (1 << 3)
+
+#define PFIT_PGM_RATIOS 0x61234
+# define PFIT_VERT_SCALE_MASK 0xfff00000
+# define PFIT_HORIZ_SCALE_MASK 0x0000fff0
+
+#define PFIT_AUTO_RATIOS 0x61238
+
+
+#define DPLL_A 0x06014
+#define DPLL_B 0x06018
+# define DPLL_VCO_ENABLE (1 << 31)
+# define DPLL_DVO_HIGH_SPEED (1 << 30)
+# define DPLL_SYNCLOCK_ENABLE (1 << 29)
+# define DPLL_VGA_MODE_DIS (1 << 28)
+# define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
+# define DPLLB_MODE_LVDS (2 << 26) /* i915 */
+# define DPLL_MODE_MASK (3 << 26)
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
+# define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
+# define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
+/**
+ * The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+/**
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
+# define DPLL_FPA01_P1_POST_DIV_SHIFT 16
+# define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
+# define PLL_REF_INPUT_DREFCLK (0 << 13)
+# define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */
+# define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */
+# define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+# define PLL_REF_INPUT_MASK (3 << 13)
+# define PLL_LOAD_PULSE_PHASE_SHIFT 9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6
+ */
+# define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+# define DISPLAY_RATE_SELECT_FPA1 (1 << 8)
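The two comments above give different P1 encodings for the i830 generation, which is easy to trip over when decoding a saved DPLL value. A minimal sketch of the DAC/serial case only, assuming the DPLL_FPA01_* and PLL_P1_DIVIDE_BY_TWO definitions above; the function name is illustrative and the one-hot LVDS encoding is left out.

static int i830_dpll_get_p1_dac(u32 dpll)
{
	if (dpll & PLL_P1_DIVIDE_BY_TWO)
		return 2;

	/* Otherwise P1 is two plus the bitfield value. */
	return ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
		DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
}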
+
+/**
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+# define SDVO_MULTIPLIER_MASK 0x000000ff
+# define SDVO_MULTIPLIER_SHIFT_HIRES 4
+# define SDVO_MULTIPLIER_SHIFT_VGA 0
+
+/** @defgroup DPLL_MD
+ * @{
+ */
+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD 0x0601c
+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD 0x06020
+/**
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1. Must be set to 1 pixel for SDVO.
+ */
+# define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000
+# define DPLL_MD_UDI_DIVIDER_SHIFT 24
+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+# define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000
+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16
+/**
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock. At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are fill.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.30 GHz. The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+# define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00
+# define DPLL_MD_UDI_MULTIPLIER_SHIFT 8
+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This best be set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
+/** @} */
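The multiplier comment above boils down to: the bus runs at ten times the DPLL clock, must reach roughly 1 GHz, and the field stores the chosen multiplier minus one, up to 5x. A small sketch of that calculation, assuming the DPLL_MD_UDI_MULTIPLIER_* definitions above; the function name and the kHz parameter are illustrative.

static u32 i9xx_sdvo_dpll_md(int dotclock_khz)
{
	int mult;

	/* Smallest multiplier that lifts the bus rate to ~1 GHz (10x the
	 * DPLL clock), capped at 5x as the comment above requires; a 65 MHz
	 * dotclock therefore picks 2x, matching the worked example above. */
	for (mult = 1; mult < 5; mult++)
		if (dotclock_khz * mult >= 100000)
			break;

	return ((u32)(mult - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT) &
	       DPLL_MD_UDI_MULTIPLIER_MASK;
}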
+
+#define DPLL_TEST 0x606c
+# define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
+# define DPLLB_TEST_SDVO_DIV_2 (1 << 22)
+# define DPLLB_TEST_SDVO_DIV_4 (2 << 22)
+# define DPLLB_TEST_SDVO_DIV_MASK (3 << 22)
+# define DPLLB_TEST_N_BYPASS (1 << 19)
+# define DPLLB_TEST_M_BYPASS (1 << 18)
+# define DPLLB_INPUT_BUFFER_ENABLE (1 << 16)
+# define DPLLA_TEST_N_BYPASS (1 << 3)
+# define DPLLA_TEST_M_BYPASS (1 << 2)
+# define DPLLA_INPUT_BUFFER_ENABLE (1 << 0)
+
+#define ADPA 0x61100
+#define ADPA_DAC_ENABLE (1<<31)
+#define ADPA_DAC_DISABLE 0
+#define ADPA_PIPE_SELECT_MASK (1<<30)
+#define ADPA_PIPE_A_SELECT 0
+#define ADPA_PIPE_B_SELECT (1<<30)
+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_SETS_HVPOLARITY 0
+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_VSYNC_CNTL_ENABLE 0
+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_HSYNC_CNTL_ENABLE 0
+#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define ADPA_VSYNC_ACTIVE_LOW 0
+#define ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define ADPA_HSYNC_ACTIVE_LOW 0
+
+#define FPA0 0x06040
+#define FPA1 0x06044
+#define FPB0 0x06048
+#define FPB1 0x0604c
+# define FP_N_DIV_MASK 0x003f0000
+# define FP_N_DIV_SHIFT 16
+# define FP_M1_DIV_MASK 0x00003f00
+# define FP_M1_DIV_SHIFT 8
+# define FP_M2_DIV_MASK 0x0000003f
+# define FP_M2_DIV_SHIFT 0
+
+
+#define PORT_HOTPLUG_EN 0x61110
+# define SDVOB_HOTPLUG_INT_EN (1 << 26)
+# define SDVOC_HOTPLUG_INT_EN (1 << 25)
+# define TV_HOTPLUG_INT_EN (1 << 18)
+# define CRT_HOTPLUG_INT_EN (1 << 9)
+# define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
+
+#define PORT_HOTPLUG_STAT 0x61114
+# define CRT_HOTPLUG_INT_STATUS (1 << 11)
+# define TV_HOTPLUG_INT_STATUS (1 << 10)
+# define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
+# define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
+# define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
+# define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
+# define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
+# define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
+
+#define SDVOB 0x61140
+#define SDVOC 0x61160
+#define SDVO_ENABLE (1 << 31)
+#define SDVO_PIPE_B_SELECT (1 << 30)
+#define SDVO_STALL_SELECT (1 << 29)
+#define SDVO_INTERRUPT_ENABLE (1 << 26)
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK (7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT 23
+#define SDVO_PHASE_SELECT_MASK (15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT (6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT (1 << 18)
+#define SDVOC_GANG_MODE (1 << 16)
+#define SDVO_BORDER_ENABLE (1 << 7)
+#define SDVOB_PCIE_CONCURRENCY (1 << 3)
+#define SDVO_DETECTED (1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK (1 << 17)
+
+/** @defgroup LVDS
+ * @{
+ */
+/**
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are force powered down by power sequencing.
+ */
+#define LVDS 0x61180
+/**
+ * Enables the LVDS port. This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+# define LVDS_PORT_EN (1 << 31)
+/** Selects pipe B for LVDS data. Must be set on pre-965. */
+# define LVDS_PIPEB_SELECT (1 << 30)
+
+/**
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+# define LVDS_A0A2_CLKA_POWER_MASK (3 << 8)
+# define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8)
+# define LVDS_A0A2_CLKA_POWER_UP (3 << 8)
+/**
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+# define LVDS_A3_POWER_MASK (3 << 6)
+# define LVDS_A3_POWER_DOWN (0 << 6)
+# define LVDS_A3_POWER_UP (3 << 6)
+/**
+ * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+# define LVDS_CLKB_POWER_MASK (3 << 4)
+# define LVDS_CLKB_POWER_DOWN (0 << 4)
+# define LVDS_CLKB_POWER_UP (3 << 4)
+
+/**
+ * Controls the B0-B3 data pairs. This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode. The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+# define LVDS_B0B3_POWER_MASK (3 << 2)
+# define LVDS_B0B3_POWER_DOWN (0 << 2)
+# define LVDS_B0B3_POWER_UP (3 << 2)
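Taken together, the comments above say that a single-channel 18 bpp panel needs only the A0-A2/CLKA pairs powered, that pipe B must be selected on pre-965 parts, and that the port enable has to be written before that pipe's DPLL comes up. A rough sketch of the resulting register value, assuming the LVDS_* definitions above and the I915_WRITE macro; the helper name is illustrative.

static void i915_enable_lvds_18bpp_pipeb(drm_i915_private_t *dev_priv)
{
	/* A3 (24 bpp LSBs), CLKB and B0-B3 stay powered down for a
	 * single-channel 18 bpp panel. */
	u32 lvds = LVDS_PORT_EN | LVDS_PIPEB_SELECT | LVDS_A0A2_CLKA_POWER_UP;

	/* Per the LVDS_PORT_EN comment, this must be written before the
	 * DPLL for this pipe is enabled. */
	I915_WRITE(LVDS, lvds);
}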
+
+#define PIPEACONF 0x70008
+#define PIPEACONF_ENABLE (1<<31)
+#define PIPEACONF_DISABLE 0
+#define PIPEACONF_DOUBLE_WIDE (1<<30)
+#define I965_PIPECONF_ACTIVE (1<<30)
+#define PIPEACONF_SINGLE_WIDE 0
+#define PIPEACONF_PIPE_UNLOCKED 0
+#define PIPEACONF_PIPE_LOCKED (1<<25)
+#define PIPEACONF_PALETTE 0
+#define PIPEACONF_GAMMA (1<<24)
+#define PIPECONF_FORCE_BORDER (1<<25)
+#define PIPECONF_PROGRESSIVE (0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+
+#define PIPEBCONF 0x71008
+#define PIPEBCONF_ENABLE (1<<31)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_DOUBLE_WIDE (1<<30)
+#define PIPEBCONF_DISABLE 0
+#define PIPEBCONF_GAMMA (1<<24)
+#define PIPEBCONF_PALETTE 0
+
+#define PIPEBGCMAXRED 0x71010
+#define PIPEBGCMAXGREEN 0x71014
+#define PIPEBGCMAXBLUE 0x71018
+#define PIPEBSTAT 0x71024
+#define PIPEBFRAMEHIGH 0x71040
+#define PIPEBFRAMEPIXEL 0x71044
+
+#define DSPACNTR 0x70180
+#define DSPBCNTR 0x71180
+#define DISPLAY_PLANE_ENABLE (1<<31)
+#define DISPLAY_PLANE_DISABLE 0
+#define DISPPLANE_GAMMA_ENABLE (1<<30)
+#define DISPPLANE_GAMMA_DISABLE 0
+#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
+#define DISPPLANE_8BPP (0x2<<26)
+#define DISPPLANE_15_16BPP (0x4<<26)
+#define DISPPLANE_16BPP (0x5<<26)
+#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
+#define DISPPLANE_32BPP (0x7<<26)
+#define DISPPLANE_STEREO_ENABLE (1<<25)
+#define DISPPLANE_STEREO_DISABLE 0
+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+#define DISPPLANE_SEL_PIPE_A 0
+#define DISPPLANE_SEL_PIPE_B (1<<24)
+#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
+#define DISPPLANE_SRC_KEY_DISABLE 0
+#define DISPPLANE_LINE_DOUBLE (1<<20)
+#define DISPPLANE_NO_LINE_DOUBLE 0
+#define DISPPLANE_STEREO_POLARITY_FIRST 0
+#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE 0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
+
+#define DSPABASE 0x70184
+#define DSPASTRIDE 0x70188
+
+#define DSPBBASE 0x71184
+#define DSPBADDR DSPBBASE
+#define DSPBSTRIDE 0x71188
+
+#define DSPAKEYVAL 0x70194
+#define DSPAKEYMASK 0x70198
+
+#define DSPAPOS 0x7018C /* reserved */
+#define DSPASIZE 0x70190
+#define DSPBPOS 0x7118C
+#define DSPBSIZE 0x71190
+
+#define DSPASURF 0x7019C
+#define DSPATILEOFF 0x701A4
+
+#define DSPBSURF 0x7119C
+#define DSPBTILEOFF 0x711A4
+
+#define VGACNTRL 0x71400
+# define VGA_DISP_DISABLE (1 << 31)
+# define VGA_2X_MODE (1 << 30)
+# define VGA_PIPE_B_SELECT (1 << 29)
+
+/*
+ * Some BIOS scratch area registers. The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+
+#define SWF0 0x71410
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF10 0x70410
+
+#define SWF30 0x72414
+
+/*
+ * Overlay registers. These are overlay registers accessed via MMIO.
+ * Those loaded via the overlay register page are defined in i830_video.c.
+ */
+#define OVADD 0x30000
+
+#define DOVSTA 0x30008
+#define OC_BUF (0x3<<20)
+
+#define OGAMC5 0x30010
+#define OGAMC4 0x30014
+#define OGAMC3 0x30018
+#define OGAMC2 0x3001c
+#define OGAMC1 0x30020
+#define OGAMC0 0x30024
+/*
+ * Palette registers
+ */
+#define PALETTE_A 0x0a000
+#define PALETTE_B 0x0a800
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I855(dev) ((dev)->pci_device == 0x3582)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+
+#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2)
+
+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
+ (dev)->pci_device == 0x2982 || \
+ (dev)->pci_device == 0x2992 || \
+ (dev)->pci_device == 0x29A2 || \
+ (dev)->pci_device == 0x2A02 || \
+ (dev)->pci_device == 0x2A12 || \
+ (dev)->pci_device == 0x2A42)
+
+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
+
+#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42)
+
+#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
+ (dev)->pci_device == 0x29B2 || \
+ (dev)->pci_device == 0x29D2)
+
+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+ IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
+
+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
+ IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
+
+#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index a443f4a202e3..92653b38e64c 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -276,7 +276,7 @@ static int i915_emit_irq(struct drm_device * dev)
i915_kernel_lost_context(dev);
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
@@ -291,7 +291,7 @@ static int i915_emit_irq(struct drm_device * dev)
OUT_RING(0);
OUT_RING(GFX_OP_USER_INTERRUPT);
ADVANCE_LP_RING();
-
+
return dev_priv->counter;
}
@@ -300,7 +300,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = 0;
- DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
+ DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
if (READ_BREADCRUMB(dev_priv) >= irq_nr)
@@ -312,8 +312,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
READ_BREADCRUMB(dev_priv) >= irq_nr);
if (ret == -EBUSY) {
- DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
- __FUNCTION__,
+ DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
}
@@ -329,14 +328,14 @@ static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequ
int ret = 0;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(counter))
- *sequence) <= (1<<23)));
-
+
*sequence = cur_vblank;
return ret;
@@ -365,7 +364,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -388,7 +387,7 @@ int i915_irq_wait(struct drm_device *dev, void *data,
drm_i915_irq_wait_t *irqwait = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -418,13 +417,12 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
drm_i915_vblank_pipe_t *pipe = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
- DRM_ERROR("%s called with invalid pipe 0x%x\n",
- __FUNCTION__, pipe->pipe);
+ DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
return -EINVAL;
}
@@ -443,7 +441,7 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
u16 flag;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -555,7 +553,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
- list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
+ list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
dev_priv->swaps_pending++;
spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
diff --git a/drivers/char/drm/i915_mem.c b/drivers/char/drm/i915_mem.c
index 56fb9b30a5d7..6126a60dc9cb 100644
--- a/drivers/char/drm/i915_mem.c
+++ b/drivers/char/drm/i915_mem.c
@@ -276,7 +276,7 @@ int i915_mem_alloc(struct drm_device *dev, void *data,
struct mem_block *block, **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -314,7 +314,7 @@ int i915_mem_free(struct drm_device *dev, void *data,
struct mem_block *block, **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -342,7 +342,7 @@ int i915_mem_init_heap(struct drm_device *dev, void *data,
struct mem_block **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -366,7 +366,7 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
struct mem_block **heap;
if ( !dev_priv ) {
- DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
+ DRM_ERROR( "called with no initialization\n" );
return -EINVAL;
}
@@ -375,7 +375,7 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
DRM_ERROR("get_heap failed");
return -EFAULT;
}
-
+
if (!*heap) {
DRM_ERROR("heap not initialized?");
return -EFAULT;
@@ -384,4 +384,3 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
i915_mem_takedown( heap );
return 0;
}
-
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index c567c34cda78..c1d12dbfa8d8 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -493,7 +493,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
dma_bs->agp_size);
return err;
}
-
+
dev_priv->agp_size = agp_size;
dev_priv->agp_handle = agp_req.handle;
@@ -550,7 +550,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
{
struct drm_map_list *_entry;
unsigned long agp_token = 0;
-
+
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == dev->agp_buffer_map)
agp_token = _entry->user_token;
@@ -964,7 +964,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
free_req.handle = dev_priv->agp_handle;
drm_agp_free(dev, &free_req);
-
+
dev_priv->agp_textures = NULL;
dev_priv->agp_size = 0;
dev_priv->agp_handle = 0;
@@ -998,7 +998,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
}
}
- return 0;
+ return err;
}
int mga_dma_init(struct drm_device *dev, void *data,
@@ -1050,7 +1050,7 @@ int mga_dma_flush(struct drm_device *dev, void *data,
#if MGA_DMA_DEBUG
int ret = mga_do_wait_for_idle(dev_priv);
if (ret < 0)
- DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
+ DRM_INFO("-EBUSY\n");
return ret;
#else
return mga_do_wait_for_idle(dev_priv);
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index cd94c04e31c0..f6ebd24bd587 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -216,8 +216,8 @@ static inline u32 _MGA_READ(u32 * addr)
#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
#endif
-#define DWGREG0 0x1c00
-#define DWGREG0_END 0x1dff
+#define DWGREG0 0x1c00
+#define DWGREG0_END 0x1dff
#define DWGREG1 0x2c00
#define DWGREG1_END 0x2dff
@@ -249,7 +249,7 @@ do { \
} else if ( dev_priv->prim.space < \
dev_priv->prim.high_mark ) { \
if ( MGA_DMA_DEBUG ) \
- DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
+ DRM_INFO( "wrap...\n"); \
return -EBUSY; \
} \
} \
@@ -260,7 +260,7 @@ do { \
if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
if ( MGA_DMA_DEBUG ) \
- DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
+ DRM_INFO( "wrap...\n"); \
return -EBUSY; \
} \
mga_do_dma_wrap_end( dev_priv ); \
@@ -280,8 +280,7 @@ do { \
#define BEGIN_DMA( n ) \
do { \
if ( MGA_VERBOSE ) { \
- DRM_INFO( "BEGIN_DMA( %d ) in %s\n", \
- (n), __FUNCTION__ ); \
+ DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \
DRM_INFO( " space=0x%x req=0x%Zx\n", \
dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
} \
@@ -292,7 +291,7 @@ do { \
#define BEGIN_DMA_WRAP() \
do { \
if ( MGA_VERBOSE ) { \
- DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ ); \
+ DRM_INFO( "BEGIN_DMA()\n" ); \
DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \
} \
prim = dev_priv->prim.start; \
@@ -311,7 +310,7 @@ do { \
#define FLUSH_DMA() \
do { \
if ( 0 ) { \
- DRM_INFO( "%s:\n", __FUNCTION__ ); \
+ DRM_INFO( "\n" ); \
DRM_INFO( " tail=0x%06x head=0x%06lx\n", \
dev_priv->prim.tail, \
MGA_READ( MGA_PRIMADDRESS ) - \
@@ -394,22 +393,22 @@ do { \
#define MGA_VINTCLR (1 << 4)
#define MGA_VINTEN (1 << 5)
-#define MGA_ALPHACTRL 0x2c7c
-#define MGA_AR0 0x1c60
-#define MGA_AR1 0x1c64
-#define MGA_AR2 0x1c68
-#define MGA_AR3 0x1c6c
-#define MGA_AR4 0x1c70
-#define MGA_AR5 0x1c74
-#define MGA_AR6 0x1c78
+#define MGA_ALPHACTRL 0x2c7c
+#define MGA_AR0 0x1c60
+#define MGA_AR1 0x1c64
+#define MGA_AR2 0x1c68
+#define MGA_AR3 0x1c6c
+#define MGA_AR4 0x1c70
+#define MGA_AR5 0x1c74
+#define MGA_AR6 0x1c78
#define MGA_CXBNDRY 0x1c80
-#define MGA_CXLEFT 0x1ca0
+#define MGA_CXLEFT 0x1ca0
#define MGA_CXRIGHT 0x1ca4
-#define MGA_DMAPAD 0x1c54
-#define MGA_DSTORG 0x2cb8
-#define MGA_DWGCTL 0x1c00
+#define MGA_DMAPAD 0x1c54
+#define MGA_DSTORG 0x2cb8
+#define MGA_DWGCTL 0x1c00
# define MGA_OPCOD_MASK (15 << 0)
# define MGA_OPCOD_TRAP (4 << 0)
# define MGA_OPCOD_TEXTURE_TRAP (6 << 0)
@@ -455,27 +454,27 @@ do { \
# define MGA_CLIPDIS (1 << 31)
#define MGA_DWGSYNC 0x2c4c
-#define MGA_FCOL 0x1c24
-#define MGA_FIFOSTATUS 0x1e10
-#define MGA_FOGCOL 0x1cf4
+#define MGA_FCOL 0x1c24
+#define MGA_FIFOSTATUS 0x1e10
+#define MGA_FOGCOL 0x1cf4
#define MGA_FXBNDRY 0x1c84
-#define MGA_FXLEFT 0x1ca8
+#define MGA_FXLEFT 0x1ca8
#define MGA_FXRIGHT 0x1cac
-#define MGA_ICLEAR 0x1e18
+#define MGA_ICLEAR 0x1e18
# define MGA_SOFTRAPICLR (1 << 0)
# define MGA_VLINEICLR (1 << 5)
-#define MGA_IEN 0x1e1c
+#define MGA_IEN 0x1e1c
# define MGA_SOFTRAPIEN (1 << 0)
# define MGA_VLINEIEN (1 << 5)
-#define MGA_LEN 0x1c5c
+#define MGA_LEN 0x1c5c
#define MGA_MACCESS 0x1c04
-#define MGA_PITCH 0x1c8c
-#define MGA_PLNWT 0x1c1c
-#define MGA_PRIMADDRESS 0x1e58
+#define MGA_PITCH 0x1c8c
+#define MGA_PLNWT 0x1c1c
+#define MGA_PRIMADDRESS 0x1e58
# define MGA_DMA_GENERAL (0 << 0)
# define MGA_DMA_BLIT (1 << 0)
# define MGA_DMA_VECTOR (2 << 0)
@@ -487,43 +486,43 @@ do { \
# define MGA_PRIMPTREN0 (1 << 0)
# define MGA_PRIMPTREN1 (1 << 1)
-#define MGA_RST 0x1e40
+#define MGA_RST 0x1e40
# define MGA_SOFTRESET (1 << 0)
# define MGA_SOFTEXTRST (1 << 1)
-#define MGA_SECADDRESS 0x2c40
-#define MGA_SECEND 0x2c44
-#define MGA_SETUPADDRESS 0x2cd0
-#define MGA_SETUPEND 0x2cd4
+#define MGA_SECADDRESS 0x2c40
+#define MGA_SECEND 0x2c44
+#define MGA_SETUPADDRESS 0x2cd0
+#define MGA_SETUPEND 0x2cd4
#define MGA_SGN 0x1c58
#define MGA_SOFTRAP 0x2c48
-#define MGA_SRCORG 0x2cb4
+#define MGA_SRCORG 0x2cb4
# define MGA_SRMMAP_MASK (1 << 0)
# define MGA_SRCMAP_FB (0 << 0)
# define MGA_SRCMAP_SYSMEM (1 << 0)
# define MGA_SRCACC_MASK (1 << 1)
# define MGA_SRCACC_PCI (0 << 1)
# define MGA_SRCACC_AGP (1 << 1)
-#define MGA_STATUS 0x1e14
+#define MGA_STATUS 0x1e14
# define MGA_SOFTRAPEN (1 << 0)
# define MGA_VSYNCPEN (1 << 4)
# define MGA_VLINEPEN (1 << 5)
# define MGA_DWGENGSTS (1 << 16)
# define MGA_ENDPRDMASTS (1 << 17)
#define MGA_STENCIL 0x2cc8
-#define MGA_STENCILCTL 0x2ccc
+#define MGA_STENCILCTL 0x2ccc
-#define MGA_TDUALSTAGE0 0x2cf8
-#define MGA_TDUALSTAGE1 0x2cfc
-#define MGA_TEXBORDERCOL 0x2c5c
-#define MGA_TEXCTL 0x2c30
+#define MGA_TDUALSTAGE0 0x2cf8
+#define MGA_TDUALSTAGE1 0x2cfc
+#define MGA_TEXBORDERCOL 0x2c5c
+#define MGA_TEXCTL 0x2c30
#define MGA_TEXCTL2 0x2c3c
# define MGA_DUALTEX (1 << 7)
# define MGA_G400_TC2_MAGIC (1 << 15)
# define MGA_MAP1_ENABLE (1 << 31)
-#define MGA_TEXFILTER 0x2c58
-#define MGA_TEXHEIGHT 0x2c2c
-#define MGA_TEXORG 0x2c24
+#define MGA_TEXFILTER 0x2c58
+#define MGA_TEXHEIGHT 0x2c2c
+#define MGA_TEXORG 0x2c24
# define MGA_TEXORGMAP_MASK (1 << 0)
# define MGA_TEXORGMAP_FB (0 << 0)
# define MGA_TEXORGMAP_SYSMEM (1 << 0)
@@ -534,45 +533,45 @@ do { \
#define MGA_TEXORG2 0x2ca8
#define MGA_TEXORG3 0x2cac
#define MGA_TEXORG4 0x2cb0
-#define MGA_TEXTRANS 0x2c34
-#define MGA_TEXTRANSHIGH 0x2c38
-#define MGA_TEXWIDTH 0x2c28
-
-#define MGA_WACCEPTSEQ 0x1dd4
-#define MGA_WCODEADDR 0x1e6c
-#define MGA_WFLAG 0x1dc4
-#define MGA_WFLAG1 0x1de0
+#define MGA_TEXTRANS 0x2c34
+#define MGA_TEXTRANSHIGH 0x2c38
+#define MGA_TEXWIDTH 0x2c28
+
+#define MGA_WACCEPTSEQ 0x1dd4
+#define MGA_WCODEADDR 0x1e6c
+#define MGA_WFLAG 0x1dc4
+#define MGA_WFLAG1 0x1de0
#define MGA_WFLAGNB 0x1e64
-#define MGA_WFLAGNB1 0x1e08
+#define MGA_WFLAGNB1 0x1e08
#define MGA_WGETMSB 0x1dc8
-#define MGA_WIADDR 0x1dc0
+#define MGA_WIADDR 0x1dc0
#define MGA_WIADDR2 0x1dd8
# define MGA_WMODE_SUSPEND (0 << 0)
# define MGA_WMODE_RESUME (1 << 0)
# define MGA_WMODE_JUMP (2 << 0)
# define MGA_WMODE_START (3 << 0)
# define MGA_WAGP_ENABLE (1 << 2)
-#define MGA_WMISC 0x1e70
+#define MGA_WMISC 0x1e70
# define MGA_WUCODECACHE_ENABLE (1 << 0)
# define MGA_WMASTER_ENABLE (1 << 1)
# define MGA_WCACHEFLUSH_ENABLE (1 << 3)
#define MGA_WVRTXSZ 0x1dcc
-#define MGA_YBOT 0x1c9c
-#define MGA_YDST 0x1c90
+#define MGA_YBOT 0x1c9c
+#define MGA_YDST 0x1c90
#define MGA_YDSTLEN 0x1c88
#define MGA_YDSTORG 0x1c94
-#define MGA_YTOP 0x1c98
+#define MGA_YTOP 0x1c98
-#define MGA_ZORG 0x1c0c
+#define MGA_ZORG 0x1c0c
/* This finishes the current batch of commands
*/
-#define MGA_EXEC 0x0100
+#define MGA_EXEC 0x0100
/* AGP PLL encoding (for G200 only).
*/
-#define MGA_AGP_PLL 0x1e4c
+#define MGA_AGP_PLL 0x1e4c
# define MGA_AGP2XPLL_DISABLE (0 << 0)
# define MGA_AGP2XPLL_ENABLE (1 << 0)
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 5ec8b61c5d45..d3f8aade07b3 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -150,8 +150,8 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
DMA_LOCALS;
-/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
-/* tex->texctl, tex->texctl2); */
+/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
+/* tex->texctl, tex->texctl2); */
BEGIN_DMA(6);
@@ -190,8 +190,8 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
DMA_LOCALS;
-/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
-/* tex->texctl, tex->texctl2); */
+/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
+/* tex->texctl, tex->texctl2); */
BEGIN_DMA(5);
@@ -256,7 +256,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
unsigned int pipe = sarea_priv->warp_pipe;
DMA_LOCALS;
-/* printk("mga_g400_emit_pipe %x\n", pipe); */
+/* printk("mga_g400_emit_pipe %x\n", pipe); */
BEGIN_DMA(10);
@@ -619,7 +619,7 @@ static void mga_dma_dispatch_swap(struct drm_device * dev)
FLUSH_DMA();
- DRM_DEBUG("%s... done.\n", __FUNCTION__);
+ DRM_DEBUG("... done.\n");
}
static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
@@ -631,7 +631,7 @@ static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * bu
u32 length = (u32) buf->used;
int i = 0;
DMA_LOCALS;
- DRM_DEBUG("vertex: buf=%d used=%d\n", buf->idx, buf->used);
+ DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
if (buf->used) {
buf_priv->dispatched = 1;
@@ -678,7 +678,7 @@ static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * b
u32 address = (u32) buf->bus_address;
int i = 0;
DMA_LOCALS;
- DRM_DEBUG("indices: buf=%d start=%d end=%d\n", buf->idx, start, end);
+ DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);
if (start != end) {
buf_priv->dispatched = 1;
@@ -955,7 +955,7 @@ static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *fi
#if 0
if (mga_do_wait_for_idle(dev_priv) < 0) {
if (MGA_DMA_DEBUG)
- DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
+ DRM_INFO("-EBUSY\n");
return -EBUSY;
}
#endif
@@ -1014,7 +1014,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
int value;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1046,7 +1046,7 @@ static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *fi
DMA_LOCALS;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1075,7 +1075,7 @@ file_priv)
u32 *fence = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
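
The debug-message hunks above (and the matching ones in the r128, radeon, savage and via files below) drop the explicit "%s", __FUNCTION__ pair from DRM_DEBUG() and DRM_ERROR() calls. In this tree the logging macros in drmP.h already prefix every message with the calling function's name, so passing it again only duplicated the prefix; the verbose DRM_INFO() sites in the ring macros simply drop the name. A minimal, hedged sketch of the macro shape this relies on (illustrative, not the exact drmP.h definition):

/*
 * Hedged sketch of the DRM debug macro the cleanup above depends on: the
 * caller's function name is added by the macro itself, so format strings
 * no longer need to pass __FUNCTION__ explicitly.  Kernel context assumed;
 * the real definition lives in drivers/char/drm/drmP.h and may differ in
 * detail from this illustration.
 */
#include <linux/kernel.h>

extern unsigned int drm_debug;		/* assumption: module-level debug toggle */

#define DRM_DEBUG_SKETCH(fmt, arg...)					\
	do {								\
		if (drm_debug)						\
			printk(KERN_DEBUG "[drm:%s] " fmt,		\
			       __FUNCTION__, ##arg);			\
	} while (0)

With that prefix in place, DRM_DEBUG("buf=%d used=%d\n", ...) already comes out as "[drm:mga_dma_dispatch_vertex] buf=... used=...", so the shortened format strings above lose no information.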
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index 7d550aba165e..892e0a589846 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -1,4 +1,4 @@
-/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
+/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
* Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
*/
/*
@@ -651,7 +651,7 @@ int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_pri
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
- DRM_DEBUG("%s while CCE running\n", __FUNCTION__);
+ DRM_DEBUG("while CCE running\n");
return 0;
}
@@ -710,7 +710,7 @@ int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_pri
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_DEBUG("%s called before init done\n", __FUNCTION__);
+ DRM_DEBUG("called before init done\n");
return -EINVAL;
}
diff --git a/drivers/char/drm/r128_drv.h b/drivers/char/drm/r128_drv.h
index 5041bd8dbed8..011105e51ac6 100644
--- a/drivers/char/drm/r128_drv.h
+++ b/drivers/char/drm/r128_drv.h
@@ -462,8 +462,7 @@ do { \
#define BEGIN_RING( n ) do { \
if ( R128_VERBOSE ) { \
- DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
- (n), __FUNCTION__ ); \
+ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
} \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
COMMIT_RING(); \
@@ -493,7 +492,7 @@ do { \
write * sizeof(u32) ); \
} \
if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
- DRM_ERROR( \
+ DRM_ERROR( \
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & tail_mask), \
write, __LINE__); \
diff --git a/drivers/char/drm/r128_state.c b/drivers/char/drm/r128_state.c
index b7f483cac6d4..51a9afce7b9b 100644
--- a/drivers/char/drm/r128_state.c
+++ b/drivers/char/drm/r128_state.c
@@ -42,7 +42,7 @@ static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
{
u32 aux_sc_cntl = 0x00000000;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
@@ -85,7 +85,7 @@ static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(2);
@@ -100,7 +100,7 @@ static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(13);
@@ -126,7 +126,7 @@ static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(3);
@@ -142,7 +142,7 @@ static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(5);
@@ -161,7 +161,7 @@ static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(2);
@@ -178,7 +178,7 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
int i;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
@@ -204,7 +204,7 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
int i;
RING_LOCALS;
- DRM_DEBUG(" %s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
@@ -226,7 +226,7 @@ static void r128_emit_state(drm_r128_private_t * dev_priv)
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
- DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty);
+ DRM_DEBUG("dirty=0x%08x\n", dirty);
if (dirty & R128_UPLOAD_CORE) {
r128_emit_core(dev_priv);
@@ -362,7 +362,7 @@ static void r128_cce_dispatch_clear(struct drm_device * dev,
unsigned int flags = clear->flags;
int i;
RING_LOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (dev_priv->page_flipping && dev_priv->current_page == 1) {
unsigned int tmp = flags;
@@ -466,7 +466,7 @@ static void r128_cce_dispatch_swap(struct drm_device * dev)
struct drm_clip_rect *pbox = sarea_priv->boxes;
int i;
RING_LOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
#if R128_PERFORMANCE_BOXES
/* Do some trivial performance monitoring...
@@ -528,8 +528,7 @@ static void r128_cce_dispatch_flip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __FUNCTION__,
+ DRM_DEBUG("page=%d pfCurrentPage=%d\n",
dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
#if R128_PERFORMANCE_BOXES
@@ -1156,7 +1155,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
int count, *x, *y;
int i, xbuf_size, ybuf_size;
RING_LOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
count = depth->n;
if (count > 4096 || count <= 0)
@@ -1226,7 +1225,7 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
RING_LOCALS;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
BEGIN_RING(33);
@@ -1309,7 +1308,7 @@ static int r128_do_cleanup_pageflip(struct drm_device * dev)
static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -1328,7 +1327,7 @@ static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *fi
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -1356,7 +1355,7 @@ static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1412,7 +1411,7 @@ static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1557,11 +1556,11 @@ static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
- DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
+ DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
indirect->idx, indirect->start, indirect->end,
indirect->discard);
@@ -1622,7 +1621,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
int value;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
index 59b2944811c5..0f4afc44245c 100644
--- a/drivers/char/drm/r300_cmdbuf.c
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -77,23 +77,31 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
return -EFAULT;
}
- box.x1 =
- (box.x1 +
- R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
- box.y1 =
- (box.y1 +
- R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
- box.x2 =
- (box.x2 +
- R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
- box.y2 =
- (box.y2 +
- R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+ box.x1 = (box.x1) &
+ R300_CLIPRECT_MASK;
+ box.y1 = (box.y1) &
+ R300_CLIPRECT_MASK;
+ box.x2 = (box.x2) &
+ R300_CLIPRECT_MASK;
+ box.y2 = (box.y2) &
+ R300_CLIPRECT_MASK;
+ } else {
+ box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
+ R300_CLIPRECT_MASK;
+ }
OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
(box.y1 << R300_CLIPRECT_Y_SHIFT));
OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
(box.y2 << R300_CLIPRECT_Y_SHIFT));
+
}
OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
@@ -133,9 +141,11 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
static u8 r300_reg_flags[0x10000 >> 2];
-void r300_init_reg_flags(void)
+void r300_init_reg_flags(struct drm_device *dev)
{
int i;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
memset(r300_reg_flags, 0, 0x10000 >> 2);
#define ADD_RANGE_MARK(reg, count,mark) \
for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
@@ -230,6 +240,9 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+ ADD_RANGE(0x4074, 16);
+ }
}
static __inline__ int r300_check_range(unsigned reg, int count)
@@ -486,7 +499,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
if (cmd[0] & 0x8000) {
u32 offset;
- if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[2] << 10;
ret = !radeon_check_offset(dev_priv, offset);
@@ -504,7 +517,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
return -EINVAL;
}
-
+
}
}
@@ -723,54 +736,54 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
u32 *ref_age_base;
u32 i, buf_idx, h_pending;
RING_LOCALS;
-
- if (cmdbuf->bufsz <
+
+ if (cmdbuf->bufsz <
(sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
return -EINVAL;
}
-
+
if (header.scratch.reg >= 5) {
return -EINVAL;
}
-
+
dev_priv->scratch_ages[header.scratch.reg]++;
-
+
ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf);
-
+
cmdbuf->buf += sizeof(u64);
cmdbuf->bufsz -= sizeof(u64);
-
+
for (i=0; i < header.scratch.n_bufs; i++) {
buf_idx = *(u32 *)cmdbuf->buf;
buf_idx *= 2; /* 8 bytes per buf */
-
+
if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
return -EINVAL;
}
-
+
if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
return -EINVAL;
}
-
+
if (h_pending == 0) {
return -EINVAL;
}
-
+
h_pending--;
-
+
if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
return -EINVAL;
}
-
+
cmdbuf->buf += sizeof(buf_idx);
cmdbuf->bufsz -= sizeof(buf_idx);
}
-
+
BEGIN_RING(2);
OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
ADVANCE_RING();
-
+
return 0;
}
@@ -919,7 +932,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
goto cleanup;
}
break;
-
+
default:
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,
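
The r300_cmdbuf.c changes above do two things for the newer parts: RV515-and-later chips skip the R300_CLIPRECT_OFFSET bias when emitting cliprects, and r300_init_reg_flags() now takes the device so it can whitelist one extra register range (0x4074, 16 dwords) for those families. The whitelist itself is the r300_reg_flags table filled by the ADD_RANGE_MARK macro visible in the hunk. A standalone sketch of the same idea, with illustrative names rather than the driver's own symbols:

/*
 * Hedged sketch of the command-checker whitelist used above: one flag byte
 * per register (indexed by reg >> 2), with extra ranges enabled only for
 * newer chip families.  Names and the sample ranges are illustrative.
 */
#include <string.h>

#define SKETCH_NREGS	(0x10000 >> 2)
#define SKETCH_SAFE	1

static unsigned char sketch_reg_flags[SKETCH_NREGS];

static void sketch_add_range(unsigned int reg, unsigned int count,
			     unsigned char mark)
{
	unsigned int i;

	for (i = reg >> 2; i < (reg >> 2) + count; i++)
		sketch_reg_flags[i] |= mark;
}

static void sketch_init_reg_flags(int is_rv515_or_later)
{
	memset(sketch_reg_flags, 0, sizeof(sketch_reg_flags));
	sketch_add_range(0x2080, 1, SKETCH_SAFE);	/* always allowed (example) */
	if (is_rv515_or_later)
		sketch_add_range(0x4074, 16, SKETCH_SAFE);	/* RV515+ only */
}

static int sketch_check_range(unsigned int reg, unsigned int count)
{
	unsigned int i;

	if ((reg & ~0xffffU) || reg + (count << 2) > 0x10000)
		return -1;
	for (i = reg >> 2; i < (reg >> 2) + count; i++)
		if (!(sketch_reg_flags[i] & SKETCH_SAFE))
			return -1;	/* touches a non-whitelisted register */
	return 0;
}

Every register a submitted command stream touches must be marked in the table, so widening support to a new family reduces to adding ranges in the init function, which is exactly what the RV515 branch above does.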
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
index fa194a46c1e4..8f664af9c4a4 100644
--- a/drivers/char/drm/r300_reg.h
+++ b/drivers/char/drm/r300_reg.h
@@ -853,13 +853,13 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_TX_FORMAT_W8Z8Y8X8 0xC
# define R300_TX_FORMAT_W2Z10Y10X10 0xD
# define R300_TX_FORMAT_W16Z16Y16X16 0xE
-# define R300_TX_FORMAT_DXT1 0xF
-# define R300_TX_FORMAT_DXT3 0x10
-# define R300_TX_FORMAT_DXT5 0x11
+# define R300_TX_FORMAT_DXT1 0xF
+# define R300_TX_FORMAT_DXT3 0x10
+# define R300_TX_FORMAT_DXT5 0x11
# define R300_TX_FORMAT_D3DMFT_CxV8U8 0x12 /* no swizzle */
-# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
-# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
-# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
+# define R300_TX_FORMAT_A8R8G8B8 0x13 /* no swizzle */
+# define R300_TX_FORMAT_B8G8_B8G8 0x14 /* no swizzle */
+# define R300_TX_FORMAT_G8R8_G8B8 0x15 /* no swizzle */
/* 0x16 - some 16 bit green format.. ?? */
# define R300_TX_FORMAT_UNK25 (1 << 25) /* no swizzle */
# define R300_TX_FORMAT_CUBIC_MAP (1 << 26)
@@ -867,19 +867,19 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
/* gap */
/* Floating point formats */
/* Note - hardware supports both 16 and 32 bit floating point */
-# define R300_TX_FORMAT_FL_I16 0x18
-# define R300_TX_FORMAT_FL_I16A16 0x19
+# define R300_TX_FORMAT_FL_I16 0x18
+# define R300_TX_FORMAT_FL_I16A16 0x19
# define R300_TX_FORMAT_FL_R16G16B16A16 0x1A
-# define R300_TX_FORMAT_FL_I32 0x1B
-# define R300_TX_FORMAT_FL_I32A32 0x1C
+# define R300_TX_FORMAT_FL_I32 0x1B
+# define R300_TX_FORMAT_FL_I32A32 0x1C
# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
/* alpha modes, convenience mostly */
/* if you have alpha, pick constant appropriate to the
number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
-# define R300_TX_FORMAT_ALPHA_1CH 0x000
-# define R300_TX_FORMAT_ALPHA_2CH 0x200
-# define R300_TX_FORMAT_ALPHA_4CH 0x600
-# define R300_TX_FORMAT_ALPHA_NONE 0xA00
+# define R300_TX_FORMAT_ALPHA_1CH 0x000
+# define R300_TX_FORMAT_ALPHA_2CH 0x200
+# define R300_TX_FORMAT_ALPHA_4CH 0x600
+# define R300_TX_FORMAT_ALPHA_NONE 0xA00
/* Swizzling */
/* constants */
# define R300_TX_FORMAT_X 0
@@ -1360,11 +1360,11 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
# define R300_RB3D_Z_DISABLED_2 0x00000014
# define R300_RB3D_Z_TEST 0x00000012
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
-# define R300_RB3D_Z_WRITE_ONLY 0x00000006
+# define R300_RB3D_Z_WRITE_ONLY 0x00000006
# define R300_RB3D_Z_TEST 0x00000012
# define R300_RB3D_Z_TEST_AND_WRITE 0x00000016
-# define R300_RB3D_Z_WRITE_ONLY 0x00000006
+# define R300_RB3D_Z_WRITE_ONLY 0x00000006
# define R300_RB3D_STENCIL_ENABLE 0x00000001
#define R300_RB3D_ZSTENCIL_CNTL_1 0x4F04
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 24fca8ec1379..5dc799ab86b8 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -816,6 +816,46 @@ static const u32 R300_cp_microcode[][2] = {
{0000000000, 0000000000},
};
+static u32 RADEON_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+ u32 ret;
+ RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
+ ret = RADEON_READ(R520_MC_IND_DATA);
+ RADEON_WRITE(R520_MC_IND_INDEX, 0);
+ return ret;
+}
+
+u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
+{
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+ return RADEON_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+ return RADEON_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
+ else
+ return RADEON_READ(RADEON_MC_FB_LOCATION);
+}
+
+static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
+{
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+ RADEON_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+ RADEON_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
+ else
+ RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
+}
+
+static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
+{
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+ RADEON_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
+ else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+ RADEON_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
+ else
+ RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
+}
+
static int RADEON_READ_PLL(struct drm_device * dev, int addr)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -824,7 +864,7 @@ static int RADEON_READ_PLL(struct drm_device * dev, int addr)
return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
}
-static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
+static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
{
RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
return RADEON_READ(RADEON_PCIE_DATA);
@@ -1074,41 +1114,43 @@ static int radeon_do_engine_reset(struct drm_device * dev)
radeon_do_pixcache_flush(dev_priv);
- clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
- mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
-
- RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
- RADEON_FORCEON_MCLKA |
- RADEON_FORCEON_MCLKB |
- RADEON_FORCEON_YCLKA |
- RADEON_FORCEON_YCLKB |
- RADEON_FORCEON_MC |
- RADEON_FORCEON_AIC));
-
- rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
-
- RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
- RADEON_SOFT_RESET_CP |
- RADEON_SOFT_RESET_HI |
- RADEON_SOFT_RESET_SE |
- RADEON_SOFT_RESET_RE |
- RADEON_SOFT_RESET_PP |
- RADEON_SOFT_RESET_E2 |
- RADEON_SOFT_RESET_RB));
- RADEON_READ(RADEON_RBBM_SOFT_RESET);
- RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
- ~(RADEON_SOFT_RESET_CP |
- RADEON_SOFT_RESET_HI |
- RADEON_SOFT_RESET_SE |
- RADEON_SOFT_RESET_RE |
- RADEON_SOFT_RESET_PP |
- RADEON_SOFT_RESET_E2 |
- RADEON_SOFT_RESET_RB)));
- RADEON_READ(RADEON_RBBM_SOFT_RESET);
-
- RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
- RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
- RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
+ clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
+ mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
+
+ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
+ RADEON_FORCEON_MCLKA |
+ RADEON_FORCEON_MCLKB |
+ RADEON_FORCEON_YCLKA |
+ RADEON_FORCEON_YCLKB |
+ RADEON_FORCEON_MC |
+ RADEON_FORCEON_AIC));
+
+ rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
+ RADEON_SOFT_RESET_CP |
+ RADEON_SOFT_RESET_HI |
+ RADEON_SOFT_RESET_SE |
+ RADEON_SOFT_RESET_RE |
+ RADEON_SOFT_RESET_PP |
+ RADEON_SOFT_RESET_E2 |
+ RADEON_SOFT_RESET_RB));
+ RADEON_READ(RADEON_RBBM_SOFT_RESET);
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
+ ~(RADEON_SOFT_RESET_CP |
+ RADEON_SOFT_RESET_HI |
+ RADEON_SOFT_RESET_SE |
+ RADEON_SOFT_RESET_RE |
+ RADEON_SOFT_RESET_PP |
+ RADEON_SOFT_RESET_E2 |
+ RADEON_SOFT_RESET_RB)));
+ RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+ RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
+ RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
+ RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
+ }
/* Reset the CP ring */
radeon_do_cp_reset(dev_priv);
@@ -1127,21 +1169,21 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
{
u32 ring_start, cur_read_ptr;
u32 tmp;
-
+
/* Initialize the memory controller. With new memory map, the fb location
* is not changed, it should have been properly initialized already. Part
* of the problem is that the code below is bogus, assuming the GART is
* always appended to the fb which is not necessarily the case
*/
if (!dev_priv->new_memmap)
- RADEON_WRITE(RADEON_MC_FB_LOCATION,
+ radeon_write_fb_location(dev_priv,
((dev_priv->gart_vm_start - 1) & 0xffff0000)
| (dev_priv->fb_location >> 16));
#if __OS_HAS_AGP
if (dev_priv->flags & RADEON_IS_AGP) {
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
- RADEON_WRITE(RADEON_MC_AGP_LOCATION,
+ radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16)));
@@ -1190,9 +1232,15 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
/* Set ring buffer size */
#ifdef __BIG_ENDIAN
RADEON_WRITE(RADEON_CP_RB_CNTL,
- dev_priv->ring.size_l2qw | RADEON_BUF_SWAP_32BIT);
+ RADEON_BUF_SWAP_32BIT |
+ (dev_priv->ring.fetch_size_l2ow << 18) |
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
#else
- RADEON_WRITE(RADEON_CP_RB_CNTL, dev_priv->ring.size_l2qw);
+ RADEON_WRITE(RADEON_CP_RB_CNTL,
+ (dev_priv->ring.fetch_size_l2ow << 18) |
+ (dev_priv->ring.rptr_update_l2qw << 8) |
+ dev_priv->ring.size_l2qw);
#endif
/* Start with assuming that writeback doesn't work */
@@ -1299,7 +1347,7 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start);
dev_priv->gart_size = 32*1024*1024;
- RADEON_WRITE(RADEON_MC_AGP_LOCATION,
+ radeon_write_agp_location(dev_priv,
(((dev_priv->gart_vm_start - 1 +
dev_priv->gart_size) & 0xffff0000) |
(dev_priv->gart_vm_start >> 16)));
@@ -1333,7 +1381,7 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
dev_priv->gart_vm_start +
dev_priv->gart_size - 1);
- RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */
+ radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
RADEON_PCIE_TX_GART_EN);
@@ -1358,7 +1406,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
return;
}
- tmp = RADEON_READ(RADEON_AIC_CNTL);
+ tmp = RADEON_READ(RADEON_AIC_CNTL);
if (on) {
RADEON_WRITE(RADEON_AIC_CNTL,
@@ -1376,7 +1424,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
/* Turn off AGP aperture -- is this required for PCI GART?
*/
- RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0); /* ?? */
+ radeon_write_agp_location(dev_priv, 0xffffffc0);
RADEON_WRITE(RADEON_AGP_COMMAND, 0); /* clear AGP_COMMAND */
} else {
RADEON_WRITE(RADEON_AIC_CNTL,
@@ -1581,10 +1629,9 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
dev->agp_buffer_map->handle);
}
- dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION)
- & 0xffff) << 16;
- dev_priv->fb_size =
- ((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000)
+ dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
+ dev_priv->fb_size =
+ ((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
- dev_priv->fb_location;
dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
@@ -1630,7 +1677,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
((base + dev_priv->gart_size) & 0xfffffffful) < base)
base = dev_priv->fb_location
- dev_priv->gart_size;
- }
+ }
dev_priv->gart_vm_start = base & 0xffc00000u;
if (dev_priv->gart_vm_start != base)
DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
@@ -1663,6 +1710,11 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
dev_priv->ring.size = init->ring_size;
dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+ dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
+ dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+
+ dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
+ dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
@@ -1830,7 +1882,7 @@ int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_pri
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (init->func == RADEON_INIT_R300_CP)
- r300_init_reg_flags();
+ r300_init_reg_flags(dev);
switch (init->func) {
case RADEON_INIT_CP:
@@ -1852,12 +1904,12 @@ int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_pr
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cp_running) {
- DRM_DEBUG("%s while CP running\n", __FUNCTION__);
+ DRM_DEBUG("while CP running\n");
return 0;
}
if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
- DRM_DEBUG("%s called with bogus CP mode (%d)\n",
- __FUNCTION__, dev_priv->cp_mode);
+ DRM_DEBUG("called with bogus CP mode (%d)\n",
+ dev_priv->cp_mode);
return 0;
}
@@ -1962,7 +2014,7 @@ int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_pr
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_DEBUG("%s called before init done\n", __FUNCTION__);
+ DRM_DEBUG("called before init done\n");
return -EINVAL;
}
@@ -2239,6 +2291,10 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
case CHIP_R350:
case CHIP_R420:
case CHIP_RV410:
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV570:
+ case CHIP_R580:
dev_priv->flags |= RADEON_HAS_HIERZ;
break;
default:
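
The radeon_cp.c hunks above are the core of the R5xx bring-up: the framebuffer and AGP aperture locations move behind an index/data window (R520_MC_IND_INDEX / R520_MC_IND_DATA), and radeon_read_fb_location(), radeon_write_fb_location() and radeon_write_agp_location() pick the direct or indirect path per chip family. A standalone sketch of that index/data idiom, mirroring the field values used in the patch; mmio_read32() and mmio_write32() are hypothetical stand-ins for the driver's RADEON_READ()/RADEON_WRITE():

#include <stdint.h>

/*
 * Hedged sketch of the indirect memory-controller access pattern added
 * above.  The 0x7f0000 / 0xff0000 index fields are copied from
 * RADEON_READ_MCIND() and RADEON_WRITE_MCIND() in the hunks; the MMIO
 * helpers are assumptions, not driver API.
 */
#define MC_IND_INDEX	0x70
#define MC_IND_DATA	0x74

extern uint32_t mmio_read32(uint32_t reg);
extern void mmio_write32(uint32_t reg, uint32_t val);

static uint32_t mc_ind_read(uint32_t addr)
{
	uint32_t val;

	mmio_write32(MC_IND_INDEX, 0x7f0000 | (addr & 0xff));	/* select MC reg */
	val = mmio_read32(MC_IND_DATA);				/* read it back */
	mmio_write32(MC_IND_INDEX, 0);				/* close the window */
	return val;
}

static void mc_ind_write(uint32_t addr, uint32_t val)
{
	mmio_write32(MC_IND_INDEX, 0xff0000 | (addr & 0xff));
	mmio_write32(MC_IND_DATA, val);
	mmio_write32(MC_IND_INDEX, 0);	/* leave the index in a known state */
}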
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index 5a8e23f916fc..71e5b21fad2c 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -223,10 +223,10 @@ typedef union {
#define R300_CMD_CP_DELAY 5
#define R300_CMD_DMA_DISCARD 6
#define R300_CMD_WAIT 7
-# define R300_WAIT_2D 0x1
-# define R300_WAIT_3D 0x2
-# define R300_WAIT_2D_CLEAN 0x3
-# define R300_WAIT_3D_CLEAN 0x4
+# define R300_WAIT_2D 0x1
+# define R300_WAIT_3D 0x2
+# define R300_WAIT_2D_CLEAN 0x3
+# define R300_WAIT_3D_CLEAN 0x4
#define R300_CMD_SCRATCH 8
typedef union {
@@ -656,6 +656,7 @@ typedef struct drm_radeon_indirect {
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
+#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
typedef struct drm_radeon_getparam {
int param;
@@ -722,7 +723,7 @@ typedef struct drm_radeon_surface_free {
unsigned int address;
} drm_radeon_surface_free_t;
-#define DRM_RADEON_VBLANK_CRTC1 1
-#define DRM_RADEON_VBLANK_CRTC2 2
+#define DRM_RADEON_VBLANK_CRTC1 1
+#define DRM_RADEON_VBLANK_CRTC2 2
#endif
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index bfbb60a9298c..4434332c79bc 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -123,6 +123,12 @@ enum radeon_family {
CHIP_R420,
CHIP_RV410,
CHIP_RS400,
+ CHIP_RV515,
+ CHIP_R520,
+ CHIP_RV530,
+ CHIP_RV560,
+ CHIP_RV570,
+ CHIP_R580,
CHIP_LAST,
};
@@ -166,6 +172,12 @@ typedef struct drm_radeon_ring_buffer {
int size;
int size_l2qw;
+ int rptr_update; /* Double Words */
+ int rptr_update_l2qw; /* log2 Quad Words */
+
+ int fetch_size; /* Double Words */
+ int fetch_size_l2ow; /* log2 Oct Words */
+
u32 tail;
u32 tail_mask;
int space;
@@ -336,6 +348,7 @@ extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file
extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
extern void radeon_freelist_reset(struct drm_device * dev);
extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
@@ -382,7 +395,7 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
/* r300_cmdbuf.c */
-extern void r300_init_reg_flags(void);
+extern void r300_init_reg_flags(struct drm_device *dev);
extern int r300_do_cp_cmdbuf(struct drm_device * dev,
struct drm_file *file_priv,
@@ -429,7 +442,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define RADEON_PCIE_INDEX 0x0030
#define RADEON_PCIE_DATA 0x0034
#define RADEON_PCIE_TX_GART_CNTL 0x10
-# define RADEON_PCIE_TX_GART_EN (1 << 0)
+# define RADEON_PCIE_TX_GART_EN (1 << 0)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0<<1)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO (1<<1)
# define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD (3<<1)
@@ -439,7 +452,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
# define RADEON_PCIE_TX_GART_INVALIDATE_TLB (1<<8)
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
-#define RADEON_PCIE_TX_GART_BASE 0x13
+#define RADEON_PCIE_TX_GART_BASE 0x13
#define RADEON_PCIE_TX_GART_START_LO 0x14
#define RADEON_PCIE_TX_GART_START_HI 0x15
#define RADEON_PCIE_TX_GART_END_LO 0x16
@@ -454,6 +467,16 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define RADEON_IGPGART_ENABLE 0x38
#define RADEON_IGPGART_UNK_39 0x39
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1<<24)
+#define R520_MC_IND_DATA 0x74
+
+#define RV515_MC_FB_LOCATION 0x01
+#define RV515_MC_AGP_LOCATION 0x02
+
+#define R520_MC_FB_LOCATION 0x04
+#define R520_MC_AGP_LOCATION 0x05
+
#define RADEON_MPP_TB_CONFIG 0x01c0
#define RADEON_MEM_CNTL 0x0140
#define RADEON_MEM_SDRAM_MODE_REG 0x0158
@@ -512,12 +535,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
#define RADEON_GEN_INT_STATUS 0x0044
# define RADEON_CRTC_VBLANK_STAT (1 << 0)
-# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
+# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
-# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
+# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
# define RADEON_GUI_IDLE_INT_TEST_ACK (1 << 19)
# define RADEON_SW_INT_TEST (1 << 25)
-# define RADEON_SW_INT_TEST_ACK (1 << 25)
+# define RADEON_SW_INT_TEST_ACK (1 << 25)
# define RADEON_SW_INT_FIRE (1 << 26)
#define RADEON_HOST_PATH_CNTL 0x0130
@@ -615,9 +638,51 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
# define RADEON_SOFT_RESET_E2 (1 << 5)
# define RADEON_SOFT_RESET_RB (1 << 6)
# define RADEON_SOFT_RESET_HDP (1 << 7)
+/*
+ * 6:0 Available slots in the FIFO
+ * 8 Host Interface active
+ * 9 CP request active
+ * 10 FIFO request active
+ * 11 Host Interface retry active
+ * 12 CP retry active
+ * 13 FIFO retry active
+ * 14 FIFO pipeline busy
+ * 15 Event engine busy
+ * 16 CP command stream busy
+ * 17 2D engine busy
+ * 18 2D portion of render backend busy
+ * 20 3D setup engine busy
+ * 26 GA engine busy
+ * 27 CBA 2D engine busy
+ * 31 2D engine busy or 3D engine busy or FIFO not empty or CP busy or
+ * command stream queue not empty or Ring Buffer not empty
+ */
#define RADEON_RBBM_STATUS 0x0e40
+/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register. */
+/* #define RADEON_RBBM_STATUS 0x1740 */
+/* bits 6:0 are dword slots available in the cmd fifo */
# define RADEON_RBBM_FIFOCNT_MASK 0x007f
-# define RADEON_RBBM_ACTIVE (1 << 31)
+# define RADEON_HIRQ_ON_RBB (1 << 8)
+# define RADEON_CPRQ_ON_RBB (1 << 9)
+# define RADEON_CFRQ_ON_RBB (1 << 10)
+# define RADEON_HIRQ_IN_RTBUF (1 << 11)
+# define RADEON_CPRQ_IN_RTBUF (1 << 12)
+# define RADEON_CFRQ_IN_RTBUF (1 << 13)
+# define RADEON_PIPE_BUSY (1 << 14)
+# define RADEON_ENG_EV_BUSY (1 << 15)
+# define RADEON_CP_CMDSTRM_BUSY (1 << 16)
+# define RADEON_E2_BUSY (1 << 17)
+# define RADEON_RB2D_BUSY (1 << 18)
+# define RADEON_RB3D_BUSY (1 << 19) /* not used on r300 */
+# define RADEON_VAP_BUSY (1 << 20)
+# define RADEON_RE_BUSY (1 << 21) /* not used on r300 */
+# define RADEON_TAM_BUSY (1 << 22) /* not used on r300 */
+# define RADEON_TDM_BUSY (1 << 23) /* not used on r300 */
+# define RADEON_PB_BUSY (1 << 24) /* not used on r300 */
+# define RADEON_TIM_BUSY (1 << 25) /* not used on r300 */
+# define RADEON_GA_BUSY (1 << 26)
+# define RADEON_CBA2D_BUSY (1 << 27)
+# define RADEON_RBBM_ACTIVE (1 << 31)
#define RADEON_RE_LINE_PATTERN 0x1cd0
#define RADEON_RE_MISC 0x26c4
#define RADEON_RE_TOP_LEFT 0x26c0
@@ -1004,6 +1069,13 @@ do { \
RADEON_WRITE( RADEON_PCIE_DATA, (val) ); \
} while (0)
+#define RADEON_WRITE_MCIND( addr, val ) \
+ do { \
+ RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff)); \
+ RADEON_WRITE(R520_MC_IND_DATA, (val)); \
+ RADEON_WRITE(R520_MC_IND_INDEX, 0); \
+ } while (0)
+
#define CP_PACKET0( reg, n ) \
(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
#define CP_PACKET0_TABLE( reg, n ) \
@@ -1114,8 +1186,7 @@ do { \
#define BEGIN_RING( n ) do { \
if ( RADEON_VERBOSE ) { \
- DRM_INFO( "BEGIN_RING( %d ) in %s\n", \
- n, __FUNCTION__ ); \
+ DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
} \
if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
COMMIT_RING(); \
@@ -1133,7 +1204,7 @@ do { \
write, dev_priv->ring.tail ); \
} \
if (((dev_priv->ring.tail + _nr) & mask) != write) { \
- DRM_ERROR( \
+ DRM_ERROR( \
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & mask), \
write, __LINE__); \
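
The comment block added to radeon_drv.h above spells out the RBBM_STATUS bit layout that engine-idle loops depend on: bits 6:0 count free command-FIFO slots and bit 31 aggregates all the busy sources. A hedged sketch of the kind of poll built on those bits (radeon_cp.c keeps similar wait-for-fifo/idle helpers; the function name, retry count and MMIO helper here are illustrative):

#include <stdint.h>

/*
 * Hedged sketch of an idle wait keyed off the RBBM_STATUS bits documented
 * above: wait until the command FIFO has enough free slots and the
 * aggregate busy bit clears.  mmio_read32() is the same hypothetical MMIO
 * stand-in used in the earlier sketch.
 */
#define RBBM_STATUS		0x0e40
#define RBBM_FIFOCNT_MASK	0x007f
#define RBBM_ACTIVE		(1u << 31)

extern uint32_t mmio_read32(uint32_t reg);

static int wait_for_idle_sketch(unsigned int entries_needed)
{
	unsigned int i;

	for (i = 0; i < 100000; i++) {
		uint32_t status = mmio_read32(RBBM_STATUS);

		if ((status & RBBM_FIFOCNT_MASK) >= entries_needed &&
		    !(status & RBBM_ACTIVE))
			return 0;	/* FIFO has room and engine is idle */
	}
	return -1;			/* timed out */
}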
diff --git a/drivers/char/drm/radeon_irq.c b/drivers/char/drm/radeon_irq.c
index 84f5bc36252b..009af3814b6f 100644
--- a/drivers/char/drm/radeon_irq.c
+++ b/drivers/char/drm/radeon_irq.c
@@ -154,7 +154,7 @@ static int radeon_driver_vblank_do_wait(struct drm_device * dev,
int ack = 0;
atomic_t *counter;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -205,7 +205,7 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -227,7 +227,7 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr
drm_radeon_irq_wait_t *irqwait = data;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
diff --git a/drivers/char/drm/radeon_mem.c b/drivers/char/drm/radeon_mem.c
index a29acfe2f973..78b34fa7c89a 100644
--- a/drivers/char/drm/radeon_mem.c
+++ b/drivers/char/drm/radeon_mem.c
@@ -224,7 +224,7 @@ int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_p
struct mem_block *block, **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -259,7 +259,7 @@ int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_pr
struct mem_block *block, **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -285,7 +285,7 @@ int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *fi
struct mem_block **heap;
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index f824f2f5fdc2..6f75512f591e 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -898,7 +898,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
int w = pbox[i].x2 - x;
int h = pbox[i].y2 - y;
- DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
+ DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
x, y, w, h, flags);
if (flags & RADEON_FRONT) {
@@ -1368,7 +1368,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)
int w = pbox[i].x2 - x;
int h = pbox[i].y2 - y;
- DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", x, y, w, h);
+ DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
BEGIN_RING(9);
@@ -1422,8 +1422,7 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev)
int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
? dev_priv->front_offset : dev_priv->back_offset;
RING_LOCALS;
- DRM_DEBUG("%s: pfCurrentPage=%d\n",
- __FUNCTION__,
+ DRM_DEBUG("pfCurrentPage=%d\n",
dev_priv->sarea_priv->pfCurrentPage);
/* Do some trivial performance monitoring...
@@ -1562,7 +1561,7 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev,
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
- DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
+ DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
if (start != end) {
int offset = (dev_priv->gart_buffers_offset
@@ -1758,7 +1757,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
buf = radeon_freelist_get(dev);
}
if (!buf) {
- DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
+ DRM_DEBUG("EAGAIN\n");
if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
return -EFAULT;
return -EAGAIN;
@@ -2413,7 +2412,7 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil
LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
+ DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
indirect->idx, indirect->start, indirect->end,
indirect->discard);
@@ -2779,7 +2778,7 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
- DRM_DEBUG("%s: %x\n", __FUNCTION__, flags);
+ DRM_DEBUG("%x\n", flags);
switch (flags) {
case RADEON_WAIT_2D:
BEGIN_RING(2);
@@ -3035,6 +3034,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
case RADEON_PARAM_VBLANK_CRTC:
value = radeon_vblank_crtc_get(dev);
break;
+ case RADEON_PARAM_FB_LOCATION:
+ value = radeon_read_fb_location(dev_priv);
+ break;
default:
DRM_DEBUG("Invalid parameter %d\n", param->param);
return -EINVAL;
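
The last hunk above wires the new RADEON_PARAM_FB_LOCATION into radeon_cp_getparam(), so user space can ask the kernel where the framebuffer aperture lives instead of poking MC registers itself. A hedged sketch of a libdrm client querying it; the include path and the use of drmCommandWriteRead() as the transport for DRM_RADEON_GETPARAM are assumptions for illustration, not verified against this tree:

/*
 * Hedged sketch: query RADEON_PARAM_FB_LOCATION from user space.
 * Assumes libdrm's drmCommandWriteRead() and the drm_radeon_getparam_t
 * layout from radeon_drm.h; error handling trimmed.
 */
#include <stdio.h>
#include <xf86drm.h>
#include "radeon_drm.h"		/* assumption: path depends on libdrm install */

int query_fb_location(int fd)
{
	drm_radeon_getparam_t gp;
	int fb_location = 0;

	gp.param = RADEON_PARAM_FB_LOCATION;	/* value 14, added by this patch */
	gp.value = &fb_location;		/* kernel copies back sizeof(int) */

	if (drmCommandWriteRead(fd, DRM_RADEON_GETPARAM, &gp, sizeof(gp)))
		return -1;

	printf("FB location: 0x%08x\n", fb_location);
	return 0;
}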
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
index bf8e0e10fe21..5f6238fdf1fa 100644
--- a/drivers/char/drm/savage_state.c
+++ b/drivers/char/drm/savage_state.c
@@ -512,7 +512,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
DMA_DRAW_PRIMITIVE(count, prim, skip);
if (vb_stride == vtx_size) {
- DMA_COPY(&vtxbuf[vb_stride * start],
+ DMA_COPY(&vtxbuf[vb_stride * start],
vtx_size * count);
} else {
for (i = start; i < start + count; ++i) {
@@ -742,7 +742,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
while (n != 0) {
/* Can emit up to 255 vertices (85 triangles) at once. */
unsigned int count = n > 255 ? 255 : n;
-
+
/* Check indices */
for (i = 0; i < count; ++i) {
if (idx[i] > vb_size / (vb_stride * 4)) {
@@ -933,7 +933,7 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
/* j was check in savage_bci_cmdbuf */
ret = savage_dispatch_vb_idx(dev_priv,
&cmd_header, (const uint16_t *)cmdbuf,
- (const uint32_t *)vtxbuf, vb_size,
+ (const uint32_t *)vtxbuf, vb_size,
vb_stride);
cmdbuf += j;
break;
diff --git a/drivers/char/drm/sis_mm.c b/drivers/char/drm/sis_mm.c
index a6b7ccdaf73d..b3878770fce1 100644
--- a/drivers/char/drm/sis_mm.c
+++ b/drivers/char/drm/sis_mm.c
@@ -115,7 +115,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
+ DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
return 0;
}
@@ -205,7 +205,7 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
+ DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
return 0;
}
@@ -249,7 +249,7 @@ int sis_idle(struct drm_device *dev)
return 0;
}
}
-
+
/*
* Implement a device switch here if needed
*/
diff --git a/drivers/char/drm/via_dma.c b/drivers/char/drm/via_dma.c
index 7009dbddac43..94baec692b57 100644
--- a/drivers/char/drm/via_dma.c
+++ b/drivers/char/drm/via_dma.c
@@ -179,14 +179,12 @@ static int via_initialize(struct drm_device * dev,
}
if (dev_priv->ring.virtual_start != NULL) {
- DRM_ERROR("%s called again without calling cleanup\n",
- __FUNCTION__);
+ DRM_ERROR("called again without calling cleanup\n");
return -EFAULT;
}
if (!dev->agp || !dev->agp->base) {
- DRM_ERROR("%s called with no agp memory available\n",
- __FUNCTION__);
+ DRM_ERROR("called with no agp memory available\n");
return -EFAULT;
}
@@ -267,8 +265,7 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
dev_priv = (drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start == NULL) {
- DRM_ERROR("%s called without initializing AGP ring buffer.\n",
- __FUNCTION__);
+ DRM_ERROR("called without initializing AGP ring buffer.\n");
return -EFAULT;
}
@@ -337,8 +334,7 @@ static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *fi
LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
- cmdbuf->size);
+ DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_cmdbuffer(dev, cmdbuf);
if (ret) {
@@ -379,8 +375,7 @@ static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file
LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
- cmdbuf->size);
+ DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
if (ret) {
@@ -648,14 +643,13 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
uint32_t tmp_size, count;
drm_via_private_t *dev_priv;
- DRM_DEBUG("via cmdbuf_size\n");
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
dev_priv = (drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start == NULL) {
- DRM_ERROR("%s called without initializing AGP ring buffer.\n",
- __FUNCTION__);
+ DRM_ERROR("called without initializing AGP ring buffer.\n");
return -EFAULT;
}
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index c6fd16f3cb43..33c5197b73c4 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -1,5 +1,5 @@
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
+ *
* Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -16,22 +16,22 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors:
+ * Authors:
* Thomas Hellstrom.
* Partially based on code obtained from Digeo Inc.
*/
/*
- * Unmaps the DMA mappings.
- * FIXME: Is this a NoOp on x86? Also
- * FIXME: What happens if this one is called and a pending blit has previously done
- * the same DMA mappings?
+ * Unmaps the DMA mappings.
+ * FIXME: Is this a NoOp on x86? Also
+ * FIXME: What happens if this one is called and a pending blit has previously done
+ * the same DMA mappings?
*/
#include "drmP.h"
@@ -65,7 +65,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
int num_desc = vsg->num_desc;
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
- drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
dma_addr_t next = vsg->chain_start;
@@ -73,7 +73,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
if (descriptor_this_page-- == 0) {
cur_descriptor_page--;
descriptor_this_page = vsg->descriptors_per_page - 1;
- desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
}
dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
@@ -93,7 +93,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
static void
via_map_blit_for_device(struct pci_dev *pdev,
const drm_via_dmablit_t *xfer,
- drm_via_sg_info_t *vsg,
+ drm_via_sg_info_t *vsg,
int mode)
{
unsigned cur_descriptor_page = 0;
@@ -110,7 +110,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
dma_addr_t next = 0 | VIA_DMA_DPR_EC;
drm_via_descriptor_t *desc_ptr = NULL;
- if (mode == 1)
+ if (mode == 1)
desc_ptr = vsg->desc_pages[cur_descriptor_page];
for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
@@ -118,24 +118,24 @@ via_map_blit_for_device(struct pci_dev *pdev,
line_len = xfer->line_length;
cur_fb = fb_addr;
cur_mem = mem_addr;
-
+
while (line_len > 0) {
remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
line_len -= remaining_len;
if (mode == 1) {
- desc_ptr->mem_addr =
- dma_map_page(&pdev->dev,
- vsg->pages[VIA_PFN(cur_mem) -
+ desc_ptr->mem_addr =
+ dma_map_page(&pdev->dev,
+ vsg->pages[VIA_PFN(cur_mem) -
VIA_PFN(first_addr)],
- VIA_PGOFF(cur_mem), remaining_len,
+ VIA_PGOFF(cur_mem), remaining_len,
vsg->direction);
desc_ptr->dev_addr = cur_fb;
-
+
desc_ptr->size = remaining_len;
desc_ptr->next = (uint32_t) next;
- next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
+ next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
DMA_TO_DEVICE);
desc_ptr++;
if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
@@ -143,12 +143,12 @@ via_map_blit_for_device(struct pci_dev *pdev,
desc_ptr = vsg->desc_pages[++cur_descriptor_page];
}
}
-
+
num_desc++;
cur_mem += remaining_len;
cur_fb += remaining_len;
}
-
+
mem_addr += xfer->mem_stride;
fb_addr += xfer->fb_stride;
}
@@ -161,14 +161,14 @@ via_map_blit_for_device(struct pci_dev *pdev,
}
/*
- * Function that frees up all resources for a blit. It is usable even if the
+ * Function that frees up all resources for a blit. It is usable even if the
* blit info has only been partially built as long as the status enum is consistent
* with the actual status of the used resources.
*/
static void
-via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
struct page *page;
int i;
@@ -185,7 +185,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
case dr_via_pages_locked:
for (i=0; i<vsg->num_pages; ++i) {
if ( NULL != (page = vsg->pages[i])) {
- if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
+ if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
page_cache_release(page);
}
@@ -200,7 +200,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
vsg->bounce_buffer = NULL;
}
vsg->free_on_sequence = 0;
-}
+}
/*
* Fire a blit engine.
@@ -213,7 +213,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
VIA_DMA_CSR_DE);
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
@@ -233,9 +233,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int ret;
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
- vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
+ vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
first_pfn + 1;
-
+
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
return -ENOMEM;
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
@@ -248,7 +248,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
- if (ret < 0)
+ if (ret < 0)
return ret;
vsg->state = dr_via_pages_locked;
return -EINVAL;
@@ -264,21 +264,21 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
* quite large for some blits, and pages don't need to be contingous.
*/
-static int
+static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
int i;
-
+
vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
- vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
+ vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
vsg->descriptors_per_page;
if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
return -ENOMEM;
-
+
vsg->state = dr_via_desc_pages_alloc;
for (i=0; i<vsg->num_desc_pages; ++i) {
- if (NULL == (vsg->desc_pages[i] =
+ if (NULL == (vsg->desc_pages[i] =
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
return -ENOMEM;
}
@@ -286,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
vsg->num_desc);
return 0;
}
-
+
static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
@@ -300,7 +300,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
@@ -311,7 +311,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
* task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
* the workqueue task takes care of processing associated with the old blit.
*/
-
+
void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
@@ -331,19 +331,19 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
- done_transfer = blitq->is_active &&
+ done_transfer = blitq->is_active &&
(( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
- done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
+ done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
if (done_transfer) {
blitq->blits[cur]->aborted = blitq->aborting;
blitq->done_blit_handle++;
- DRM_WAKEUP(blitq->blit_queue + cur);
+ DRM_WAKEUP(blitq->blit_queue + cur);
cur++;
- if (cur >= VIA_NUM_BLIT_SLOTS)
+ if (cur >= VIA_NUM_BLIT_SLOTS)
cur = 0;
blitq->cur = cur;
@@ -355,7 +355,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->is_active = 0;
blitq->aborting = 0;
- schedule_work(&blitq->wq);
+ schedule_work(&blitq->wq);
} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
@@ -367,7 +367,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->aborting = 1;
blitq->end = jiffies + DRM_HZ;
}
-
+
if (!blitq->is_active) {
if (blitq->num_outstanding) {
via_fire_dmablit(dev, blitq->blits[cur], engine);
@@ -383,14 +383,14 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
}
via_dmablit_engine_off(dev, engine);
}
- }
+ }
if (from_irq) {
spin_unlock(&blitq->blit_lock);
} else {
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
-}
+}
@@ -426,13 +426,13 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
return active;
}
-
+
/*
* Sync. Wait for at least three seconds for the blit to be performed.
*/
static int
-via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -441,12 +441,12 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
int ret = 0;
if (via_dmablit_active(blitq, engine, handle, &queue)) {
- DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
handle, engine, ret);
-
+
return ret;
}
@@ -468,12 +468,12 @@ via_dmablit_timer(unsigned long data)
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
-
- DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
+
+ DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
(unsigned long) jiffies);
via_dmablit_handler(dev, engine, 0);
-
+
if (!timer_pending(&blitq->poll_timer)) {
mod_timer(&blitq->poll_timer, jiffies + 1);
@@ -497,7 +497,7 @@ via_dmablit_timer(unsigned long data)
*/
-static void
+static void
via_dmablit_workqueue(struct work_struct *work)
{
drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
@@ -505,38 +505,38 @@ via_dmablit_workqueue(struct work_struct *work)
unsigned long irqsave;
drm_via_sg_info_t *cur_sg;
int cur_released;
-
-
- DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
+
+
+ DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
+
while(blitq->serviced != blitq->cur) {
cur_released = blitq->serviced++;
DRM_DEBUG("Releasing blit slot %d\n", cur_released);
- if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
+ if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
blitq->serviced = 0;
-
+
cur_sg = blitq->blits[cur_released];
blitq->num_free++;
-
+
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
+
DRM_WAKEUP(&blitq->busy_queue);
-
+
via_free_sg_info(dev->pdev, cur_sg);
kfree(cur_sg);
-
+
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
-
+
/*
* Init all blit engines. Currently we use two, but some hardware has 4.
@@ -550,8 +550,8 @@ via_init_dmablit(struct drm_device *dev)
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq;
- pci_set_master(dev->pdev);
-
+ pci_set_master(dev->pdev);
+
for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
blitq = dev_priv->blit_queues + i;
blitq->dev = dev;
@@ -572,20 +572,20 @@ via_init_dmablit(struct drm_device *dev)
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
setup_timer(&blitq->poll_timer, via_dmablit_timer,
(unsigned long)blitq);
- }
+ }
}
/*
* Build all info and do all mappings required for a blit.
*/
-
+
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int draw = xfer->to_fb;
int ret = 0;
-
+
vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
vsg->bounce_buffer = NULL;
@@ -599,7 +599,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
/*
* Below check is a driver limitation, not a hardware one. We
* don't want to lock unused pages, and don't want to incorporate the
- * extra logic of avoiding them. Make sure there are no.
+ * extra logic of avoiding them. Make sure there are no.
* (Not a big limitation anyway.)
*/
@@ -625,11 +625,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
DRM_ERROR("Too large PCI DMA bitblt.\n");
return -EINVAL;
- }
+ }
- /*
+ /*
* we allow a negative fb stride to allow flipping of images in
- * transfer.
+ * transfer.
*/
if (xfer->mem_stride < xfer->line_length ||
@@ -653,11 +653,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
#else
if ((((unsigned long)xfer->mem_addr & 15) ||
((unsigned long)xfer->fb_addr & 3)) ||
- ((xfer->num_lines > 1) &&
+ ((xfer->num_lines > 1) &&
((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
return -EINVAL;
- }
+ }
#endif
if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
@@ -673,17 +673,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
return ret;
}
via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
-
+
return 0;
}
-
+
/*
* Reserve one free slot in the blit queue. Will wait for one second for one
* to become available. Otherwise -EBUSY is returned.
*/
-static int
+static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
int ret=0;
@@ -698,10 +698,10 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
if (ret) {
return (-EINTR == ret) ? -EAGAIN : ret;
}
-
+
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
-
+
blitq->num_free--;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
@@ -712,7 +712,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
* Hand back a free slot if we changed our mind.
*/
-static void
+static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
unsigned long irqsave;
@@ -728,8 +728,8 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
*/
-static int
-via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
+static int
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_sg_info_t *vsg;
@@ -760,15 +760,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
blitq->blits[blitq->head++] = vsg;
- if (blitq->head >= VIA_NUM_BLIT_SLOTS)
+ if (blitq->head >= VIA_NUM_BLIT_SLOTS)
blitq->head = 0;
blitq->num_outstanding++;
- xfer->sync.sync_handle = ++blitq->cur_blit_handle;
+ xfer->sync.sync_handle = ++blitq->cur_blit_handle;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
xfer->sync.engine = engine;
- via_dmablit_handler(dev, engine, 0);
+ via_dmablit_handler(dev, engine, 0);
return 0;
}
@@ -776,7 +776,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
/*
* Sync on a previously submitted blit. Note that the X server uses signals extensively, and
* that there is a very big probability that this IOCTL will be interrupted by a signal. In that
- * case it returns with -EAGAIN for the signal to be delivered.
+ * case it returns with -EAGAIN for the signal to be delivered.
* The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
*/
@@ -786,7 +786,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
drm_via_blitsync_t *sync = data;
int err;
- if (sync->engine >= VIA_NUM_BLIT_ENGINES)
+ if (sync->engine >= VIA_NUM_BLIT_ENGINES)
return -EINVAL;
err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
@@ -796,15 +796,15 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
return err;
}
-
+
/*
* Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
- * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
+ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
* be reissued. See the above IOCTL code.
*/
-int
+int
via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
{
drm_via_dmablit_t *xfer = data;
diff --git a/drivers/char/drm/via_dmablit.h b/drivers/char/drm/via_dmablit.h
index 6f6a513d5147..7408a547a036 100644
--- a/drivers/char/drm/via_dmablit.h
+++ b/drivers/char/drm/via_dmablit.h
@@ -1,5 +1,5 @@
/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
+ *
* Copyright 2005 Thomas Hellstrom.
* All Rights Reserved.
*
@@ -17,12 +17,12 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors:
+ * Authors:
* Thomas Hellstrom.
* Register info from Digeo Inc.
*/
@@ -67,7 +67,7 @@ typedef struct _drm_via_blitq {
unsigned cur;
unsigned num_free;
unsigned num_outstanding;
- unsigned long end;
+ unsigned long end;
int aborting;
int is_active;
drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
@@ -77,46 +77,46 @@ typedef struct _drm_via_blitq {
struct work_struct wq;
struct timer_list poll_timer;
} drm_via_blitq_t;
-
-/*
+
+/*
* PCI DMA Registers
* Channels 2 & 3 don't seem to be implemented in hardware.
*/
-
-#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
-#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
-#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
-#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
-
-#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
-#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
-#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
-#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
-
-#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
-#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
-#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
-#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
-
-#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
-#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
-#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
-#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
-
-#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
-#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
-#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
-#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
-
-#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
-#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
-#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
-#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
-
-#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
-
-/* Define for DMA engine */
+
+#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
+#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
+#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
+#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
+
+#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
+#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
+#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
+#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
+
+#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
+#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
+#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
+#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
+
+#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
+#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
+#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
+#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
+
+#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
+#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
+#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
+#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
+
+#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
+#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
+#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
+#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
+
+#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
+
+/* Define for DMA engine */
/* DPR */
#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
diff --git a/drivers/char/drm/via_drm.h b/drivers/char/drm/via_drm.h
index 8f53c76062e9..a3b5c102b067 100644
--- a/drivers/char/drm/via_drm.h
+++ b/drivers/char/drm/via_drm.h
@@ -35,7 +35,7 @@
#include "via_drmclient.h"
#endif
-#define VIA_NR_SAREA_CLIPRECTS 8
+#define VIA_NR_SAREA_CLIPRECTS 8
#define VIA_NR_XVMC_PORTS 10
#define VIA_NR_XVMC_LOCKS 5
#define VIA_MAX_CACHELINE_SIZE 64
@@ -259,7 +259,7 @@ typedef struct drm_via_blitsync {
typedef struct drm_via_dmablit {
uint32_t num_lines;
uint32_t line_length;
-
+
uint32_t fb_addr;
uint32_t fb_stride;
diff --git a/drivers/char/drm/via_drv.c b/drivers/char/drm/via_drv.c
index 2d4957ab256a..80c01cdfa37d 100644
--- a/drivers/char/drm/via_drv.c
+++ b/drivers/char/drm/via_drv.c
@@ -71,7 +71,7 @@ static struct drm_driver driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
},
-
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/char/drm/via_irq.c b/drivers/char/drm/via_irq.c
index 9c1d52bc92d7..c6bb978a1106 100644
--- a/drivers/char/drm/via_irq.c
+++ b/drivers/char/drm/via_irq.c
@@ -169,9 +169,9 @@ int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
unsigned int cur_vblank;
int ret = 0;
- DRM_DEBUG("viadrv_vblank_wait\n");
+ DRM_DEBUG("\n");
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -201,24 +201,23 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
maskarray_t *masks;
int real_irq;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+ DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
if (irq >= drm_via_irq_num) {
- DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
- irq);
+ DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
return -EINVAL;
}
real_irq = dev_priv->irq_map[irq];
if (real_irq < 0) {
- DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
- __FUNCTION__, irq);
+ DRM_ERROR("Video IRQ %d not available on this hardware.\n",
+ irq);
return -EINVAL;
}
@@ -251,7 +250,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
drm_via_irq_t *cur_irq;
int i;
- DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
+ DRM_DEBUG("dev_priv: %p\n", dev_priv);
if (dev_priv) {
cur_irq = dev_priv->via_irqs;
@@ -298,7 +297,7 @@ void via_driver_irq_postinstall(struct drm_device * dev)
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
- DRM_DEBUG("via_driver_irq_postinstall\n");
+ DRM_DEBUG("\n");
if (dev_priv) {
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
@@ -317,7 +316,7 @@ void via_driver_irq_uninstall(struct drm_device * dev)
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
- DRM_DEBUG("driver_irq_uninstall)\n");
+ DRM_DEBUG("\n");
if (dev_priv) {
/* Some more magic, oh for some data sheets ! */
@@ -344,7 +343,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
if (irqwait->request.irq >= dev_priv->num_irqs) {
- DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
+ DRM_ERROR("Trying to wait on unknown irq %d\n",
irqwait->request.irq);
return -EINVAL;
}
@@ -362,8 +361,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
if (irqwait->request.type & VIA_IRQ_SIGNAL) {
- DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n",
- __FUNCTION__);
+ DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
return -EINVAL;
}
diff --git a/drivers/char/drm/via_map.c b/drivers/char/drm/via_map.c
index 10091507a0dc..a967556be014 100644
--- a/drivers/char/drm/via_map.c
+++ b/drivers/char/drm/via_map.c
@@ -29,7 +29,7 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
{
drm_via_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
@@ -79,7 +79,7 @@ int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_init_t *init = data;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
switch (init->func) {
case VIA_INIT_MAP:
@@ -121,4 +121,3 @@ int via_driver_unload(struct drm_device *dev)
return 0;
}
-
diff --git a/drivers/char/drm/via_mm.c b/drivers/char/drm/via_mm.c
index 3ffbf8649833..e64094916e4f 100644
--- a/drivers/char/drm/via_mm.c
+++ b/drivers/char/drm/via_mm.c
@@ -53,7 +53,7 @@ int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
+ DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
return 0;
}
@@ -77,7 +77,7 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
+ DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
return 0;
@@ -113,7 +113,7 @@ void via_lastclose(struct drm_device *dev)
dev_priv->vram_initialized = 0;
dev_priv->agp_initialized = 0;
mutex_unlock(&dev->struct_mutex);
-}
+}
int via_mem_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/drivers/char/drm/via_video.c b/drivers/char/drm/via_video.c
index c15e75b54cb1..6ec04ac12459 100644
--- a/drivers/char/drm/via_video.c
+++ b/drivers/char/drm/via_video.c
@@ -33,7 +33,7 @@ void via_init_futex(drm_via_private_t * dev_priv)
{
unsigned int i;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
@@ -73,7 +73,7 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
int ret = 0;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (fx->lock > VIA_NR_XVMC_LOCKS)
return -EFAULT;
diff --git a/drivers/char/epca.h b/drivers/char/epca.h
index a297238cd3ba..3c77c02b5d65 100644
--- a/drivers/char/epca.h
+++ b/drivers/char/epca.h
@@ -77,7 +77,6 @@ static char *board_desc[] =
#define ON 1
#define FEPTIMEOUT 200000
-#define SERIAL_TYPE_NORMAL 1
#define SERIAL_TYPE_INFO 3
#define EPCA_EVENT_HANGUP 1
#define EPCA_MAGIC 0x5c6df104L
diff --git a/drivers/char/esp.c b/drivers/char/esp.c
index 28607763ae64..c01e26d9ee5e 100644
--- a/drivers/char/esp.c
+++ b/drivers/char/esp.c
@@ -111,9 +111,6 @@ static char serial_version[] __initdata = "2.2";
static struct tty_driver *esp_driver;
-/* serial subtype definitions */
-#define SERIAL_TYPE_NORMAL 1
-
/*
* Serial driver configuration section. Here are the various options:
*
@@ -245,17 +242,6 @@ static void rs_start(struct tty_struct *tty)
* -----------------------------------------------------------------------
*/
-/*
- * This routine is used by the interrupt handler to schedule
- * processing in the software interrupt portion of the driver.
- */
-static inline void rs_sched_event(struct esp_struct *info,
- int event)
-{
- info->event |= 1 << event;
- schedule_work(&info->tqueue);
-}
-
static DEFINE_SPINLOCK(pio_lock);
static inline struct esp_pio_buffer *get_pio_buffer(void)
@@ -477,7 +463,8 @@ static inline void transmit_chars_pio(struct esp_struct *info,
}
if (info->xmit_cnt < WAKEUP_CHARS) {
- rs_sched_event(info, ESP_EVENT_WRITE_WAKEUP);
+ if (info->tty)
+ tty_wakeup(info->tty);
#ifdef SERIAL_DEBUG_INTR
printk("THRE...");
@@ -515,7 +502,8 @@ static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes)
info->xmit_tail = (info->xmit_tail + dma_bytes) & (ESP_XMIT_SIZE - 1);
if (info->xmit_cnt < WAKEUP_CHARS) {
- rs_sched_event(info, ESP_EVENT_WRITE_WAKEUP);
+ if (info->tty)
+ tty_wakeup(info->tty);
#ifdef SERIAL_DEBUG_INTR
printk("THRE...");
@@ -607,7 +595,7 @@ static inline void check_modem_status(struct esp_struct *info)
#ifdef SERIAL_DEBUG_OPEN
printk("scheduling hangup...");
#endif
- schedule_work(&info->tqueue_hangup);
+ tty_hangup(info->tty);
}
}
}
@@ -723,41 +711,6 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
* -------------------------------------------------------------------
*/
-static void do_softint(struct work_struct *work)
-{
- struct esp_struct *info =
- container_of(work, struct esp_struct, tqueue);
- struct tty_struct *tty;
-
- tty = info->tty;
- if (!tty)
- return;
-
- if (test_and_clear_bit(ESP_EVENT_WRITE_WAKEUP, &info->event)) {
- tty_wakeup(tty);
- }
-}
-
-/*
- * This routine is called from the scheduler tqueue when the interrupt
- * routine has signalled that a hangup has occurred. The path of
- * hangup processing is:
- *
- * serial interrupt routine -> (scheduler tqueue) ->
- * do_serial_hangup() -> tty->hangup() -> esp_hangup()
- *
- */
-static void do_serial_hangup(struct work_struct *work)
-{
- struct esp_struct *info =
- container_of(work, struct esp_struct, tqueue_hangup);
- struct tty_struct *tty;
-
- tty = info->tty;
- if (tty)
- tty_hangup(tty);
-}
-
/*
* ---------------------------------------------------------------
* Low level utility subroutines for the serial driver: routines to
@@ -2041,7 +1994,6 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
tty->driver->flush_buffer(tty);
tty_ldisc_flush(tty);
tty->closing = 0;
- info->event = 0;
info->tty = NULL;
if (info->blocked_open) {
@@ -2109,7 +2061,6 @@ static void esp_hangup(struct tty_struct *tty)
rs_flush_buffer(tty);
shutdown(info);
- info->event = 0;
info->count = 0;
info->flags &= ~ASYNC_NORMAL_ACTIVE;
info->tty = NULL;
@@ -2495,8 +2446,6 @@ static int __init espserial_init(void)
info->magic = ESP_MAGIC;
info->close_delay = 5*HZ/10;
info->closing_wait = 30*HZ;
- INIT_WORK(&info->tqueue, do_softint);
- INIT_WORK(&info->tqueue_hangup, do_serial_hangup);
info->config.rx_timeout = rx_timeout;
info->config.flow_on = flow_on;
info->config.flow_off = flow_off;
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 480fae29c9b2..44160d5ebca0 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -93,7 +93,7 @@ struct hvc_struct {
};
/* dynamic list of hvc_struct instances */
-static struct list_head hvc_structs = LIST_HEAD_INIT(hvc_structs);
+static LIST_HEAD(hvc_structs);
/*
* Protect the list of hvc_struct instances from inserts and removals during
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 3402def22007..786d518e9477 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -306,7 +306,7 @@ struct hvcs_struct {
/* Required to back map a kref to its containing object */
#define from_kref(k) container_of(k, struct hvcs_struct, kref)
-static struct list_head hvcs_structs = LIST_HEAD_INIT(hvcs_structs);
+static LIST_HEAD(hvcs_structs);
static DEFINE_SPINLOCK(hvcs_structs_lock);
static void hvcs_unthrottle(struct tty_struct *tty);
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 0118b9817a95..84cdf9025737 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -234,11 +234,11 @@ static DEVICE_ATTR(rng_available, S_IRUGO,
NULL);
-static void unregister_miscdev(void)
+static void unregister_miscdev(bool suspended)
{
device_remove_file(rng_miscdev.this_device, &dev_attr_rng_available);
device_remove_file(rng_miscdev.this_device, &dev_attr_rng_current);
- misc_deregister(&rng_miscdev);
+ __misc_deregister(&rng_miscdev, suspended);
}
static int register_miscdev(void)
@@ -313,7 +313,7 @@ out:
}
EXPORT_SYMBOL_GPL(hwrng_register);
-void hwrng_unregister(struct hwrng *rng)
+void __hwrng_unregister(struct hwrng *rng, bool suspended)
{
int err;
@@ -332,11 +332,11 @@ void hwrng_unregister(struct hwrng *rng)
}
}
if (list_empty(&rng_list))
- unregister_miscdev();
+ unregister_miscdev(suspended);
mutex_unlock(&rng_mutex);
}
-EXPORT_SYMBOL_GPL(hwrng_unregister);
+EXPORT_SYMBOL_GPL(__hwrng_unregister);
MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 868e39fd42e4..f7feae4ebb5e 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -42,6 +42,8 @@ enum {
VIA_STRFILT_ENABLE = (1 << 14),
VIA_RAWBITS_ENABLE = (1 << 13),
VIA_RNG_ENABLE = (1 << 6),
+ VIA_NOISESRC1 = (1 << 8),
+ VIA_NOISESRC2 = (1 << 9),
VIA_XSTORE_CNT_MASK = 0x0F,
VIA_RNG_CHUNK_8 = 0x00, /* 64 rand bits, 64 stored bits */
@@ -119,6 +121,7 @@ static int via_rng_data_read(struct hwrng *rng, u32 *data)
static int via_rng_init(struct hwrng *rng)
{
+ struct cpuinfo_x86 *c = &cpu_data(0);
u32 lo, hi, old_lo;
/* Control the RNG via MSR. Tread lightly and pay very close
@@ -134,6 +137,17 @@ static int via_rng_init(struct hwrng *rng)
lo &= ~VIA_XSTORE_CNT_MASK;
lo &= ~(VIA_STRFILT_ENABLE | VIA_STRFILT_FAIL | VIA_RAWBITS_ENABLE);
lo |= VIA_RNG_ENABLE;
+ lo |= VIA_NOISESRC1;
+
+ /* Enable secondary noise source on CPUs where it is present. */
+
+ /* Nehemiah stepping 8 and higher */
+ if ((c->x86_model == 9) && (c->x86_mask > 7))
+ lo |= VIA_NOISESRC2;
+
+ /* Esther */
+ if (c->x86_model >= 10)
+ lo |= VIA_NOISESRC2;
if (lo != old_lo)
wrmsr(MSR_VIA_RNG, lo, hi);
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 30e564516422..8609b8236c67 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -113,6 +113,33 @@ static int i8k_smm(struct smm_regs *regs)
int rc;
int eax = regs->eax;
+#if defined(CONFIG_X86_64)
+ asm("pushq %%rax\n\t"
+ "movl 0(%%rax),%%edx\n\t"
+ "pushq %%rdx\n\t"
+ "movl 4(%%rax),%%ebx\n\t"
+ "movl 8(%%rax),%%ecx\n\t"
+ "movl 12(%%rax),%%edx\n\t"
+ "movl 16(%%rax),%%esi\n\t"
+ "movl 20(%%rax),%%edi\n\t"
+ "popq %%rax\n\t"
+ "out %%al,$0xb2\n\t"
+ "out %%al,$0x84\n\t"
+ "xchgq %%rax,(%%rsp)\n\t"
+ "movl %%ebx,4(%%rax)\n\t"
+ "movl %%ecx,8(%%rax)\n\t"
+ "movl %%edx,12(%%rax)\n\t"
+ "movl %%esi,16(%%rax)\n\t"
+ "movl %%edi,20(%%rax)\n\t"
+ "popq %%rdx\n\t"
+ "movl %%edx,0(%%rax)\n\t"
+ "lahf\n\t"
+ "shrl $8,%%eax\n\t"
+ "andl $1,%%eax\n"
+ :"=a"(rc)
+ : "a"(regs)
+ : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
+#else
asm("pushl %%eax\n\t"
"movl 0(%%eax),%%edx\n\t"
"push %%edx\n\t"
@@ -137,7 +164,7 @@ static int i8k_smm(struct smm_regs *regs)
"andl $1,%%eax\n":"=a"(rc)
: "a"(regs)
: "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory");
-
+#endif
if (rc != 0 || (regs->eax & 0xffff) == 0xffff || regs->eax == eax)
return -EINVAL;
@@ -439,6 +466,20 @@ static struct dmi_system_id __initdata i8k_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude"),
},
},
+ { /* UK Inspiron 6400 */
+ .ident = "Dell Inspiron 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MM061"),
+ },
+ },
+ {
+ .ident = "Dell Inspiron 3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MP061"),
+ },
+ },
{ }
};
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 0f49ccf02a7f..b1d6cad84282 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -153,9 +153,6 @@ static char *pcVersion = "1.2.14";
static char *pcDriver_name = "ip2";
static char *pcIpl = "ip2ipl";
-/* Serial subtype definitions */
-#define SERIAL_TYPE_NORMAL 1
-
// cheezy kludge or genius - you decide?
int ip2_loadmain(int *, int *, unsigned char *, int);
static unsigned char *Fip_firmware;
diff --git a/drivers/char/ip27-rtc.c b/drivers/char/ip27-rtc.c
index 932264a657d0..86e6538a77b0 100644
--- a/drivers/char/ip27-rtc.c
+++ b/drivers/char/ip27-rtc.c
@@ -46,8 +46,8 @@
#include <asm/sn/sn0/hub.h>
#include <asm/sn/sn_private.h>
-static int rtc_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
+static long rtc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
static int rtc_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data);
@@ -75,8 +75,7 @@ static unsigned long epoch = 1970; /* year corresponding to 0x00 */
static const unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
+static long rtc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct rtc_time wtime;
@@ -197,7 +196,7 @@ static int rtc_release(struct inode *inode, struct file *file)
static const struct file_operations rtc_fops = {
.owner = THIS_MODULE,
- .ioctl = rtc_ioctl,
+ .unlocked_ioctl = rtc_ioctl,
.open = rtc_open,
.release = rtc_release,
};
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 5dc1265ce1d5..32b2b22996dc 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -365,12 +365,12 @@ static struct device_driver ipmidriver = {
};
static DEFINE_MUTEX(ipmidriver_mutex);
-static struct list_head ipmi_interfaces = LIST_HEAD_INIT(ipmi_interfaces);
+static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
/* List of watchers that want to know when smi's are added and
deleted. */
-static struct list_head smi_watchers = LIST_HEAD_INIT(smi_watchers);
+static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);
@@ -441,7 +441,7 @@ struct watcher_entry {
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
ipmi_smi_t intf;
- struct list_head to_deliver = LIST_HEAD_INIT(to_deliver);
+ LIST_HEAD(to_deliver);
struct watcher_entry *e, *e2;
mutex_lock(&smi_watchers_mutex);
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
index 1f27be1ec3d4..c645455c3fd1 100644
--- a/drivers/char/istallion.c
+++ b/drivers/char/istallion.c
@@ -627,7 +627,6 @@ static int stli_initopen(struct stlibrd *brdp, struct stliport *portp);
static int stli_rawopen(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait);
static int stli_rawclose(struct stlibrd *brdp, struct stliport *portp, unsigned long arg, int wait);
static int stli_waitcarrier(struct stlibrd *brdp, struct stliport *portp, struct file *filp);
-static void stli_dohangup(struct work_struct *);
static int stli_setport(struct stliport *portp);
static int stli_cmdwait(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback);
static void stli_sendcmd(struct stlibrd *brdp, struct stliport *portp, unsigned long cmd, void *arg, int size, int copyback);
@@ -1824,25 +1823,6 @@ static void stli_start(struct tty_struct *tty)
/*****************************************************************************/
/*
- * Scheduler called hang up routine. This is called from the scheduler,
- * not direct from the driver "poll" routine. We can't call it there
- * since the real local hangup code will enable/disable the board and
- * other things that we can't do while handling the poll. Much easier
- * to deal with it some time later (don't really care when, hangups
- * aren't that time critical).
- */
-
-static void stli_dohangup(struct work_struct *ugly_api)
-{
- struct stliport *portp = container_of(ugly_api, struct stliport, tqhangup);
- if (portp->tty != NULL) {
- tty_hangup(portp->tty);
- }
-}
-
-/*****************************************************************************/
-
-/*
* Hangup this port. This is pretty much like closing the port, only
* a little more brutal. No waiting for data to drain. Shutdown the
* port and maybe drop signals. This is rather tricky really. We want
@@ -2405,7 +2385,7 @@ static int stli_hostcmd(struct stlibrd *brdp, struct stliport *portp)
((portp->sigs & TIOCM_CD) == 0)) {
if (portp->flags & ASYNC_CHECK_CD) {
if (tty)
- schedule_work(&portp->tqhangup);
+ tty_hangup(tty);
}
}
}
@@ -2733,7 +2713,6 @@ static int stli_initports(struct stlibrd *brdp)
portp->baud_base = STL_BAUDBASE;
portp->close_delay = STL_CLOSEDELAY;
portp->closing_wait = 30 * HZ;
- INIT_WORK(&portp->tqhangup, stli_dohangup);
init_waitqueue_head(&portp->open_wait);
init_waitqueue_head(&portp->close_wait);
init_waitqueue_head(&portp->raw_wait);
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index fc54d234507a..4dbd3425e928 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -38,7 +38,6 @@
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
#include <linux/vt_kern.h>
-#include <linux/consolemap.h>
#include <linux/sysrq.h>
#include <linux/input.h>
#include <linux/reboot.h>
@@ -194,7 +193,7 @@ int getkeycode(unsigned int scancode)
int error = -ENODEV;
list_for_each_entry(handle, &kbd_handler.h_list, h_node) {
- error = handle->dev->getkeycode(handle->dev, scancode, &keycode);
+ error = input_get_keycode(handle->dev, scancode, &keycode);
if (!error)
return keycode;
}
@@ -208,7 +207,7 @@ int setkeycode(unsigned int scancode, unsigned int keycode)
int error = -ENODEV;
list_for_each_entry(handle, &kbd_handler.h_list, h_node) {
- error = handle->dev->setkeycode(handle->dev, scancode, keycode);
+ error = input_set_keycode(handle->dev, scancode, keycode);
if (!error)
break;
}
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 81674d7c56c7..60ac642752be 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -312,7 +312,7 @@ static ssize_t lp_write(struct file * file, const char __user * buf,
if (copy_size > LP_BUFFER_SIZE)
copy_size = LP_BUFFER_SIZE;
- if (down_interruptible (&lp_table[minor].port_mutex))
+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
return -EINTR;
if (copy_from_user (kbuf, buf, copy_size)) {
@@ -399,7 +399,7 @@ static ssize_t lp_write(struct file * file, const char __user * buf,
lp_release_parport (&lp_table[minor]);
}
out_unlock:
- up (&lp_table[minor].port_mutex);
+ mutex_unlock(&lp_table[minor].port_mutex);
return retv;
}
@@ -421,7 +421,7 @@ static ssize_t lp_read(struct file * file, char __user * buf,
if (count > LP_BUFFER_SIZE)
count = LP_BUFFER_SIZE;
- if (down_interruptible (&lp_table[minor].port_mutex))
+ if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
return -EINTR;
lp_claim_parport_or_block (&lp_table[minor]);
@@ -479,7 +479,7 @@ static ssize_t lp_read(struct file * file, char __user * buf,
if (retval > 0 && copy_to_user (buf, kbuf, retval))
retval = -EFAULT;
- up (&lp_table[minor].port_mutex);
+ mutex_unlock(&lp_table[minor].port_mutex);
return retval;
}
@@ -888,7 +888,7 @@ static int __init lp_init (void)
lp_table[i].last_error = 0;
init_waitqueue_head (&lp_table[i].waitq);
init_waitqueue_head (&lp_table[i].dataq);
- init_MUTEX (&lp_table[i].port_mutex);
+ mutex_init(&lp_table[i].port_mutex);
lp_table[i].timeout = 10 * HZ;
}
diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
index 3c5802ae1716..f4716ad7348a 100644
--- a/drivers/char/mbcs.c
+++ b/drivers/char/mbcs.c
@@ -23,6 +23,7 @@
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/uio.h>
+#include <linux/mutex.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -281,7 +282,7 @@ static inline int mbcs_algo_start(struct mbcs_soft *soft)
void *mmr_base = soft->mmr_base;
union cm_control cm_control;
- if (down_interruptible(&soft->algolock))
+ if (mutex_lock_interruptible(&soft->algolock))
return -ERESTARTSYS;
atomic_set(&soft->algo_done, 0);
@@ -298,7 +299,7 @@ static inline int mbcs_algo_start(struct mbcs_soft *soft)
cm_control.alg_go = 1;
MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
- up(&soft->algolock);
+ mutex_unlock(&soft->algolock);
return 0;
}
@@ -309,7 +310,7 @@ do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
{
int rv = 0;
- if (down_interruptible(&soft->dmawritelock))
+ if (mutex_lock_interruptible(&soft->dmawritelock))
return -ERESTARTSYS;
atomic_set(&soft->dmawrite_done, 0);
@@ -335,7 +336,7 @@ do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
*off += len;
dmawrite_exit:
- up(&soft->dmawritelock);
+ mutex_unlock(&soft->dmawritelock);
return rv;
}
@@ -346,7 +347,7 @@ do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
{
int rv = 0;
- if (down_interruptible(&soft->dmareadlock))
+ if (mutex_lock_interruptible(&soft->dmareadlock))
return -ERESTARTSYS;
atomic_set(&soft->dmawrite_done, 0);
@@ -371,7 +372,7 @@ do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
*off += len;
dmaread_exit:
- up(&soft->dmareadlock);
+ mutex_unlock(&soft->dmareadlock);
return rv;
}
@@ -762,9 +763,9 @@ static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
init_waitqueue_head(&soft->dmaread_queue);
init_waitqueue_head(&soft->algo_queue);
- init_MUTEX(&soft->dmawritelock);
- init_MUTEX(&soft->dmareadlock);
- init_MUTEX(&soft->algolock);
+ mutex_init(&soft->dmawritelock);
+ mutex_init(&soft->dmareadlock);
+ mutex_init(&soft->algolock);
mbcs_getdma_init(&soft->getdma);
mbcs_putdma_init(&soft->putdma);
diff --git a/drivers/char/mbcs.h b/drivers/char/mbcs.h
index c9905a3c3353..ba671589f4cb 100644
--- a/drivers/char/mbcs.h
+++ b/drivers/char/mbcs.h
@@ -537,9 +537,9 @@ struct mbcs_soft {
atomic_t dmawrite_done;
atomic_t dmaread_done;
atomic_t algo_done;
- struct semaphore dmawritelock;
- struct semaphore dmareadlock;
- struct semaphore algolock;
+ struct mutex dmawritelock;
+ struct mutex dmareadlock;
+ struct mutex algolock;
};
static int mbcs_open(struct inode *ip, struct file *fp);
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 71c8cd7fa15f..a39101feb2ed 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -232,8 +232,9 @@ int misc_register(struct miscdevice * misc)
}
/**
- * misc_deregister - unregister a miscellaneous device
+ * __misc_deregister - unregister a miscellaneous device
* @misc: device to unregister
+ * @suspended: to be set if the function is used during suspend/resume
*
* Unregister a miscellaneous device that was previously
* successfully registered with misc_register(). Success
@@ -241,7 +242,7 @@ int misc_register(struct miscdevice * misc)
* indicates an error.
*/
-int misc_deregister(struct miscdevice * misc)
+int __misc_deregister(struct miscdevice *misc, bool suspended)
{
int i = misc->minor;
@@ -250,7 +251,11 @@ int misc_deregister(struct miscdevice * misc)
mutex_lock(&misc_mtx);
list_del(&misc->list);
- device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
+ if (suspended)
+ destroy_suspended_device(misc_class,
+ MKDEV(MISC_MAJOR, misc->minor));
+ else
+ device_destroy(misc_class, MKDEV(MISC_MAJOR, misc->minor));
if (i < DYNAMIC_MINORS && i>0) {
misc_minors[i>>3] &= ~(1 << (misc->minor & 7));
}
@@ -259,7 +264,7 @@ int misc_deregister(struct miscdevice * misc)
}
EXPORT_SYMBOL(misc_register);
-EXPORT_SYMBOL(misc_deregister);
+EXPORT_SYMBOL(__misc_deregister);
static int __init misc_init(void)
{
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index fd0abef7ee08..68c2e9234691 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1,43 +1,25 @@
/*
* mxser.c -- MOXA Smartio/Industio family multiport serial driver.
*
- * Copyright (C) 1999-2001 Moxa Technologies (support@moxa.com.tw).
+ * Copyright (C) 1999-2006 Moxa Technologies (support@moxa.com).
+ * Copyright (C) 2006-2008 Jiri Slaby <jirislaby@gmail.com>
*
- * This code is loosely based on the Linux serial driver, written by
- * Linus Torvalds, Theodore T'so and others.
+ * This code is loosely based on the 1.8 moxa driver which is based on
+ * Linux serial driver, written by Linus Torvalds, Theodore T'so and
+ * others.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Original release 10/26/00
- *
- * 02/06/01 Support MOXA Industio family boards.
- * 02/06/01 Support TIOCGICOUNT.
- * 02/06/01 Fix the problem for connecting to serial mouse.
- * 02/06/01 Fix the problem for H/W flow control.
- * 02/06/01 Fix the compling warning when CONFIG_PCI
- * don't be defined.
- *
* Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox
* <alan@redhat.com>. The original 1.8 code is available on www.moxa.com.
* - Fixed x86_64 cleanness
* - Fixed sleep with spinlock held in mxser_send_break
*/
-
#include <linux/module.h>
-#include <linux/autoconf.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
@@ -65,33 +47,37 @@
#include "mxser.h"
-#define MXSER_VERSION "1.8"
+#define MXSER_VERSION "2.0.3" /* 1.11 */
#define MXSERMAJOR 174
#define MXSERCUMAJOR 175
-#define MXSER_EVENT_TXLOW 1
-#define MXSER_EVENT_HANGUP 2
-
#define MXSER_BOARDS 4 /* Max. boards */
-#define MXSER_PORTS 32 /* Max. ports */
#define MXSER_PORTS_PER_BOARD 8 /* Max. ports per board */
-#define MXSER_ISR_PASS_LIMIT 256
+#define MXSER_PORTS (MXSER_BOARDS * MXSER_PORTS_PER_BOARD)
+#define MXSER_ISR_PASS_LIMIT 100
#define MXSER_ERR_IOADDR -1
#define MXSER_ERR_IRQ -2
#define MXSER_ERR_IRQ_CONFLIT -3
#define MXSER_ERR_VECTOR -4
-#define SERIAL_TYPE_NORMAL 1
-#define SERIAL_TYPE_CALLOUT 2
+/*CheckIsMoxaMust return value*/
+#define MOXA_OTHER_UART 0x00
+#define MOXA_MUST_MU150_HWID 0x01
+#define MOXA_MUST_MU860_HWID 0x02
#define WAKEUP_CHARS 256
#define UART_MCR_AFE 0x20
#define UART_LSR_SPECIAL 0x1E
+#define PCI_DEVICE_ID_CB108 0x1080
+#define PCI_DEVICE_ID_CB114 0x1142
+#define PCI_DEVICE_ID_CP114UL 0x1143
+#define PCI_DEVICE_ID_CB134I 0x1341
+#define PCI_DEVICE_ID_CP138U 0x1380
+#define PCI_DEVICE_ID_POS104UL 0x1044
-#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? IRQF_SHARED : IRQF_DISABLED)
#define C168_ASIC_ID 1
#define C104_ASIC_ID 2
@@ -100,88 +86,11 @@
#define CI134_ASIC_ID 3
#define CI104J_ASIC_ID 5
-enum {
- MXSER_BOARD_C168_ISA = 1,
- MXSER_BOARD_C104_ISA,
- MXSER_BOARD_CI104J,
- MXSER_BOARD_C168_PCI,
- MXSER_BOARD_C104_PCI,
- MXSER_BOARD_C102_ISA,
- MXSER_BOARD_CI132,
- MXSER_BOARD_CI134,
- MXSER_BOARD_CP132,
- MXSER_BOARD_CP114,
- MXSER_BOARD_CT114,
- MXSER_BOARD_CP102,
- MXSER_BOARD_CP104U,
- MXSER_BOARD_CP168U,
- MXSER_BOARD_CP132U,
- MXSER_BOARD_CP134U,
- MXSER_BOARD_CP104JU,
- MXSER_BOARD_RC7000,
- MXSER_BOARD_CP118U,
- MXSER_BOARD_CP102UL,
- MXSER_BOARD_CP102U,
-};
-
-static char *mxser_brdname[] = {
- "C168 series",
- "C104 series",
- "CI-104J series",
- "C168H/PCI series",
- "C104H/PCI series",
- "C102 series",
- "CI-132 series",
- "CI-134 series",
- "CP-132 series",
- "CP-114 series",
- "CT-114 series",
- "CP-102 series",
- "CP-104U series",
- "CP-168U series",
- "CP-132U series",
- "CP-134U series",
- "CP-104JU series",
- "Moxa UC7000 Serial",
- "CP-118U series",
- "CP-102UL series",
- "CP-102U series",
-};
-
-static int mxser_numports[] = {
- 8, /* C168-ISA */
- 4, /* C104-ISA */
- 4, /* CI104J */
- 8, /* C168-PCI */
- 4, /* C104-PCI */
- 2, /* C102-ISA */
- 2, /* CI132 */
- 4, /* CI134 */
- 2, /* CP132 */
- 4, /* CP114 */
- 4, /* CT114 */
- 2, /* CP102 */
- 4, /* CP104U */
- 8, /* CP168U */
- 2, /* CP132U */
- 4, /* CP134U */
- 4, /* CP104JU */
- 8, /* RC7000 */
- 8, /* CP118U */
- 2, /* CP102UL */
- 2, /* CP102U */
-};
-
-#define UART_TYPE_NUM 2
-
-static const unsigned int Gmoxa_uart_id[UART_TYPE_NUM] = {
- MOXA_MUST_MU150_HWID,
- MOXA_MUST_MU860_HWID
-};
+#define MXSER_HIGHBAUD 1
+#define MXSER_HAS2 2
/* This is only for PCI */
-#define UART_INFO_NUM 3
-struct mxpciuart_info {
+static const struct {
int type;
int tx_fifo;
int rx_fifo;
@@ -190,51 +99,85 @@ struct mxpciuart_info {
int rx_trigger;
int rx_low_water;
long max_baud;
-};
-
-static const struct mxpciuart_info Gpci_uart_info[UART_INFO_NUM] = {
+} Gpci_uart_info[] = {
{MOXA_OTHER_UART, 16, 16, 16, 14, 14, 1, 921600L},
{MOXA_MUST_MU150_HWID, 64, 64, 64, 48, 48, 16, 230400L},
{MOXA_MUST_MU860_HWID, 128, 128, 128, 96, 96, 32, 921600L}
};
+#define UART_INFO_NUM ARRAY_SIZE(Gpci_uart_info)
+struct mxser_cardinfo {
+ char *name;
+ unsigned int nports;
+ unsigned int flags;
+};
-#ifdef CONFIG_PCI
+static const struct mxser_cardinfo mxser_cards[] = {
+/* 0*/ { "C168 series", 8, },
+ { "C104 series", 4, },
+ { "CI-104J series", 4, },
+ { "C168H/PCI series", 8, },
+ { "C104H/PCI series", 4, },
+/* 5*/ { "C102 series", 4, MXSER_HAS2 }, /* C102-ISA */
+ { "CI-132 series", 4, MXSER_HAS2 },
+ { "CI-134 series", 4, },
+ { "CP-132 series", 2, },
+ { "CP-114 series", 4, },
+/*10*/ { "CT-114 series", 4, },
+ { "CP-102 series", 2, MXSER_HIGHBAUD },
+ { "CP-104U series", 4, },
+ { "CP-168U series", 8, },
+ { "CP-132U series", 2, },
+/*15*/ { "CP-134U series", 4, },
+ { "CP-104JU series", 4, },
+ { "Moxa UC7000 Serial", 8, }, /* RC7000 */
+ { "CP-118U series", 8, },
+ { "CP-102UL series", 2, },
+/*20*/ { "CP-102U series", 2, },
+ { "CP-118EL series", 8, },
+ { "CP-168EL series", 8, },
+ { "CP-104EL series", 4, },
+ { "CB-108 series", 8, },
+/*25*/ { "CB-114 series", 4, },
+ { "CB-134I series", 4, },
+ { "CP-138U series", 8, },
+ { "POS-104UL series", 4, },
+ { "CP-114UL series", 4, }
+};
+/* driver_data correspond to the lines in the structure above
+ see also ISA probe function before you change something */
static struct pci_device_id mxser_pcibrds[] = {
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_C168_PCI},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_C104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_C104_PCI},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP132},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP114},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CT114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CT114},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP102},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP104U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP104U},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP168U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP168U},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP132U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP132U},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP134U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP134U},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP104JU, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP104JU},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_RC7000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_RC7000},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP118U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP118U},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102UL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP102UL},
- {PCI_VENDOR_ID_MOXA, PCI_DEVICE_ID_MOXA_CP102U, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MXSER_BOARD_CP102U},
- {0}
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168), .driver_data = 3 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104), .driver_data = 4 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132), .driver_data = 8 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114), .driver_data = 9 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CT114), .driver_data = 10 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102), .driver_data = 11 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U), .driver_data = 12 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U), .driver_data = 13 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U), .driver_data = 14 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U), .driver_data = 15 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104JU),.driver_data = 16 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_RC7000), .driver_data = 17 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U), .driver_data = 18 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),.driver_data = 19 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U), .driver_data = 20 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118EL),.driver_data = 21 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168EL),.driver_data = 22 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104EL),.driver_data = 23 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB108), .driver_data = 24 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB114), .driver_data = 25 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB134I), .driver_data = 26 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U), .driver_data = 27 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_POS104UL), .driver_data = 28 },
+ { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP114UL), .driver_data = 29 },
+ { }
};
-
MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
-
-#endif
-
-typedef struct _moxa_pci_info {
- unsigned short busNum;
- unsigned short devNum;
- struct pci_dev *pdev; /* add by Victor Yu. 06-23-2003 */
-} moxa_pci_info;
-
static int ioaddr[MXSER_BOARDS] = { 0, 0, 0, 0 };
static int ttymajor = MXSERMAJOR;
-static int calloutmajor = MXSERCUMAJOR;
-static int verbose = 0;
/* Variables for insmod */
@@ -242,8 +185,6 @@ MODULE_AUTHOR("Casper Yang");
MODULE_DESCRIPTION("MOXA Smartio/Industio Family Multiport Board Device Driver");
module_param_array(ioaddr, int, NULL, 0);
module_param(ttymajor, int, 0);
-module_param(calloutmajor, int, 0);
-module_param(verbose, bool, 0);
MODULE_LICENSE("GPL");
struct mxser_log {
@@ -278,67 +219,69 @@ struct mxser_mon_ext {
int iftype[32];
};
-struct mxser_hwconf {
- int board_type;
- int ports;
- int irq;
- int vector;
- int vector_mask;
- int uart_type;
- int ioaddr[MXSER_PORTS_PER_BOARD];
- int baud_base[MXSER_PORTS_PER_BOARD];
- moxa_pci_info pciInfo;
- int IsMoxaMustChipFlag; /* add by Victor Yu. 08-30-2002 */
- int MaxCanSetBaudRate[MXSER_PORTS_PER_BOARD]; /* add by Victor Yu. 09-04-2002 */
- int opmode_ioaddr[MXSER_PORTS_PER_BOARD]; /* add by Victor Yu. 01-05-2004 */
-};
+struct mxser_board;
+
+struct mxser_port {
+ struct mxser_board *board;
+ struct tty_struct *tty;
+
+ unsigned long ioaddr;
+ unsigned long opmode_ioaddr;
+ int max_baud;
-struct mxser_struct {
- int port;
- int base; /* port base address */
- int irq; /* port using irq no. */
- int vector; /* port irq vector */
- int vectormask; /* port vector mask */
int rx_high_water;
int rx_trigger; /* Rx fifo trigger level */
int rx_low_water;
int baud_base; /* max. speed */
- int flags; /* defined in tty.h */
int type; /* UART type */
- struct tty_struct *tty;
- int read_status_mask;
- int ignore_status_mask;
- int xmit_fifo_size;
- int custom_divisor;
+ int flags; /* defined in tty.h */
+
int x_char; /* xon/xoff character */
- int close_delay;
- unsigned short closing_wait;
int IER; /* Interrupt Enable Register */
int MCR; /* Modem control register */
+
+ unsigned char stop_rx;
+ unsigned char ldisc_stop_rx;
+
+ int custom_divisor;
+ int close_delay;
+ unsigned short closing_wait;
+ unsigned char err_shadow;
unsigned long event;
+
int count; /* # of fd on device */
int blocked_open; /* # of blocked opens */
+ struct async_icount icount; /* kernel counters for 4 input interrupts */
+ int timeout;
+
+ int read_status_mask;
+ int ignore_status_mask;
+ int xmit_fifo_size;
unsigned char *xmit_buf;
int xmit_head;
int xmit_tail;
int xmit_cnt;
- struct work_struct tqueue;
+
struct ktermios normal_termios;
- struct ktermios callout_termios;
- wait_queue_head_t open_wait;
- wait_queue_head_t close_wait;
- wait_queue_head_t delta_msr_wait;
- struct async_icount icount; /* kernel counters for the 4 input interrupts */
- int timeout;
- int IsMoxaMustChipFlag; /* add by Victor Yu. 08-30-2002 */
- int MaxCanSetBaudRate; /* add by Victor Yu. 09-04-2002 */
- int opmode_ioaddr; /* add by Victor Yu. 01-05-2004 */
- unsigned char stop_rx;
- unsigned char ldisc_stop_rx;
- long realbaud;
+
struct mxser_mon mon_data;
- unsigned char err_shadow;
+
spinlock_t slock;
+ wait_queue_head_t open_wait;
+ wait_queue_head_t delta_msr_wait;
+};
+
+struct mxser_board {
+ unsigned int idx;
+ int irq;
+ const struct mxser_cardinfo *info;
+ unsigned long vector;
+ unsigned long vector_mask;
+
+ int chip_flag;
+ int uart_type;
+
+ struct mxser_port ports[MXSER_PORTS_PER_BOARD];
};
struct mxser_mstatus {
@@ -356,73 +299,16 @@ static int mxserBoardCAP[MXSER_BOARDS] = {
/* 0x180, 0x280, 0x200, 0x320 */
};
+static struct mxser_board mxser_boards[MXSER_BOARDS];
static struct tty_driver *mxvar_sdriver;
-static struct mxser_struct mxvar_table[MXSER_PORTS];
-static struct tty_struct *mxvar_tty[MXSER_PORTS + 1];
-static struct ktermios *mxvar_termios[MXSER_PORTS + 1];
-static struct ktermios *mxvar_termios_locked[MXSER_PORTS + 1];
static struct mxser_log mxvar_log;
static int mxvar_diagflag;
static unsigned char mxser_msr[MXSER_PORTS + 1];
static struct mxser_mon_ext mon_data_ext;
static int mxser_set_baud_method[MXSER_PORTS + 1];
-static spinlock_t gm_lock;
-
-/*
- * This is used to figure out the divisor speeds and the timeouts
- */
-
-static struct mxser_hwconf mxsercfg[MXSER_BOARDS];
-
-/*
- * static functions:
- */
-
-static void mxser_getcfg(int board, struct mxser_hwconf *hwconf);
-static int mxser_init(void);
-
-/* static void mxser_poll(unsigned long); */
-static int mxser_get_ISA_conf(int, struct mxser_hwconf *);
-static void mxser_do_softint(struct work_struct *);
-static int mxser_open(struct tty_struct *, struct file *);
-static void mxser_close(struct tty_struct *, struct file *);
-static int mxser_write(struct tty_struct *, const unsigned char *, int);
-static int mxser_write_room(struct tty_struct *);
-static void mxser_flush_buffer(struct tty_struct *);
-static int mxser_chars_in_buffer(struct tty_struct *);
-static void mxser_flush_chars(struct tty_struct *);
-static void mxser_put_char(struct tty_struct *, unsigned char);
-static int mxser_ioctl(struct tty_struct *, struct file *, uint, ulong);
-static int mxser_ioctl_special(unsigned int, void __user *);
-static void mxser_throttle(struct tty_struct *);
-static void mxser_unthrottle(struct tty_struct *);
-static void mxser_set_termios(struct tty_struct *, struct ktermios *);
-static void mxser_stop(struct tty_struct *);
-static void mxser_start(struct tty_struct *);
-static void mxser_hangup(struct tty_struct *);
-static void mxser_rs_break(struct tty_struct *, int);
-static irqreturn_t mxser_interrupt(int, void *);
-static void mxser_receive_chars(struct mxser_struct *, int *);
-static void mxser_transmit_chars(struct mxser_struct *);
-static void mxser_check_modem_status(struct mxser_struct *, int);
-static int mxser_block_til_ready(struct tty_struct *, struct file *, struct mxser_struct *);
-static int mxser_startup(struct mxser_struct *);
-static void mxser_shutdown(struct mxser_struct *);
-static int mxser_change_speed(struct mxser_struct *, struct ktermios *old_termios);
-static int mxser_get_serial_info(struct mxser_struct *, struct serial_struct __user *);
-static int mxser_set_serial_info(struct mxser_struct *, struct serial_struct __user *);
-static int mxser_get_lsr_info(struct mxser_struct *, unsigned int __user *);
-static void mxser_send_break(struct mxser_struct *, int);
-static int mxser_tiocmget(struct tty_struct *, struct file *);
-static int mxser_tiocmset(struct tty_struct *, struct file *, unsigned int, unsigned int);
-static int mxser_set_baud(struct mxser_struct *info, long newspd);
-static void mxser_wait_until_sent(struct tty_struct *tty, int timeout);
-
-static void mxser_startrx(struct tty_struct *tty);
-static void mxser_stoprx(struct tty_struct *tty);
#ifdef CONFIG_PCI
-static int CheckIsMoxaMust(int io)
+static int __devinit CheckIsMoxaMust(unsigned long io)
{
u8 oldmcr, hwid;
int i;
@@ -438,90 +324,15 @@ static int CheckIsMoxaMust(int io)
}
GET_MOXA_MUST_HARDWARE_ID(io, &hwid);
- for (i = 0; i < UART_TYPE_NUM; i++) {
- if (hwid == Gmoxa_uart_id[i])
+ for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */
+ if (hwid == Gpci_uart_info[i].type)
return (int)hwid;
}
return MOXA_OTHER_UART;
}
#endif
-/* above is modified by Victor Yu. 08-15-2002 */
-
-static const struct tty_operations mxser_ops = {
- .open = mxser_open,
- .close = mxser_close,
- .write = mxser_write,
- .put_char = mxser_put_char,
- .flush_chars = mxser_flush_chars,
- .write_room = mxser_write_room,
- .chars_in_buffer = mxser_chars_in_buffer,
- .flush_buffer = mxser_flush_buffer,
- .ioctl = mxser_ioctl,
- .throttle = mxser_throttle,
- .unthrottle = mxser_unthrottle,
- .set_termios = mxser_set_termios,
- .stop = mxser_stop,
- .start = mxser_start,
- .hangup = mxser_hangup,
- .break_ctl = mxser_rs_break,
- .wait_until_sent = mxser_wait_until_sent,
- .tiocmget = mxser_tiocmget,
- .tiocmset = mxser_tiocmset,
-};
-
-/*
- * The MOXA Smartio/Industio serial driver boot-time initialization code!
- */
-
-static int __init mxser_module_init(void)
-{
- int ret;
-
- if (verbose)
- printk(KERN_DEBUG "Loading module mxser ...\n");
- ret = mxser_init();
- if (verbose)
- printk(KERN_DEBUG "Done.\n");
- return ret;
-}
-
-static void __exit mxser_module_exit(void)
-{
- int i, err;
-
- if (verbose)
- printk(KERN_DEBUG "Unloading module mxser ...\n");
-
- err = tty_unregister_driver(mxvar_sdriver);
- if (!err)
- put_tty_driver(mxvar_sdriver);
- else
- printk(KERN_ERR "Couldn't unregister MOXA Smartio/Industio family serial driver\n");
-
- for (i = 0; i < MXSER_BOARDS; i++) {
- struct pci_dev *pdev;
-
- if (mxsercfg[i].board_type == -1)
- continue;
- else {
- pdev = mxsercfg[i].pciInfo.pdev;
- free_irq(mxsercfg[i].irq, &mxvar_table[i * MXSER_PORTS_PER_BOARD]);
- if (pdev != NULL) { /* PCI */
- release_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
- release_region(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
- pci_dev_put(pdev);
- } else {
- release_region(mxsercfg[i].ioaddr[0], 8 * mxsercfg[i].ports);
- release_region(mxsercfg[i].vector, 1);
- }
- }
- }
- if (verbose)
- printk(KERN_DEBUG "Done.\n");
-}
-
-static void process_txrx_fifo(struct mxser_struct *info)
+static void process_txrx_fifo(struct mxser_port *info)
{
int i;
@@ -530,424 +341,548 @@ static void process_txrx_fifo(struct mxser_struct *info)
info->rx_high_water = 1;
info->rx_low_water = 1;
info->xmit_fifo_size = 1;
- } else {
- for (i = 0; i < UART_INFO_NUM; i++) {
- if (info->IsMoxaMustChipFlag == Gpci_uart_info[i].type) {
+ } else
+ for (i = 0; i < UART_INFO_NUM; i++)
+ if (info->board->chip_flag == Gpci_uart_info[i].type) {
info->rx_trigger = Gpci_uart_info[i].rx_trigger;
info->rx_low_water = Gpci_uart_info[i].rx_low_water;
info->rx_high_water = Gpci_uart_info[i].rx_high_water;
info->xmit_fifo_size = Gpci_uart_info[i].xmit_fifo_size;
break;
}
- }
- }
}
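/*
 * Editorial aside, not part of the patch: a minimal stand-alone sketch of the
 * lookup pattern process_txrx_fifo() uses -- match a chip id against a small
 * capability table and copy its FIFO parameters, falling back to depth-1
 * values for plain FIFO-less parts.  The entries and numbers below are
 * illustrative only, not the driver's real Gpci_uart_info[] contents.
 */
#include <stdio.h>

struct uart_caps {
	int type;		/* chip id reported by the hardware */
	int rx_trigger;		/* RX FIFO interrupt trigger level */
	int xmit_fifo_size;	/* TX FIFO depth */
};

static const struct uart_caps caps_table[] = {
	{ 1, 14, 16 },		/* hypothetical 16550A-class part */
	{ 2, 56, 64 },		/* hypothetical MOXA MUST-class part */
};

static void fill_fifo_params(int chip_id, struct uart_caps *out)
{
	size_t i;

	/* default: behave like a FIFO-less UART */
	out->rx_trigger = 1;
	out->xmit_fifo_size = 1;

	for (i = 0; i < sizeof(caps_table) / sizeof(caps_table[0]); i++)
		if (caps_table[i].type == chip_id) {
			*out = caps_table[i];
			break;
		}
}

int main(void)
{
	struct uart_caps c = { 0 };

	fill_fifo_params(2, &c);
	printf("rx_trigger=%d fifo=%d\n", c.rx_trigger, c.xmit_fifo_size);
	return 0;
}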
-static int mxser_initbrd(int board, struct mxser_hwconf *hwconf)
+static unsigned char mxser_get_msr(int baseaddr, int mode, int port)
{
- struct mxser_struct *info;
- int retval;
- int i, n;
+ unsigned char status = 0;
- n = board * MXSER_PORTS_PER_BOARD;
- info = &mxvar_table[n];
- /*if (verbose) */ {
- printk(KERN_DEBUG " ttyMI%d - ttyMI%d ",
- n, n + hwconf->ports - 1);
- printk(" max. baud rate = %d bps.\n",
- hwconf->MaxCanSetBaudRate[0]);
- }
-
- for (i = 0; i < hwconf->ports; i++, n++, info++) {
- info->port = n;
- info->base = hwconf->ioaddr[i];
- info->irq = hwconf->irq;
- info->vector = hwconf->vector;
- info->vectormask = hwconf->vector_mask;
- info->opmode_ioaddr = hwconf->opmode_ioaddr[i]; /* add by Victor Yu. 01-05-2004 */
- info->stop_rx = 0;
- info->ldisc_stop_rx = 0;
+ status = inb(baseaddr + UART_MSR);
- info->IsMoxaMustChipFlag = hwconf->IsMoxaMustChipFlag;
- /* Enhance mode enabled here */
- if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) {
- ENABLE_MOXA_MUST_ENCHANCE_MODE(info->base);
- }
+ mxser_msr[port] &= 0x0F;
+ mxser_msr[port] |= status;
+ status = mxser_msr[port];
+ if (mode)
+ mxser_msr[port] = 0;
- info->flags = ASYNC_SHARE_IRQ;
- info->type = hwconf->uart_type;
- info->baud_base = hwconf->baud_base[i];
+ return status;
+}
- info->MaxCanSetBaudRate = hwconf->MaxCanSetBaudRate[i];
+static int mxser_block_til_ready(struct tty_struct *tty, struct file *filp,
+ struct mxser_port *port)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int retval;
+ int do_clocal = 0;
+ unsigned long flags;
- process_txrx_fifo(info);
+ /*
+ * If non-blocking mode is set, or the port is not enabled,
+ * then make the check up front and then exit.
+ */
+ if ((filp->f_flags & O_NONBLOCK) ||
+ test_bit(TTY_IO_ERROR, &tty->flags)) {
+ port->flags |= ASYNC_NORMAL_ACTIVE;
+ return 0;
+ }
+ if (tty->termios->c_cflag & CLOCAL)
+ do_clocal = 1;
- info->custom_divisor = hwconf->baud_base[i] * 16;
- info->close_delay = 5 * HZ / 10;
- info->closing_wait = 30 * HZ;
- INIT_WORK(&info->tqueue, mxser_do_softint);
- info->normal_termios = mxvar_sdriver->init_termios;
- init_waitqueue_head(&info->open_wait);
- init_waitqueue_head(&info->close_wait);
- init_waitqueue_head(&info->delta_msr_wait);
- memset(&info->mon_data, 0, sizeof(struct mxser_mon));
- info->err_shadow = 0;
- spin_lock_init(&info->slock);
- }
/*
- * Allocate the IRQ if necessary
+ * Block waiting for the carrier detect and the line to become
+ * free (i.e., not in use by the callout). While we are in
+ * this loop, port->count is dropped by one, so that
+ * mxser_close() knows when to free things. We restore it upon
+ * exit, either normal or abnormal.
*/
+ retval = 0;
+ add_wait_queue(&port->open_wait, &wait);
-
- /* before set INT ISR, disable all int */
- for (i = 0; i < hwconf->ports; i++) {
- outb(inb(hwconf->ioaddr[i] + UART_IER) & 0xf0,
- hwconf->ioaddr[i] + UART_IER);
+ spin_lock_irqsave(&port->slock, flags);
+ if (!tty_hung_up_p(filp))
+ port->count--;
+ spin_unlock_irqrestore(&port->slock, flags);
+ port->blocked_open++;
+ while (1) {
+ spin_lock_irqsave(&port->slock, flags);
+ outb(inb(port->ioaddr + UART_MCR) |
+ UART_MCR_DTR | UART_MCR_RTS, port->ioaddr + UART_MCR);
+ spin_unlock_irqrestore(&port->slock, flags);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
+ if (port->flags & ASYNC_HUP_NOTIFY)
+ retval = -EAGAIN;
+ else
+ retval = -ERESTARTSYS;
+ break;
+ }
+ if (!(port->flags & ASYNC_CLOSING) &&
+ (do_clocal ||
+ (inb(port->ioaddr + UART_MSR) & UART_MSR_DCD)))
+ break;
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+ schedule();
}
-
- n = board * MXSER_PORTS_PER_BOARD;
- info = &mxvar_table[n];
-
- retval = request_irq(hwconf->irq, mxser_interrupt, IRQ_T(info),
- "mxser", info);
- if (retval) {
- printk(KERN_ERR "Board %d: %s",
- board, mxser_brdname[hwconf->board_type - 1]);
- printk(" Request irq failed, IRQ (%d) may conflict with"
- " another device.\n", info->irq);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&port->open_wait, &wait);
+ if (!tty_hung_up_p(filp))
+ port->count++;
+ port->blocked_open--;
+ if (retval)
return retval;
- }
+ port->flags |= ASYNC_NORMAL_ACTIVE;
return 0;
}
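/*
 * Editorial aside, not part of the patch: how mxser_block_til_ready() looks
 * from user space.  A plain open() on a port without CLOCAL sleeps in this
 * routine until DCD is asserted; O_NONBLOCK (or setting CLOCAL first) skips
 * the wait.  The device node name is an assumption based on the driver's
 * "ttyMI" prefix; error handling is kept minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	/* returns immediately even with no carrier present */
	int fd = open("/dev/ttyMI0", O_RDWR | O_NOCTTY | O_NONBLOCK);
	struct termios tio;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ignore modem control lines from now on (sets CLOCAL) */
	if (tcgetattr(fd, &tio) == 0) {
		tio.c_cflag |= CLOCAL;
		tcsetattr(fd, TCSANOW, &tio);
	}

	/* with CLOCAL set, a later blocking open also returns at once */
	close(fd);
	return 0;
}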
-static void mxser_getcfg(int board, struct mxser_hwconf *hwconf)
+static int mxser_set_baud(struct mxser_port *info, long newspd)
{
- mxsercfg[board] = *hwconf;
-}
+ int quot = 0, baud;
+ unsigned char cval;
-#ifdef CONFIG_PCI
-static int mxser_get_PCI_conf(int busnum, int devnum, int board_type, struct mxser_hwconf *hwconf)
-{
- int i, j;
- /* unsigned int val; */
- unsigned int ioaddress;
- struct pci_dev *pdev = hwconf->pciInfo.pdev;
+ if (!info->tty || !info->tty->termios)
+ return -1;
- /* io address */
- hwconf->board_type = board_type;
- hwconf->ports = mxser_numports[board_type - 1];
- ioaddress = pci_resource_start(pdev, 2);
- request_region(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2),
- "mxser(IO)");
+ if (!(info->ioaddr))
+ return -1;
- for (i = 0; i < hwconf->ports; i++)
- hwconf->ioaddr[i] = ioaddress + 8 * i;
+ if (newspd > info->max_baud)
+ return -1;
- /* vector */
- ioaddress = pci_resource_start(pdev, 3);
- request_region(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3),
- "mxser(vector)");
- hwconf->vector = ioaddress;
+ if (newspd == 134) {
+ quot = 2 * info->baud_base / 269;
+ tty_encode_baud_rate(info->tty, 134, 134);
+ } else if (newspd) {
+ quot = info->baud_base / newspd;
+ if (quot == 0)
+ quot = 1;
+ baud = info->baud_base/quot;
+ tty_encode_baud_rate(info->tty, baud, baud);
+ } else {
+ quot = 0;
+ }
- /* irq */
- hwconf->irq = hwconf->pciInfo.pdev->irq;
+ info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base);
+ info->timeout += HZ / 50; /* Add .02 seconds of slop */
- hwconf->IsMoxaMustChipFlag = CheckIsMoxaMust(hwconf->ioaddr[0]);
- hwconf->uart_type = PORT_16550A;
- hwconf->vector_mask = 0;
+ if (quot) {
+ info->MCR |= UART_MCR_DTR;
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ } else {
+ info->MCR &= ~UART_MCR_DTR;
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ return 0;
+ }
+ cval = inb(info->ioaddr + UART_LCR);
- for (i = 0; i < hwconf->ports; i++) {
- for (j = 0; j < UART_INFO_NUM; j++) {
- if (Gpci_uart_info[j].type == hwconf->IsMoxaMustChipFlag) {
- hwconf->MaxCanSetBaudRate[i] = Gpci_uart_info[j].max_baud;
+ outb(cval | UART_LCR_DLAB, info->ioaddr + UART_LCR); /* set DLAB */
- /* exception....CP-102 */
- if (board_type == MXSER_BOARD_CP102)
- hwconf->MaxCanSetBaudRate[i] = 921600;
- break;
- }
- }
- }
+ outb(quot & 0xff, info->ioaddr + UART_DLL); /* LS of divisor */
+ outb(quot >> 8, info->ioaddr + UART_DLM); /* MS of divisor */
+ outb(cval, info->ioaddr + UART_LCR); /* reset DLAB */
- if (hwconf->IsMoxaMustChipFlag == MOXA_MUST_MU860_HWID) {
- for (i = 0; i < hwconf->ports; i++) {
- if (i < 4)
- hwconf->opmode_ioaddr[i] = ioaddress + 4;
- else
- hwconf->opmode_ioaddr[i] = ioaddress + 0x0c;
- }
- outb(0, ioaddress + 4); /* default set to RS232 mode */
- outb(0, ioaddress + 0x0c); /* default set to RS232 mode */
- }
+#ifdef BOTHER
+ if (C_BAUD(info->tty) == BOTHER) {
+ quot = info->baud_base % newspd;
+ quot *= 8;
+ if (quot % newspd > newspd / 2) {
+ quot /= newspd;
+ quot++;
+ } else
+ quot /= newspd;
+
+ SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, quot);
+ } else
+#endif
+ SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, 0);
- for (i = 0; i < hwconf->ports; i++) {
- hwconf->vector_mask |= (1 << i);
- hwconf->baud_base[i] = 921600;
- }
return 0;
}
-#endif
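/*
 * Editorial aside, not part of the patch: the divisor/timeout arithmetic in
 * mxser_set_baud(), worked through with assumed numbers (baud_base 921600,
 * a 16-byte TX FIFO, HZ of 100).  For 115200 bps the divisor is
 * 921600 / 115200 = 8, and the drain timeout is
 * fifo_depth * 10 bits * HZ * quot / baud_base, plus HZ/50 (~0.02 s) of slop.
 */
#include <stdio.h>

int main(void)
{
	const long baud_base = 921600, newspd = 115200;
	const int xmit_fifo_size = 16, hz = 100;
	long quot, timeout;

	quot = baud_base / newspd;		/* 8 */
	if (quot == 0)
		quot = 1;

	timeout = (xmit_fifo_size * hz * 10 * quot) / baud_base;
	timeout += hz / 50;			/* ~0.02 s of slop */

	printf("quot=%ld timeout=%ld jiffies\n", quot, timeout);
	return 0;
}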
-static int mxser_init(void)
+/*
+ * This routine is called to set the UART divisor registers to match
+ * the specified baud rate for a serial port.
+ */
+static int mxser_change_speed(struct mxser_port *info,
+ struct ktermios *old_termios)
{
- int i, m, retval, b, n;
- struct pci_dev *pdev = NULL;
- int index;
- unsigned char busnum, devnum;
- struct mxser_hwconf hwconf;
-
- mxvar_sdriver = alloc_tty_driver(MXSER_PORTS + 1);
- if (!mxvar_sdriver)
- return -ENOMEM;
- spin_lock_init(&gm_lock);
-
- for (i = 0; i < MXSER_BOARDS; i++) {
- mxsercfg[i].board_type = -1;
- }
+ unsigned cflag, cval, fcr;
+ int ret = 0;
+ unsigned char status;
- printk(KERN_INFO "MOXA Smartio/Industio family driver version %s\n",
- MXSER_VERSION);
+ if (!info->tty || !info->tty->termios)
+ return ret;
+ cflag = info->tty->termios->c_cflag;
+ if (!(info->ioaddr))
+ return ret;
- /* Initialize the tty_driver structure */
- memset(mxvar_sdriver, 0, sizeof(struct tty_driver));
- mxvar_sdriver->owner = THIS_MODULE;
- mxvar_sdriver->magic = TTY_DRIVER_MAGIC;
- mxvar_sdriver->name = "ttyMI";
- mxvar_sdriver->major = ttymajor;
- mxvar_sdriver->minor_start = 0;
- mxvar_sdriver->num = MXSER_PORTS + 1;
- mxvar_sdriver->type = TTY_DRIVER_TYPE_SERIAL;
- mxvar_sdriver->subtype = SERIAL_TYPE_NORMAL;
- mxvar_sdriver->init_termios = tty_std_termios;
- mxvar_sdriver->init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
- mxvar_sdriver->init_termios.c_ispeed = 9600;
- mxvar_sdriver->init_termios.c_ospeed = 9600;
- mxvar_sdriver->flags = TTY_DRIVER_REAL_RAW;
- tty_set_operations(mxvar_sdriver, &mxser_ops);
- mxvar_sdriver->ttys = mxvar_tty;
- mxvar_sdriver->termios = mxvar_termios;
- mxvar_sdriver->termios_locked = mxvar_termios_locked;
+ if (mxser_set_baud_method[info->tty->index] == 0)
+ mxser_set_baud(info, tty_get_baud_rate(info->tty));
- mxvar_diagflag = 0;
- memset(mxvar_table, 0, MXSER_PORTS * sizeof(struct mxser_struct));
- memset(&mxvar_log, 0, sizeof(struct mxser_log));
+ /* byte size and parity */
+ switch (cflag & CSIZE) {
+ case CS5:
+ cval = 0x00;
+ break;
+ case CS6:
+ cval = 0x01;
+ break;
+ case CS7:
+ cval = 0x02;
+ break;
+ case CS8:
+ cval = 0x03;
+ break;
+ default:
+ cval = 0x00;
+ break; /* to keep GCC quiet... */
+ }
+ if (cflag & CSTOPB)
+ cval |= 0x04;
+ if (cflag & PARENB)
+ cval |= UART_LCR_PARITY;
+ if (!(cflag & PARODD))
+ cval |= UART_LCR_EPAR;
+ if (cflag & CMSPAR)
+ cval |= UART_LCR_SPAR;
- memset(&mxser_msr, 0, sizeof(unsigned char) * (MXSER_PORTS + 1));
- memset(&mon_data_ext, 0, sizeof(struct mxser_mon_ext));
- memset(&mxser_set_baud_method, 0, sizeof(int) * (MXSER_PORTS + 1));
- memset(&hwconf, 0, sizeof(struct mxser_hwconf));
+ if ((info->type == PORT_8250) || (info->type == PORT_16450)) {
+ if (info->board->chip_flag) {
+ fcr = UART_FCR_ENABLE_FIFO;
+ fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
+ SET_MOXA_MUST_FIFO_VALUE(info);
+ } else
+ fcr = 0;
+ } else {
+ fcr = UART_FCR_ENABLE_FIFO;
+ if (info->board->chip_flag) {
+ fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
+ SET_MOXA_MUST_FIFO_VALUE(info);
+ } else {
+ switch (info->rx_trigger) {
+ case 1:
+ fcr |= UART_FCR_TRIGGER_1;
+ break;
+ case 4:
+ fcr |= UART_FCR_TRIGGER_4;
+ break;
+ case 8:
+ fcr |= UART_FCR_TRIGGER_8;
+ break;
+ default:
+ fcr |= UART_FCR_TRIGGER_14;
+ break;
+ }
+ }
+ }
- m = 0;
- /* Start finding ISA boards here */
- for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) {
- int cap;
-
- if (!(cap = mxserBoardCAP[b]))
- continue;
-
- retval = mxser_get_ISA_conf(cap, &hwconf);
-
- if (retval != 0)
- printk(KERN_INFO "Found MOXA %s board (CAP=0x%x)\n",
- mxser_brdname[hwconf.board_type - 1], ioaddr[b]);
-
- if (retval <= 0) {
- if (retval == MXSER_ERR_IRQ)
- printk(KERN_ERR "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IRQ_CONFLIT)
- printk(KERN_ERR "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_VECTOR)
- printk(KERN_ERR "Invalid interrupt vector, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IOADDR)
- printk(KERN_ERR "Invalid I/O address, "
- "board not configured\n");
-
- continue;
+ /* CTS flow control flag and modem status interrupts */
+ info->IER &= ~UART_IER_MSI;
+ info->MCR &= ~UART_MCR_AFE;
+ if (cflag & CRTSCTS) {
+ info->flags |= ASYNC_CTS_FLOW;
+ info->IER |= UART_IER_MSI;
+ if ((info->type == PORT_16550A) || (info->board->chip_flag)) {
+ info->MCR |= UART_MCR_AFE;
+ } else {
+ status = inb(info->ioaddr + UART_MSR);
+ if (info->tty->hw_stopped) {
+ if (status & UART_MSR_CTS) {
+ info->tty->hw_stopped = 0;
+ if (info->type != PORT_16550A &&
+ !info->board->chip_flag) {
+ outb(info->IER & ~UART_IER_THRI,
+ info->ioaddr +
+ UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr +
+ UART_IER);
+ }
+ tty_wakeup(info->tty);
+ }
+ } else {
+ if (!(status & UART_MSR_CTS)) {
+ info->tty->hw_stopped = 1;
+ if ((info->type != PORT_16550A) &&
+ (!info->board->chip_flag)) {
+ info->IER &= ~UART_IER_THRI;
+ outb(info->IER, info->ioaddr +
+ UART_IER);
+ }
+ }
+ }
}
+ } else {
+ info->flags &= ~ASYNC_CTS_FLOW;
+ }
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ if (cflag & CLOCAL) {
+ info->flags &= ~ASYNC_CHECK_CD;
+ } else {
+ info->flags |= ASYNC_CHECK_CD;
+ info->IER |= UART_IER_MSI;
+ }
+ outb(info->IER, info->ioaddr + UART_IER);
- hwconf.pciInfo.busNum = 0;
- hwconf.pciInfo.devNum = 0;
- hwconf.pciInfo.pdev = NULL;
+ /*
+ * Set up parity check flag
+ */
+ info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
+ if (I_INPCK(info->tty))
+ info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
+ if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
+ info->read_status_mask |= UART_LSR_BI;
+
+ info->ignore_status_mask = 0;
- mxser_getcfg(m, &hwconf);
+ if (I_IGNBRK(info->tty)) {
+ info->ignore_status_mask |= UART_LSR_BI;
+ info->read_status_mask |= UART_LSR_BI;
/*
- * init mxsercfg first,
- * or mxsercfg data is not correct on ISR.
+ * If we're ignoring parity and break indicators, ignore
+ * overruns too (for real raw support).
*/
- /* mxser_initbrd will hook ISR. */
- if (mxser_initbrd(m, &hwconf) < 0)
- continue;
-
- m++;
+ if (I_IGNPAR(info->tty)) {
+ info->ignore_status_mask |=
+ UART_LSR_OE |
+ UART_LSR_PE |
+ UART_LSR_FE;
+ info->read_status_mask |=
+ UART_LSR_OE |
+ UART_LSR_PE |
+ UART_LSR_FE;
+ }
+ }
+ if (info->board->chip_flag) {
+ SET_MOXA_MUST_XON1_VALUE(info->ioaddr, START_CHAR(info->tty));
+ SET_MOXA_MUST_XOFF1_VALUE(info->ioaddr, STOP_CHAR(info->tty));
+ if (I_IXON(info->tty)) {
+ ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+ } else {
+ DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+ }
+ if (I_IXOFF(info->tty)) {
+ ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+ } else {
+ DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+ }
}
- /* Start finding ISA boards from module arg */
- for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) {
- int cap;
- if (!(cap = ioaddr[b]))
- continue;
+ outb(fcr, info->ioaddr + UART_FCR); /* set fcr */
+ outb(cval, info->ioaddr + UART_LCR);
- retval = mxser_get_ISA_conf(cap, &hwconf);
+ return ret;
+}
- if (retval != 0)
- printk(KERN_INFO "Found MOXA %s board (CAP=0x%x)\n",
- mxser_brdname[hwconf.board_type - 1], ioaddr[b]);
+static void mxser_check_modem_status(struct mxser_port *port, int status)
+{
+ /* update input line counters */
+ if (status & UART_MSR_TERI)
+ port->icount.rng++;
+ if (status & UART_MSR_DDSR)
+ port->icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ port->icount.dcd++;
+ if (status & UART_MSR_DCTS)
+ port->icount.cts++;
+ port->mon_data.modem_status = status;
+ wake_up_interruptible(&port->delta_msr_wait);
- if (retval <= 0) {
- if (retval == MXSER_ERR_IRQ)
- printk(KERN_ERR "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IRQ_CONFLIT)
- printk(KERN_ERR "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_VECTOR)
- printk(KERN_ERR "Invalid interrupt vector, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IOADDR)
- printk(KERN_ERR "Invalid I/O address, "
- "board not configured\n");
+ if ((port->flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) {
+ if (status & UART_MSR_DCD)
+ wake_up_interruptible(&port->open_wait);
+ }
- continue;
+ if (port->flags & ASYNC_CTS_FLOW) {
+ if (port->tty->hw_stopped) {
+ if (status & UART_MSR_CTS) {
+ port->tty->hw_stopped = 0;
+
+ if ((port->type != PORT_16550A) &&
+ (!port->board->chip_flag)) {
+ outb(port->IER & ~UART_IER_THRI,
+ port->ioaddr + UART_IER);
+ port->IER |= UART_IER_THRI;
+ outb(port->IER, port->ioaddr +
+ UART_IER);
+ }
+ tty_wakeup(port->tty);
+ }
+ } else {
+ if (!(status & UART_MSR_CTS)) {
+ port->tty->hw_stopped = 1;
+ if (port->type != PORT_16550A &&
+ !port->board->chip_flag) {
+ port->IER &= ~UART_IER_THRI;
+ outb(port->IER, port->ioaddr +
+ UART_IER);
+ }
+ }
}
+ }
+}
- hwconf.pciInfo.busNum = 0;
- hwconf.pciInfo.devNum = 0;
- hwconf.pciInfo.pdev = NULL;
+static int mxser_startup(struct mxser_port *info)
+{
+ unsigned long page;
+ unsigned long flags;
- mxser_getcfg(m, &hwconf);
- /*
- * init mxsercfg first,
- * or mxsercfg data is not correct on ISR.
- */
- /* mxser_initbrd will hook ISR. */
- if (mxser_initbrd(m, &hwconf) < 0)
- continue;
+ page = __get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&info->slock, flags);
- m++;
+ if (info->flags & ASYNC_INITIALIZED) {
+ free_page(page);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return 0;
}
- /* start finding PCI board here */
-#ifdef CONFIG_PCI
- n = ARRAY_SIZE(mxser_pcibrds) - 1;
- index = 0;
- b = 0;
- while (b < n) {
- pdev = pci_get_device(mxser_pcibrds[b].vendor,
- mxser_pcibrds[b].device, pdev);
- if (pdev == NULL) {
- b++;
- continue;
- }
- hwconf.pciInfo.busNum = busnum = pdev->bus->number;
- hwconf.pciInfo.devNum = devnum = PCI_SLOT(pdev->devfn) << 3;
- hwconf.pciInfo.pdev = pdev;
- printk(KERN_INFO "Found MOXA %s board(BusNo=%d,DevNo=%d)\n",
- mxser_brdname[(int) (mxser_pcibrds[b].driver_data) - 1],
- busnum, devnum >> 3);
- index++;
- if (m >= MXSER_BOARDS)
- printk(KERN_ERR
- "Too many Smartio/Industio family boards find "
- "(maximum %d), board not configured\n",
- MXSER_BOARDS);
- else {
- if (pci_enable_device(pdev)) {
- printk(KERN_ERR "Moxa SmartI/O PCI enable "
- "fail !\n");
- continue;
- }
- retval = mxser_get_PCI_conf(busnum, devnum,
- (int)mxser_pcibrds[b].driver_data,
- &hwconf);
- if (retval < 0) {
- if (retval == MXSER_ERR_IRQ)
- printk(KERN_ERR
- "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IRQ_CONFLIT)
- printk(KERN_ERR
- "Invalid interrupt number, "
- "board not configured\n");
- else if (retval == MXSER_ERR_VECTOR)
- printk(KERN_ERR
- "Invalid interrupt vector, "
- "board not configured\n");
- else if (retval == MXSER_ERR_IOADDR)
- printk(KERN_ERR
- "Invalid I/O address, "
- "board not configured\n");
- continue;
- }
- mxser_getcfg(m, &hwconf);
- /* init mxsercfg first,
- * or mxsercfg data is not correct on ISR.
- */
- /* mxser_initbrd will hook ISR. */
- if (mxser_initbrd(m, &hwconf) < 0)
- continue;
- m++;
- /* Keep an extra reference if we succeeded. It will
- be returned at unload time */
- pci_dev_get(pdev);
- }
+ if (!info->ioaddr || !info->type) {
+ if (info->tty)
+ set_bit(TTY_IO_ERROR, &info->tty->flags);
+ free_page(page);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return 0;
}
-#endif
+ if (info->xmit_buf)
+ free_page(page);
+ else
+ info->xmit_buf = (unsigned char *) page;
- retval = tty_register_driver(mxvar_sdriver);
- if (retval) {
- printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family"
- " driver !\n");
- put_tty_driver(mxvar_sdriver);
+ /*
+ * Clear the FIFO buffers and disable them
+ * (they will be reenabled in mxser_change_speed())
+ */
+ if (info->board->chip_flag)
+ outb((UART_FCR_CLEAR_RCVR |
+ UART_FCR_CLEAR_XMIT |
+ MOXA_MUST_FCR_GDA_MODE_ENABLE), info->ioaddr + UART_FCR);
+ else
+ outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
+ info->ioaddr + UART_FCR);
- for (i = 0; i < MXSER_BOARDS; i++) {
- if (mxsercfg[i].board_type == -1)
- continue;
- else {
- free_irq(mxsercfg[i].irq, &mxvar_table[i * MXSER_PORTS_PER_BOARD]);
- /* todo: release io, vector */
- }
- }
- return retval;
+ /*
+ * At this point there's no way the LSR could still be 0xFF;
+ * if it is, then bail out, because there's likely no UART
+ * here.
+ */
+ if (inb(info->ioaddr + UART_LSR) == 0xff) {
+ spin_unlock_irqrestore(&info->slock, flags);
+ if (capable(CAP_SYS_ADMIN)) {
+ if (info->tty)
+ set_bit(TTY_IO_ERROR, &info->tty->flags);
+ return 0;
+ } else
+ return -ENODEV;
}
+ /*
+ * Clear the interrupt registers.
+ */
+ (void) inb(info->ioaddr + UART_LSR);
+ (void) inb(info->ioaddr + UART_RX);
+ (void) inb(info->ioaddr + UART_IIR);
+ (void) inb(info->ioaddr + UART_MSR);
+
+ /*
+ * Now, initialize the UART
+ */
+ outb(UART_LCR_WLEN8, info->ioaddr + UART_LCR); /* reset DLAB */
+ info->MCR = UART_MCR_DTR | UART_MCR_RTS;
+ outb(info->MCR, info->ioaddr + UART_MCR);
+
+ /*
+ * Finally, enable interrupts
+ */
+ info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
+
+ if (info->board->chip_flag)
+ info->IER |= MOXA_MUST_IER_EGDAI;
+ outb(info->IER, info->ioaddr + UART_IER); /* enable interrupts */
+
+ /*
+ * And clear the interrupt registers again for luck.
+ */
+ (void) inb(info->ioaddr + UART_LSR);
+ (void) inb(info->ioaddr + UART_RX);
+ (void) inb(info->ioaddr + UART_IIR);
+ (void) inb(info->ioaddr + UART_MSR);
+
+ if (info->tty)
+ clear_bit(TTY_IO_ERROR, &info->tty->flags);
+ info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+
+ /*
+ * and set the speed of the serial port
+ */
+ mxser_change_speed(info, NULL);
+ info->flags |= ASYNC_INITIALIZED;
+ spin_unlock_irqrestore(&info->slock, flags);
+
return 0;
}
-static void mxser_do_softint(struct work_struct *work)
+/*
+ * This routine will shut down a serial port; interrupts may be disabled, and
+ * DTR is dropped if the hangup on close termio flag is on.
+ */
+static void mxser_shutdown(struct mxser_port *info)
{
- struct mxser_struct *info =
- container_of(work, struct mxser_struct, tqueue);
- struct tty_struct *tty;
+ unsigned long flags;
- tty = info->tty;
+ if (!(info->flags & ASYNC_INITIALIZED))
+ return;
+
+ spin_lock_irqsave(&info->slock, flags);
+
+ /*
+ * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
+ * here so the queue might never be woken up
+ */
+ wake_up_interruptible(&info->delta_msr_wait);
- if (tty) {
- if (test_and_clear_bit(MXSER_EVENT_TXLOW, &info->event))
- tty_wakeup(tty);
- if (test_and_clear_bit(MXSER_EVENT_HANGUP, &info->event))
- tty_hangup(tty);
+ /*
+ * Free the IRQ, if necessary
+ */
+ if (info->xmit_buf) {
+ free_page((unsigned long) info->xmit_buf);
+ info->xmit_buf = NULL;
}
-}
-static unsigned char mxser_get_msr(int baseaddr, int mode, int port, struct mxser_struct *info)
-{
- unsigned char status = 0;
+ info->IER = 0;
+ outb(0x00, info->ioaddr + UART_IER);
- status = inb(baseaddr + UART_MSR);
+ if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
+ info->MCR &= ~(UART_MCR_DTR | UART_MCR_RTS);
+ outb(info->MCR, info->ioaddr + UART_MCR);
- mxser_msr[port] &= 0x0F;
- mxser_msr[port] |= status;
- status = mxser_msr[port];
- if (mode)
- mxser_msr[port] = 0;
+ /* clear Rx/Tx FIFO's */
+ if (info->board->chip_flag)
+ outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT |
+ MOXA_MUST_FCR_GDA_MODE_ENABLE,
+ info->ioaddr + UART_FCR);
+ else
+ outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+ info->ioaddr + UART_FCR);
- return status;
+ /* read data port to reset things */
+ (void) inb(info->ioaddr + UART_RX);
+
+ if (info->tty)
+ set_bit(TTY_IO_ERROR, &info->tty->flags);
+
+ info->flags &= ~ASYNC_INITIALIZED;
+
+ if (info->board->chip_flag)
+ SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+
+ spin_unlock_irqrestore(&info->slock, flags);
}
/*
@@ -958,19 +893,17 @@ static unsigned char mxser_get_msr(int baseaddr, int mode, int port, struct mxse
*/
static int mxser_open(struct tty_struct *tty, struct file *filp)
{
- struct mxser_struct *info;
+ struct mxser_port *info;
+ unsigned long flags;
int retval, line;
- /* initialize driver_data in case something fails */
- tty->driver_data = NULL;
-
line = tty->index;
if (line == MXSER_PORTS)
return 0;
if (line < 0 || line > MXSER_PORTS)
return -ENODEV;
- info = mxvar_table + line;
- if (!info->base)
+ info = &mxser_boards[line / MXSER_PORTS_PER_BOARD].ports[line % MXSER_PORTS_PER_BOARD];
+ if (!info->ioaddr)
return -ENODEV;
tty->driver_data = info;
@@ -978,6 +911,9 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
/*
* Start up serial port
*/
+ spin_lock_irqsave(&info->slock, flags);
+ info->count++;
+ spin_unlock_irqrestore(&info->slock, flags);
retval = mxser_startup(info);
if (retval)
return retval;
@@ -986,21 +922,6 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
if (retval)
return retval;
- info->count++;
-
- if ((info->count == 1) && (info->flags & ASYNC_SPLIT_TERMIOS)) {
- if (tty->driver->subtype == SERIAL_TYPE_NORMAL)
- *tty->termios = info->normal_termios;
- else
- *tty->termios = info->callout_termios;
- mxser_change_speed(info, NULL);
- }
-
- /*
- status = mxser_get_msr(info->base, 0, info->port);
- mxser_check_modem_status(info, status);
- */
-
/* unmark here for very high baud rate (ex. 921600 bps) used */
tty->low_latency = 1;
return 0;
@@ -1014,11 +935,10 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
*/
static void mxser_close(struct tty_struct *tty, struct file *filp)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long timeout;
unsigned long flags;
- struct tty_ldisc *ld;
if (tty->index == MXSER_PORTS)
return;
@@ -1045,7 +965,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
}
if (--info->count < 0) {
printk(KERN_ERR "mxser_close: bad serial port count for "
- "ttys%d: %d\n", info->port, info->count);
+ "ttys%d: %d\n", tty->index, info->count);
info->count = 0;
}
if (info->count) {
@@ -1074,20 +994,18 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
* line status register.
*/
info->IER &= ~UART_IER_RLSI;
- if (info->IsMoxaMustChipFlag)
+ if (info->board->chip_flag)
info->IER &= ~MOXA_MUST_RECV_ISR;
-/* by William
- info->read_status_mask &= ~UART_LSR_DR;
-*/
+
if (info->flags & ASYNC_INITIALIZED) {
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially
* important if there is a transmit FIFO!
*/
timeout = jiffies + HZ;
- while (!(inb(info->base + UART_LSR) & UART_LSR_TEMT)) {
+ while (!(inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT)) {
schedule_timeout_interruptible(5);
if (time_after(jiffies, timeout))
break;
@@ -1097,14 +1015,9 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
if (tty->driver->flush_buffer)
tty->driver->flush_buffer(tty);
-
- ld = tty_ldisc_ref(tty);
- if (ld) {
- if (ld->flush_buffer)
- ld->flush_buffer(tty);
- tty_ldisc_deref(ld);
- }
-
+
+ tty_ldisc_flush(tty);
+
tty->closing = 0;
info->event = 0;
info->tty = NULL;
@@ -1115,14 +1028,12 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
}
info->flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
- wake_up_interruptible(&info->close_wait);
-
}
static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
int c, total = 0;
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long flags;
if (!info->xmit_buf)
@@ -1146,13 +1057,15 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou
total += c;
}
- if (info->xmit_cnt && !tty->stopped && !(info->IER & UART_IER_THRI)) {
+ if (info->xmit_cnt && !tty->stopped) {
if (!tty->hw_stopped ||
(info->type == PORT_16550A) ||
- (info->IsMoxaMustChipFlag)) {
+ (info->board->chip_flag)) {
spin_lock_irqsave(&info->slock, flags);
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr +
+ UART_IER);
info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
spin_unlock_irqrestore(&info->slock, flags);
}
}
@@ -1161,7 +1074,7 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou
static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long flags;
if (!info->xmit_buf)
@@ -1175,13 +1088,14 @@ static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
info->xmit_head &= SERIAL_XMIT_SIZE - 1;
info->xmit_cnt++;
spin_unlock_irqrestore(&info->slock, flags);
- if (!tty->stopped && !(info->IER & UART_IER_THRI)) {
+ if (!tty->stopped) {
if (!tty->hw_stopped ||
(info->type == PORT_16550A) ||
- info->IsMoxaMustChipFlag) {
+ info->board->chip_flag) {
spin_lock_irqsave(&info->slock, flags);
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
spin_unlock_irqrestore(&info->slock, flags);
}
}
@@ -1190,7 +1104,7 @@ static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
static void mxser_flush_chars(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long flags;
if (info->xmit_cnt <= 0 ||
@@ -1198,21 +1112,22 @@ static void mxser_flush_chars(struct tty_struct *tty)
!info->xmit_buf ||
(tty->hw_stopped &&
(info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag)
+ (!info->board->chip_flag)
))
return;
spin_lock_irqsave(&info->slock, flags);
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
spin_unlock_irqrestore(&info->slock, flags);
}
static int mxser_write_room(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
int ret;
ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
@@ -1223,13 +1138,13 @@ static int mxser_write_room(struct tty_struct *tty)
static int mxser_chars_in_buffer(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
return info->xmit_cnt;
}
static void mxser_flush_buffer(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
char fcr;
unsigned long flags;
@@ -1237,39 +1152,497 @@ static void mxser_flush_buffer(struct tty_struct *tty)
spin_lock_irqsave(&info->slock, flags);
info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
- /* below added by shinhay */
- fcr = inb(info->base + UART_FCR);
+ fcr = inb(info->ioaddr + UART_FCR);
outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
- info->base + UART_FCR);
- outb(fcr, info->base + UART_FCR);
+ info->ioaddr + UART_FCR);
+ outb(fcr, info->ioaddr + UART_FCR);
spin_unlock_irqrestore(&info->slock, flags);
- /* above added by shinhay */
tty_wakeup(tty);
}
-static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg)
+/*
+ * ------------------------------------------------------------
+ * friends of mxser_ioctl()
+ * ------------------------------------------------------------
+ */
+static int mxser_get_serial_info(struct mxser_port *info,
+ struct serial_struct __user *retinfo)
{
- struct mxser_struct *info = tty->driver_data;
- int retval;
- struct async_icount cprev, cnow; /* kernel counter temps */
+ struct serial_struct tmp = {
+ .type = info->type,
+ .line = info->tty->index,
+ .port = info->ioaddr,
+ .irq = info->board->irq,
+ .flags = info->flags,
+ .baud_base = info->baud_base,
+ .close_delay = info->close_delay,
+ .closing_wait = info->closing_wait,
+ .custom_divisor = info->custom_divisor,
+ .hub6 = 0
+ };
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+ return 0;
+}
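/*
 * Editorial aside, not part of the patch: mxser_get_serial_info() services
 * the standard TIOCGSERIAL ioctl (see the TIOCGSERIAL case later in
 * mxser_ioctl()), so a setserial-style query works on these ports.  Minimal
 * user-space sketch; the device path is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>	/* struct serial_struct */

int main(void)
{
	int fd = open("/dev/ttyMI0", O_RDWR | O_NOCTTY);
	struct serial_struct ss;

	if (fd < 0)
		return 1;
	if (ioctl(fd, TIOCGSERIAL, &ss) == 0)
		printf("type=%d port=0x%x irq=%d baud_base=%d\n",
		       ss.type, ss.port, ss.irq, ss.baud_base);
	close(fd);
	return 0;
}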
+
+static int mxser_set_serial_info(struct mxser_port *info,
+ struct serial_struct __user *new_info)
+{
+ struct serial_struct new_serial;
+ speed_t baud;
+ unsigned long sl_flags;
+ unsigned int flags;
+ int retval = 0;
+
+ if (!new_info || !info->ioaddr)
+ return -ENODEV;
+ if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+ return -EFAULT;
+
+ if (new_serial.irq != info->board->irq ||
+ new_serial.port != info->ioaddr)
+ return -EINVAL;
+
+ flags = info->flags & ASYNC_SPD_MASK;
+
+ if (!capable(CAP_SYS_ADMIN)) {
+ if ((new_serial.baud_base != info->baud_base) ||
+ (new_serial.close_delay != info->close_delay) ||
+ ((new_serial.flags & ~ASYNC_USR_MASK) != (info->flags & ~ASYNC_USR_MASK)))
+ return -EPERM;
+ info->flags = ((info->flags & ~ASYNC_USR_MASK) |
+ (new_serial.flags & ASYNC_USR_MASK));
+ } else {
+ /*
+ * OK, past this point, all the error checking has been done.
+ * At this point, we start making changes.....
+ */
+ info->flags = ((info->flags & ~ASYNC_FLAGS) |
+ (new_serial.flags & ASYNC_FLAGS));
+ info->close_delay = new_serial.close_delay * HZ / 100;
+ info->closing_wait = new_serial.closing_wait * HZ / 100;
+ info->tty->low_latency =
+ (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ info->tty->low_latency = 0;
+ if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST &&
+ (new_serial.baud_base != info->baud_base ||
+ new_serial.custom_divisor !=
+ info->custom_divisor)) {
+ baud = new_serial.baud_base / new_serial.custom_divisor;
+ tty_encode_baud_rate(info->tty, baud, baud);
+ }
+ }
+
+ info->type = new_serial.type;
+
+ process_txrx_fifo(info);
+
+ if (info->flags & ASYNC_INITIALIZED) {
+ if (flags != (info->flags & ASYNC_SPD_MASK)) {
+ spin_lock_irqsave(&info->slock, sl_flags);
+ mxser_change_speed(info, NULL);
+ spin_unlock_irqrestore(&info->slock, sl_flags);
+ }
+ } else
+ retval = mxser_startup(info);
+
+ return retval;
+}
+
+/*
+ * mxser_get_lsr_info - get line status register info
+ *
+ * Purpose: Let user call ioctl() to get info when the UART physically
+ * is emptied. On bus types like RS485, the transmitter must
+ * release the bus after transmitting. This must be done when
+ * the transmit shift register is empty, not when the transmit
+ * holding register is empty. This functionality
+ * allows an RS485 driver to be written in user space.
+ */
+static int mxser_get_lsr_info(struct mxser_port *info,
+ unsigned int __user *value)
+{
+ unsigned char status;
+ unsigned int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->slock, flags);
+ status = inb(info->ioaddr + UART_LSR);
+ spin_unlock_irqrestore(&info->slock, flags);
+ result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
+ return put_user(result, value);
+}
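/*
 * Editorial aside, not part of the patch: the user-space side of
 * mxser_get_lsr_info().  Assuming this helper backs the conventional
 * TIOCSERGETLSR ioctl (the case itself is outside this hunk), an RS485-style
 * application polls it until the transmit shift register is empty before
 * releasing the bus.  Device path assumed; error handling kept minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>	/* TIOCSER_TEMT */

int main(void)
{
	int fd = open("/dev/ttyMI0", O_RDWR | O_NOCTTY);
	unsigned int lsr = 0;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* busy-wait until both the FIFO and the shift register have drained */
	do {
		if (ioctl(fd, TIOCSERGETLSR, &lsr) < 0) {
			perror("TIOCSERGETLSR");
			break;
		}
	} while (!(lsr & TIOCSER_TEMT));

	close(fd);
	return 0;
}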
+
+/*
+ * This routine sends a break character out the serial port.
+ */
+static void mxser_send_break(struct mxser_port *info, int duration)
+{
+ unsigned long flags;
+
+ if (!info->ioaddr)
+ return;
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_lock_irqsave(&info->slock, flags);
+ outb(inb(info->ioaddr + UART_LCR) | UART_LCR_SBC,
+ info->ioaddr + UART_LCR);
+ spin_unlock_irqrestore(&info->slock, flags);
+ schedule_timeout(duration);
+ spin_lock_irqsave(&info->slock, flags);
+ outb(inb(info->ioaddr + UART_LCR) & ~UART_LCR_SBC,
+ info->ioaddr + UART_LCR);
+ spin_unlock_irqrestore(&info->slock, flags);
+}
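/*
 * Editorial aside, not part of the patch: from user space mxser_send_break()
 * is reached through the standard break interface, e.g. tcsendbreak() or the
 * TCSBRK case handled later in mxser_ioctl().  A minimal sketch, assuming
 * the hypothetical /dev/ttyMI0 node:
 */
#include <fcntl.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyMI0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;
	tcsendbreak(fd, 0);	/* ~0.25-0.5 s of break, per POSIX */
	close(fd);
	return 0;
}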
+
+static int mxser_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned char control, status;
+ unsigned long flags;
+
+
+ if (tty->index == MXSER_PORTS)
+ return -ENOIOCTLCMD;
+ if (test_bit(TTY_IO_ERROR, &tty->flags))
+ return -EIO;
+
+ control = info->MCR;
+
+ spin_lock_irqsave(&info->slock, flags);
+ status = inb(info->ioaddr + UART_MSR);
+ if (status & UART_MSR_ANY_DELTA)
+ mxser_check_modem_status(info, status);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
+ ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
+ ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
+ ((status & UART_MSR_RI) ? TIOCM_RNG : 0) |
+ ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) |
+ ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
+}
+
+static int mxser_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
+
+
+ if (tty->index == MXSER_PORTS)
+ return -ENOIOCTLCMD;
+ if (test_bit(TTY_IO_ERROR, &tty->flags))
+ return -EIO;
+
+ spin_lock_irqsave(&info->slock, flags);
+
+ if (set & TIOCM_RTS)
+ info->MCR |= UART_MCR_RTS;
+ if (set & TIOCM_DTR)
+ info->MCR |= UART_MCR_DTR;
+
+ if (clear & TIOCM_RTS)
+ info->MCR &= ~UART_MCR_RTS;
+ if (clear & TIOCM_DTR)
+ info->MCR &= ~UART_MCR_DTR;
+
+ outb(info->MCR, info->ioaddr + UART_MCR);
+ spin_unlock_irqrestore(&info->slock, flags);
+ return 0;
+}
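/*
 * Editorial aside, not part of the patch: mxser_tiocmget()/mxser_tiocmset()
 * implement the generic modem-control operations, and the tty core routes
 * TIOCMGET/TIOCMBIS/TIOCMBIC through them.  The sketch below asserts RTS and
 * reads the line state back; the device path is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyMI0", O_RDWR | O_NOCTTY);
	int flag = TIOCM_RTS;
	int state = 0;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	ioctl(fd, TIOCMBIS, &flag);	/* assert RTS */
	ioctl(fd, TIOCMGET, &state);	/* read back modem lines */
	printf("CTS is %s\n", (state & TIOCM_CTS) ? "high" : "low");

	close(fd);
	return 0;
}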
+
+static int __init mxser_program_mode(int port)
+{
+ int id, i, j, n;
+
+ outb(0, port);
+ outb(0, port);
+ outb(0, port);
+ (void)inb(port);
+ (void)inb(port);
+ outb(0, port);
+ (void)inb(port);
+
+ id = inb(port + 1) & 0x1F;
+ if ((id != C168_ASIC_ID) &&
+ (id != C104_ASIC_ID) &&
+ (id != C102_ASIC_ID) &&
+ (id != CI132_ASIC_ID) &&
+ (id != CI134_ASIC_ID) &&
+ (id != CI104J_ASIC_ID))
+ return -1;
+ for (i = 0, j = 0; i < 4; i++) {
+ n = inb(port + 2);
+ if (n == 'M') {
+ j = 1;
+ } else if ((j == 1) && (n == 1)) {
+ j = 2;
+ break;
+ } else
+ j = 0;
+ }
+ if (j != 2)
+ id = -2;
+ return id;
+}
+
+static void __init mxser_normal_mode(int port)
+{
+ int i, n;
+
+ outb(0xA5, port + 1);
+ outb(0x80, port + 3);
+ outb(12, port + 0); /* 9600 bps */
+ outb(0, port + 1);
+ outb(0x03, port + 3); /* 8 data bits */
+ outb(0x13, port + 4); /* loop back mode */
+ for (i = 0; i < 16; i++) {
+ n = inb(port + 5);
+ if ((n & 0x61) == 0x60)
+ break;
+ if ((n & 1) == 1)
+ (void)inb(port);
+ }
+ outb(0x00, port + 4);
+}
+
+#define CHIP_SK 0x01 /* Serial Data Clock in Eprom */
+#define CHIP_DO 0x02 /* Serial Data Output in Eprom */
+#define CHIP_CS 0x04 /* Serial Chip Select in Eprom */
+#define CHIP_DI 0x08 /* Serial Data Input in Eprom */
+#define EN_CCMD 0x000 /* Chip's command register */
+#define EN0_RSARLO 0x008 /* Remote start address reg 0 */
+#define EN0_RSARHI 0x009 /* Remote start address reg 1 */
+#define EN0_RCNTLO 0x00A /* Remote byte count reg WR */
+#define EN0_RCNTHI 0x00B /* Remote byte count reg WR */
+#define EN0_DCFG 0x00E /* Data configuration reg WR */
+#define EN0_PORT 0x010 /* Rcv missed frame error counter RD */
+#define ENC_PAGE0 0x000 /* Select page 0 of chip registers */
+#define ENC_PAGE3 0x0C0 /* Select page 3 of chip registers */
+static int __init mxser_read_register(int port, unsigned short *regs)
+{
+ int i, k, value, id;
+ unsigned int j;
+
+ id = mxser_program_mode(port);
+ if (id < 0)
+ return id;
+ for (i = 0; i < 14; i++) {
+ k = (i & 0x3F) | 0x180;
+ for (j = 0x100; j > 0; j >>= 1) {
+ outb(CHIP_CS, port);
+ if (k & j) {
+ outb(CHIP_CS | CHIP_DO, port);
+ outb(CHIP_CS | CHIP_DO | CHIP_SK, port); /* A? bit of read */
+ } else {
+ outb(CHIP_CS, port);
+ outb(CHIP_CS | CHIP_SK, port); /* A? bit of read */
+ }
+ }
+ (void)inb(port);
+ value = 0;
+ for (k = 0, j = 0x8000; k < 16; k++, j >>= 1) {
+ outb(CHIP_CS, port);
+ outb(CHIP_CS | CHIP_SK, port);
+ if (inb(port) & CHIP_DI)
+ value |= j;
+ }
+ regs[i] = value;
+ outb(0, port);
+ }
+ mxser_normal_mode(port);
+ return id;
+}
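/*
 * Editorial aside, not part of the patch: mxser_read_register() bit-bangs
 * what the CHIP_* defines describe as a serial EEPROM -- it clocks a read
 * command out on CHIP_DO and then shifts a 16-bit word in MSB-first on
 * CHIP_DI, one bit per CHIP_SK pulse.  Below is a stand-alone simulation of
 * just the shift-in loop against a mock data line, to show the bit ordering;
 * the real code does this with inb()/outb() on the board's I/O port.
 */
#include <stdio.h>

/* pretend the EEPROM is serving this word, most significant bit first */
static unsigned short mock_word = 0x1234;
static int mock_bit;

static int mock_data_in(void)	/* stands in for "inb(port) & CHIP_DI" */
{
	return (mock_word >> (15 - mock_bit++)) & 1;
}

int main(void)
{
	unsigned short value = 0;
	unsigned int j;
	int k;

	for (k = 0, j = 0x8000; k < 16; k++, j >>= 1)
		if (mock_data_in())
			value |= j;

	printf("read 0x%04x\n", value);	/* prints 0x1234 */
	return 0;
}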
+
+static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
+{
+ struct mxser_port *port;
+ int result, status;
+ unsigned int i, j;
+
+ switch (cmd) {
+ case MOXA_GET_MAJOR:
+ return put_user(ttymajor, (int __user *)argp);
+
+ case MOXA_CHKPORTENABLE:
+ result = 0;
+
+ for (i = 0; i < MXSER_BOARDS; i++)
+ for (j = 0; j < MXSER_PORTS_PER_BOARD; j++)
+ if (mxser_boards[i].ports[j].ioaddr)
+ result |= (1 << i);
+
+ return put_user(result, (unsigned long __user *)argp);
+ case MOXA_GETDATACOUNT:
+ if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
+ return -EFAULT;
+ return 0;
+ case MOXA_GETMSTATUS:
+ for (i = 0; i < MXSER_BOARDS; i++)
+ for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
+ port = &mxser_boards[i].ports[j];
+
+ GMStatus[i].ri = 0;
+ if (!port->ioaddr) {
+ GMStatus[i].dcd = 0;
+ GMStatus[i].dsr = 0;
+ GMStatus[i].cts = 0;
+ continue;
+ }
+
+ if (!port->tty || !port->tty->termios)
+ GMStatus[i].cflag =
+ port->normal_termios.c_cflag;
+ else
+ GMStatus[i].cflag =
+ port->tty->termios->c_cflag;
+
+ status = inb(port->ioaddr + UART_MSR);
+ if (status & 0x80 /*UART_MSR_DCD */ )
+ GMStatus[i].dcd = 1;
+ else
+ GMStatus[i].dcd = 0;
+
+ if (status & 0x20 /*UART_MSR_DSR */ )
+ GMStatus[i].dsr = 1;
+ else
+ GMStatus[i].dsr = 0;
+
+
+ if (status & 0x10 /*UART_MSR_CTS */ )
+ GMStatus[i].cts = 1;
+ else
+ GMStatus[i].cts = 0;
+ }
+ if (copy_to_user(argp, GMStatus,
+ sizeof(struct mxser_mstatus) * MXSER_PORTS))
+ return -EFAULT;
+ return 0;
+ case MOXA_ASPP_MON_EXT: {
+ int p, shiftbit;
+ unsigned long opmode;
+ unsigned cflag, iflag;
+
+ for (i = 0; i < MXSER_BOARDS; i++)
+ for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
+ port = &mxser_boards[i].ports[j];
+ if (!port->ioaddr)
+ continue;
+
+ status = mxser_get_msr(port->ioaddr, 0, i);
+
+ if (status & UART_MSR_TERI)
+ port->icount.rng++;
+ if (status & UART_MSR_DDSR)
+ port->icount.dsr++;
+ if (status & UART_MSR_DDCD)
+ port->icount.dcd++;
+ if (status & UART_MSR_DCTS)
+ port->icount.cts++;
+
+ port->mon_data.modem_status = status;
+ mon_data_ext.rx_cnt[i] = port->mon_data.rxcnt;
+ mon_data_ext.tx_cnt[i] = port->mon_data.txcnt;
+ mon_data_ext.up_rxcnt[i] =
+ port->mon_data.up_rxcnt;
+ mon_data_ext.up_txcnt[i] =
+ port->mon_data.up_txcnt;
+ mon_data_ext.modem_status[i] =
+ port->mon_data.modem_status;
+ mon_data_ext.baudrate[i] =
+ tty_get_baud_rate(port->tty);
+
+ if (!port->tty || !port->tty->termios) {
+ cflag = port->normal_termios.c_cflag;
+ iflag = port->normal_termios.c_iflag;
+ } else {
+ cflag = port->tty->termios->c_cflag;
+ iflag = port->tty->termios->c_iflag;
+ }
+
+ mon_data_ext.databits[i] = cflag & CSIZE;
+
+ mon_data_ext.stopbits[i] = cflag & CSTOPB;
+
+ mon_data_ext.parity[i] =
+ cflag & (PARENB | PARODD | CMSPAR);
+
+ mon_data_ext.flowctrl[i] = 0x00;
+
+ if (cflag & CRTSCTS)
+ mon_data_ext.flowctrl[i] |= 0x03;
+
+ if (iflag & (IXON | IXOFF))
+ mon_data_ext.flowctrl[i] |= 0x0C;
+
+ if (port->type == PORT_16550A)
+ mon_data_ext.fifo[i] = 1;
+ else
+ mon_data_ext.fifo[i] = 0;
+
+ p = i % 4;
+ shiftbit = p * 2;
+ opmode = inb(port->opmode_ioaddr) >> shiftbit;
+ opmode &= OP_MODE_MASK;
+
+ mon_data_ext.iftype[i] = opmode;
+
+ }
+ if (copy_to_user(argp, &mon_data_ext,
+ sizeof(mon_data_ext)))
+ return -EFAULT;
+
+ return 0;
+
+ } default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static int mxser_cflags_changed(struct mxser_port *info, unsigned long arg,
+ struct async_icount *cprev)
+{
+ struct async_icount cnow;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&info->slock, flags);
+ cnow = info->icount; /* atomic copy */
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ ret = ((arg & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
+ ((arg & TIOCM_DSR) && (cnow.dsr != cprev->dsr)) ||
+ ((arg & TIOCM_CD) && (cnow.dcd != cprev->dcd)) ||
+ ((arg & TIOCM_CTS) && (cnow.cts != cprev->cts));
+
+ *cprev = cnow;
+
+ return ret;
+}
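/*
 * Editorial aside, not part of the patch: mxser_cflags_changed() is the wait
 * condition behind the TIOCMIWAIT case below.  From user space a program can
 * sleep until a modem line toggles and then read the interrupt counters with
 * TIOCGICOUNT.  The device path is an assumption; error handling is minimal.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>	/* struct serial_icounter_struct */

int main(void)
{
	int fd = open("/dev/ttyMI0", O_RDWR | O_NOCTTY);
	struct serial_icounter_struct ic;
	int lines = TIOCM_CD | TIOCM_CTS;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* blocks until DCD or CTS changes (interruptible by a signal) */
	if (ioctl(fd, TIOCMIWAIT, lines) == 0 &&
	    ioctl(fd, TIOCGICOUNT, &ic) == 0)
		printf("dcd=%d cts=%d\n", ic.dcd, ic.cts);

	close(fd);
	return 0;
}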
+
+static int mxser_ioctl(struct tty_struct *tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct mxser_port *info = tty->driver_data;
+ struct async_icount cnow;
struct serial_icounter_struct __user *p_cuser;
- unsigned long templ;
unsigned long flags;
void __user *argp = (void __user *)arg;
+ int retval;
if (tty->index == MXSER_PORTS)
return mxser_ioctl_special(cmd, argp);
- /* following add by Victor Yu. 01-05-2004 */
if (cmd == MOXA_SET_OP_MODE || cmd == MOXA_GET_OP_MODE) {
- int opmode, p;
+ int p;
+ unsigned long opmode;
static unsigned char ModeMask[] = { 0xfc, 0xf3, 0xcf, 0x3f };
int shiftbit;
unsigned char val, mask;
- p = info->port % 4;
+ p = tty->index % 4;
if (cmd == MOXA_SET_OP_MODE) {
if (get_user(opmode, (int __user *) argp))
return -EFAULT;
@@ -1288,17 +1661,16 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
shiftbit = p * 2;
opmode = inb(info->opmode_ioaddr) >> shiftbit;
opmode &= OP_MODE_MASK;
- if (copy_to_user(argp, &opmode, sizeof(int)))
+ if (put_user(opmode, (int __user *)argp))
return -EFAULT;
}
return 0;
}
- /* above add by Victor Yu. 01-05-2004 */
- if ((cmd != TIOCGSERIAL) && (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) {
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
- }
+ if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT && cmd != TIOCGICOUNT &&
+ test_bit(TTY_IO_ERROR, &tty->flags))
+ return -EIO;
+
switch (cmd) {
case TCSBRK: /* SVID version: non-zero arg --> no break */
retval = tty_check_change(tty);
@@ -1316,11 +1688,10 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4);
return 0;
case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
+ return put_user(!!C_CLOCAL(tty), (unsigned long __user *)argp);
case TIOCSSOFTCAR:
- if (get_user(templ, (unsigned long __user *) argp))
+ if (get_user(arg, (unsigned long __user *)argp))
return -EFAULT;
- arg = templ;
tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
return 0;
case TIOCGSERIAL:
@@ -1340,30 +1711,19 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
cnow = info->icount; /* note the counters on entry */
spin_unlock_irqrestore(&info->slock, flags);
- wait_event_interruptible(info->delta_msr_wait, ({
- cprev = cnow;
- spin_lock_irqsave(&info->slock, flags);
- cnow = info->icount; /* atomic copy */
- spin_unlock_irqrestore(&info->slock, flags);
-
- ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
- ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
- ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
- ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts));
- }));
- break;
- /*
- * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
- * Return: write counters to the user passed counter struct
- * NB: both 1->0 and 0->1 transitions are counted except for
- * RI where only 0->1 is counted.
- */
+ return wait_event_interruptible(info->delta_msr_wait,
+ mxser_cflags_changed(info, arg, &cnow));
+ /*
+ * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+ * Return: write counters to the user-passed counter struct
+ * NB: both 1->0 and 0->1 transitions are counted except for
+ * RI where only 0->1 is counted.
+ */
case TIOCGICOUNT:
spin_lock_irqsave(&info->slock, flags);
cnow = info->icount;
spin_unlock_irqrestore(&info->slock, flags);
p_cuser = argp;
- /* modified by casper 1/11/2000 */
if (put_user(cnow.frame, &p_cuser->frame))
return -EFAULT;
if (put_user(cnow.brk, &p_cuser->brk))
@@ -1385,240 +1745,65 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, unsigned int c
return 0;
case MOXA_HighSpeedOn:
return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
- case MOXA_SDS_RSTICOUNTER: {
- info->mon_data.rxcnt = 0;
- info->mon_data.txcnt = 0;
- return 0;
- }
-/* (above) added by James. */
- case MOXA_ASPP_SETBAUD:{
- long baud;
- if (get_user(baud, (long __user *)argp))
- return -EFAULT;
- mxser_set_baud(info, baud);
- return 0;
- }
- case MOXA_ASPP_GETBAUD:
- if (copy_to_user(argp, &info->realbaud, sizeof(long)))
- return -EFAULT;
-
+ case MOXA_SDS_RSTICOUNTER:
+ info->mon_data.rxcnt = 0;
+ info->mon_data.txcnt = 0;
return 0;
case MOXA_ASPP_OQUEUE:{
- int len, lsr;
+ int len, lsr;
- len = mxser_chars_in_buffer(tty);
+ len = mxser_chars_in_buffer(tty);
- lsr = inb(info->base + UART_LSR) & UART_LSR_TEMT;
+ lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT;
- len += (lsr ? 0 : 1);
+ len += (lsr ? 0 : 1);
- if (copy_to_user(argp, &len, sizeof(int)))
- return -EFAULT;
-
- return 0;
- }
+ return put_user(len, (int __user *)argp);
+ }
case MOXA_ASPP_MON: {
- int mcr, status;
-
- /* info->mon_data.ser_param = tty->termios->c_cflag; */
+ int mcr, status;
- status = mxser_get_msr(info->base, 1, info->port, info);
- mxser_check_modem_status(info, status);
-
- mcr = inb(info->base + UART_MCR);
- if (mcr & MOXA_MUST_MCR_XON_FLAG)
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD;
- else
- info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFHOLD;
-
- if (mcr & MOXA_MUST_MCR_TX_XON)
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFXENT;
- else
- info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFXENT;
-
- if (info->tty->hw_stopped)
- info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD;
- else
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
-
- if (copy_to_user(argp, &info->mon_data,
- sizeof(struct mxser_mon)))
- return -EFAULT;
-
- return 0;
- }
-
- case MOXA_ASPP_LSTATUS: {
- if (copy_to_user(argp, &info->err_shadow,
- sizeof(unsigned char)))
- return -EFAULT;
-
- info->err_shadow = 0;
- return 0;
- }
- case MOXA_SET_BAUD_METHOD: {
- int method;
-
- if (get_user(method, (int __user *)argp))
- return -EFAULT;
- mxser_set_baud_method[info->port] = method;
- if (copy_to_user(argp, &method, sizeof(int)))
- return -EFAULT;
+ status = mxser_get_msr(info->ioaddr, 1, tty->index);
+ mxser_check_modem_status(info, status);
- return 0;
- }
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
+ mcr = inb(info->ioaddr + UART_MCR);
+ if (mcr & MOXA_MUST_MCR_XON_FLAG)
+ info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD;
+ else
+ info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFHOLD;
-#ifndef CMSPAR
-#define CMSPAR 010000000000
-#endif
+ if (mcr & MOXA_MUST_MCR_TX_XON)
+ info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFXENT;
+ else
+ info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFXENT;
-static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
-{
- int i, result, status;
+ if (info->tty->hw_stopped)
+ info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD;
+ else
+ info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
- switch (cmd) {
- case MOXA_GET_CONF:
- if (copy_to_user(argp, mxsercfg,
- sizeof(struct mxser_hwconf) * 4))
+ if (copy_to_user(argp, &info->mon_data,
+ sizeof(struct mxser_mon)))
return -EFAULT;
- return 0;
- case MOXA_GET_MAJOR:
- if (copy_to_user(argp, &ttymajor, sizeof(int)))
- return -EFAULT;
- return 0;
- case MOXA_GET_CUMAJOR:
- if (copy_to_user(argp, &calloutmajor, sizeof(int)))
- return -EFAULT;
return 0;
-
- case MOXA_CHKPORTENABLE:
- result = 0;
- for (i = 0; i < MXSER_PORTS; i++) {
- if (mxvar_table[i].base)
- result |= (1 << i);
- }
- return put_user(result, (unsigned long __user *)argp);
- case MOXA_GETDATACOUNT:
- if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
+ }
+ case MOXA_ASPP_LSTATUS: {
+ if (put_user(info->err_shadow, (unsigned char __user *)argp))
return -EFAULT;
- return 0;
- case MOXA_GETMSTATUS:
- for (i = 0; i < MXSER_PORTS; i++) {
- GMStatus[i].ri = 0;
- if (!mxvar_table[i].base) {
- GMStatus[i].dcd = 0;
- GMStatus[i].dsr = 0;
- GMStatus[i].cts = 0;
- continue;
- }
-
- if (!mxvar_table[i].tty || !mxvar_table[i].tty->termios)
- GMStatus[i].cflag = mxvar_table[i].normal_termios.c_cflag;
- else
- GMStatus[i].cflag = mxvar_table[i].tty->termios->c_cflag;
-
- status = inb(mxvar_table[i].base + UART_MSR);
- if (status & 0x80 /*UART_MSR_DCD */ )
- GMStatus[i].dcd = 1;
- else
- GMStatus[i].dcd = 0;
-
- if (status & 0x20 /*UART_MSR_DSR */ )
- GMStatus[i].dsr = 1;
- else
- GMStatus[i].dsr = 0;
-
- if (status & 0x10 /*UART_MSR_CTS */ )
- GMStatus[i].cts = 1;
- else
- GMStatus[i].cts = 0;
- }
- if (copy_to_user(argp, GMStatus,
- sizeof(struct mxser_mstatus) * MXSER_PORTS))
- return -EFAULT;
+ info->err_shadow = 0;
return 0;
- case MOXA_ASPP_MON_EXT: {
- int status;
- int opmode, p;
- int shiftbit;
- unsigned cflag, iflag;
-
- for (i = 0; i < MXSER_PORTS; i++) {
- if (!mxvar_table[i].base)
- continue;
-
- status = mxser_get_msr(mxvar_table[i].base, 0,
- i, &(mxvar_table[i]));
- /*
- mxser_check_modem_status(&mxvar_table[i],
- status);
- */
- if (status & UART_MSR_TERI)
- mxvar_table[i].icount.rng++;
- if (status & UART_MSR_DDSR)
- mxvar_table[i].icount.dsr++;
- if (status & UART_MSR_DDCD)
- mxvar_table[i].icount.dcd++;
- if (status & UART_MSR_DCTS)
- mxvar_table[i].icount.cts++;
-
- mxvar_table[i].mon_data.modem_status = status;
- mon_data_ext.rx_cnt[i] = mxvar_table[i].mon_data.rxcnt;
- mon_data_ext.tx_cnt[i] = mxvar_table[i].mon_data.txcnt;
- mon_data_ext.up_rxcnt[i] = mxvar_table[i].mon_data.up_rxcnt;
- mon_data_ext.up_txcnt[i] = mxvar_table[i].mon_data.up_txcnt;
- mon_data_ext.modem_status[i] = mxvar_table[i].mon_data.modem_status;
- mon_data_ext.baudrate[i] = mxvar_table[i].realbaud;
-
- if (!mxvar_table[i].tty || !mxvar_table[i].tty->termios) {
- cflag = mxvar_table[i].normal_termios.c_cflag;
- iflag = mxvar_table[i].normal_termios.c_iflag;
- } else {
- cflag = mxvar_table[i].tty->termios->c_cflag;
- iflag = mxvar_table[i].tty->termios->c_iflag;
- }
-
- mon_data_ext.databits[i] = cflag & CSIZE;
-
- mon_data_ext.stopbits[i] = cflag & CSTOPB;
-
- mon_data_ext.parity[i] = cflag & (PARENB | PARODD | CMSPAR);
-
- mon_data_ext.flowctrl[i] = 0x00;
-
- if (cflag & CRTSCTS)
- mon_data_ext.flowctrl[i] |= 0x03;
-
- if (iflag & (IXON | IXOFF))
- mon_data_ext.flowctrl[i] |= 0x0C;
-
- if (mxvar_table[i].type == PORT_16550A)
- mon_data_ext.fifo[i] = 1;
- else
- mon_data_ext.fifo[i] = 0;
-
- p = i % 4;
- shiftbit = p * 2;
- opmode = inb(mxvar_table[i].opmode_ioaddr) >> shiftbit;
- opmode &= OP_MODE_MASK;
-
- mon_data_ext.iftype[i] = opmode;
-
- }
- if (copy_to_user(argp, &mon_data_ext, sizeof(struct mxser_mon_ext)))
- return -EFAULT;
-
- return 0;
+ }
+ case MOXA_SET_BAUD_METHOD: {
+ int method;
- }
+ if (get_user(method, (int __user *)argp))
+ return -EFAULT;
+ mxser_set_baud_method[tty->index] = method;
+ return put_user(method, (int __user *)argp);
+ }
default:
return -ENOIOCTLCMD;
}
@@ -1627,107 +1812,105 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
static void mxser_stoprx(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
- /* unsigned long flags; */
+ struct mxser_port *info = tty->driver_data;
info->ldisc_stop_rx = 1;
if (I_IXOFF(tty)) {
- /* MX_LOCK(&info->slock); */
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag) {
+ if (info->board->chip_flag) {
info->IER &= ~MOXA_MUST_RECV_ISR;
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
} else {
- /* above add by Victor Yu. 09-02-2002 */
info->x_char = STOP_CHAR(tty);
- /* mask by Victor Yu. 09-02-2002 */
- /* outb(info->IER, 0); */
- outb(0, info->base + UART_IER);
+ outb(0, info->ioaddr + UART_IER);
info->IER |= UART_IER_THRI;
- /* force Tx interrupt */
- outb(info->IER, info->base + UART_IER);
- } /* add by Victor Yu. 09-02-2002 */
- /* MX_UNLOCK(&info->slock); */
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
}
if (info->tty->termios->c_cflag & CRTSCTS) {
- /* MX_LOCK(&info->slock); */
info->MCR &= ~UART_MCR_RTS;
- outb(info->MCR, info->base + UART_MCR);
- /* MX_UNLOCK(&info->slock); */
+ outb(info->MCR, info->ioaddr + UART_MCR);
}
}
-static void mxser_startrx(struct tty_struct *tty)
+/*
+ * This routine is called by the upper-layer tty layer to signal that
+ * incoming characters should be throttled.
+ */
+static void mxser_throttle(struct tty_struct *tty)
+{
+ mxser_stoprx(tty);
+}
+
+static void mxser_unthrottle(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
- /* unsigned long flags; */
+ struct mxser_port *info = tty->driver_data;
+ /* startrx */
info->ldisc_stop_rx = 0;
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
else {
- /* MX_LOCK(&info->slock); */
-
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag) {
+ if (info->board->chip_flag) {
info->IER |= MOXA_MUST_RECV_ISR;
- outb(info->IER, info->base + UART_IER);
+ outb(info->IER, info->ioaddr + UART_IER);
} else {
- /* above add by Victor Yu. 09-02-2002 */
-
info->x_char = START_CHAR(tty);
- /* mask by Victor Yu. 09-02-2002 */
- /* outb(info->IER, 0); */
- /* add by Victor Yu. 09-02-2002 */
- outb(0, info->base + UART_IER);
- /* force Tx interrupt */
+ outb(0, info->ioaddr + UART_IER);
info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- } /* add by Victor Yu. 09-02-2002 */
- /* MX_UNLOCK(&info->slock); */
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
}
}
if (info->tty->termios->c_cflag & CRTSCTS) {
- /* MX_LOCK(&info->slock); */
info->MCR |= UART_MCR_RTS;
- outb(info->MCR, info->base + UART_MCR);
- /* MX_UNLOCK(&info->slock); */
+ outb(info->MCR, info->ioaddr + UART_MCR);
}
}
/*
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
+ * mxser_stop() and mxser_start()
+ *
+ * These routines are called before setting or resetting tty->stopped.
+ * They enable or disable transmitter interrupts, as necessary.
*/
-static void mxser_throttle(struct tty_struct *tty)
+static void mxser_stop(struct tty_struct *tty)
{
- /* struct mxser_struct *info = tty->driver_data; */
- /* unsigned long flags; */
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
- /* MX_LOCK(&info->slock); */
- mxser_stoprx(tty);
- /* MX_UNLOCK(&info->slock); */
+ spin_lock_irqsave(&info->slock, flags);
+ if (info->IER & UART_IER_THRI) {
+ info->IER &= ~UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
+ spin_unlock_irqrestore(&info->slock, flags);
}
-static void mxser_unthrottle(struct tty_struct *tty)
+static void mxser_start(struct tty_struct *tty)
{
- /* struct mxser_struct *info = tty->driver_data; */
- /* unsigned long flags; */
+ struct mxser_port *info = tty->driver_data;
+ unsigned long flags;
- /* MX_LOCK(&info->slock); */
- mxser_startrx(tty);
- /* MX_UNLOCK(&info->slock); */
+ spin_lock_irqsave(&info->slock, flags);
+ if (info->xmit_cnt && info->xmit_buf) {
+ outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
+ info->IER |= UART_IER_THRI;
+ outb(info->IER, info->ioaddr + UART_IER);
+ }
+ spin_unlock_irqrestore(&info->slock, flags);
}
static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long flags;
+ spin_lock_irqsave(&info->slock, flags);
mxser_change_speed(info, old_termios);
+ spin_unlock_irqrestore(&info->slock, flags);
if ((old_termios->c_cflag & CRTSCTS) &&
!(tty->termios->c_cflag & CRTSCTS)) {
@@ -1735,61 +1918,27 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
mxser_start(tty);
}
-/* Handle sw stopped */
+ /* Handle sw stopped */
if ((old_termios->c_iflag & IXON) &&
!(tty->termios->c_iflag & IXON)) {
tty->stopped = 0;
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag) {
+ if (info->board->chip_flag) {
spin_lock_irqsave(&info->slock, flags);
- DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->base);
+ DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
spin_unlock_irqrestore(&info->slock, flags);
}
- /* above add by Victor Yu. 09-02-2002 */
mxser_start(tty);
}
}
/*
- * mxser_stop() and mxser_start()
- *
- * This routines are called before setting or resetting tty->stopped.
- * They enable or disable transmitter interrupts, as necessary.
- */
-static void mxser_stop(struct tty_struct *tty)
-{
- struct mxser_struct *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- if (info->IER & UART_IER_THRI) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static void mxser_start(struct tty_struct *tty)
-{
- struct mxser_struct *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- if (info->xmit_cnt && info->xmit_buf && !(info->IER & UART_IER_THRI)) {
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-/*
* mxser_wait_until_sent() --- wait until the transmitter is empty
*/
static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long orig_jiffies, char_time;
int lsr;
@@ -1830,7 +1979,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
timeout, char_time);
printk("jiff=%lu...", jiffies);
#endif
- while (!((lsr = inb(info->base + UART_LSR)) & UART_LSR_TEMT)) {
+ while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) {
#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
#endif
@@ -1847,13 +1996,12 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
#endif
}
-
/*
* This routine is called by tty_hangup() when a hangup is signaled.
*/
-void mxser_hangup(struct tty_struct *tty)
+static void mxser_hangup(struct tty_struct *tty)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
mxser_flush_buffer(tty);
mxser_shutdown(info);
@@ -1864,231 +2012,73 @@ void mxser_hangup(struct tty_struct *tty)
wake_up_interruptible(&info->open_wait);
}
-
-/* added by James 03-12-2004. */
/*
* mxser_rs_break() --- routine which turns the break handling on or off
*/
static void mxser_rs_break(struct tty_struct *tty, int break_state)
{
- struct mxser_struct *info = tty->driver_data;
+ struct mxser_port *info = tty->driver_data;
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
if (break_state == -1)
- outb(inb(info->base + UART_LCR) | UART_LCR_SBC,
- info->base + UART_LCR);
+ outb(inb(info->ioaddr + UART_LCR) | UART_LCR_SBC,
+ info->ioaddr + UART_LCR);
else
- outb(inb(info->base + UART_LCR) & ~UART_LCR_SBC,
- info->base + UART_LCR);
+ outb(inb(info->ioaddr + UART_LCR) & ~UART_LCR_SBC,
+ info->ioaddr + UART_LCR);
spin_unlock_irqrestore(&info->slock, flags);
}
-/* (above) added by James. */
-
-
-/*
- * This is the serial driver's generic interrupt routine
- */
-static irqreturn_t mxser_interrupt(int irq, void *dev_id)
+static void mxser_receive_chars(struct mxser_port *port, int *status)
{
- int status, iir, i;
- struct mxser_struct *info;
- struct mxser_struct *port;
- int max, irqbits, bits, msr;
- int pass_counter = 0;
- int handled = IRQ_NONE;
-
- port = NULL;
- /* spin_lock(&gm_lock); */
-
- for (i = 0; i < MXSER_BOARDS; i++) {
- if (dev_id == &(mxvar_table[i * MXSER_PORTS_PER_BOARD])) {
- port = dev_id;
- break;
- }
- }
-
- if (i == MXSER_BOARDS)
- goto irq_stop;
- if (port == 0)
- goto irq_stop;
- max = mxser_numports[mxsercfg[i].board_type - 1];
- while (1) {
- irqbits = inb(port->vector) & port->vectormask;
- if (irqbits == port->vectormask)
- break;
-
- handled = IRQ_HANDLED;
- for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) {
- if (irqbits == port->vectormask)
- break;
- if (bits & irqbits)
- continue;
- info = port + i;
-
- /* following add by Victor Yu. 09-13-2002 */
- iir = inb(info->base + UART_IIR);
- if (iir & UART_IIR_NO_INT)
- continue;
- iir &= MOXA_MUST_IIR_MASK;
- if (!info->tty) {
- status = inb(info->base + UART_LSR);
- outb(0x27, info->base + UART_FCR);
- inb(info->base + UART_MSR);
- continue;
- }
-
- /* mask by Victor Yu. 09-13-2002
- if ( !info->tty ||
- (inb(info->base + UART_IIR) & UART_IIR_NO_INT) )
- continue;
- */
- /* mask by Victor Yu. 09-02-2002
- status = inb(info->base + UART_LSR) & info->read_status_mask;
- */
-
- /* following add by Victor Yu. 09-02-2002 */
- status = inb(info->base + UART_LSR);
-
- if (status & UART_LSR_PE)
- info->err_shadow |= NPPI_NOTIFY_PARITY;
- if (status & UART_LSR_FE)
- info->err_shadow |= NPPI_NOTIFY_FRAMING;
- if (status & UART_LSR_OE)
- info->err_shadow |= NPPI_NOTIFY_HW_OVERRUN;
- if (status & UART_LSR_BI)
- info->err_shadow |= NPPI_NOTIFY_BREAK;
-
- if (info->IsMoxaMustChipFlag) {
- /*
- if ( (status & 0x02) && !(status & 0x01) ) {
- outb(info->base+UART_FCR, 0x23);
- continue;
- }
- */
- if (iir == MOXA_MUST_IIR_GDA ||
- iir == MOXA_MUST_IIR_RDA ||
- iir == MOXA_MUST_IIR_RTO ||
- iir == MOXA_MUST_IIR_LSR)
- mxser_receive_chars(info, &status);
-
- } else {
- /* above add by Victor Yu. 09-02-2002 */
-
- status &= info->read_status_mask;
- if (status & UART_LSR_DR)
- mxser_receive_chars(info, &status);
- }
- msr = inb(info->base + UART_MSR);
- if (msr & UART_MSR_ANY_DELTA) {
- mxser_check_modem_status(info, msr);
- }
- /* following add by Victor Yu. 09-13-2002 */
- if (info->IsMoxaMustChipFlag) {
- if ((iir == 0x02) && (status & UART_LSR_THRE)) {
- mxser_transmit_chars(info);
- }
- } else {
- /* above add by Victor Yu. 09-13-2002 */
-
- if (status & UART_LSR_THRE) {
-/* 8-2-99 by William
- if ( info->x_char || (info->xmit_cnt > 0) )
-*/
- mxser_transmit_chars(info);
- }
- }
- }
- if (pass_counter++ > MXSER_ISR_PASS_LIMIT) {
- break; /* Prevent infinite loops */
- }
- }
-
- irq_stop:
- /* spin_unlock(&gm_lock); */
- return handled;
-}
-
-static void mxser_receive_chars(struct mxser_struct *info, int *status)
-{
- struct tty_struct *tty = info->tty;
+ struct tty_struct *tty = port->tty;
unsigned char ch, gdl;
int ignored = 0;
int cnt = 0;
int recv_room;
int max = 256;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
recv_room = tty->receive_room;
- if ((recv_room == 0) && (!info->ldisc_stop_rx)) {
- /* mxser_throttle(tty); */
+ if ((recv_room == 0) && (!port->ldisc_stop_rx))
mxser_stoprx(tty);
- /* return; */
- }
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag != MOXA_OTHER_UART) {
+ if (port->board->chip_flag != MOXA_OTHER_UART) {
- if (*status & UART_LSR_SPECIAL) {
+ if (*status & UART_LSR_SPECIAL)
goto intr_old;
- }
- /* following add by Victor Yu. 02-11-2004 */
- if (info->IsMoxaMustChipFlag == MOXA_MUST_MU860_HWID &&
+ if (port->board->chip_flag == MOXA_MUST_MU860_HWID &&
(*status & MOXA_MUST_LSR_RERR))
goto intr_old;
- /* above add by Victor Yu. 02-14-2004 */
if (*status & MOXA_MUST_LSR_RERR)
goto intr_old;
- gdl = inb(info->base + MOXA_MUST_GDL_REGISTER);
+ gdl = inb(port->ioaddr + MOXA_MUST_GDL_REGISTER);
- /* add by Victor Yu. 02-11-2004 */
- if (info->IsMoxaMustChipFlag == MOXA_MUST_MU150_HWID)
+ if (port->board->chip_flag == MOXA_MUST_MU150_HWID)
gdl &= MOXA_MUST_GDL_MASK;
if (gdl >= recv_room) {
- if (!info->ldisc_stop_rx) {
- /* mxser_throttle(tty); */
+ if (!port->ldisc_stop_rx)
mxser_stoprx(tty);
- }
- /* return; */
}
while (gdl--) {
- ch = inb(info->base + UART_RX);
+ ch = inb(port->ioaddr + UART_RX);
tty_insert_flip_char(tty, ch, 0);
cnt++;
- /*
- if ((cnt >= HI_WATER) && (info->stop_rx == 0)) {
- mxser_stoprx(tty);
- info->stop_rx = 1;
- break;
- } */
}
goto end_intr;
}
- intr_old:
- /* above add by Victor Yu. 09-02-2002 */
+intr_old:
do {
if (max-- < 0)
break;
- /*
- if ((cnt >= HI_WATER) && (info->stop_rx == 0)) {
- mxser_stoprx(tty);
- info->stop_rx=1;
- break;
- }
- */
- ch = inb(info->base + UART_RX);
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag && (*status & UART_LSR_OE) /*&& !(*status&UART_LSR_DR) */ )
- outb(0x23, info->base + UART_FCR);
- *status &= info->read_status_mask;
- /* above add by Victor Yu. 09-02-2002 */
- if (*status & info->ignore_status_mask) {
+ ch = inb(port->ioaddr + UART_RX);
+ if (port->board->chip_flag && (*status & UART_LSR_OE))
+ outb(0x23, port->ioaddr + UART_FCR);
+ *status &= port->read_status_mask;
+ if (*status & port->ignore_status_mask) {
if (++ignored > 100)
break;
} else {
@@ -2096,1038 +2086,652 @@ static void mxser_receive_chars(struct mxser_struct *info, int *status)
if (*status & UART_LSR_SPECIAL) {
if (*status & UART_LSR_BI) {
flag = TTY_BREAK;
-/* added by casper 1/11/2000 */
- info->icount.brk++;
-/* */
- if (info->flags & ASYNC_SAK)
+ port->icount.brk++;
+
+ if (port->flags & ASYNC_SAK)
do_SAK(tty);
} else if (*status & UART_LSR_PE) {
flag = TTY_PARITY;
-/* added by casper 1/11/2000 */
- info->icount.parity++;
-/* */
+ port->icount.parity++;
} else if (*status & UART_LSR_FE) {
flag = TTY_FRAME;
-/* added by casper 1/11/2000 */
- info->icount.frame++;
-/* */
+ port->icount.frame++;
} else if (*status & UART_LSR_OE) {
flag = TTY_OVERRUN;
-/* added by casper 1/11/2000 */
- info->icount.overrun++;
-/* */
- }
+ port->icount.overrun++;
+ } else
+ flag = TTY_BREAK;
}
tty_insert_flip_char(tty, ch, flag);
cnt++;
if (cnt >= recv_room) {
- if (!info->ldisc_stop_rx) {
- /* mxser_throttle(tty); */
+ if (!port->ldisc_stop_rx)
mxser_stoprx(tty);
- }
break;
}
}
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag)
+ if (port->board->chip_flag)
break;
- /* above add by Victor Yu. 09-02-2002 */
- /* mask by Victor Yu. 09-02-2002
- *status = inb(info->base + UART_LSR) & info->read_status_mask;
- */
- /* following add by Victor Yu. 09-02-2002 */
- *status = inb(info->base + UART_LSR);
- /* above add by Victor Yu. 09-02-2002 */
+ *status = inb(port->ioaddr + UART_LSR);
} while (*status & UART_LSR_DR);
-end_intr: /* add by Victor Yu. 09-02-2002 */
- mxvar_log.rxcnt[info->port] += cnt;
- info->mon_data.rxcnt += cnt;
- info->mon_data.up_rxcnt += cnt;
- spin_unlock_irqrestore(&info->slock, flags);
+end_intr:
+ mxvar_log.rxcnt[port->tty->index] += cnt;
+ port->mon_data.rxcnt += cnt;
+ port->mon_data.up_rxcnt += cnt;
+ /*
+ * We are called from an interrupt context with &port->slock
+ * being held. Drop it temporarily in order to prevent
+ * recursive locking.
+ */
+ spin_unlock(&port->slock);
tty_flip_buffer_push(tty);
+ spin_lock(&port->slock);
}
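
The comment added at the end of mxser_receive_chars() is worth dwelling on: the receive path now runs with port->slock already held by the interrupt handler, so the push into the tty flip buffer is bracketed by an unlock/lock pair to avoid re-acquiring a non-recursive spinlock further down the call chain. The same shape, transplanted to user space with a plain (non-recursive) pthread mutex purely as an analogy:

/* compile with -pthread on older toolchains */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a callee that may take the same lock again
 * (the role tty_flip_buffer_push() plays in the driver comment). */
static void deliver(void)
{
        pthread_mutex_lock(&lock);
        printf("delivering buffered characters\n");
        pthread_mutex_unlock(&lock);
}

static void receive_chars(void)
{
        pthread_mutex_lock(&lock);
        /* ... pull characters out of the hardware FIFO ... */

        /* Drop the lock around the callback, exactly as the driver
         * drops port->slock around tty_flip_buffer_push(). */
        pthread_mutex_unlock(&lock);
        deliver();
        pthread_mutex_lock(&lock);

        /* the caller expects to still hold the lock on return */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        receive_chars();
        return 0;
}
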
-static void mxser_transmit_chars(struct mxser_struct *info)
+static void mxser_transmit_chars(struct mxser_port *port)
{
int count, cnt;
- unsigned long flags;
- spin_lock_irqsave(&info->slock, flags);
-
- if (info->x_char) {
- outb(info->x_char, info->base + UART_TX);
- info->x_char = 0;
- mxvar_log.txcnt[info->port]++;
- info->mon_data.txcnt++;
- info->mon_data.up_txcnt++;
-
-/* added by casper 1/11/2000 */
- info->icount.tx++;
-/* */
- spin_unlock_irqrestore(&info->slock, flags);
+ if (port->x_char) {
+ outb(port->x_char, port->ioaddr + UART_TX);
+ port->x_char = 0;
+ mxvar_log.txcnt[port->tty->index]++;
+ port->mon_data.txcnt++;
+ port->mon_data.up_txcnt++;
+ port->icount.tx++;
return;
}
- if (info->xmit_buf == 0) {
- spin_unlock_irqrestore(&info->slock, flags);
+ if (port->xmit_buf == NULL)
return;
- }
- if ((info->xmit_cnt <= 0) || info->tty->stopped ||
- (info->tty->hw_stopped &&
- (info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag))) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- spin_unlock_irqrestore(&info->slock, flags);
+ if ((port->xmit_cnt <= 0) || port->tty->stopped ||
+ (port->tty->hw_stopped &&
+ (port->type != PORT_16550A) &&
+ (!port->board->chip_flag))) {
+ port->IER &= ~UART_IER_THRI;
+ outb(port->IER, port->ioaddr + UART_IER);
return;
}
- cnt = info->xmit_cnt;
- count = info->xmit_fifo_size;
+ cnt = port->xmit_cnt;
+ count = port->xmit_fifo_size;
do {
- outb(info->xmit_buf[info->xmit_tail++],
- info->base + UART_TX);
- info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE - 1);
- if (--info->xmit_cnt <= 0)
+ outb(port->xmit_buf[port->xmit_tail++],
+ port->ioaddr + UART_TX);
+ port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE - 1);
+ if (--port->xmit_cnt <= 0)
break;
} while (--count > 0);
- mxvar_log.txcnt[info->port] += (cnt - info->xmit_cnt);
+ mxvar_log.txcnt[port->tty->index] += (cnt - port->xmit_cnt);
-/* added by James 03-12-2004. */
- info->mon_data.txcnt += (cnt - info->xmit_cnt);
- info->mon_data.up_txcnt += (cnt - info->xmit_cnt);
-/* (above) added by James. */
+ port->mon_data.txcnt += (cnt - port->xmit_cnt);
+ port->mon_data.up_txcnt += (cnt - port->xmit_cnt);
+ port->icount.tx += (cnt - port->xmit_cnt);
-/* added by casper 1/11/2000 */
- info->icount.tx += (cnt - info->xmit_cnt);
-/* */
+ if (port->xmit_cnt < WAKEUP_CHARS)
+ tty_wakeup(port->tty);
- if (info->xmit_cnt < WAKEUP_CHARS) {
- set_bit(MXSER_EVENT_TXLOW, &info->event);
- schedule_work(&info->tqueue);
+ if (port->xmit_cnt <= 0) {
+ port->IER &= ~UART_IER_THRI;
+ outb(port->IER, port->ioaddr + UART_IER);
}
- if (info->xmit_cnt <= 0) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- spin_unlock_irqrestore(&info->slock, flags);
}
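
mxser_transmit_chars() drains the software transmit buffer into the UART FIFO with the usual power-of-two ring-buffer idiom: the tail index is advanced and wrapped with "& (SERIAL_XMIT_SIZE - 1)", which relies on SERIAL_XMIT_SIZE being a power of two. A self-contained sketch of just that drain loop, with a tiny 8-byte buffer standing in for SERIAL_XMIT_SIZE and a printf standing in for the outb() into UART_TX:

#include <stdio.h>

#define XMIT_SIZE 8                     /* must be a power of two */

static char xmit_buf[XMIT_SIZE] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
static unsigned int xmit_tail = 6;      /* start near the end to show wraparound */
static int xmit_cnt = 5;                /* five characters pending */

int main(void)
{
        int count = 4;                  /* pretend the TX FIFO takes 4 bytes */

        do {
                /* outb(port->xmit_buf[port->xmit_tail++], ... + UART_TX) */
                printf("tx '%c' (tail was %u)\n", xmit_buf[xmit_tail], xmit_tail);
                xmit_tail = (xmit_tail + 1) & (XMIT_SIZE - 1);
                if (--xmit_cnt <= 0)
                        break;
        } while (--count > 0);

        printf("left in buffer: %d, tail now %u\n", xmit_cnt, xmit_tail);
        return 0;
}
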
-static void mxser_check_modem_status(struct mxser_struct *info, int status)
+/*
+ * This is the serial driver's generic interrupt routine
+ */
+static irqreturn_t mxser_interrupt(int irq, void *dev_id)
{
- /* update input line counters */
- if (status & UART_MSR_TERI)
- info->icount.rng++;
- if (status & UART_MSR_DDSR)
- info->icount.dsr++;
- if (status & UART_MSR_DDCD)
- info->icount.dcd++;
- if (status & UART_MSR_DCTS)
- info->icount.cts++;
- info->mon_data.modem_status = status;
- wake_up_interruptible(&info->delta_msr_wait);
-
- if ((info->flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) {
- if (status & UART_MSR_DCD)
- wake_up_interruptible(&info->open_wait);
- schedule_work(&info->tqueue);
- }
-
- if (info->flags & ASYNC_CTS_FLOW) {
- if (info->tty->hw_stopped) {
- if (status & UART_MSR_CTS) {
- info->tty->hw_stopped = 0;
+ int status, iir, i;
+ struct mxser_board *brd = NULL;
+ struct mxser_port *port;
+ int max, irqbits, bits, msr;
+ unsigned int int_cnt, pass_counter = 0;
+ int handled = IRQ_NONE;
- if ((info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag)) {
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- set_bit(MXSER_EVENT_TXLOW, &info->event);
- schedule_work(&info->tqueue); }
- } else {
- if (!(status & UART_MSR_CTS)) {
- info->tty->hw_stopped = 1;
- if ((info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag)) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- }
+ for (i = 0; i < MXSER_BOARDS; i++)
+ if (dev_id == &mxser_boards[i]) {
+ brd = dev_id;
+ break;
}
- }
-}
-static int mxser_block_til_ready(struct tty_struct *tty, struct file *filp, struct mxser_struct *info)
-{
- DECLARE_WAITQUEUE(wait, current);
- int retval;
- int do_clocal = 0;
- unsigned long flags;
-
- /*
- * If non-blocking mode is set, or the port is not enabled,
- * then make the check up front and then exit.
- */
- if ((filp->f_flags & O_NONBLOCK) || (tty->flags & (1 << TTY_IO_ERROR))) {
- info->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
- }
+ if (i == MXSER_BOARDS)
+ goto irq_stop;
+ if (brd == NULL)
+ goto irq_stop;
+ max = brd->info->nports;
+ while (pass_counter++ < MXSER_ISR_PASS_LIMIT) {
+ irqbits = inb(brd->vector) & brd->vector_mask;
+ if (irqbits == brd->vector_mask)
+ break;
- if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
+ handled = IRQ_HANDLED;
+ for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) {
+ if (irqbits == brd->vector_mask)
+ break;
+ if (bits & irqbits)
+ continue;
+ port = &brd->ports[i];
+
+ int_cnt = 0;
+ spin_lock(&port->slock);
+ do {
+ iir = inb(port->ioaddr + UART_IIR);
+ if (iir & UART_IIR_NO_INT)
+ break;
+ iir &= MOXA_MUST_IIR_MASK;
+ if (!port->tty ||
+ (port->flags & ASYNC_CLOSING) ||
+ !(port->flags &
+ ASYNC_INITIALIZED)) {
+ status = inb(port->ioaddr + UART_LSR);
+ outb(0x27, port->ioaddr + UART_FCR);
+ inb(port->ioaddr + UART_MSR);
+ break;
+ }
- /*
- * Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, info->count is dropped by one, so that
- * mxser_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal.
- */
- retval = 0;
- add_wait_queue(&info->open_wait, &wait);
+ status = inb(port->ioaddr + UART_LSR);
+
+ if (status & UART_LSR_PE)
+ port->err_shadow |= NPPI_NOTIFY_PARITY;
+ if (status & UART_LSR_FE)
+ port->err_shadow |= NPPI_NOTIFY_FRAMING;
+ if (status & UART_LSR_OE)
+ port->err_shadow |=
+ NPPI_NOTIFY_HW_OVERRUN;
+ if (status & UART_LSR_BI)
+ port->err_shadow |= NPPI_NOTIFY_BREAK;
+
+ if (port->board->chip_flag) {
+ if (iir == MOXA_MUST_IIR_GDA ||
+ iir == MOXA_MUST_IIR_RDA ||
+ iir == MOXA_MUST_IIR_RTO ||
+ iir == MOXA_MUST_IIR_LSR)
+ mxser_receive_chars(port,
+ &status);
- spin_lock_irqsave(&info->slock, flags);
- if (!tty_hung_up_p(filp))
- info->count--;
- spin_unlock_irqrestore(&info->slock, flags);
- info->blocked_open++;
- while (1) {
- spin_lock_irqsave(&info->slock, flags);
- outb(inb(info->base + UART_MCR) |
- UART_MCR_DTR | UART_MCR_RTS, info->base + UART_MCR);
- spin_unlock_irqrestore(&info->slock, flags);
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)) {
- if (info->flags & ASYNC_HUP_NOTIFY)
- retval = -EAGAIN;
- else
- retval = -ERESTARTSYS;
- break;
- }
- if (!(info->flags & ASYNC_CLOSING) &&
- (do_clocal ||
- (inb(info->base + UART_MSR) & UART_MSR_DCD)))
- break;
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
+ } else {
+ status &= port->read_status_mask;
+ if (status & UART_LSR_DR)
+ mxser_receive_chars(port,
+ &status);
+ }
+ msr = inb(port->ioaddr + UART_MSR);
+ if (msr & UART_MSR_ANY_DELTA)
+ mxser_check_modem_status(port, msr);
+
+ if (port->board->chip_flag) {
+ if (iir == 0x02 && (status &
+ UART_LSR_THRE))
+ mxser_transmit_chars(port);
+ } else {
+ if (status & UART_LSR_THRE)
+ mxser_transmit_chars(port);
+ }
+ } while (int_cnt++ < MXSER_ISR_PASS_LIMIT);
+ spin_unlock(&port->slock);
}
- schedule();
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&info->open_wait, &wait);
- if (!tty_hung_up_p(filp))
- info->count++;
- info->blocked_open--;
- if (retval)
- return retval;
- info->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
-}
-static int mxser_startup(struct mxser_struct *info)
-{
- unsigned long page;
- unsigned long flags;
-
- page = __get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- spin_lock_irqsave(&info->slock, flags);
-
- if (info->flags & ASYNC_INITIALIZED) {
- free_page(page);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
- }
+irq_stop:
+ return handled;
+}
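
The rewritten interrupt handler polls the board's vector register and treats each clear bit as "this port is interrupting": the scan walks bits upward from 1, OR-ing already-inspected bits back into irqbits so the loop can stop as soon as everything matches vector_mask, and both the outer while and the inner per-port do-while are bounded by MXSER_ISR_PASS_LIMIT. The bit-walking part in isolation, pure user space, with a made-up 4-port mask and a fixed sample value instead of inb(brd->vector):

#include <stdio.h>

#define NPORTS       4
#define VECTOR_MASK  0x0F       /* one bit per port (made-up 4-port board) */

int main(void)
{
        /* Pretend inb(brd->vector) returned 0x0A: bits 0 and 2 are clear,
         * i.e. ports 0 and 2 are interrupting. */
        unsigned int irqbits = 0x0A & VECTOR_MASK;
        unsigned int bits;
        int i;

        if (irqbits == VECTOR_MASK) {
                printf("no port interrupting\n");
                return 0;
        }

        for (i = 0, bits = 1; i < NPORTS; i++, irqbits |= bits, bits <<= 1) {
                if (irqbits == VECTOR_MASK)
                        break;          /* every port accounted for */
                if (bits & irqbits)
                        continue;       /* this port is idle */
                printf("servicing port %d\n", i);
        }
        return 0;
}
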
- if (!info->base || !info->type) {
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
- free_page(page);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
- }
- if (info->xmit_buf)
- free_page(page);
- else
- info->xmit_buf = (unsigned char *) page;
+static const struct tty_operations mxser_ops = {
+ .open = mxser_open,
+ .close = mxser_close,
+ .write = mxser_write,
+ .put_char = mxser_put_char,
+ .flush_chars = mxser_flush_chars,
+ .write_room = mxser_write_room,
+ .chars_in_buffer = mxser_chars_in_buffer,
+ .flush_buffer = mxser_flush_buffer,
+ .ioctl = mxser_ioctl,
+ .throttle = mxser_throttle,
+ .unthrottle = mxser_unthrottle,
+ .set_termios = mxser_set_termios,
+ .stop = mxser_stop,
+ .start = mxser_start,
+ .hangup = mxser_hangup,
+ .break_ctl = mxser_rs_break,
+ .wait_until_sent = mxser_wait_until_sent,
+ .tiocmget = mxser_tiocmget,
+ .tiocmset = mxser_tiocmset,
+};
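
The per-operation callbacks are now gathered in a single const struct tty_operations, installed later by tty_set_operations() in mxser_module_init(). The underlying pattern is just an ops table of function pointers; stripped of the tty layer it reduces to a cut-down analogue like this (demo names are invented):

#include <stdio.h>

/* Cut-down analogue of struct tty_operations: a const table of callbacks. */
struct demo_operations {
        void (*open)(const char *name);
        void (*close)(const char *name);
};

static void demo_open(const char *name)
{
        printf("open %s\n", name);
}

static void demo_close(const char *name)
{
        printf("close %s\n", name);
}

static const struct demo_operations demo_ops = {
        .open  = demo_open,
        .close = demo_close,
};

int main(void)
{
        /* The core layer only ever sees the ops pointer. */
        const struct demo_operations *ops = &demo_ops;

        ops->open("ttyMI0");
        ops->close("ttyMI0");
        return 0;
}
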
- /*
- * Clear the FIFO buffers and disable them
- * (they will be reenabled in mxser_change_speed())
- */
- if (info->IsMoxaMustChipFlag)
- outb((UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT |
- MOXA_MUST_FCR_GDA_MODE_ENABLE), info->base + UART_FCR);
- else
- outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
- info->base + UART_FCR);
+/*
+ * The MOXA Smartio/Industio serial driver boot-time initialization code!
+ */
- /*
- * At this point there's no way the LSR could still be 0xFF;
- * if it is, then bail out, because there's likely no UART
- * here.
- */
- if (inb(info->base + UART_LSR) == 0xff) {
- spin_unlock_irqrestore(&info->slock, flags);
- if (capable(CAP_SYS_ADMIN)) {
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
- return 0;
- } else
- return -ENODEV;
+static void mxser_release_res(struct mxser_board *brd, struct pci_dev *pdev,
+ unsigned int irq)
+{
+ if (irq)
+ free_irq(brd->irq, brd);
+ if (pdev != NULL) { /* PCI */
+#ifdef CONFIG_PCI
+ pci_release_region(pdev, 2);
+ pci_release_region(pdev, 3);
+#endif
+ } else {
+ release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
+ release_region(brd->vector, 1);
}
-
- /*
- * Clear the interrupt registers.
- */
- (void) inb(info->base + UART_LSR);
- (void) inb(info->base + UART_RX);
- (void) inb(info->base + UART_IIR);
- (void) inb(info->base + UART_MSR);
-
- /*
- * Now, initialize the UART
- */
- outb(UART_LCR_WLEN8, info->base + UART_LCR); /* reset DLAB */
- info->MCR = UART_MCR_DTR | UART_MCR_RTS;
- outb(info->MCR, info->base + UART_MCR);
-
- /*
- * Finally, enable interrupts
- */
- info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
- /* info->IER = UART_IER_RLSI | UART_IER_RDI; */
-
- /* following add by Victor Yu. 08-30-2002 */
- if (info->IsMoxaMustChipFlag)
- info->IER |= MOXA_MUST_IER_EGDAI;
- /* above add by Victor Yu. 08-30-2002 */
- outb(info->IER, info->base + UART_IER); /* enable interrupts */
-
- /*
- * And clear the interrupt registers again for luck.
- */
- (void) inb(info->base + UART_LSR);
- (void) inb(info->base + UART_RX);
- (void) inb(info->base + UART_IIR);
- (void) inb(info->base + UART_MSR);
-
- if (info->tty)
- clear_bit(TTY_IO_ERROR, &info->tty->flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
- /*
- * and set the speed of the serial port
- */
- spin_unlock_irqrestore(&info->slock, flags);
- mxser_change_speed(info, NULL);
-
- info->flags |= ASYNC_INITIALIZED;
- return 0;
}
-/*
- * This routine will shutdown a serial port; interrupts maybe disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void mxser_shutdown(struct mxser_struct *info)
+static int __devinit mxser_initbrd(struct mxser_board *brd,
+ struct pci_dev *pdev)
{
- unsigned long flags;
-
- if (!(info->flags & ASYNC_INITIALIZED))
- return;
-
- spin_lock_irqsave(&info->slock, flags);
-
- /*
- * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
- * here so the queue might never be waken up
- */
- wake_up_interruptible(&info->delta_msr_wait);
-
- /*
- * Free the IRQ, if necessary
- */
- if (info->xmit_buf) {
- free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = NULL;
- }
+ struct mxser_port *info;
+ unsigned int i;
+ int retval;
- info->IER = 0;
- outb(0x00, info->base + UART_IER);
+ printk(KERN_INFO "max. baud rate = %d bps.\n", brd->ports[0].max_baud);
- if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
- info->MCR &= ~(UART_MCR_DTR | UART_MCR_RTS);
- outb(info->MCR, info->base + UART_MCR);
+ for (i = 0; i < brd->info->nports; i++) {
+ info = &brd->ports[i];
+ info->board = brd;
+ info->stop_rx = 0;
+ info->ldisc_stop_rx = 0;
- /* clear Rx/Tx FIFO's */
- /* following add by Victor Yu. 08-30-2002 */
- if (info->IsMoxaMustChipFlag)
- outb((UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT |
- MOXA_MUST_FCR_GDA_MODE_ENABLE), info->base + UART_FCR);
- else
- /* above add by Victor Yu. 08-30-2002 */
- outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
- info->base + UART_FCR);
+ /* Enhance mode enabled here */
+ if (brd->chip_flag != MOXA_OTHER_UART)
+ ENABLE_MOXA_MUST_ENCHANCE_MODE(info->ioaddr);
- /* read data port to reset things */
- (void) inb(info->base + UART_RX);
+ info->flags = ASYNC_SHARE_IRQ;
+ info->type = brd->uart_type;
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
+ process_txrx_fifo(info);
- info->flags &= ~ASYNC_INITIALIZED;
+ info->custom_divisor = info->baud_base * 16;
+ info->close_delay = 5 * HZ / 10;
+ info->closing_wait = 30 * HZ;
+ info->normal_termios = mxvar_sdriver->init_termios;
+ init_waitqueue_head(&info->open_wait);
+ init_waitqueue_head(&info->delta_msr_wait);
+ memset(&info->mon_data, 0, sizeof(struct mxser_mon));
+ info->err_shadow = 0;
+ spin_lock_init(&info->slock);
- /* following add by Victor Yu. 09-23-2002 */
- if (info->IsMoxaMustChipFlag)
- SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->base);
- /* above add by Victor Yu. 09-23-2002 */
+ /* before set INT ISR, disable all int */
+ outb(inb(info->ioaddr + UART_IER) & 0xf0,
+ info->ioaddr + UART_IER);
+ }
- spin_unlock_irqrestore(&info->slock, flags);
+ retval = request_irq(brd->irq, mxser_interrupt, IRQF_SHARED, "mxser",
+ brd);
+ if (retval) {
+ printk(KERN_ERR "Board %s: Request irq failed, IRQ (%d) may "
+ "conflict with another device.\n",
+ brd->info->name, brd->irq);
+ /* We hold resources, we need to release them. */
+ mxser_release_res(brd, pdev, 0);
+ }
+ return retval;
}
-/*
- * This routine is called to set the UART divisor registers to match
- * the specified baud rate for a serial port.
- */
-static int mxser_change_speed(struct mxser_struct *info, struct ktermios *old_termios)
+static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
{
- unsigned cflag, cval, fcr;
- int ret = 0;
- unsigned char status;
- long baud;
- unsigned long flags;
-
- if (!info->tty || !info->tty->termios)
- return ret;
- cflag = info->tty->termios->c_cflag;
- if (!(info->base))
- return ret;
+ int id, i, bits;
+ unsigned short regs[16], irq;
+ unsigned char scratch, scratch2;
-#ifndef B921600
-#define B921600 (B460800 +1)
-#endif
- if (mxser_set_baud_method[info->port] == 0) {
- baud = tty_get_baud_rate(info->tty);
- mxser_set_baud(info, baud);
- }
+ brd->chip_flag = MOXA_OTHER_UART;
- /* byte size and parity */
- switch (cflag & CSIZE) {
- case CS5:
- cval = 0x00;
+ id = mxser_read_register(cap, regs);
+ switch (id) {
+ case C168_ASIC_ID:
+ brd->info = &mxser_cards[0];
break;
- case CS6:
- cval = 0x01;
+ case C104_ASIC_ID:
+ brd->info = &mxser_cards[1];
break;
- case CS7:
- cval = 0x02;
+ case CI104J_ASIC_ID:
+ brd->info = &mxser_cards[2];
break;
- case CS8:
- cval = 0x03;
+ case C102_ASIC_ID:
+ brd->info = &mxser_cards[5];
+ break;
+ case CI132_ASIC_ID:
+ brd->info = &mxser_cards[6];
+ break;
+ case CI134_ASIC_ID:
+ brd->info = &mxser_cards[7];
break;
default:
- cval = 0x00;
- break; /* too keep GCC shut... */
+ return 0;
}
- if (cflag & CSTOPB)
- cval |= 0x04;
- if (cflag & PARENB)
- cval |= UART_LCR_PARITY;
- if (!(cflag & PARODD))
- cval |= UART_LCR_EPAR;
- if (cflag & CMSPAR)
- cval |= UART_LCR_SPAR;
- if ((info->type == PORT_8250) || (info->type == PORT_16450)) {
- if (info->IsMoxaMustChipFlag) {
- fcr = UART_FCR_ENABLE_FIFO;
- fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
- SET_MOXA_MUST_FIFO_VALUE(info);
- } else
- fcr = 0;
- } else {
- fcr = UART_FCR_ENABLE_FIFO;
- /* following add by Victor Yu. 08-30-2002 */
- if (info->IsMoxaMustChipFlag) {
- fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
- SET_MOXA_MUST_FIFO_VALUE(info);
- } else {
- /* above add by Victor Yu. 08-30-2002 */
- switch (info->rx_trigger) {
- case 1:
- fcr |= UART_FCR_TRIGGER_1;
- break;
- case 4:
- fcr |= UART_FCR_TRIGGER_4;
- break;
- case 8:
- fcr |= UART_FCR_TRIGGER_8;
- break;
- default:
- fcr |= UART_FCR_TRIGGER_14;
- break;
- }
- }
+ irq = 0;
+ /* some ISA cards have 2 ports, but we want to see them as 4-port (why?)
+ Flag-hack checks if configuration should be read as 2-port here. */
+ if (brd->info->nports == 2 || (brd->info->flags & MXSER_HAS2)) {
+ irq = regs[9] & 0xF000;
+ irq = irq | (irq >> 4);
+ if (irq != (regs[9] & 0xFF00))
+ return MXSER_ERR_IRQ_CONFLIT;
+ } else if (brd->info->nports == 4) {
+ irq = regs[9] & 0xF000;
+ irq = irq | (irq >> 4);
+ irq = irq | (irq >> 8);
+ if (irq != regs[9])
+ return MXSER_ERR_IRQ_CONFLIT;
+ } else if (brd->info->nports == 8) {
+ irq = regs[9] & 0xF000;
+ irq = irq | (irq >> 4);
+ irq = irq | (irq >> 8);
+ if ((irq != regs[9]) || (irq != regs[10]))
+ return MXSER_ERR_IRQ_CONFLIT;
}
- /* CTS flow control flag and modem status interrupts */
- info->IER &= ~UART_IER_MSI;
- info->MCR &= ~UART_MCR_AFE;
- if (cflag & CRTSCTS) {
- info->flags |= ASYNC_CTS_FLOW;
- info->IER |= UART_IER_MSI;
- if ((info->type == PORT_16550A) || (info->IsMoxaMustChipFlag)) {
- info->MCR |= UART_MCR_AFE;
+ if (!irq)
+ return MXSER_ERR_IRQ;
+ brd->irq = ((int)(irq & 0xF000) >> 12);
+ for (i = 0; i < 8; i++)
+ brd->ports[i].ioaddr = (int) regs[i + 1] & 0xFFF8;
+ if ((regs[12] & 0x80) == 0)
+ return MXSER_ERR_VECTOR;
+ brd->vector = (int)regs[11]; /* interrupt vector */
+ if (id == 1)
+ brd->vector_mask = 0x00FF;
+ else
+ brd->vector_mask = 0x000F;
+ for (i = 7, bits = 0x0100; i >= 0; i--, bits <<= 1) {
+ if (regs[12] & bits) {
+ brd->ports[i].baud_base = 921600;
+ brd->ports[i].max_baud = 921600;
} else {
- status = inb(info->base + UART_MSR);
- if (info->tty->hw_stopped) {
- if (status & UART_MSR_CTS) {
- info->tty->hw_stopped = 0;
- if ((info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag)) {
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- set_bit(MXSER_EVENT_TXLOW, &info->event);
- schedule_work(&info->tqueue); }
- } else {
- if (!(status & UART_MSR_CTS)) {
- info->tty->hw_stopped = 1;
- if ((info->type != PORT_16550A) &&
- (!info->IsMoxaMustChipFlag)) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->base + UART_IER);
- }
- }
- }
+ brd->ports[i].baud_base = 115200;
+ brd->ports[i].max_baud = 115200;
}
- } else {
- info->flags &= ~ASYNC_CTS_FLOW;
- }
- outb(info->MCR, info->base + UART_MCR);
- if (cflag & CLOCAL) {
- info->flags &= ~ASYNC_CHECK_CD;
- } else {
- info->flags |= ASYNC_CHECK_CD;
- info->IER |= UART_IER_MSI;
}
- outb(info->IER, info->base + UART_IER);
-
- /*
- * Set up parity check flag
- */
- info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
- if (I_INPCK(info->tty))
- info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
- info->read_status_mask |= UART_LSR_BI;
-
- info->ignore_status_mask = 0;
+ scratch2 = inb(cap + UART_LCR) & (~UART_LCR_DLAB);
+ outb(scratch2 | UART_LCR_DLAB, cap + UART_LCR);
+ outb(0, cap + UART_EFR); /* EFR is the same as FCR */
+ outb(scratch2, cap + UART_LCR);
+ outb(UART_FCR_ENABLE_FIFO, cap + UART_FCR);
+ scratch = inb(cap + UART_IIR);
- if (I_IGNBRK(info->tty)) {
- info->ignore_status_mask |= UART_LSR_BI;
- info->read_status_mask |= UART_LSR_BI;
- /*
- * If we're ignore parity and break indicators, ignore
- * overruns too. (For real raw support).
- */
- if (I_IGNPAR(info->tty)) {
- info->ignore_status_mask |=
- UART_LSR_OE |
- UART_LSR_PE |
- UART_LSR_FE;
- info->read_status_mask |=
- UART_LSR_OE |
- UART_LSR_PE |
- UART_LSR_FE;
- }
- }
- /* following add by Victor Yu. 09-02-2002 */
- if (info->IsMoxaMustChipFlag) {
- spin_lock_irqsave(&info->slock, flags);
- SET_MOXA_MUST_XON1_VALUE(info->base, START_CHAR(info->tty));
- SET_MOXA_MUST_XOFF1_VALUE(info->base, STOP_CHAR(info->tty));
- if (I_IXON(info->tty)) {
- ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->base);
- } else {
- DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->base);
- }
- if (I_IXOFF(info->tty)) {
- ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->base);
- } else {
- DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->base);
- }
- /*
- if ( I_IXANY(info->tty) ) {
- info->MCR |= MOXA_MUST_MCR_XON_ANY;
- ENABLE_MOXA_MUST_XON_ANY_FLOW_CONTROL(info->base);
- } else {
- info->MCR &= ~MOXA_MUST_MCR_XON_ANY;
- DISABLE_MOXA_MUST_XON_ANY_FLOW_CONTROL(info->base);
- }
- */
- spin_unlock_irqrestore(&info->slock, flags);
+ if (scratch & 0xC0)
+ brd->uart_type = PORT_16550A;
+ else
+ brd->uart_type = PORT_16450;
+ if (!request_region(brd->ports[0].ioaddr, 8 * brd->info->nports,
+ "mxser(IO)"))
+ return MXSER_ERR_IOADDR;
+ if (!request_region(brd->vector, 1, "mxser(vector)")) {
+ release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
+ return MXSER_ERR_VECTOR;
}
- /* above add by Victor Yu. 09-02-2002 */
-
-
- outb(fcr, info->base + UART_FCR); /* set fcr */
- outb(cval, info->base + UART_LCR);
-
- return ret;
+ return brd->info->nports;
}
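
mxser_get_ISA_conf() validates the EEPROM configuration with some dense nibble arithmetic: the IRQ number lives in the top nibble of regs[9], and replicating that nibble rightwards must reproduce the stored register exactly, which proves every port was programmed with the same interrupt (the 2- and 8-port cases simply compare fewer or more nibbles). The four-port check in isolation, with a hypothetical register value:

#include <stdio.h>

int main(void)
{
        /* Hypothetical EEPROM word for a 4-port board: IRQ 10 (0xA)
         * stored once per port, one nibble each. */
        unsigned short reg9 = 0xAAAA;
        unsigned short irq;

        irq = reg9 & 0xF000;    /* take the top nibble */
        irq = irq | (irq >> 4); /* replicate it ... */
        irq = irq | (irq >> 8); /* ... across all four nibbles */

        if (irq != reg9) {
                printf("IRQ conflict: ports disagree\n");
                return 1;
        }

        printf("all ports use IRQ %d\n", (irq & 0xF000) >> 12);
        return 0;
}
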
-
-static int mxser_set_baud(struct mxser_struct *info, long newspd)
+static int __devinit mxser_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- int quot = 0;
- unsigned char cval;
- int ret = 0;
- unsigned long flags;
-
- if (!info->tty || !info->tty->termios)
- return ret;
-
- if (!(info->base))
- return ret;
+#ifdef CONFIG_PCI
+ struct mxser_board *brd;
+ unsigned int i, j;
+ unsigned long ioaddress;
+ int retval = -EINVAL;
- if (newspd > info->MaxCanSetBaudRate)
- return 0;
+ for (i = 0; i < MXSER_BOARDS; i++)
+ if (mxser_boards[i].info == NULL)
+ break;
- info->realbaud = newspd;
- if (newspd == 134) {
- quot = (2 * info->baud_base / 269);
- } else if (newspd) {
- quot = info->baud_base / newspd;
- if (quot == 0)
- quot = 1;
- } else {
- quot = 0;
+ if (i >= MXSER_BOARDS) {
+ printk(KERN_ERR "Too many Smartio/Industio family boards found "
+ "(maximum %d), board not configured\n", MXSER_BOARDS);
+ goto err;
}
- info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base);
- info->timeout += HZ / 50; /* Add .02 seconds of slop */
+ brd = &mxser_boards[i];
+ brd->idx = i * MXSER_PORTS_PER_BOARD;
+ printk(KERN_INFO "Found MOXA %s board (BusNo=%d, DevNo=%d)\n",
+ mxser_cards[ent->driver_data].name,
+ pdev->bus->number, PCI_SLOT(pdev->devfn));
- if (quot) {
- spin_lock_irqsave(&info->slock, flags);
- info->MCR |= UART_MCR_DTR;
- outb(info->MCR, info->base + UART_MCR);
- spin_unlock_irqrestore(&info->slock, flags);
- } else {
- spin_lock_irqsave(&info->slock, flags);
- info->MCR &= ~UART_MCR_DTR;
- outb(info->MCR, info->base + UART_MCR);
- spin_unlock_irqrestore(&info->slock, flags);
- return ret;
+ retval = pci_enable_device(pdev);
+ if (retval) {
+		printk(KERN_ERR "Moxa SmartI/O PCI enable failed!\n");
+ goto err;
}
- cval = inb(info->base + UART_LCR);
-
- outb(cval | UART_LCR_DLAB, info->base + UART_LCR); /* set DLAB */
-
- outb(quot & 0xff, info->base + UART_DLL); /* LS of divisor */
- outb(quot >> 8, info->base + UART_DLM); /* MS of divisor */
- outb(cval, info->base + UART_LCR); /* reset DLAB */
-
+ /* io address */
+ ioaddress = pci_resource_start(pdev, 2);
+ retval = pci_request_region(pdev, 2, "mxser(IO)");
+ if (retval)
+ goto err;
- return ret;
-}
+ brd->info = &mxser_cards[ent->driver_data];
+ for (i = 0; i < brd->info->nports; i++)
+ brd->ports[i].ioaddr = ioaddress + 8 * i;
-/*
- * ------------------------------------------------------------
- * friends of mxser_ioctl()
- * ------------------------------------------------------------
- */
-static int mxser_get_serial_info(struct mxser_struct *info, struct serial_struct __user *retinfo)
-{
- struct serial_struct tmp;
+ /* vector */
+ ioaddress = pci_resource_start(pdev, 3);
+ retval = pci_request_region(pdev, 3, "mxser(vector)");
+ if (retval)
+ goto err_relio;
+ brd->vector = ioaddress;
- if (!retinfo)
- return -EFAULT;
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = info->type;
- tmp.line = info->port;
- tmp.port = info->base;
- tmp.irq = info->irq;
- tmp.flags = info->flags;
- tmp.baud_base = info->baud_base;
- tmp.close_delay = info->close_delay;
- tmp.closing_wait = info->closing_wait;
- tmp.custom_divisor = info->custom_divisor;
- tmp.hub6 = 0;
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
- return 0;
-}
+ /* irq */
+ brd->irq = pdev->irq;
-static int mxser_set_serial_info(struct mxser_struct *info, struct serial_struct __user *new_info)
-{
- struct serial_struct new_serial;
- unsigned int flags;
- int retval = 0;
+ brd->chip_flag = CheckIsMoxaMust(brd->ports[0].ioaddr);
+ brd->uart_type = PORT_16550A;
+ brd->vector_mask = 0;
- if (!new_info || !info->base)
- return -EFAULT;
- if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
- return -EFAULT;
+ for (i = 0; i < brd->info->nports; i++) {
+ for (j = 0; j < UART_INFO_NUM; j++) {
+ if (Gpci_uart_info[j].type == brd->chip_flag) {
+ brd->ports[i].max_baud =
+ Gpci_uart_info[j].max_baud;
- if ((new_serial.irq != info->irq) ||
- (new_serial.port != info->base) ||
- (new_serial.custom_divisor != info->custom_divisor) ||
- (new_serial.baud_base != info->baud_base))
- return -EPERM;
+ /* exception....CP-102 */
+ if (brd->info->flags & MXSER_HIGHBAUD)
+ brd->ports[i].max_baud = 921600;
+ break;
+ }
+ }
+ }
- flags = info->flags & ASYNC_SPD_MASK;
+ if (brd->chip_flag == MOXA_MUST_MU860_HWID) {
+ for (i = 0; i < brd->info->nports; i++) {
+ if (i < 4)
+ brd->ports[i].opmode_ioaddr = ioaddress + 4;
+ else
+ brd->ports[i].opmode_ioaddr = ioaddress + 0x0c;
+ }
+ outb(0, ioaddress + 4); /* default set to RS232 mode */
+ outb(0, ioaddress + 0x0c); /* default set to RS232 mode */
+ }
- if (!capable(CAP_SYS_ADMIN)) {
- if ((new_serial.baud_base != info->baud_base) ||
- (new_serial.close_delay != info->close_delay) ||
- ((new_serial.flags & ~ASYNC_USR_MASK) != (info->flags & ~ASYNC_USR_MASK)))
- return -EPERM;
- info->flags = ((info->flags & ~ASYNC_USR_MASK) |
- (new_serial.flags & ASYNC_USR_MASK));
- } else {
- /*
- * OK, past this point, all the error checking has been done.
- * At this point, we start making changes.....
- */
- info->flags = ((info->flags & ~ASYNC_FLAGS) |
- (new_serial.flags & ASYNC_FLAGS));
- info->close_delay = new_serial.close_delay * HZ / 100;
- info->closing_wait = new_serial.closing_wait * HZ / 100;
- info->tty->low_latency =
- (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
- info->tty->low_latency = 0; /* (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0; */
+ for (i = 0; i < brd->info->nports; i++) {
+ brd->vector_mask |= (1 << i);
+ brd->ports[i].baud_base = 921600;
}
- /* added by casper, 3/17/2000, for mouse */
- info->type = new_serial.type;
+ /* mxser_initbrd will hook ISR. */
+ retval = mxser_initbrd(brd, pdev);
+ if (retval)
+ goto err_null;
- process_txrx_fifo(info);
+ for (i = 0; i < brd->info->nports; i++)
+ tty_register_device(mxvar_sdriver, brd->idx + i, &pdev->dev);
- if (info->flags & ASYNC_INITIALIZED) {
- if (flags != (info->flags & ASYNC_SPD_MASK)) {
- mxser_change_speed(info, NULL);
- }
- } else {
- retval = mxser_startup(info);
- }
+ pci_set_drvdata(pdev, brd);
+
+ return 0;
+err_relio:
+ pci_release_region(pdev, 2);
+err_null:
+ brd->info = NULL;
+err:
return retval;
+#else
+ return -ENODEV;
+#endif
}
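
mxser_probe() unwinds its failures with the usual ladder of goto labels: each acquisition gets a matching label, and an error jumps to the label that releases only what has already been taken (err_relio drops the I/O region, err_null forgets the board slot, err just returns). The pattern, detached from PCI and with stand-in resource functions, is simply:

#include <stdio.h>

/* Stand-ins for the two resource acquisitions in the probe path. */
static int grab_region(const char *what) { printf("grab %s\n", what); return 0; }
static void drop_region(const char *what) { printf("drop %s\n", what); }

static int probe(int fail_at_vector)
{
        int retval;

        retval = grab_region("I/O ports");
        if (retval)
                goto err;

        retval = fail_at_vector ? -1 : grab_region("vector");
        if (retval)
                goto err_relio; /* only the I/O region is held here */

        printf("probe ok\n");
        return 0;

err_relio:
        drop_region("I/O ports");
err:
        return retval;
}

int main(void)
{
        probe(0);       /* success path */
        probe(1);       /* failure after the first acquisition */
        return 0;
}
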
-/*
- * mxser_get_lsr_info - get line status register info
- *
- * Purpose: Let user call ioctl() to get info when the UART physically
- * is emptied. On bus types like RS485, the transmitter must
- * release the bus after transmitting. This must be done when
- * the transmit shift register is empty, not be done when the
- * transmit holding register is empty. This functionality
- * allows an RS485 driver to be written in user space.
- */
-static int mxser_get_lsr_info(struct mxser_struct *info, unsigned int __user *value)
+static void __devexit mxser_remove(struct pci_dev *pdev)
{
- unsigned char status;
- unsigned int result;
- unsigned long flags;
+ struct mxser_board *brd = pci_get_drvdata(pdev);
+ unsigned int i;
- spin_lock_irqsave(&info->slock, flags);
- status = inb(info->base + UART_LSR);
- spin_unlock_irqrestore(&info->slock, flags);
- result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
- return put_user(result, value);
-}
+ for (i = 0; i < brd->info->nports; i++)
+ tty_unregister_device(mxvar_sdriver, brd->idx + i);
-/*
- * This routine sends a break character out the serial port.
- */
-static void mxser_send_break(struct mxser_struct *info, int duration)
-{
- unsigned long flags;
-
- if (!info->base)
- return;
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&info->slock, flags);
- outb(inb(info->base + UART_LCR) | UART_LCR_SBC,
- info->base + UART_LCR);
- spin_unlock_irqrestore(&info->slock, flags);
- schedule_timeout(duration);
- spin_lock_irqsave(&info->slock, flags);
- outb(inb(info->base + UART_LCR) & ~UART_LCR_SBC,
- info->base + UART_LCR);
- spin_unlock_irqrestore(&info->slock, flags);
+ mxser_release_res(brd, pdev, 1);
+ brd->info = NULL;
}
-static int mxser_tiocmget(struct tty_struct *tty, struct file *file)
-{
- struct mxser_struct *info = tty->driver_data;
- unsigned char control, status;
- unsigned long flags;
+static struct pci_driver mxser_driver = {
+ .name = "mxser",
+ .id_table = mxser_pcibrds,
+ .probe = mxser_probe,
+ .remove = __devexit_p(mxser_remove)
+};
+static int __init mxser_module_init(void)
+{
+ struct mxser_board *brd;
+ unsigned long cap;
+ unsigned int i, m, isaloop;
+ int retval, b;
- if (tty->index == MXSER_PORTS)
- return -ENOIOCTLCMD;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
+ pr_debug("Loading module mxser ...\n");
- control = info->MCR;
+ mxvar_sdriver = alloc_tty_driver(MXSER_PORTS + 1);
+ if (!mxvar_sdriver)
+ return -ENOMEM;
- spin_lock_irqsave(&info->slock, flags);
- status = inb(info->base + UART_MSR);
- if (status & UART_MSR_ANY_DELTA)
- mxser_check_modem_status(info, status);
- spin_unlock_irqrestore(&info->slock, flags);
- return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
- ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
- ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
- ((status & UART_MSR_RI) ? TIOCM_RNG : 0) |
- ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) |
- ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
-}
+ printk(KERN_INFO "MOXA Smartio/Industio family driver version %s\n",
+ MXSER_VERSION);
-static int mxser_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear)
-{
- struct mxser_struct *info = tty->driver_data;
- unsigned long flags;
+ /* Initialize the tty_driver structure */
+ mxvar_sdriver->owner = THIS_MODULE;
+ mxvar_sdriver->magic = TTY_DRIVER_MAGIC;
+ mxvar_sdriver->name = "ttyMI";
+ mxvar_sdriver->major = ttymajor;
+ mxvar_sdriver->minor_start = 0;
+ mxvar_sdriver->num = MXSER_PORTS + 1;
+ mxvar_sdriver->type = TTY_DRIVER_TYPE_SERIAL;
+ mxvar_sdriver->subtype = SERIAL_TYPE_NORMAL;
+ mxvar_sdriver->init_termios = tty_std_termios;
+ mxvar_sdriver->init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
+ mxvar_sdriver->flags = TTY_DRIVER_REAL_RAW|TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(mxvar_sdriver, &mxser_ops);
+ retval = tty_register_driver(mxvar_sdriver);
+ if (retval) {
+ printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family "
+ "tty driver !\n");
+ goto err_put;
+ }
- if (tty->index == MXSER_PORTS)
- return -ENOIOCTLCMD;
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
+ mxvar_diagflag = 0;
- spin_lock_irqsave(&info->slock, flags);
+ m = 0;
+ /* Start finding ISA boards here */
+ for (isaloop = 0; isaloop < 2; isaloop++)
+ for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) {
+ if (!isaloop)
+ cap = mxserBoardCAP[b]; /* predefined */
+ else
+ cap = ioaddr[b]; /* module param */
- if (set & TIOCM_RTS)
- info->MCR |= UART_MCR_RTS;
- if (set & TIOCM_DTR)
- info->MCR |= UART_MCR_DTR;
+ if (!cap)
+ continue;
- if (clear & TIOCM_RTS)
- info->MCR &= ~UART_MCR_RTS;
- if (clear & TIOCM_DTR)
- info->MCR &= ~UART_MCR_DTR;
+ brd = &mxser_boards[m];
+ retval = mxser_get_ISA_conf(cap, brd);
- outb(info->MCR, info->base + UART_MCR);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
-}
+ if (retval != 0)
+ printk(KERN_INFO "Found MOXA %s board "
+ "(CAP=0x%x)\n",
+ brd->info->name, ioaddr[b]);
+ if (retval <= 0) {
+ if (retval == MXSER_ERR_IRQ)
+ printk(KERN_ERR "Invalid interrupt "
+ "number, board not "
+ "configured\n");
+ else if (retval == MXSER_ERR_IRQ_CONFLIT)
+ printk(KERN_ERR "Invalid interrupt "
+ "number, board not "
+ "configured\n");
+ else if (retval == MXSER_ERR_VECTOR)
+ printk(KERN_ERR "Invalid interrupt "
+ "vector, board not "
+ "configured\n");
+ else if (retval == MXSER_ERR_IOADDR)
+ printk(KERN_ERR "Invalid I/O address, "
+ "board not configured\n");
-static int mxser_read_register(int, unsigned short *);
-static int mxser_program_mode(int);
-static void mxser_normal_mode(int);
+ brd->info = NULL;
+ continue;
+ }
-static int mxser_get_ISA_conf(int cap, struct mxser_hwconf *hwconf)
-{
- int id, i, bits;
- unsigned short regs[16], irq;
- unsigned char scratch, scratch2;
+ /* mxser_initbrd will hook ISR. */
+ if (mxser_initbrd(brd, NULL) < 0) {
+ brd->info = NULL;
+ continue;
+ }
- hwconf->IsMoxaMustChipFlag = MOXA_OTHER_UART;
+ brd->idx = m * MXSER_PORTS_PER_BOARD;
+ for (i = 0; i < brd->info->nports; i++)
+ tty_register_device(mxvar_sdriver, brd->idx + i,
+ NULL);
- id = mxser_read_register(cap, regs);
- if (id == C168_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_C168_ISA;
- hwconf->ports = 8;
- } else if (id == C104_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_C104_ISA;
- hwconf->ports = 4;
- } else if (id == C102_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_C102_ISA;
- hwconf->ports = 2;
- } else if (id == CI132_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_CI132;
- hwconf->ports = 2;
- } else if (id == CI134_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_CI134;
- hwconf->ports = 4;
- } else if (id == CI104J_ASIC_ID) {
- hwconf->board_type = MXSER_BOARD_CI104J;
- hwconf->ports = 4;
- } else
- return 0;
+ m++;
+ }
- irq = 0;
- if (hwconf->ports == 2) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- if (irq != (regs[9] & 0xFF00))
- return MXSER_ERR_IRQ_CONFLIT;
- } else if (hwconf->ports == 4) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- irq = irq | (irq >> 8);
- if (irq != regs[9])
- return MXSER_ERR_IRQ_CONFLIT;
- } else if (hwconf->ports == 8) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- irq = irq | (irq >> 8);
- if ((irq != regs[9]) || (irq != regs[10]))
- return MXSER_ERR_IRQ_CONFLIT;
+ retval = pci_register_driver(&mxser_driver);
+ if (retval) {
+ printk(KERN_ERR "Can't register pci driver\n");
+ if (!m) {
+ retval = -ENODEV;
+ goto err_unr;
+ } /* else: we have some ISA cards under control */
}
- if (!irq)
- return MXSER_ERR_IRQ;
- hwconf->irq = ((int)(irq & 0xF000) >> 12);
- for (i = 0; i < 8; i++)
- hwconf->ioaddr[i] = (int) regs[i + 1] & 0xFFF8;
- if ((regs[12] & 0x80) == 0)
- return MXSER_ERR_VECTOR;
- hwconf->vector = (int)regs[11]; /* interrupt vector */
- if (id == 1)
- hwconf->vector_mask = 0x00FF;
- else
- hwconf->vector_mask = 0x000F;
- for (i = 7, bits = 0x0100; i >= 0; i--, bits <<= 1) {
- if (regs[12] & bits) {
- hwconf->baud_base[i] = 921600;
- hwconf->MaxCanSetBaudRate[i] = 921600; /* add by Victor Yu. 09-04-2002 */
- } else {
- hwconf->baud_base[i] = 115200;
- hwconf->MaxCanSetBaudRate[i] = 115200; /* add by Victor Yu. 09-04-2002 */
- }
- }
- scratch2 = inb(cap + UART_LCR) & (~UART_LCR_DLAB);
- outb(scratch2 | UART_LCR_DLAB, cap + UART_LCR);
- outb(0, cap + UART_EFR); /* EFR is the same as FCR */
- outb(scratch2, cap + UART_LCR);
- outb(UART_FCR_ENABLE_FIFO, cap + UART_FCR);
- scratch = inb(cap + UART_IIR);
+ pr_debug("Done.\n");
- if (scratch & 0xC0)
- hwconf->uart_type = PORT_16550A;
- else
- hwconf->uart_type = PORT_16450;
- if (id == 1)
- hwconf->ports = 8;
- else
- hwconf->ports = 4;
- request_region(hwconf->ioaddr[0], 8 * hwconf->ports, "mxser(IO)");
- request_region(hwconf->vector, 1, "mxser(vector)");
- return hwconf->ports;
+ return 0;
+err_unr:
+ tty_unregister_driver(mxvar_sdriver);
+err_put:
+ put_tty_driver(mxvar_sdriver);
+ return retval;
}
-#define CHIP_SK 0x01 /* Serial Data Clock in Eprom */
-#define CHIP_DO 0x02 /* Serial Data Output in Eprom */
-#define CHIP_CS 0x04 /* Serial Chip Select in Eprom */
-#define CHIP_DI 0x08 /* Serial Data Input in Eprom */
-#define EN_CCMD 0x000 /* Chip's command register */
-#define EN0_RSARLO 0x008 /* Remote start address reg 0 */
-#define EN0_RSARHI 0x009 /* Remote start address reg 1 */
-#define EN0_RCNTLO 0x00A /* Remote byte count reg WR */
-#define EN0_RCNTHI 0x00B /* Remote byte count reg WR */
-#define EN0_DCFG 0x00E /* Data configuration reg WR */
-#define EN0_PORT 0x010 /* Rcv missed frame error counter RD */
-#define ENC_PAGE0 0x000 /* Select page 0 of chip registers */
-#define ENC_PAGE3 0x0C0 /* Select page 3 of chip registers */
-static int mxser_read_register(int port, unsigned short *regs)
+static void __exit mxser_module_exit(void)
{
- int i, k, value, id;
- unsigned int j;
-
- id = mxser_program_mode(port);
- if (id < 0)
- return id;
- for (i = 0; i < 14; i++) {
- k = (i & 0x3F) | 0x180;
- for (j = 0x100; j > 0; j >>= 1) {
- outb(CHIP_CS, port);
- if (k & j) {
- outb(CHIP_CS | CHIP_DO, port);
- outb(CHIP_CS | CHIP_DO | CHIP_SK, port); /* A? bit of read */
- } else {
- outb(CHIP_CS, port);
- outb(CHIP_CS | CHIP_SK, port); /* A? bit of read */
- }
- }
- (void)inb(port);
- value = 0;
- for (k = 0, j = 0x8000; k < 16; k++, j >>= 1) {
- outb(CHIP_CS, port);
- outb(CHIP_CS | CHIP_SK, port);
- if (inb(port) & CHIP_DI)
- value |= j;
- }
- regs[i] = value;
- outb(0, port);
- }
- mxser_normal_mode(port);
- return id;
-}
+ unsigned int i, j;
-static int mxser_program_mode(int port)
-{
- int id, i, j, n;
- /* unsigned long flags; */
+ pr_debug("Unloading module mxser ...\n");
- spin_lock(&gm_lock);
- outb(0, port);
- outb(0, port);
- outb(0, port);
- (void)inb(port);
- (void)inb(port);
- outb(0, port);
- (void)inb(port);
- /* restore_flags(flags); */
- spin_unlock(&gm_lock);
+ pci_unregister_driver(&mxser_driver);
- id = inb(port + 1) & 0x1F;
- if ((id != C168_ASIC_ID) &&
- (id != C104_ASIC_ID) &&
- (id != C102_ASIC_ID) &&
- (id != CI132_ASIC_ID) &&
- (id != CI134_ASIC_ID) &&
- (id != CI104J_ASIC_ID))
- return -1;
- for (i = 0, j = 0; i < 4; i++) {
- n = inb(port + 2);
- if (n == 'M') {
- j = 1;
- } else if ((j == 1) && (n == 1)) {
- j = 2;
- break;
- } else
- j = 0;
- }
- if (j != 2)
- id = -2;
- return id;
-}
+ for (i = 0; i < MXSER_BOARDS; i++) /* ISA remains */
+ if (mxser_boards[i].info != NULL)
+ for (j = 0; j < mxser_boards[i].info->nports; j++)
+ tty_unregister_device(mxvar_sdriver,
+ mxser_boards[i].idx + j);
+ tty_unregister_driver(mxvar_sdriver);
+ put_tty_driver(mxvar_sdriver);
-static void mxser_normal_mode(int port)
-{
- int i, n;
+ for (i = 0; i < MXSER_BOARDS; i++)
+ if (mxser_boards[i].info != NULL)
+ mxser_release_res(&mxser_boards[i], NULL, 1);
- outb(0xA5, port + 1);
- outb(0x80, port + 3);
- outb(12, port + 0); /* 9600 bps */
- outb(0, port + 1);
- outb(0x03, port + 3); /* 8 data bits */
- outb(0x13, port + 4); /* loop back mode */
- for (i = 0; i < 16; i++) {
- n = inb(port + 5);
- if ((n & 0x61) == 0x60)
- break;
- if ((n & 1) == 1)
- (void)inb(port);
- }
- outb(0x00, port + 4);
+ pr_debug("Done.\n");
}
module_init(mxser_module_init);
diff --git a/drivers/char/mxser.h b/drivers/char/mxser.h
index 1f4aa45ec004..844171115954 100644
--- a/drivers/char/mxser.h
+++ b/drivers/char/mxser.h
@@ -4,19 +4,17 @@
/*
* Semi-public control interfaces
*/
-
+
/*
* MOXA ioctls
*/
#define MOXA 0x400
#define MOXA_GETDATACOUNT (MOXA + 23)
-#define MOXA_GET_CONF (MOXA + 35)
#define MOXA_DIAGNOSE (MOXA + 50)
#define MOXA_CHKPORTENABLE (MOXA + 60)
#define MOXA_HighSpeedOn (MOXA + 61)
#define MOXA_GET_MAJOR (MOXA + 63)
-#define MOXA_GET_CUMAJOR (MOXA + 64)
#define MOXA_GETMSTATUS (MOXA + 65)
#define MOXA_SET_OP_MODE (MOXA + 66)
#define MOXA_GET_OP_MODE (MOXA + 67)
@@ -26,26 +24,14 @@
#define RS422_MODE 2
#define RS485_4WIRE_MODE 3
#define OP_MODE_MASK 3
-// above add by Victor Yu. 01-05-2004
-
-#define TTY_THRESHOLD_THROTTLE 128
-
-#define HI_WATER 768
-
-// added by James. 03-11-2004.
-#define MOXA_SDS_GETICOUNTER (MOXA + 68)
-#define MOXA_SDS_RSTICOUNTER (MOXA + 69)
-// (above) added by James.
+#define MOXA_SDS_RSTICOUNTER (MOXA + 69)
#define MOXA_ASPP_OQUEUE (MOXA + 70)
-#define MOXA_ASPP_SETBAUD (MOXA + 71)
-#define MOXA_ASPP_GETBAUD (MOXA + 72)
#define MOXA_ASPP_MON (MOXA + 73)
#define MOXA_ASPP_LSTATUS (MOXA + 74)
#define MOXA_ASPP_MON_EXT (MOXA + 75)
#define MOXA_SET_BAUD_METHOD (MOXA + 76)
-
/* --------------------------------------------------- */
#define NPPI_NOTIFY_PARITY 0x01
@@ -54,51 +40,46 @@
#define NPPI_NOTIFY_SW_OVERRUN 0x08
#define NPPI_NOTIFY_BREAK 0x10
-#define NPPI_NOTIFY_CTSHOLD 0x01 // Tx hold by CTS low
-#define NPPI_NOTIFY_DSRHOLD 0x02 // Tx hold by DSR low
-#define NPPI_NOTIFY_XOFFHOLD 0x08 // Tx hold by Xoff received
-#define NPPI_NOTIFY_XOFFXENT 0x10 // Xoff Sent
-
-//CheckIsMoxaMust return value
-#define MOXA_OTHER_UART 0x00
-#define MOXA_MUST_MU150_HWID 0x01
-#define MOXA_MUST_MU860_HWID 0x02
-
-// follow just for Moxa Must chip define.
-//
-// when LCR register (offset 0x03) write following value,
-// the Must chip will enter enchance mode. And write value
-// on EFR (offset 0x02) bit 6,7 to change bank.
+#define NPPI_NOTIFY_CTSHOLD 0x01 /* Tx hold by CTS low */
+#define NPPI_NOTIFY_DSRHOLD 0x02 /* Tx hold by DSR low */
+#define NPPI_NOTIFY_XOFFHOLD 0x08 /* Tx hold by Xoff received */
+#define NPPI_NOTIFY_XOFFXENT 0x10 /* Xoff Sent */
+
+/* The following definitions are only for the Moxa MUST chip. */
+/* */
+/* When the LCR register (offset 0x03) is written with the value below, */
+/* the MUST chip enters enhanced mode; writing bits 6,7 of the EFR */
+/* register (offset 0x02) then selects the register bank. */
#define MOXA_MUST_ENTER_ENCHANCE 0xBF
-// when enhance mode enable, access on general bank register
+/* when enhanced mode is enabled, this accesses the general bank registers */
#define MOXA_MUST_GDL_REGISTER 0x07
#define MOXA_MUST_GDL_MASK 0x7F
#define MOXA_MUST_GDL_HAS_BAD_DATA 0x80
-#define MOXA_MUST_LSR_RERR 0x80 // error in receive FIFO
-// enchance register bank select and enchance mode setting register
-// when LCR register equal to 0xBF
+#define MOXA_MUST_LSR_RERR 0x80 /* error in receive FIFO */
+/* enhanced register bank select and enhanced mode setting register, */
+/* accessible when the LCR register equals 0xBF */
#define MOXA_MUST_EFR_REGISTER 0x02
-// enchance mode enable
+/* enhanced mode enable */
#define MOXA_MUST_EFR_EFRB_ENABLE 0x10
-// enchance reister bank set 0, 1, 2
+/* enhanced register bank select (banks 0-3) */
#define MOXA_MUST_EFR_BANK0 0x00
#define MOXA_MUST_EFR_BANK1 0x40
#define MOXA_MUST_EFR_BANK2 0x80
#define MOXA_MUST_EFR_BANK3 0xC0
#define MOXA_MUST_EFR_BANK_MASK 0xC0
-// set XON1 value register, when LCR=0xBF and change to bank0
+/* set XON1 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XON1_REGISTER 0x04
-// set XON2 value register, when LCR=0xBF and change to bank0
+/* set XON2 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XON2_REGISTER 0x05
-// set XOFF1 value register, when LCR=0xBF and change to bank0
+/* set XOFF1 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XOFF1_REGISTER 0x06
-// set XOFF2 value register, when LCR=0xBF and change to bank0
+/* set XOFF2 value register, when LCR=0xBF and change to bank0 */
#define MOXA_MUST_XOFF2_REGISTER 0x07
#define MOXA_MUST_RBRTL_REGISTER 0x04
@@ -110,32 +91,32 @@
#define MOXA_MUST_ECR_REGISTER 0x06
#define MOXA_MUST_CSR_REGISTER 0x07
-// good data mode enable
+/* good data mode enable */
#define MOXA_MUST_FCR_GDA_MODE_ENABLE 0x20
-// only good data put into RxFIFO
+/* only good data put into RxFIFO */
#define MOXA_MUST_FCR_GDA_ONLY_ENABLE 0x10
-// enable CTS interrupt
+/* enable CTS interrupt */
#define MOXA_MUST_IER_ECTSI 0x80
-// enable RTS interrupt
+/* enable RTS interrupt */
#define MOXA_MUST_IER_ERTSI 0x40
-// enable Xon/Xoff interrupt
+/* enable Xon/Xoff interrupt */
#define MOXA_MUST_IER_XINT 0x20
-// enable GDA interrupt
+/* enable GDA interrupt */
#define MOXA_MUST_IER_EGDAI 0x10
#define MOXA_MUST_RECV_ISR (UART_IER_RDI | MOXA_MUST_IER_EGDAI)
-// GDA interrupt pending
+/* GDA interrupt pending */
#define MOXA_MUST_IIR_GDA 0x1C
#define MOXA_MUST_IIR_RDA 0x04
#define MOXA_MUST_IIR_RTO 0x0C
#define MOXA_MUST_IIR_LSR 0x06
-// recieved Xon/Xoff or specical interrupt pending
+/* received Xon/Xoff or special interrupt pending */
#define MOXA_MUST_IIR_XSC 0x10
-// RTS/CTS change state interrupt pending
+/* RTS/CTS change state interrupt pending */
#define MOXA_MUST_IIR_RTSCTS 0x20
#define MOXA_MUST_IIR_MASK 0x3E
@@ -143,299 +124,164 @@
#define MOXA_MUST_MCR_XON_ANY 0x80
#define MOXA_MUST_MCR_TX_XON 0x08
-
-// software flow control on chip mask value
+/* software flow control on chip mask value */
#define MOXA_MUST_EFR_SF_MASK 0x0F
-// send Xon1/Xoff1
+/* send Xon1/Xoff1 */
#define MOXA_MUST_EFR_SF_TX1 0x08
-// send Xon2/Xoff2
+/* send Xon2/Xoff2 */
#define MOXA_MUST_EFR_SF_TX2 0x04
-// send Xon1,Xon2/Xoff1,Xoff2
+/* send Xon1,Xon2/Xoff1,Xoff2 */
#define MOXA_MUST_EFR_SF_TX12 0x0C
-// don't send Xon/Xoff
+/* don't send Xon/Xoff */
#define MOXA_MUST_EFR_SF_TX_NO 0x00
-// Tx software flow control mask
+/* Tx software flow control mask */
#define MOXA_MUST_EFR_SF_TX_MASK 0x0C
-// don't receive Xon/Xoff
+/* don't receive Xon/Xoff */
#define MOXA_MUST_EFR_SF_RX_NO 0x00
-// receive Xon1/Xoff1
+/* receive Xon1/Xoff1 */
#define MOXA_MUST_EFR_SF_RX1 0x02
-// receive Xon2/Xoff2
+/* receive Xon2/Xoff2 */
#define MOXA_MUST_EFR_SF_RX2 0x01
-// receive Xon1,Xon2/Xoff1,Xoff2
+/* receive Xon1,Xon2/Xoff1,Xoff2 */
#define MOXA_MUST_EFR_SF_RX12 0x03
-// Rx software flow control mask
+/* Rx software flow control mask */
#define MOXA_MUST_EFR_SF_RX_MASK 0x03
-//#define MOXA_MUST_MIN_XOFFLIMIT 66
-//#define MOXA_MUST_MIN_XONLIMIT 20
-//#define ID1_RX_TRIG 120
-
-
-#define CHECK_MOXA_MUST_XOFFLIMIT(info) { \
- if ( (info)->IsMoxaMustChipFlag && \
- (info)->HandFlow.XoffLimit < MOXA_MUST_MIN_XOFFLIMIT ) { \
- (info)->HandFlow.XoffLimit = MOXA_MUST_MIN_XOFFLIMIT; \
- (info)->HandFlow.XonLimit = MOXA_MUST_MIN_XONLIMIT; \
- } \
-}
-
-#define ENABLE_MOXA_MUST_ENCHANCE_MODE(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+#define ENABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr |= MOXA_MUST_EFR_EFRB_ENABLE; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define DISABLE_MOXA_MUST_ENCHANCE_MODE(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr |= MOXA_MUST_EFR_EFRB_ENABLE; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define DISABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_EFRB_ENABLE; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_XON1_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_EFRB_ENABLE; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define SET_MOXA_MUST_XON1_VALUE(baseio, Value) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
+ __efr |= MOXA_MUST_EFR_BANK0; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
outb((u8)(Value), (baseio)+MOXA_MUST_XON1_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
-#define SET_MOXA_MUST_XON2_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+#define SET_MOXA_MUST_XOFF1_VALUE(baseio, Value) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_XON2_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_XOFF1_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
+ __efr |= MOXA_MUST_EFR_BANK0; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
outb((u8)(Value), (baseio)+MOXA_MUST_XOFF1_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_XOFF2_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_XOFF2_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_RBRTL_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_RBRTL_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_RBRTH_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_RBRTH_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_RBRTI_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define SET_MOXA_MUST_FIFO_VALUE(info) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((info)->ioaddr+UART_LCR); \
+ outb(MOXA_MUST_ENTER_ENCHANCE, (info)->ioaddr+UART_LCR);\
+ __efr = inb((info)->ioaddr+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
+ __efr |= MOXA_MUST_EFR_BANK1; \
+ outb(__efr, (info)->ioaddr+MOXA_MUST_EFR_REGISTER); \
+ outb((u8)((info)->rx_high_water), (info)->ioaddr+ \
+ MOXA_MUST_RBRTH_REGISTER); \
+ outb((u8)((info)->rx_trigger), (info)->ioaddr+ \
+ MOXA_MUST_RBRTI_REGISTER); \
+ outb((u8)((info)->rx_low_water), (info)->ioaddr+ \
+ MOXA_MUST_RBRTL_REGISTER); \
+ outb(__oldlcr, (info)->ioaddr+UART_LCR); \
+} while (0)
+
+#define SET_MOXA_MUST_ENUM_VALUE(baseio, Value) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_RBRTI_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_THRTL_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_THRTL_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-//#define MOXA_MUST_RBRL_VALUE 4
-#define SET_MOXA_MUST_FIFO_VALUE(info) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((info)->base+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (info)->base+UART_LCR); \
- __efr = inb((info)->base+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (info)->base+MOXA_MUST_EFR_REGISTER); \
- outb((u8)((info)->rx_high_water), (info)->base+MOXA_MUST_RBRTH_REGISTER); \
- outb((u8)((info)->rx_trigger), (info)->base+MOXA_MUST_RBRTI_REGISTER); \
- outb((u8)((info)->rx_low_water), (info)->base+MOXA_MUST_RBRTL_REGISTER); \
- outb(__oldlcr, (info)->base+UART_LCR); \
-}
-
-
-
-#define SET_MOXA_MUST_ENUM_VALUE(baseio, Value) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK2; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
+ __efr |= MOXA_MUST_EFR_BANK2; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
outb((u8)(Value), (baseio)+MOXA_MUST_ENUM_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
-#define GET_MOXA_MUST_HARDWARE_ID(baseio, pId) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK2; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- *pId = inb((baseio)+MOXA_MUST_HWID_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_JUST_TX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_MASK; \
- __efr |= MOXA_MUST_EFR_SF_TX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+#define GET_MOXA_MUST_HARDWARE_ID(baseio, pId) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
- __efr |= MOXA_MUST_EFR_SF_TX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
+ __efr |= MOXA_MUST_EFR_BANK2; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ *pId = inb((baseio)+MOXA_MUST_HWID_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define SET_MOXA_MUST_JUST_RX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_SF_MASK; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_MASK; \
- __efr |= MOXA_MUST_EFR_SF_RX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
+ __efr |= MOXA_MUST_EFR_SF_TX1; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
- __efr |= MOXA_MUST_EFR_SF_RX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define ENABLE_MOXA_MUST_TX_RX_SOFTWARE_FLOW_CONTROL(baseio) { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
+ __efr |= MOXA_MUST_EFR_SF_RX1; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
+
+#define DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \
+ u8 __oldlcr, __efr; \
+ __oldlcr = inb((baseio)+UART_LCR); \
outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_MASK; \
- __efr |= (MOXA_MUST_EFR_SF_RX1|MOXA_MUST_EFR_SF_TX1); \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-}
-
-#define ENABLE_MOXA_MUST_XON_ANY_FLOW_CONTROL(baseio) { \
- u8 __oldmcr; \
- __oldmcr = inb((baseio)+UART_MCR); \
- __oldmcr |= MOXA_MUST_MCR_XON_ANY; \
- outb(__oldmcr, (baseio)+UART_MCR); \
-}
-
-#define DISABLE_MOXA_MUST_XON_ANY_FLOW_CONTROL(baseio) { \
- u8 __oldmcr; \
- __oldmcr = inb((baseio)+UART_MCR); \
- __oldmcr &= ~MOXA_MUST_MCR_XON_ANY; \
- outb(__oldmcr, (baseio)+UART_MCR); \
-}
-
-#define READ_MOXA_MUST_GDL(baseio) inb((baseio)+MOXA_MUST_GDL_REGISTER)
+ __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
+ __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
+ outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
+ outb(__oldlcr, (baseio)+UART_LCR); \
+} while (0)
#endif
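A note on the macro conversion in mxser.h above: the register-access
helpers are rewritten from bare { ... } compound blocks into
do { ... } while (0) blocks. The following is a minimal illustrative
sketch, not code from the driver (the macro and variable names here
are hypothetical); it shows why the do/while (0) form is preferred
when a multi-statement macro is invoked like an ordinary statement.

#include <stdio.h>

/*
 * Hypothetical example. With a bare-brace macro such as
 *     #define RESET_COUNTERS(rx, tx) { (rx) = 0; (tx) = 0; }
 * the trailing ';' after the invocation below would terminate the
 * "if" statement early and the "else" would fail to compile.
 * Wrapping the body in do { ... } while (0) makes the expansion a
 * single statement, so the if/else pairs up as expected.
 */
#define RESET_COUNTERS(rx, tx) do {	\
	(rx) = 0;			\
	(tx) = 0;			\
} while (0)

int main(void)
{
	int rx = 5, tx = 7, link_up = 1;

	if (link_up)
		RESET_COUNTERS(rx, tx);	/* expands to one statement */
	else
		printf("link down\n");	/* still pairs with the "if" */

	printf("rx=%d tx=%d\n", rx, tx);
	return 0;
}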
diff --git a/drivers/char/mxser_new.c b/drivers/char/mxser_new.c
deleted file mode 100644
index 081c84c7b548..000000000000
--- a/drivers/char/mxser_new.c
+++ /dev/null
@@ -1,2817 +0,0 @@
-/*
- * mxser.c -- MOXA Smartio/Industio family multiport serial driver.
- *
- * Copyright (C) 1999-2006 Moxa Technologies (support@moxa.com.tw).
- * Copyright (C) 2006-2007 Jiri Slaby <jirislaby@gmail.com>
- *
- * This code is loosely based on the 1.8 moxa driver which is based on
- * Linux serial driver, written by Linus Torvalds, Theodore T'so and
- * others.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Fed through a cleanup, indent and remove of non 2.6 code by Alan Cox
- * <alan@redhat.com>. The original 1.8 code is available on www.moxa.com.
- * - Fixed x86_64 cleanness
- * - Fixed sleep with spinlock held in mxser_send_break
- */
-
-#include <linux/module.h>
-#include <linux/autoconf.h>
-#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial.h>
-#include <linux/serial_reg.h>
-#include <linux/major.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/ptrace.h>
-#include <linux/gfp.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/bitops.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-
-#include "mxser_new.h"
-
-#define MXSER_VERSION "2.0.2" /* 1.10 */
-#define MXSERMAJOR 174
-#define MXSERCUMAJOR 175
-
-#define MXSER_BOARDS 4 /* Max. boards */
-#define MXSER_PORTS_PER_BOARD 8 /* Max. ports per board */
-#define MXSER_PORTS (MXSER_BOARDS * MXSER_PORTS_PER_BOARD)
-#define MXSER_ISR_PASS_LIMIT 100
-
-#define MXSER_ERR_IOADDR -1
-#define MXSER_ERR_IRQ -2
-#define MXSER_ERR_IRQ_CONFLIT -3
-#define MXSER_ERR_VECTOR -4
-
-/*CheckIsMoxaMust return value*/
-#define MOXA_OTHER_UART 0x00
-#define MOXA_MUST_MU150_HWID 0x01
-#define MOXA_MUST_MU860_HWID 0x02
-
-#define WAKEUP_CHARS 256
-
-#define UART_MCR_AFE 0x20
-#define UART_LSR_SPECIAL 0x1E
-
-#define PCI_DEVICE_ID_CB108 0x1080
-#define PCI_DEVICE_ID_CB114 0x1142
-#define PCI_DEVICE_ID_CB134I 0x1341
-#define PCI_DEVICE_ID_CP138U 0x1380
-#define PCI_DEVICE_ID_POS104UL 0x1044
-
-
-#define C168_ASIC_ID 1
-#define C104_ASIC_ID 2
-#define C102_ASIC_ID 0xB
-#define CI132_ASIC_ID 4
-#define CI134_ASIC_ID 3
-#define CI104J_ASIC_ID 5
-
-#define MXSER_HIGHBAUD 1
-#define MXSER_HAS2 2
-
-/* This is only for PCI */
-static const struct {
- int type;
- int tx_fifo;
- int rx_fifo;
- int xmit_fifo_size;
- int rx_high_water;
- int rx_trigger;
- int rx_low_water;
- long max_baud;
-} Gpci_uart_info[] = {
- {MOXA_OTHER_UART, 16, 16, 16, 14, 14, 1, 921600L},
- {MOXA_MUST_MU150_HWID, 64, 64, 64, 48, 48, 16, 230400L},
- {MOXA_MUST_MU860_HWID, 128, 128, 128, 96, 96, 32, 921600L}
-};
-#define UART_INFO_NUM ARRAY_SIZE(Gpci_uart_info)
-
-struct mxser_cardinfo {
- unsigned int nports;
- char *name;
- unsigned int flags;
-};
-
-static const struct mxser_cardinfo mxser_cards[] = {
-/* 0*/ { 8, "C168 series", },
- { 4, "C104 series", },
- { 4, "CI-104J series", },
- { 8, "C168H/PCI series", },
- { 4, "C104H/PCI series", },
-/* 5*/ { 4, "C102 series", MXSER_HAS2 }, /* C102-ISA */
- { 4, "CI-132 series", MXSER_HAS2 },
- { 4, "CI-134 series", },
- { 2, "CP-132 series", },
- { 4, "CP-114 series", },
-/*10*/ { 4, "CT-114 series", },
- { 2, "CP-102 series", MXSER_HIGHBAUD },
- { 4, "CP-104U series", },
- { 8, "CP-168U series", },
- { 2, "CP-132U series", },
-/*15*/ { 4, "CP-134U series", },
- { 4, "CP-104JU series", },
- { 8, "Moxa UC7000 Serial", }, /* RC7000 */
- { 8, "CP-118U series", },
- { 2, "CP-102UL series", },
-/*20*/ { 2, "CP-102U series", },
- { 8, "CP-118EL series", },
- { 8, "CP-168EL series", },
- { 4, "CP-104EL series", },
- { 8, "CB-108 series", },
-/*25*/ { 4, "CB-114 series", },
- { 4, "CB-134I series", },
- { 8, "CP-138U series", },
- { 4, "POS-104UL series", }
-};
-
-/* driver_data correspond to the lines in the structure above
- see also ISA probe function before you change something */
-static struct pci_device_id mxser_pcibrds[] = {
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168), .driver_data = 3 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114), .driver_data = 9 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CT114), .driver_data = 10 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102), .driver_data = 11 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U), .driver_data = 12 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U), .driver_data = 13 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U), .driver_data = 14 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U), .driver_data = 15 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104JU),.driver_data = 16 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_RC7000), .driver_data = 17 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U), .driver_data = 18 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),.driver_data = 19 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U), .driver_data = 20 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118EL),.driver_data = 21 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168EL),.driver_data = 22 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104EL),.driver_data = 23 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB108), .driver_data = 24 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB114), .driver_data = 25 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB134I), .driver_data = 26 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U), .driver_data = 27 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_POS104UL), .driver_data = 28 },
- { }
-};
-MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
-
-static int mxvar_baud_table[] = {
- 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400,
- 4800, 9600, 19200, 38400, 57600, 115200, 230400, 460800, 921600
-};
-static unsigned int mxvar_baud_table1[] = {
- 0, B50, B75, B110, B134, B150, B200, B300, B600, B1200, B1800, B2400,
- B4800, B9600, B19200, B38400, B57600, B115200, B230400, B460800, B921600
-};
-#define BAUD_TABLE_NO ARRAY_SIZE(mxvar_baud_table)
-
-#define B_SPEC B2000000
-
-static int ioaddr[MXSER_BOARDS] = { 0, 0, 0, 0 };
-static int ttymajor = MXSERMAJOR;
-static int calloutmajor = MXSERCUMAJOR;
-
-/* Variables for insmod */
-
-MODULE_AUTHOR("Casper Yang");
-MODULE_DESCRIPTION("MOXA Smartio/Industio Family Multiport Board Device Driver");
-module_param_array(ioaddr, int, NULL, 0);
-module_param(ttymajor, int, 0);
-MODULE_LICENSE("GPL");
-
-struct mxser_log {
- int tick;
- unsigned long rxcnt[MXSER_PORTS];
- unsigned long txcnt[MXSER_PORTS];
-};
-
-
-struct mxser_mon {
- unsigned long rxcnt;
- unsigned long txcnt;
- unsigned long up_rxcnt;
- unsigned long up_txcnt;
- int modem_status;
- unsigned char hold_reason;
-};
-
-struct mxser_mon_ext {
- unsigned long rx_cnt[32];
- unsigned long tx_cnt[32];
- unsigned long up_rxcnt[32];
- unsigned long up_txcnt[32];
- int modem_status[32];
-
- long baudrate[32];
- int databits[32];
- int stopbits[32];
- int parity[32];
- int flowctrl[32];
- int fifo[32];
- int iftype[32];
-};
-
-struct mxser_board;
-
-struct mxser_port {
- struct mxser_board *board;
- struct tty_struct *tty;
-
- unsigned long ioaddr;
- unsigned long opmode_ioaddr;
- int max_baud;
-
- int rx_high_water;
- int rx_trigger; /* Rx fifo trigger level */
- int rx_low_water;
- int baud_base; /* max. speed */
- long realbaud;
- int type; /* UART type */
- int flags; /* defined in tty.h */
- int speed;
-
- int x_char; /* xon/xoff character */
- int IER; /* Interrupt Enable Register */
- int MCR; /* Modem control register */
-
- unsigned char stop_rx;
- unsigned char ldisc_stop_rx;
-
- int custom_divisor;
- int close_delay;
- unsigned short closing_wait;
- unsigned char err_shadow;
- unsigned long event;
-
- int count; /* # of fd on device */
- int blocked_open; /* # of blocked opens */
- struct async_icount icount; /* kernel counters for 4 input interrupts */
- int timeout;
-
- int read_status_mask;
- int ignore_status_mask;
- int xmit_fifo_size;
- unsigned char *xmit_buf;
- int xmit_head;
- int xmit_tail;
- int xmit_cnt;
-
- struct ktermios normal_termios;
-
- struct mxser_mon mon_data;
-
- spinlock_t slock;
- wait_queue_head_t open_wait;
- wait_queue_head_t delta_msr_wait;
-};
-
-struct mxser_board {
- unsigned int idx;
- int irq;
- const struct mxser_cardinfo *info;
- unsigned long vector;
- unsigned long vector_mask;
-
- int chip_flag;
- int uart_type;
-
- struct mxser_port ports[MXSER_PORTS_PER_BOARD];
-};
-
-struct mxser_mstatus {
- tcflag_t cflag;
- int cts;
- int dsr;
- int ri;
- int dcd;
-};
-
-static struct mxser_mstatus GMStatus[MXSER_PORTS];
-
-static int mxserBoardCAP[MXSER_BOARDS] = {
- 0, 0, 0, 0
- /* 0x180, 0x280, 0x200, 0x320 */
-};
-
-static struct mxser_board mxser_boards[MXSER_BOARDS];
-static struct tty_driver *mxvar_sdriver;
-static struct mxser_log mxvar_log;
-static int mxvar_diagflag;
-static unsigned char mxser_msr[MXSER_PORTS + 1];
-static struct mxser_mon_ext mon_data_ext;
-static int mxser_set_baud_method[MXSER_PORTS + 1];
-
-#ifdef CONFIG_PCI
-static int __devinit CheckIsMoxaMust(int io)
-{
- u8 oldmcr, hwid;
- int i;
-
- outb(0, io + UART_LCR);
- DISABLE_MOXA_MUST_ENCHANCE_MODE(io);
- oldmcr = inb(io + UART_MCR);
- outb(0, io + UART_MCR);
- SET_MOXA_MUST_XON1_VALUE(io, 0x11);
- if ((hwid = inb(io + UART_MCR)) != 0) {
- outb(oldmcr, io + UART_MCR);
- return MOXA_OTHER_UART;
- }
-
- GET_MOXA_MUST_HARDWARE_ID(io, &hwid);
- for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */
- if (hwid == Gpci_uart_info[i].type)
- return (int)hwid;
- }
- return MOXA_OTHER_UART;
-}
-#endif
-
-static void process_txrx_fifo(struct mxser_port *info)
-{
- int i;
-
- if ((info->type == PORT_16450) || (info->type == PORT_8250)) {
- info->rx_trigger = 1;
- info->rx_high_water = 1;
- info->rx_low_water = 1;
- info->xmit_fifo_size = 1;
- } else
- for (i = 0; i < UART_INFO_NUM; i++)
- if (info->board->chip_flag == Gpci_uart_info[i].type) {
- info->rx_trigger = Gpci_uart_info[i].rx_trigger;
- info->rx_low_water = Gpci_uart_info[i].rx_low_water;
- info->rx_high_water = Gpci_uart_info[i].rx_high_water;
- info->xmit_fifo_size = Gpci_uart_info[i].xmit_fifo_size;
- break;
- }
-}
-
-static unsigned char mxser_get_msr(int baseaddr, int mode, int port)
-{
- unsigned char status = 0;
-
- status = inb(baseaddr + UART_MSR);
-
- mxser_msr[port] &= 0x0F;
- mxser_msr[port] |= status;
- status = mxser_msr[port];
- if (mode)
- mxser_msr[port] = 0;
-
- return status;
-}
-
-static int mxser_block_til_ready(struct tty_struct *tty, struct file *filp,
- struct mxser_port *port)
-{
- DECLARE_WAITQUEUE(wait, current);
- int retval;
- int do_clocal = 0;
- unsigned long flags;
-
- /*
- * If non-blocking mode is set, or the port is not enabled,
- * then make the check up front and then exit.
- */
- if ((filp->f_flags & O_NONBLOCK) ||
- test_bit(TTY_IO_ERROR, &tty->flags)) {
- port->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
- }
-
- if (tty->termios->c_cflag & CLOCAL)
- do_clocal = 1;
-
- /*
- * Block waiting for the carrier detect and the line to become
- * free (i.e., not in use by the callout). While we are in
- * this loop, port->count is dropped by one, so that
- * mxser_close() knows when to free things. We restore it upon
- * exit, either normal or abnormal.
- */
- retval = 0;
- add_wait_queue(&port->open_wait, &wait);
-
- spin_lock_irqsave(&port->slock, flags);
- if (!tty_hung_up_p(filp))
- port->count--;
- spin_unlock_irqrestore(&port->slock, flags);
- port->blocked_open++;
- while (1) {
- spin_lock_irqsave(&port->slock, flags);
- outb(inb(port->ioaddr + UART_MCR) |
- UART_MCR_DTR | UART_MCR_RTS, port->ioaddr + UART_MCR);
- spin_unlock_irqrestore(&port->slock, flags);
- set_current_state(TASK_INTERRUPTIBLE);
- if (tty_hung_up_p(filp) || !(port->flags & ASYNC_INITIALIZED)) {
- if (port->flags & ASYNC_HUP_NOTIFY)
- retval = -EAGAIN;
- else
- retval = -ERESTARTSYS;
- break;
- }
- if (!(port->flags & ASYNC_CLOSING) &&
- (do_clocal ||
- (inb(port->ioaddr + UART_MSR) & UART_MSR_DCD)))
- break;
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
- break;
- }
- schedule();
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&port->open_wait, &wait);
- if (!tty_hung_up_p(filp))
- port->count++;
- port->blocked_open--;
- if (retval)
- return retval;
- port->flags |= ASYNC_NORMAL_ACTIVE;
- return 0;
-}
-
-static int mxser_set_baud(struct mxser_port *info, long newspd)
-{
- unsigned int i;
- int quot = 0;
- unsigned char cval;
- int ret = 0;
-
- if (!info->tty || !info->tty->termios)
- return ret;
-
- if (!(info->ioaddr))
- return ret;
-
- if (newspd > info->max_baud)
- return 0;
-
- info->realbaud = newspd;
- for (i = 0; i < BAUD_TABLE_NO; i++)
- if (newspd == mxvar_baud_table[i])
- break;
- if (i == BAUD_TABLE_NO) {
- quot = info->baud_base / info->speed;
- if (info->speed <= 0 || info->speed > info->max_baud)
- quot = 0;
- } else {
- if (newspd == 134) {
- quot = (2 * info->baud_base / 269);
- } else if (newspd) {
- quot = info->baud_base / newspd;
- if (quot == 0)
- quot = 1;
- } else {
- quot = 0;
- }
- }
-
- info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base);
- info->timeout += HZ / 50; /* Add .02 seconds of slop */
-
- if (quot) {
- info->MCR |= UART_MCR_DTR;
- outb(info->MCR, info->ioaddr + UART_MCR);
- } else {
- info->MCR &= ~UART_MCR_DTR;
- outb(info->MCR, info->ioaddr + UART_MCR);
- return ret;
- }
-
- cval = inb(info->ioaddr + UART_LCR);
-
- outb(cval | UART_LCR_DLAB, info->ioaddr + UART_LCR); /* set DLAB */
-
- outb(quot & 0xff, info->ioaddr + UART_DLL); /* LS of divisor */
- outb(quot >> 8, info->ioaddr + UART_DLM); /* MS of divisor */
- outb(cval, info->ioaddr + UART_LCR); /* reset DLAB */
-
- if (i == BAUD_TABLE_NO) {
- quot = info->baud_base % info->speed;
- quot *= 8;
- if ((quot % info->speed) > (info->speed / 2)) {
- quot /= info->speed;
- quot++;
- } else {
- quot /= info->speed;
- }
- SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, quot);
- } else
- SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, 0);
-
- return ret;
-}
-
-/*
- * This routine is called to set the UART divisor registers to match
- * the specified baud rate for a serial port.
- */
-static int mxser_change_speed(struct mxser_port *info,
- struct ktermios *old_termios)
-{
- unsigned cflag, cval, fcr;
- int ret = 0;
- unsigned char status;
- long baud;
-
- if (!info->tty || !info->tty->termios)
- return ret;
- cflag = info->tty->termios->c_cflag;
- if (!(info->ioaddr))
- return ret;
-
- if (mxser_set_baud_method[info->tty->index] == 0) {
- if ((cflag & CBAUD) == B_SPEC)
- baud = info->speed;
- else
- baud = tty_get_baud_rate(info->tty);
- mxser_set_baud(info, baud);
- }
-
- /* byte size and parity */
- switch (cflag & CSIZE) {
- case CS5:
- cval = 0x00;
- break;
- case CS6:
- cval = 0x01;
- break;
- case CS7:
- cval = 0x02;
- break;
- case CS8:
- cval = 0x03;
- break;
- default:
- cval = 0x00;
- break; /* too keep GCC shut... */
- }
- if (cflag & CSTOPB)
- cval |= 0x04;
- if (cflag & PARENB)
- cval |= UART_LCR_PARITY;
- if (!(cflag & PARODD))
- cval |= UART_LCR_EPAR;
- if (cflag & CMSPAR)
- cval |= UART_LCR_SPAR;
-
- if ((info->type == PORT_8250) || (info->type == PORT_16450)) {
- if (info->board->chip_flag) {
- fcr = UART_FCR_ENABLE_FIFO;
- fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
- SET_MOXA_MUST_FIFO_VALUE(info);
- } else
- fcr = 0;
- } else {
- fcr = UART_FCR_ENABLE_FIFO;
- if (info->board->chip_flag) {
- fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
- SET_MOXA_MUST_FIFO_VALUE(info);
- } else {
- switch (info->rx_trigger) {
- case 1:
- fcr |= UART_FCR_TRIGGER_1;
- break;
- case 4:
- fcr |= UART_FCR_TRIGGER_4;
- break;
- case 8:
- fcr |= UART_FCR_TRIGGER_8;
- break;
- default:
- fcr |= UART_FCR_TRIGGER_14;
- break;
- }
- }
- }
-
- /* CTS flow control flag and modem status interrupts */
- info->IER &= ~UART_IER_MSI;
- info->MCR &= ~UART_MCR_AFE;
- if (cflag & CRTSCTS) {
- info->flags |= ASYNC_CTS_FLOW;
- info->IER |= UART_IER_MSI;
- if ((info->type == PORT_16550A) || (info->board->chip_flag)) {
- info->MCR |= UART_MCR_AFE;
- } else {
- status = inb(info->ioaddr + UART_MSR);
- if (info->tty->hw_stopped) {
- if (status & UART_MSR_CTS) {
- info->tty->hw_stopped = 0;
- if (info->type != PORT_16550A &&
- !info->board->chip_flag) {
- outb(info->IER & ~UART_IER_THRI,
- info->ioaddr +
- UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr +
- UART_IER);
- }
- tty_wakeup(info->tty);
- }
- } else {
- if (!(status & UART_MSR_CTS)) {
- info->tty->hw_stopped = 1;
- if ((info->type != PORT_16550A) &&
- (!info->board->chip_flag)) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->ioaddr +
- UART_IER);
- }
- }
- }
- }
- } else {
- info->flags &= ~ASYNC_CTS_FLOW;
- }
- outb(info->MCR, info->ioaddr + UART_MCR);
- if (cflag & CLOCAL) {
- info->flags &= ~ASYNC_CHECK_CD;
- } else {
- info->flags |= ASYNC_CHECK_CD;
- info->IER |= UART_IER_MSI;
- }
- outb(info->IER, info->ioaddr + UART_IER);
-
- /*
- * Set up parity check flag
- */
- info->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
- if (I_INPCK(info->tty))
- info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
- if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
- info->read_status_mask |= UART_LSR_BI;
-
- info->ignore_status_mask = 0;
-
- if (I_IGNBRK(info->tty)) {
- info->ignore_status_mask |= UART_LSR_BI;
- info->read_status_mask |= UART_LSR_BI;
- /*
- * If we're ignore parity and break indicators, ignore
- * overruns too. (For real raw support).
- */
- if (I_IGNPAR(info->tty)) {
- info->ignore_status_mask |=
- UART_LSR_OE |
- UART_LSR_PE |
- UART_LSR_FE;
- info->read_status_mask |=
- UART_LSR_OE |
- UART_LSR_PE |
- UART_LSR_FE;
- }
- }
- if (info->board->chip_flag) {
- SET_MOXA_MUST_XON1_VALUE(info->ioaddr, START_CHAR(info->tty));
- SET_MOXA_MUST_XOFF1_VALUE(info->ioaddr, STOP_CHAR(info->tty));
- if (I_IXON(info->tty)) {
- ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
- } else {
- DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
- }
- if (I_IXOFF(info->tty)) {
- ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
- } else {
- DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
- }
- }
-
-
- outb(fcr, info->ioaddr + UART_FCR); /* set fcr */
- outb(cval, info->ioaddr + UART_LCR);
-
- return ret;
-}
-
-static void mxser_check_modem_status(struct mxser_port *port, int status)
-{
- /* update input line counters */
- if (status & UART_MSR_TERI)
- port->icount.rng++;
- if (status & UART_MSR_DDSR)
- port->icount.dsr++;
- if (status & UART_MSR_DDCD)
- port->icount.dcd++;
- if (status & UART_MSR_DCTS)
- port->icount.cts++;
- port->mon_data.modem_status = status;
- wake_up_interruptible(&port->delta_msr_wait);
-
- if ((port->flags & ASYNC_CHECK_CD) && (status & UART_MSR_DDCD)) {
- if (status & UART_MSR_DCD)
- wake_up_interruptible(&port->open_wait);
- }
-
- if (port->flags & ASYNC_CTS_FLOW) {
- if (port->tty->hw_stopped) {
- if (status & UART_MSR_CTS) {
- port->tty->hw_stopped = 0;
-
- if ((port->type != PORT_16550A) &&
- (!port->board->chip_flag)) {
- outb(port->IER & ~UART_IER_THRI,
- port->ioaddr + UART_IER);
- port->IER |= UART_IER_THRI;
- outb(port->IER, port->ioaddr +
- UART_IER);
- }
- tty_wakeup(port->tty);
- }
- } else {
- if (!(status & UART_MSR_CTS)) {
- port->tty->hw_stopped = 1;
- if (port->type != PORT_16550A &&
- !port->board->chip_flag) {
- port->IER &= ~UART_IER_THRI;
- outb(port->IER, port->ioaddr +
- UART_IER);
- }
- }
- }
- }
-}
-
-static int mxser_startup(struct mxser_port *info)
-{
- unsigned long page;
- unsigned long flags;
-
- page = __get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- spin_lock_irqsave(&info->slock, flags);
-
- if (info->flags & ASYNC_INITIALIZED) {
- free_page(page);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
- }
-
- if (!info->ioaddr || !info->type) {
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
- free_page(page);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
- }
- if (info->xmit_buf)
- free_page(page);
- else
- info->xmit_buf = (unsigned char *) page;
-
- /*
- * Clear the FIFO buffers and disable them
- * (they will be reenabled in mxser_change_speed())
- */
- if (info->board->chip_flag)
- outb((UART_FCR_CLEAR_RCVR |
- UART_FCR_CLEAR_XMIT |
- MOXA_MUST_FCR_GDA_MODE_ENABLE), info->ioaddr + UART_FCR);
- else
- outb((UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
- info->ioaddr + UART_FCR);
-
- /*
- * At this point there's no way the LSR could still be 0xFF;
- * if it is, then bail out, because there's likely no UART
- * here.
- */
- if (inb(info->ioaddr + UART_LSR) == 0xff) {
- spin_unlock_irqrestore(&info->slock, flags);
- if (capable(CAP_SYS_ADMIN)) {
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
- return 0;
- } else
- return -ENODEV;
- }
-
- /*
- * Clear the interrupt registers.
- */
- (void) inb(info->ioaddr + UART_LSR);
- (void) inb(info->ioaddr + UART_RX);
- (void) inb(info->ioaddr + UART_IIR);
- (void) inb(info->ioaddr + UART_MSR);
-
- /*
- * Now, initialize the UART
- */
- outb(UART_LCR_WLEN8, info->ioaddr + UART_LCR); /* reset DLAB */
- info->MCR = UART_MCR_DTR | UART_MCR_RTS;
- outb(info->MCR, info->ioaddr + UART_MCR);
-
- /*
- * Finally, enable interrupts
- */
- info->IER = UART_IER_MSI | UART_IER_RLSI | UART_IER_RDI;
-
- if (info->board->chip_flag)
- info->IER |= MOXA_MUST_IER_EGDAI;
- outb(info->IER, info->ioaddr + UART_IER); /* enable interrupts */
-
- /*
- * And clear the interrupt registers again for luck.
- */
- (void) inb(info->ioaddr + UART_LSR);
- (void) inb(info->ioaddr + UART_RX);
- (void) inb(info->ioaddr + UART_IIR);
- (void) inb(info->ioaddr + UART_MSR);
-
- if (info->tty)
- clear_bit(TTY_IO_ERROR, &info->tty->flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
- /*
- * and set the speed of the serial port
- */
- mxser_change_speed(info, NULL);
- info->flags |= ASYNC_INITIALIZED;
- spin_unlock_irqrestore(&info->slock, flags);
-
- return 0;
-}
-
-/*
- * This routine will shutdown a serial port; interrupts maybe disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void mxser_shutdown(struct mxser_port *info)
-{
- unsigned long flags;
-
- if (!(info->flags & ASYNC_INITIALIZED))
- return;
-
- spin_lock_irqsave(&info->slock, flags);
-
- /*
- * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
- * here so the queue might never be waken up
- */
- wake_up_interruptible(&info->delta_msr_wait);
-
- /*
- * Free the IRQ, if necessary
- */
- if (info->xmit_buf) {
- free_page((unsigned long) info->xmit_buf);
- info->xmit_buf = NULL;
- }
-
- info->IER = 0;
- outb(0x00, info->ioaddr + UART_IER);
-
- if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
- info->MCR &= ~(UART_MCR_DTR | UART_MCR_RTS);
- outb(info->MCR, info->ioaddr + UART_MCR);
-
- /* clear Rx/Tx FIFO's */
- if (info->board->chip_flag)
- outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT |
- MOXA_MUST_FCR_GDA_MODE_ENABLE,
- info->ioaddr + UART_FCR);
- else
- outb(UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
- info->ioaddr + UART_FCR);
-
- /* read data port to reset things */
- (void) inb(info->ioaddr + UART_RX);
-
- if (info->tty)
- set_bit(TTY_IO_ERROR, &info->tty->flags);
-
- info->flags &= ~ASYNC_INITIALIZED;
-
- if (info->board->chip_flag)
- SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(info->ioaddr);
-
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-/*
- * This routine is called whenever a serial port is opened. It
- * enables interrupts for a serial port, linking in its async structure into
- * the IRQ chain. It also performs the serial-specific
- * initialization for the tty structure.
- */
-static int mxser_open(struct tty_struct *tty, struct file *filp)
-{
- struct mxser_port *info;
- unsigned long flags;
- int retval, line;
-
- line = tty->index;
- if (line == MXSER_PORTS)
- return 0;
- if (line < 0 || line > MXSER_PORTS)
- return -ENODEV;
- info = &mxser_boards[line / MXSER_PORTS_PER_BOARD].ports[line % MXSER_PORTS_PER_BOARD];
- if (!info->ioaddr)
- return -ENODEV;
-
- tty->driver_data = info;
- info->tty = tty;
- /*
- * Start up serial port
- */
- spin_lock_irqsave(&info->slock, flags);
- info->count++;
- spin_unlock_irqrestore(&info->slock, flags);
- retval = mxser_startup(info);
- if (retval)
- return retval;
-
- retval = mxser_block_til_ready(tty, filp, info);
- if (retval)
- return retval;
-
- /* unmark here for very high baud rate (ex. 921600 bps) used */
- tty->low_latency = 1;
- return 0;
-}
-
-/*
- * This routine is called when the serial port gets closed. First, we
- * wait for the last remaining data to be sent. Then, we unlink its
- * async structure from the interrupt chain if necessary, and we free
- * that IRQ if nothing is left in the chain.
- */
-static void mxser_close(struct tty_struct *tty, struct file *filp)
-{
- struct mxser_port *info = tty->driver_data;
-
- unsigned long timeout;
- unsigned long flags;
-
- if (tty->index == MXSER_PORTS)
- return;
- if (!info)
- return;
-
- spin_lock_irqsave(&info->slock, flags);
-
- if (tty_hung_up_p(filp)) {
- spin_unlock_irqrestore(&info->slock, flags);
- return;
- }
- if ((tty->count == 1) && (info->count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. Info->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- printk(KERN_ERR "mxser_close: bad serial port count; "
- "tty->count is 1, info->count is %d\n", info->count);
- info->count = 1;
- }
- if (--info->count < 0) {
- printk(KERN_ERR "mxser_close: bad serial port count for "
- "ttys%d: %d\n", tty->index, info->count);
- info->count = 0;
- }
- if (info->count) {
- spin_unlock_irqrestore(&info->slock, flags);
- return;
- }
- info->flags |= ASYNC_CLOSING;
- spin_unlock_irqrestore(&info->slock, flags);
- /*
- * Save the termios structure, since this port may have
- * separate termios for callout and dialin.
- */
- if (info->flags & ASYNC_NORMAL_ACTIVE)
- info->normal_termios = *tty->termios;
- /*
- * Now we wait for the transmit buffer to clear; and we notify
- * the line discipline to only process XON/XOFF characters.
- */
- tty->closing = 1;
- if (info->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent(tty, info->closing_wait);
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receive line status interrupts, and tell the
- * interrupt driver to stop checking the data ready bit in the
- * line status register.
- */
- info->IER &= ~UART_IER_RLSI;
- if (info->board->chip_flag)
- info->IER &= ~MOXA_MUST_RECV_ISR;
-
- if (info->flags & ASYNC_INITIALIZED) {
- outb(info->IER, info->ioaddr + UART_IER);
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- timeout = jiffies + HZ;
- while (!(inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT)) {
- schedule_timeout_interruptible(5);
- if (time_after(jiffies, timeout))
- break;
- }
- }
- mxser_shutdown(info);
-
- if (tty->driver->flush_buffer)
- tty->driver->flush_buffer(tty);
-
- tty_ldisc_flush(tty);
-
- tty->closing = 0;
- info->event = 0;
- info->tty = NULL;
- if (info->blocked_open) {
- if (info->close_delay)
- schedule_timeout_interruptible(info->close_delay);
- wake_up_interruptible(&info->open_wait);
- }
-
- info->flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
-}
-
-static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count)
-{
- int c, total = 0;
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- if (!info->xmit_buf)
- return 0;
-
- while (1) {
- c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
- SERIAL_XMIT_SIZE - info->xmit_head));
- if (c <= 0)
- break;
-
- memcpy(info->xmit_buf + info->xmit_head, buf, c);
- spin_lock_irqsave(&info->slock, flags);
- info->xmit_head = (info->xmit_head + c) &
- (SERIAL_XMIT_SIZE - 1);
- info->xmit_cnt += c;
- spin_unlock_irqrestore(&info->slock, flags);
-
- buf += c;
- count -= c;
- total += c;
- }
-
- if (info->xmit_cnt && !tty->stopped) {
- if (!tty->hw_stopped ||
- (info->type == PORT_16550A) ||
- (info->board->chip_flag)) {
- spin_lock_irqsave(&info->slock, flags);
- outb(info->IER & ~UART_IER_THRI, info->ioaddr +
- UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- spin_unlock_irqrestore(&info->slock, flags);
- }
- }
- return total;
-}
-
-static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- if (!info->xmit_buf)
- return;
-
- if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
- return;
-
- spin_lock_irqsave(&info->slock, flags);
- info->xmit_buf[info->xmit_head++] = ch;
- info->xmit_head &= SERIAL_XMIT_SIZE - 1;
- info->xmit_cnt++;
- spin_unlock_irqrestore(&info->slock, flags);
- if (!tty->stopped) {
- if (!tty->hw_stopped ||
- (info->type == PORT_16550A) ||
- info->board->chip_flag) {
- spin_lock_irqsave(&info->slock, flags);
- outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- spin_unlock_irqrestore(&info->slock, flags);
- }
- }
-}
-
-
-static void mxser_flush_chars(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- if (info->xmit_cnt <= 0 ||
- tty->stopped ||
- !info->xmit_buf ||
- (tty->hw_stopped &&
- (info->type != PORT_16550A) &&
- (!info->board->chip_flag)
- ))
- return;
-
- spin_lock_irqsave(&info->slock, flags);
-
- outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
-
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static int mxser_write_room(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- int ret;
-
- ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
- if (ret < 0)
- ret = 0;
- return ret;
-}
-
-static int mxser_chars_in_buffer(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- return info->xmit_cnt;
-}
-
-static void mxser_flush_buffer(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- char fcr;
- unsigned long flags;
-
-
- spin_lock_irqsave(&info->slock, flags);
- info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
- fcr = inb(info->ioaddr + UART_FCR);
- outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
- info->ioaddr + UART_FCR);
- outb(fcr, info->ioaddr + UART_FCR);
-
- spin_unlock_irqrestore(&info->slock, flags);
-
- tty_wakeup(tty);
-}
-
-/*
- * ------------------------------------------------------------
- * friends of mxser_ioctl()
- * ------------------------------------------------------------
- */
-static int mxser_get_serial_info(struct mxser_port *info,
- struct serial_struct __user *retinfo)
-{
- struct serial_struct tmp;
-
- if (!retinfo)
- return -EFAULT;
- memset(&tmp, 0, sizeof(tmp));
- tmp.type = info->type;
- tmp.line = info->tty->index;
- tmp.port = info->ioaddr;
- tmp.irq = info->board->irq;
- tmp.flags = info->flags;
- tmp.baud_base = info->baud_base;
- tmp.close_delay = info->close_delay;
- tmp.closing_wait = info->closing_wait;
- tmp.custom_divisor = info->custom_divisor;
- tmp.hub6 = 0;
- if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
- return -EFAULT;
- return 0;
-}
-
-static int mxser_set_serial_info(struct mxser_port *info,
- struct serial_struct __user *new_info)
-{
- struct serial_struct new_serial;
- unsigned long sl_flags;
- unsigned int flags;
- int retval = 0;
-
- if (!new_info || !info->ioaddr)
- return -EFAULT;
- if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
- return -EFAULT;
-
- if ((new_serial.irq != info->board->irq) ||
- (new_serial.port != info->ioaddr) ||
- (new_serial.custom_divisor != info->custom_divisor) ||
- (new_serial.baud_base != info->baud_base))
- return -EPERM;
-
- flags = info->flags & ASYNC_SPD_MASK;
-
- if (!capable(CAP_SYS_ADMIN)) {
- if ((new_serial.baud_base != info->baud_base) ||
- (new_serial.close_delay != info->close_delay) ||
- ((new_serial.flags & ~ASYNC_USR_MASK) != (info->flags & ~ASYNC_USR_MASK)))
- return -EPERM;
- info->flags = ((info->flags & ~ASYNC_USR_MASK) |
- (new_serial.flags & ASYNC_USR_MASK));
- } else {
- /*
- * OK, past this point, all the error checking has been done.
- * At this point, we start making changes.....
- */
- info->flags = ((info->flags & ~ASYNC_FLAGS) |
- (new_serial.flags & ASYNC_FLAGS));
- info->close_delay = new_serial.close_delay * HZ / 100;
- info->closing_wait = new_serial.closing_wait * HZ / 100;
- info->tty->low_latency =
- (info->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
- info->tty->low_latency = 0;
- }
-
- info->type = new_serial.type;
-
- process_txrx_fifo(info);
-
- if (info->flags & ASYNC_INITIALIZED) {
- if (flags != (info->flags & ASYNC_SPD_MASK)) {
- spin_lock_irqsave(&info->slock, sl_flags);
- mxser_change_speed(info, NULL);
- spin_unlock_irqrestore(&info->slock, sl_flags);
- }
- } else
- retval = mxser_startup(info);
-
- return retval;
-}
-
-/*
- * mxser_get_lsr_info - get line status register info
- *
- * Purpose: Let user call ioctl() to get info when the UART physically
- * is emptied. On bus types like RS485, the transmitter must
- * release the bus after transmitting. This must be done when
- * the transmit shift register is empty, not be done when the
- * transmit holding register is empty. This functionality
- * allows an RS485 driver to be written in user space.
- */
-static int mxser_get_lsr_info(struct mxser_port *info,
- unsigned int __user *value)
-{
- unsigned char status;
- unsigned int result;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- status = inb(info->ioaddr + UART_LSR);
- spin_unlock_irqrestore(&info->slock, flags);
- result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
- return put_user(result, value);
-}
-
-/*
- * This routine sends a break character out the serial port.
- */
-static void mxser_send_break(struct mxser_port *info, int duration)
-{
- unsigned long flags;
-
- if (!info->ioaddr)
- return;
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&info->slock, flags);
- outb(inb(info->ioaddr + UART_LCR) | UART_LCR_SBC,
- info->ioaddr + UART_LCR);
- spin_unlock_irqrestore(&info->slock, flags);
- schedule_timeout(duration);
- spin_lock_irqsave(&info->slock, flags);
- outb(inb(info->ioaddr + UART_LCR) & ~UART_LCR_SBC,
- info->ioaddr + UART_LCR);
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static int mxser_tiocmget(struct tty_struct *tty, struct file *file)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned char control, status;
- unsigned long flags;
-
-
- if (tty->index == MXSER_PORTS)
- return -ENOIOCTLCMD;
- if (test_bit(TTY_IO_ERROR, &tty->flags))
- return -EIO;
-
- control = info->MCR;
-
- spin_lock_irqsave(&info->slock, flags);
- status = inb(info->ioaddr + UART_MSR);
- if (status & UART_MSR_ANY_DELTA)
- mxser_check_modem_status(info, status);
- spin_unlock_irqrestore(&info->slock, flags);
- return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
- ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
- ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
- ((status & UART_MSR_RI) ? TIOCM_RNG : 0) |
- ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) |
- ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
-}
-
-static int mxser_tiocmset(struct tty_struct *tty, struct file *file,
- unsigned int set, unsigned int clear)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
-
- if (tty->index == MXSER_PORTS)
- return -ENOIOCTLCMD;
- if (test_bit(TTY_IO_ERROR, &tty->flags))
- return -EIO;
-
- spin_lock_irqsave(&info->slock, flags);
-
- if (set & TIOCM_RTS)
- info->MCR |= UART_MCR_RTS;
- if (set & TIOCM_DTR)
- info->MCR |= UART_MCR_DTR;
-
- if (clear & TIOCM_RTS)
- info->MCR &= ~UART_MCR_RTS;
- if (clear & TIOCM_DTR)
- info->MCR &= ~UART_MCR_DTR;
-
- outb(info->MCR, info->ioaddr + UART_MCR);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
-}
-
-static int __init mxser_program_mode(int port)
-{
- int id, i, j, n;
-
- outb(0, port);
- outb(0, port);
- outb(0, port);
- (void)inb(port);
- (void)inb(port);
- outb(0, port);
- (void)inb(port);
-
- id = inb(port + 1) & 0x1F;
- if ((id != C168_ASIC_ID) &&
- (id != C104_ASIC_ID) &&
- (id != C102_ASIC_ID) &&
- (id != CI132_ASIC_ID) &&
- (id != CI134_ASIC_ID) &&
- (id != CI104J_ASIC_ID))
- return -1;
- for (i = 0, j = 0; i < 4; i++) {
- n = inb(port + 2);
- if (n == 'M') {
- j = 1;
- } else if ((j == 1) && (n == 1)) {
- j = 2;
- break;
- } else
- j = 0;
- }
- if (j != 2)
- id = -2;
- return id;
-}
-
-static void __init mxser_normal_mode(int port)
-{
- int i, n;
-
- outb(0xA5, port + 1);
- outb(0x80, port + 3);
- outb(12, port + 0); /* 9600 bps */
- outb(0, port + 1);
- outb(0x03, port + 3); /* 8 data bits */
- outb(0x13, port + 4); /* loop back mode */
- for (i = 0; i < 16; i++) {
- n = inb(port + 5);
- if ((n & 0x61) == 0x60)
- break;
- if ((n & 1) == 1)
- (void)inb(port);
- }
- outb(0x00, port + 4);
-}
-
-#define CHIP_SK 0x01 /* Serial Data Clock in Eprom */
-#define CHIP_DO 0x02 /* Serial Data Output in Eprom */
-#define CHIP_CS 0x04 /* Serial Chip Select in Eprom */
-#define CHIP_DI 0x08 /* Serial Data Input in Eprom */
-#define EN_CCMD 0x000 /* Chip's command register */
-#define EN0_RSARLO 0x008 /* Remote start address reg 0 */
-#define EN0_RSARHI 0x009 /* Remote start address reg 1 */
-#define EN0_RCNTLO 0x00A /* Remote byte count reg WR */
-#define EN0_RCNTHI 0x00B /* Remote byte count reg WR */
-#define EN0_DCFG 0x00E /* Data configuration reg WR */
-#define EN0_PORT 0x010 /* Rcv missed frame error counter RD */
-#define ENC_PAGE0 0x000 /* Select page 0 of chip registers */
-#define ENC_PAGE3 0x0C0 /* Select page 3 of chip registers */
-static int __init mxser_read_register(int port, unsigned short *regs)
-{
- int i, k, value, id;
- unsigned int j;
-
- id = mxser_program_mode(port);
- if (id < 0)
- return id;
- for (i = 0; i < 14; i++) {
- k = (i & 0x3F) | 0x180;
- for (j = 0x100; j > 0; j >>= 1) {
- outb(CHIP_CS, port);
- if (k & j) {
- outb(CHIP_CS | CHIP_DO, port);
- outb(CHIP_CS | CHIP_DO | CHIP_SK, port); /* A? bit of read */
- } else {
- outb(CHIP_CS, port);
- outb(CHIP_CS | CHIP_SK, port); /* A? bit of read */
- }
- }
- (void)inb(port);
- value = 0;
- for (k = 0, j = 0x8000; k < 16; k++, j >>= 1) {
- outb(CHIP_CS, port);
- outb(CHIP_CS | CHIP_SK, port);
- if (inb(port) & CHIP_DI)
- value |= j;
- }
- regs[i] = value;
- outb(0, port);
- }
- mxser_normal_mode(port);
- return id;
-}
-
-static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
-{
- struct mxser_port *port;
- int result, status;
- unsigned int i, j;
-
- switch (cmd) {
- case MOXA_GET_CONF:
-/* if (copy_to_user(argp, mxsercfg,
- sizeof(struct mxser_hwconf) * 4))
- return -EFAULT;
- return 0;*/
- return -ENXIO;
- case MOXA_GET_MAJOR:
- if (copy_to_user(argp, &ttymajor, sizeof(int)))
- return -EFAULT;
- return 0;
-
- case MOXA_GET_CUMAJOR:
- if (copy_to_user(argp, &calloutmajor, sizeof(int)))
- return -EFAULT;
- return 0;
-
- case MOXA_CHKPORTENABLE:
- result = 0;
-
- for (i = 0; i < MXSER_BOARDS; i++)
- for (j = 0; j < MXSER_PORTS_PER_BOARD; j++)
- if (mxser_boards[i].ports[j].ioaddr)
- result |= (1 << i);
-
- return put_user(result, (unsigned long __user *)argp);
- case MOXA_GETDATACOUNT:
- if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
- return -EFAULT;
- return 0;
- case MOXA_GETMSTATUS:
- for (i = 0; i < MXSER_BOARDS; i++)
- for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
- port = &mxser_boards[i].ports[j];
-
- GMStatus[i].ri = 0;
- if (!port->ioaddr) {
- GMStatus[i].dcd = 0;
- GMStatus[i].dsr = 0;
- GMStatus[i].cts = 0;
- continue;
- }
-
- if (!port->tty || !port->tty->termios)
- GMStatus[i].cflag =
- port->normal_termios.c_cflag;
- else
- GMStatus[i].cflag =
- port->tty->termios->c_cflag;
-
- status = inb(port->ioaddr + UART_MSR);
- if (status & 0x80 /*UART_MSR_DCD */ )
- GMStatus[i].dcd = 1;
- else
- GMStatus[i].dcd = 0;
-
- if (status & 0x20 /*UART_MSR_DSR */ )
- GMStatus[i].dsr = 1;
- else
- GMStatus[i].dsr = 0;
-
-
- if (status & 0x10 /*UART_MSR_CTS */ )
- GMStatus[i].cts = 1;
- else
- GMStatus[i].cts = 0;
- }
- if (copy_to_user(argp, GMStatus,
- sizeof(struct mxser_mstatus) * MXSER_PORTS))
- return -EFAULT;
- return 0;
- case MOXA_ASPP_MON_EXT: {
- int p, shiftbit;
- unsigned long opmode;
- unsigned cflag, iflag;
-
- for (i = 0; i < MXSER_BOARDS; i++)
- for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
- port = &mxser_boards[i].ports[j];
- if (!port->ioaddr)
- continue;
-
- status = mxser_get_msr(port->ioaddr, 0, i);
-
- if (status & UART_MSR_TERI)
- port->icount.rng++;
- if (status & UART_MSR_DDSR)
- port->icount.dsr++;
- if (status & UART_MSR_DDCD)
- port->icount.dcd++;
- if (status & UART_MSR_DCTS)
- port->icount.cts++;
-
- port->mon_data.modem_status = status;
- mon_data_ext.rx_cnt[i] = port->mon_data.rxcnt;
- mon_data_ext.tx_cnt[i] = port->mon_data.txcnt;
- mon_data_ext.up_rxcnt[i] =
- port->mon_data.up_rxcnt;
- mon_data_ext.up_txcnt[i] =
- port->mon_data.up_txcnt;
- mon_data_ext.modem_status[i] =
- port->mon_data.modem_status;
- mon_data_ext.baudrate[i] = port->realbaud;
-
- if (!port->tty || !port->tty->termios) {
- cflag = port->normal_termios.c_cflag;
- iflag = port->normal_termios.c_iflag;
- } else {
- cflag = port->tty->termios->c_cflag;
- iflag = port->tty->termios->c_iflag;
- }
-
- mon_data_ext.databits[i] = cflag & CSIZE;
-
- mon_data_ext.stopbits[i] = cflag & CSTOPB;
-
- mon_data_ext.parity[i] =
- cflag & (PARENB | PARODD | CMSPAR);
-
- mon_data_ext.flowctrl[i] = 0x00;
-
- if (cflag & CRTSCTS)
- mon_data_ext.flowctrl[i] |= 0x03;
-
- if (iflag & (IXON | IXOFF))
- mon_data_ext.flowctrl[i] |= 0x0C;
-
- if (port->type == PORT_16550A)
- mon_data_ext.fifo[i] = 1;
- else
- mon_data_ext.fifo[i] = 0;
-
- p = i % 4;
- shiftbit = p * 2;
- opmode = inb(port->opmode_ioaddr) >> shiftbit;
- opmode &= OP_MODE_MASK;
-
- mon_data_ext.iftype[i] = opmode;
-
- }
- if (copy_to_user(argp, &mon_data_ext,
- sizeof(mon_data_ext)))
- return -EFAULT;
-
- return 0;
-
- } default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static int mxser_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct mxser_port *info = tty->driver_data;
- struct async_icount cprev, cnow; /* kernel counter temps */
- struct serial_icounter_struct __user *p_cuser;
- unsigned long templ;
- unsigned long flags;
- unsigned int i;
- void __user *argp = (void __user *)arg;
- int retval;
-
- if (tty->index == MXSER_PORTS)
- return mxser_ioctl_special(cmd, argp);
-
- if (cmd == MOXA_SET_OP_MODE || cmd == MOXA_GET_OP_MODE) {
- int p;
- unsigned long opmode;
- static unsigned char ModeMask[] = { 0xfc, 0xf3, 0xcf, 0x3f };
- int shiftbit;
- unsigned char val, mask;
-
- p = tty->index % 4;
- if (cmd == MOXA_SET_OP_MODE) {
- if (get_user(opmode, (int __user *) argp))
- return -EFAULT;
- if (opmode != RS232_MODE &&
- opmode != RS485_2WIRE_MODE &&
- opmode != RS422_MODE &&
- opmode != RS485_4WIRE_MODE)
- return -EFAULT;
- mask = ModeMask[p];
- shiftbit = p * 2;
- val = inb(info->opmode_ioaddr);
- val &= mask;
- val |= (opmode << shiftbit);
- outb(val, info->opmode_ioaddr);
- } else {
- shiftbit = p * 2;
- opmode = inb(info->opmode_ioaddr) >> shiftbit;
- opmode &= OP_MODE_MASK;
- if (copy_to_user(argp, &opmode, sizeof(int)))
- return -EFAULT;
- }
- return 0;
- }
-
- if (cmd == MOXA_SET_SPECIAL_BAUD_RATE) {
- int speed;
-
- if (get_user(speed, (int __user *)argp))
- return -EFAULT;
- if (speed <= 0 || speed > info->max_baud)
- return -EFAULT;
- if (!info->tty || !info->tty->termios || !info->ioaddr)
- return 0;
- info->tty->termios->c_cflag &= ~(CBAUD | CBAUDEX);
- for (i = 0; i < BAUD_TABLE_NO; i++)
- if (speed == mxvar_baud_table[i])
- break;
- if (i == BAUD_TABLE_NO) {
- info->tty->termios->c_cflag |= B_SPEC;
- } else if (speed != 0)
- info->tty->termios->c_cflag |= mxvar_baud_table1[i];
-
- info->speed = speed;
- spin_lock_irqsave(&info->slock, flags);
- mxser_change_speed(info, NULL);
- spin_unlock_irqrestore(&info->slock, flags);
-
- return 0;
- } else if (cmd == MOXA_GET_SPECIAL_BAUD_RATE) {
- if (copy_to_user(argp, &info->speed, sizeof(int)))
- return -EFAULT;
- return 0;
- }
-
- if (cmd != TIOCGSERIAL && cmd != TIOCMIWAIT && cmd != TIOCGICOUNT &&
- test_bit(TTY_IO_ERROR, &tty->flags))
- return -EIO;
-
- switch (cmd) {
- case TCSBRK: /* SVID version: non-zero arg --> no break */
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- if (!arg)
- mxser_send_break(info, HZ / 4); /* 1/4 second */
- return 0;
- case TCSBRKP: /* support for POSIX tcsendbreak() */
- retval = tty_check_change(tty);
- if (retval)
- return retval;
- tty_wait_until_sent(tty, 0);
- mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4);
- return 0;
- case TIOCGSOFTCAR:
- return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *)argp);
- case TIOCSSOFTCAR:
- if (get_user(templ, (unsigned long __user *) argp))
- return -EFAULT;
- arg = templ;
- tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
- return 0;
- case TIOCGSERIAL:
- return mxser_get_serial_info(info, argp);
- case TIOCSSERIAL:
- return mxser_set_serial_info(info, argp);
- case TIOCSERGETLSR: /* Get line status register */
- return mxser_get_lsr_info(info, argp);
- /*
- * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
- * - mask passed in arg for lines of interest
- * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
- * Caller should use TIOCGICOUNT to see which one it was
- */
- case TIOCMIWAIT:
- spin_lock_irqsave(&info->slock, flags);
- cnow = info->icount; /* note the counters on entry */
- spin_unlock_irqrestore(&info->slock, flags);
-
- wait_event_interruptible(info->delta_msr_wait, ({
- cprev = cnow;
- spin_lock_irqsave(&info->slock, flags);
- cnow = info->icount; /* atomic copy */
- spin_unlock_irqrestore(&info->slock, flags);
-
- ((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
- ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
- ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
- ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts));
- }));
- break;
- /*
- * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
- * Return: write counters to the user passed counter struct
- * NB: both 1->0 and 0->1 transitions are counted except for
- * RI where only 0->1 is counted.
- */
- case TIOCGICOUNT:
- spin_lock_irqsave(&info->slock, flags);
- cnow = info->icount;
- spin_unlock_irqrestore(&info->slock, flags);
- p_cuser = argp;
- if (put_user(cnow.frame, &p_cuser->frame))
- return -EFAULT;
- if (put_user(cnow.brk, &p_cuser->brk))
- return -EFAULT;
- if (put_user(cnow.overrun, &p_cuser->overrun))
- return -EFAULT;
- if (put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
- return -EFAULT;
- if (put_user(cnow.parity, &p_cuser->parity))
- return -EFAULT;
- if (put_user(cnow.rx, &p_cuser->rx))
- return -EFAULT;
- if (put_user(cnow.tx, &p_cuser->tx))
- return -EFAULT;
- put_user(cnow.cts, &p_cuser->cts);
- put_user(cnow.dsr, &p_cuser->dsr);
- put_user(cnow.rng, &p_cuser->rng);
- put_user(cnow.dcd, &p_cuser->dcd);
- return 0;
- case MOXA_HighSpeedOn:
- return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
- case MOXA_SDS_RSTICOUNTER:
- info->mon_data.rxcnt = 0;
- info->mon_data.txcnt = 0;
- return 0;
- case MOXA_ASPP_SETBAUD:{
- long baud;
- if (get_user(baud, (long __user *)argp))
- return -EFAULT;
- spin_lock_irqsave(&info->slock, flags);
- mxser_set_baud(info, baud);
- spin_unlock_irqrestore(&info->slock, flags);
- return 0;
- }
- case MOXA_ASPP_GETBAUD:
- if (copy_to_user(argp, &info->realbaud, sizeof(long)))
- return -EFAULT;
-
- return 0;
-
- case MOXA_ASPP_OQUEUE:{
- int len, lsr;
-
- len = mxser_chars_in_buffer(tty);
-
- lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT;
-
- len += (lsr ? 0 : 1);
-
- if (copy_to_user(argp, &len, sizeof(int)))
- return -EFAULT;
-
- return 0;
- }
- case MOXA_ASPP_MON: {
- int mcr, status;
-
- status = mxser_get_msr(info->ioaddr, 1, tty->index);
- mxser_check_modem_status(info, status);
-
- mcr = inb(info->ioaddr + UART_MCR);
- if (mcr & MOXA_MUST_MCR_XON_FLAG)
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFHOLD;
- else
- info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFHOLD;
-
- if (mcr & MOXA_MUST_MCR_TX_XON)
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_XOFFXENT;
- else
- info->mon_data.hold_reason |= NPPI_NOTIFY_XOFFXENT;
-
- if (info->tty->hw_stopped)
- info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD;
- else
- info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
-
- if (copy_to_user(argp, &info->mon_data,
- sizeof(struct mxser_mon)))
- return -EFAULT;
-
- return 0;
- }
- case MOXA_ASPP_LSTATUS: {
- if (copy_to_user(argp, &info->err_shadow,
- sizeof(unsigned char)))
- return -EFAULT;
-
- info->err_shadow = 0;
- return 0;
- }
- case MOXA_SET_BAUD_METHOD: {
- int method;
-
- if (get_user(method, (int __user *)argp))
- return -EFAULT;
- mxser_set_baud_method[tty->index] = method;
- if (copy_to_user(argp, &method, sizeof(int)))
- return -EFAULT;
-
- return 0;
- }
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-static void mxser_stoprx(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
-
- info->ldisc_stop_rx = 1;
- if (I_IXOFF(tty)) {
- if (info->board->chip_flag) {
- info->IER &= ~MOXA_MUST_RECV_ISR;
- outb(info->IER, info->ioaddr + UART_IER);
- } else {
- info->x_char = STOP_CHAR(tty);
- outb(0, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- }
- }
-
- if (info->tty->termios->c_cflag & CRTSCTS) {
- info->MCR &= ~UART_MCR_RTS;
- outb(info->MCR, info->ioaddr + UART_MCR);
- }
-}
-
-/*
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- */
-static void mxser_throttle(struct tty_struct *tty)
-{
- mxser_stoprx(tty);
-}
-
-static void mxser_unthrottle(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
-
- /* startrx */
- info->ldisc_stop_rx = 0;
- if (I_IXOFF(tty)) {
- if (info->x_char)
- info->x_char = 0;
- else {
- if (info->board->chip_flag) {
- info->IER |= MOXA_MUST_RECV_ISR;
- outb(info->IER, info->ioaddr + UART_IER);
- } else {
- info->x_char = START_CHAR(tty);
- outb(0, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- }
- }
- }
-
- if (info->tty->termios->c_cflag & CRTSCTS) {
- info->MCR |= UART_MCR_RTS;
- outb(info->MCR, info->ioaddr + UART_MCR);
- }
-}
-
-/*
- * mxser_stop() and mxser_start()
- *
- * These routines are called before setting or resetting tty->stopped.
- * They enable or disable transmitter interrupts, as necessary.
- */
-static void mxser_stop(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- if (info->IER & UART_IER_THRI) {
- info->IER &= ~UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- }
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static void mxser_start(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- if (info->xmit_cnt && info->xmit_buf) {
- outb(info->IER & ~UART_IER_THRI, info->ioaddr + UART_IER);
- info->IER |= UART_IER_THRI;
- outb(info->IER, info->ioaddr + UART_IER);
- }
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- mxser_change_speed(info, old_termios);
- spin_unlock_irqrestore(&info->slock, flags);
-
- if ((old_termios->c_cflag & CRTSCTS) &&
- !(tty->termios->c_cflag & CRTSCTS)) {
- tty->hw_stopped = 0;
- mxser_start(tty);
- }
-
- /* Handle sw stopped */
- if ((old_termios->c_iflag & IXON) &&
- !(tty->termios->c_iflag & IXON)) {
- tty->stopped = 0;
-
- if (info->board->chip_flag) {
- spin_lock_irqsave(&info->slock, flags);
- DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
- spin_unlock_irqrestore(&info->slock, flags);
- }
-
- mxser_start(tty);
- }
-}
-
-/*
- * mxser_wait_until_sent() --- wait until the transmitter is empty
- */
-static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long orig_jiffies, char_time;
- int lsr;
-
- if (info->type == PORT_UNKNOWN)
- return;
-
- if (info->xmit_fifo_size == 0)
- return; /* Just in case.... */
-
- orig_jiffies = jiffies;
- /*
- * Set the check interval to be 1/5 of the estimated time to
- * send a single character, and make it at least 1. The check
- * interval should also be less than the timeout.
- *
- * Note: we have to use pretty tight timings here to satisfy
- * the NIST-PCTS.
- */
- char_time = (info->timeout - HZ / 50) / info->xmit_fifo_size;
- char_time = char_time / 5;
- if (char_time == 0)
- char_time = 1;
- if (timeout && timeout < char_time)
- char_time = timeout;
- /*
- * If the transmitter hasn't cleared in twice the approximate
- * amount of time to send the entire FIFO, it probably won't
- * ever clear. This assumes the UART isn't doing flow
- * control, which is currently the case. Hence, if it ever
- * takes longer than info->timeout, this is probably due to a
- * UART bug of some kind. So, we clamp the timeout parameter at
- * 2*info->timeout.
- */
- if (!timeout || timeout > 2 * info->timeout)
- timeout = 2 * info->timeout;
-#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
- printk(KERN_DEBUG "In rs_wait_until_sent(%d) check=%lu...",
- timeout, char_time);
- printk("jiff=%lu...", jiffies);
-#endif
- while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) {
-#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
- printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
-#endif
- schedule_timeout_interruptible(char_time);
- if (signal_pending(current))
- break;
- if (timeout && time_after(jiffies, orig_jiffies + timeout))
- break;
- }
- set_current_state(TASK_RUNNING);
-
-#ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
- printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
-#endif
-}
-
-/*
- * This routine is called by tty_hangup() when a hangup is signaled.
- */
-static void mxser_hangup(struct tty_struct *tty)
-{
- struct mxser_port *info = tty->driver_data;
-
- mxser_flush_buffer(tty);
- mxser_shutdown(info);
- info->event = 0;
- info->count = 0;
- info->flags &= ~ASYNC_NORMAL_ACTIVE;
- info->tty = NULL;
- wake_up_interruptible(&info->open_wait);
-}
-
-/*
- * mxser_rs_break() --- routine which turns the break handling on or off
- */
-static void mxser_rs_break(struct tty_struct *tty, int break_state)
-{
- struct mxser_port *info = tty->driver_data;
- unsigned long flags;
-
- spin_lock_irqsave(&info->slock, flags);
- if (break_state == -1)
- outb(inb(info->ioaddr + UART_LCR) | UART_LCR_SBC,
- info->ioaddr + UART_LCR);
- else
- outb(inb(info->ioaddr + UART_LCR) & ~UART_LCR_SBC,
- info->ioaddr + UART_LCR);
- spin_unlock_irqrestore(&info->slock, flags);
-}
-
-static void mxser_receive_chars(struct mxser_port *port, int *status)
-{
- struct tty_struct *tty = port->tty;
- unsigned char ch, gdl;
- int ignored = 0;
- int cnt = 0;
- int recv_room;
- int max = 256;
-
- recv_room = tty->receive_room;
- if ((recv_room == 0) && (!port->ldisc_stop_rx))
- mxser_stoprx(tty);
-
- if (port->board->chip_flag != MOXA_OTHER_UART) {
-
- if (*status & UART_LSR_SPECIAL)
- goto intr_old;
- if (port->board->chip_flag == MOXA_MUST_MU860_HWID &&
- (*status & MOXA_MUST_LSR_RERR))
- goto intr_old;
- if (*status & MOXA_MUST_LSR_RERR)
- goto intr_old;
-
- gdl = inb(port->ioaddr + MOXA_MUST_GDL_REGISTER);
-
- if (port->board->chip_flag == MOXA_MUST_MU150_HWID)
- gdl &= MOXA_MUST_GDL_MASK;
- if (gdl >= recv_room) {
- if (!port->ldisc_stop_rx)
- mxser_stoprx(tty);
- }
- while (gdl--) {
- ch = inb(port->ioaddr + UART_RX);
- tty_insert_flip_char(tty, ch, 0);
- cnt++;
- }
- goto end_intr;
- }
-intr_old:
-
- do {
- if (max-- < 0)
- break;
-
- ch = inb(port->ioaddr + UART_RX);
- if (port->board->chip_flag && (*status & UART_LSR_OE))
- outb(0x23, port->ioaddr + UART_FCR);
- *status &= port->read_status_mask;
- if (*status & port->ignore_status_mask) {
- if (++ignored > 100)
- break;
- } else {
- char flag = 0;
- if (*status & UART_LSR_SPECIAL) {
- if (*status & UART_LSR_BI) {
- flag = TTY_BREAK;
- port->icount.brk++;
-
- if (port->flags & ASYNC_SAK)
- do_SAK(tty);
- } else if (*status & UART_LSR_PE) {
- flag = TTY_PARITY;
- port->icount.parity++;
- } else if (*status & UART_LSR_FE) {
- flag = TTY_FRAME;
- port->icount.frame++;
- } else if (*status & UART_LSR_OE) {
- flag = TTY_OVERRUN;
- port->icount.overrun++;
- } else
- flag = TTY_BREAK;
- }
- tty_insert_flip_char(tty, ch, flag);
- cnt++;
- if (cnt >= recv_room) {
- if (!port->ldisc_stop_rx)
- mxser_stoprx(tty);
- break;
- }
-
- }
-
- if (port->board->chip_flag)
- break;
-
- *status = inb(port->ioaddr + UART_LSR);
- } while (*status & UART_LSR_DR);
-
-end_intr:
- mxvar_log.rxcnt[port->tty->index] += cnt;
- port->mon_data.rxcnt += cnt;
- port->mon_data.up_rxcnt += cnt;
-
- /*
- * We are called from an interrupt context with &port->slock
- * being held. Drop it temporarily in order to prevent
- * recursive locking.
- */
- spin_unlock(&port->slock);
- tty_flip_buffer_push(tty);
- spin_lock(&port->slock);
-}
-
-static void mxser_transmit_chars(struct mxser_port *port)
-{
- int count, cnt;
-
- if (port->x_char) {
- outb(port->x_char, port->ioaddr + UART_TX);
- port->x_char = 0;
- mxvar_log.txcnt[port->tty->index]++;
- port->mon_data.txcnt++;
- port->mon_data.up_txcnt++;
- port->icount.tx++;
- return;
- }
-
- if (port->xmit_buf == 0)
- return;
-
- if ((port->xmit_cnt <= 0) || port->tty->stopped ||
- (port->tty->hw_stopped &&
- (port->type != PORT_16550A) &&
- (!port->board->chip_flag))) {
- port->IER &= ~UART_IER_THRI;
- outb(port->IER, port->ioaddr + UART_IER);
- return;
- }
-
- cnt = port->xmit_cnt;
- count = port->xmit_fifo_size;
- do {
- outb(port->xmit_buf[port->xmit_tail++],
- port->ioaddr + UART_TX);
- port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE - 1);
- if (--port->xmit_cnt <= 0)
- break;
- } while (--count > 0);
- mxvar_log.txcnt[port->tty->index] += (cnt - port->xmit_cnt);
-
- port->mon_data.txcnt += (cnt - port->xmit_cnt);
- port->mon_data.up_txcnt += (cnt - port->xmit_cnt);
- port->icount.tx += (cnt - port->xmit_cnt);
-
- if (port->xmit_cnt < WAKEUP_CHARS)
- tty_wakeup(port->tty);
-
- if (port->xmit_cnt <= 0) {
- port->IER &= ~UART_IER_THRI;
- outb(port->IER, port->ioaddr + UART_IER);
- }
-}
-
-/*
- * This is the serial driver's generic interrupt routine
- */
-static irqreturn_t mxser_interrupt(int irq, void *dev_id)
-{
- int status, iir, i;
- struct mxser_board *brd = NULL;
- struct mxser_port *port;
- int max, irqbits, bits, msr;
- unsigned int int_cnt, pass_counter = 0;
- int handled = IRQ_NONE;
-
- for (i = 0; i < MXSER_BOARDS; i++)
- if (dev_id == &mxser_boards[i]) {
- brd = dev_id;
- break;
- }
-
- if (i == MXSER_BOARDS)
- goto irq_stop;
- if (brd == NULL)
- goto irq_stop;
- max = brd->info->nports;
- while (pass_counter++ < MXSER_ISR_PASS_LIMIT) {
- irqbits = inb(brd->vector) & brd->vector_mask;
- if (irqbits == brd->vector_mask)
- break;
-
- handled = IRQ_HANDLED;
- for (i = 0, bits = 1; i < max; i++, irqbits |= bits, bits <<= 1) {
- if (irqbits == brd->vector_mask)
- break;
- if (bits & irqbits)
- continue;
- port = &brd->ports[i];
-
- int_cnt = 0;
- spin_lock(&port->slock);
- do {
- iir = inb(port->ioaddr + UART_IIR);
- if (iir & UART_IIR_NO_INT)
- break;
- iir &= MOXA_MUST_IIR_MASK;
- if (!port->tty ||
- (port->flags & ASYNC_CLOSING) ||
- !(port->flags &
- ASYNC_INITIALIZED)) {
- status = inb(port->ioaddr + UART_LSR);
- outb(0x27, port->ioaddr + UART_FCR);
- inb(port->ioaddr + UART_MSR);
- break;
- }
-
- status = inb(port->ioaddr + UART_LSR);
-
- if (status & UART_LSR_PE)
- port->err_shadow |= NPPI_NOTIFY_PARITY;
- if (status & UART_LSR_FE)
- port->err_shadow |= NPPI_NOTIFY_FRAMING;
- if (status & UART_LSR_OE)
- port->err_shadow |=
- NPPI_NOTIFY_HW_OVERRUN;
- if (status & UART_LSR_BI)
- port->err_shadow |= NPPI_NOTIFY_BREAK;
-
- if (port->board->chip_flag) {
- if (iir == MOXA_MUST_IIR_GDA ||
- iir == MOXA_MUST_IIR_RDA ||
- iir == MOXA_MUST_IIR_RTO ||
- iir == MOXA_MUST_IIR_LSR)
- mxser_receive_chars(port,
- &status);
-
- } else {
- status &= port->read_status_mask;
- if (status & UART_LSR_DR)
- mxser_receive_chars(port,
- &status);
- }
- msr = inb(port->ioaddr + UART_MSR);
- if (msr & UART_MSR_ANY_DELTA)
- mxser_check_modem_status(port, msr);
-
- if (port->board->chip_flag) {
- if (iir == 0x02 && (status &
- UART_LSR_THRE))
- mxser_transmit_chars(port);
- } else {
- if (status & UART_LSR_THRE)
- mxser_transmit_chars(port);
- }
- } while (int_cnt++ < MXSER_ISR_PASS_LIMIT);
- spin_unlock(&port->slock);
- }
- }
-
-irq_stop:
- return handled;
-}
-
-static const struct tty_operations mxser_ops = {
- .open = mxser_open,
- .close = mxser_close,
- .write = mxser_write,
- .put_char = mxser_put_char,
- .flush_chars = mxser_flush_chars,
- .write_room = mxser_write_room,
- .chars_in_buffer = mxser_chars_in_buffer,
- .flush_buffer = mxser_flush_buffer,
- .ioctl = mxser_ioctl,
- .throttle = mxser_throttle,
- .unthrottle = mxser_unthrottle,
- .set_termios = mxser_set_termios,
- .stop = mxser_stop,
- .start = mxser_start,
- .hangup = mxser_hangup,
- .break_ctl = mxser_rs_break,
- .wait_until_sent = mxser_wait_until_sent,
- .tiocmget = mxser_tiocmget,
- .tiocmset = mxser_tiocmset,
-};
-
-/*
- * The MOXA Smartio/Industio serial driver boot-time initialization code!
- */
-
-static void mxser_release_res(struct mxser_board *brd, struct pci_dev *pdev,
- unsigned int irq)
-{
- if (irq)
- free_irq(brd->irq, brd);
- if (pdev != NULL) { /* PCI */
-#ifdef CONFIG_PCI
- pci_release_region(pdev, 2);
- pci_release_region(pdev, 3);
-#endif
- } else {
- release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
- release_region(brd->vector, 1);
- }
-}
-
-static int __devinit mxser_initbrd(struct mxser_board *brd,
- struct pci_dev *pdev)
-{
- struct mxser_port *info;
- unsigned int i;
- int retval;
-
- printk(KERN_INFO "max. baud rate = %d bps.\n", brd->ports[0].max_baud);
-
- for (i = 0; i < brd->info->nports; i++) {
- info = &brd->ports[i];
- info->board = brd;
- info->stop_rx = 0;
- info->ldisc_stop_rx = 0;
-
- /* Enhance mode enabled here */
- if (brd->chip_flag != MOXA_OTHER_UART)
- ENABLE_MOXA_MUST_ENCHANCE_MODE(info->ioaddr);
-
- info->flags = ASYNC_SHARE_IRQ;
- info->type = brd->uart_type;
-
- process_txrx_fifo(info);
-
- info->custom_divisor = info->baud_base * 16;
- info->close_delay = 5 * HZ / 10;
- info->closing_wait = 30 * HZ;
- info->normal_termios = mxvar_sdriver->init_termios;
- init_waitqueue_head(&info->open_wait);
- init_waitqueue_head(&info->delta_msr_wait);
- info->speed = 9600;
- memset(&info->mon_data, 0, sizeof(struct mxser_mon));
- info->err_shadow = 0;
- spin_lock_init(&info->slock);
-
- /* before set INT ISR, disable all int */
- outb(inb(info->ioaddr + UART_IER) & 0xf0,
- info->ioaddr + UART_IER);
- }
-
- retval = request_irq(brd->irq, mxser_interrupt, IRQF_SHARED, "mxser",
- brd);
- if (retval) {
- printk(KERN_ERR "Board %s: Request irq failed, IRQ (%d) may "
- "conflict with another device.\n",
- brd->info->name, brd->irq);
- /* We hold resources, we need to release them. */
- mxser_release_res(brd, pdev, 0);
- }
- return retval;
-}
-
-static int __init mxser_get_ISA_conf(int cap, struct mxser_board *brd)
-{
- int id, i, bits;
- unsigned short regs[16], irq;
- unsigned char scratch, scratch2;
-
- brd->chip_flag = MOXA_OTHER_UART;
-
- id = mxser_read_register(cap, regs);
- switch (id) {
- case C168_ASIC_ID:
- brd->info = &mxser_cards[0];
- break;
- case C104_ASIC_ID:
- brd->info = &mxser_cards[1];
- break;
- case CI104J_ASIC_ID:
- brd->info = &mxser_cards[2];
- break;
- case C102_ASIC_ID:
- brd->info = &mxser_cards[5];
- break;
- case CI132_ASIC_ID:
- brd->info = &mxser_cards[6];
- break;
- case CI134_ASIC_ID:
- brd->info = &mxser_cards[7];
- break;
- default:
- return 0;
- }
-
- irq = 0;
- /* some ISA cards have 2 ports, but we want to see them as 4-port (why?)
- Flag-hack checks if configuration should be read as 2-port here. */
- if (brd->info->nports == 2 || (brd->info->flags & MXSER_HAS2)) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- if (irq != (regs[9] & 0xFF00))
- return MXSER_ERR_IRQ_CONFLIT;
- } else if (brd->info->nports == 4) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- irq = irq | (irq >> 8);
- if (irq != regs[9])
- return MXSER_ERR_IRQ_CONFLIT;
- } else if (brd->info->nports == 8) {
- irq = regs[9] & 0xF000;
- irq = irq | (irq >> 4);
- irq = irq | (irq >> 8);
- if ((irq != regs[9]) || (irq != regs[10]))
- return MXSER_ERR_IRQ_CONFLIT;
- }
-
- if (!irq)
- return MXSER_ERR_IRQ;
- brd->irq = ((int)(irq & 0xF000) >> 12);
- for (i = 0; i < 8; i++)
- brd->ports[i].ioaddr = (int) regs[i + 1] & 0xFFF8;
- if ((regs[12] & 0x80) == 0)
- return MXSER_ERR_VECTOR;
- brd->vector = (int)regs[11]; /* interrupt vector */
- if (id == 1)
- brd->vector_mask = 0x00FF;
- else
- brd->vector_mask = 0x000F;
- for (i = 7, bits = 0x0100; i >= 0; i--, bits <<= 1) {
- if (regs[12] & bits) {
- brd->ports[i].baud_base = 921600;
- brd->ports[i].max_baud = 921600;
- } else {
- brd->ports[i].baud_base = 115200;
- brd->ports[i].max_baud = 115200;
- }
- }
- scratch2 = inb(cap + UART_LCR) & (~UART_LCR_DLAB);
- outb(scratch2 | UART_LCR_DLAB, cap + UART_LCR);
- outb(0, cap + UART_EFR); /* EFR is the same as FCR */
- outb(scratch2, cap + UART_LCR);
- outb(UART_FCR_ENABLE_FIFO, cap + UART_FCR);
- scratch = inb(cap + UART_IIR);
-
- if (scratch & 0xC0)
- brd->uart_type = PORT_16550A;
- else
- brd->uart_type = PORT_16450;
- if (!request_region(brd->ports[0].ioaddr, 8 * brd->info->nports,
- "mxser(IO)"))
- return MXSER_ERR_IOADDR;
- if (!request_region(brd->vector, 1, "mxser(vector)")) {
- release_region(brd->ports[0].ioaddr, 8 * brd->info->nports);
- return MXSER_ERR_VECTOR;
- }
- return brd->info->nports;
-}
-
-static int __devinit mxser_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
-#ifdef CONFIG_PCI
- struct mxser_board *brd;
- unsigned int i, j;
- unsigned long ioaddress;
- int retval = -EINVAL;
-
- for (i = 0; i < MXSER_BOARDS; i++)
- if (mxser_boards[i].info == NULL)
- break;
-
- if (i >= MXSER_BOARDS) {
- printk(KERN_ERR "Too many Smartio/Industio family boards found "
- "(maximum %d), board not configured\n", MXSER_BOARDS);
- goto err;
- }
-
- brd = &mxser_boards[i];
- brd->idx = i * MXSER_PORTS_PER_BOARD;
- printk(KERN_INFO "Found MOXA %s board (BusNo=%d, DevNo=%d)\n",
- mxser_cards[ent->driver_data].name,
- pdev->bus->number, PCI_SLOT(pdev->devfn));
-
- retval = pci_enable_device(pdev);
- if (retval) {
- printk(KERN_ERR "Moxa SmartI/O PCI enable fail !\n");
- goto err;
- }
-
- /* io address */
- ioaddress = pci_resource_start(pdev, 2);
- retval = pci_request_region(pdev, 2, "mxser(IO)");
- if (retval)
- goto err;
-
- brd->info = &mxser_cards[ent->driver_data];
- for (i = 0; i < brd->info->nports; i++)
- brd->ports[i].ioaddr = ioaddress + 8 * i;
-
- /* vector */
- ioaddress = pci_resource_start(pdev, 3);
- retval = pci_request_region(pdev, 3, "mxser(vector)");
- if (retval)
- goto err_relio;
- brd->vector = ioaddress;
-
- /* irq */
- brd->irq = pdev->irq;
-
- brd->chip_flag = CheckIsMoxaMust(brd->ports[0].ioaddr);
- brd->uart_type = PORT_16550A;
- brd->vector_mask = 0;
-
- for (i = 0; i < brd->info->nports; i++) {
- for (j = 0; j < UART_INFO_NUM; j++) {
- if (Gpci_uart_info[j].type == brd->chip_flag) {
- brd->ports[i].max_baud =
- Gpci_uart_info[j].max_baud;
-
- /* exception....CP-102 */
- if (brd->info->flags & MXSER_HIGHBAUD)
- brd->ports[i].max_baud = 921600;
- break;
- }
- }
- }
-
- if (brd->chip_flag == MOXA_MUST_MU860_HWID) {
- for (i = 0; i < brd->info->nports; i++) {
- if (i < 4)
- brd->ports[i].opmode_ioaddr = ioaddress + 4;
- else
- brd->ports[i].opmode_ioaddr = ioaddress + 0x0c;
- }
- outb(0, ioaddress + 4); /* default set to RS232 mode */
- outb(0, ioaddress + 0x0c); /* default set to RS232 mode */
- }
-
- for (i = 0; i < brd->info->nports; i++) {
- brd->vector_mask |= (1 << i);
- brd->ports[i].baud_base = 921600;
- }
-
- /* mxser_initbrd will hook ISR. */
- retval = mxser_initbrd(brd, pdev);
- if (retval)
- goto err_null;
-
- for (i = 0; i < brd->info->nports; i++)
- tty_register_device(mxvar_sdriver, brd->idx + i, &pdev->dev);
-
- pci_set_drvdata(pdev, brd);
-
- return 0;
-err_relio:
- pci_release_region(pdev, 2);
-err_null:
- brd->info = NULL;
-err:
- return retval;
-#else
- return -ENODEV;
-#endif
-}
-
-static void __devexit mxser_remove(struct pci_dev *pdev)
-{
- struct mxser_board *brd = pci_get_drvdata(pdev);
- unsigned int i;
-
- for (i = 0; i < brd->info->nports; i++)
- tty_unregister_device(mxvar_sdriver, brd->idx + i);
-
- mxser_release_res(brd, pdev, 1);
- brd->info = NULL;
-}
-
-static struct pci_driver mxser_driver = {
- .name = "mxser",
- .id_table = mxser_pcibrds,
- .probe = mxser_probe,
- .remove = __devexit_p(mxser_remove)
-};
-
-static int __init mxser_module_init(void)
-{
- struct mxser_board *brd;
- unsigned long cap;
- unsigned int i, m, isaloop;
- int retval, b;
-
- pr_debug("Loading module mxser ...\n");
-
- mxvar_sdriver = alloc_tty_driver(MXSER_PORTS + 1);
- if (!mxvar_sdriver)
- return -ENOMEM;
-
- printk(KERN_INFO "MOXA Smartio/Industio family driver version %s\n",
- MXSER_VERSION);
-
- /* Initialize the tty_driver structure */
- mxvar_sdriver->owner = THIS_MODULE;
- mxvar_sdriver->magic = TTY_DRIVER_MAGIC;
- mxvar_sdriver->name = "ttyMI";
- mxvar_sdriver->major = ttymajor;
- mxvar_sdriver->minor_start = 0;
- mxvar_sdriver->num = MXSER_PORTS + 1;
- mxvar_sdriver->type = TTY_DRIVER_TYPE_SERIAL;
- mxvar_sdriver->subtype = SERIAL_TYPE_NORMAL;
- mxvar_sdriver->init_termios = tty_std_termios;
- mxvar_sdriver->init_termios.c_cflag = B9600|CS8|CREAD|HUPCL|CLOCAL;
- mxvar_sdriver->flags = TTY_DRIVER_REAL_RAW|TTY_DRIVER_DYNAMIC_DEV;
- tty_set_operations(mxvar_sdriver, &mxser_ops);
-
- retval = tty_register_driver(mxvar_sdriver);
- if (retval) {
- printk(KERN_ERR "Couldn't install MOXA Smartio/Industio family "
- "tty driver !\n");
- goto err_put;
- }
-
- mxvar_diagflag = 0;
-
- m = 0;
- /* Start finding ISA boards here */
- for (isaloop = 0; isaloop < 2; isaloop++)
- for (b = 0; b < MXSER_BOARDS && m < MXSER_BOARDS; b++) {
- if (!isaloop)
- cap = mxserBoardCAP[b]; /* predefined */
- else
- cap = ioaddr[b]; /* module param */
-
- if (!cap)
- continue;
-
- brd = &mxser_boards[m];
- retval = mxser_get_ISA_conf(cap, brd);
-
- if (retval != 0)
- printk(KERN_INFO "Found MOXA %s board "
- "(CAP=0x%x)\n",
- brd->info->name, ioaddr[b]);
-
- if (retval <= 0) {
- if (retval == MXSER_ERR_IRQ)
- printk(KERN_ERR "Invalid interrupt "
- "number, board not "
- "configured\n");
- else if (retval == MXSER_ERR_IRQ_CONFLIT)
- printk(KERN_ERR "Invalid interrupt "
- "number, board not "
- "configured\n");
- else if (retval == MXSER_ERR_VECTOR)
- printk(KERN_ERR "Invalid interrupt "
- "vector, board not "
- "configured\n");
- else if (retval == MXSER_ERR_IOADDR)
- printk(KERN_ERR "Invalid I/O address, "
- "board not configured\n");
-
- brd->info = NULL;
- continue;
- }
-
- /* mxser_initbrd will hook ISR. */
- if (mxser_initbrd(brd, NULL) < 0) {
- brd->info = NULL;
- continue;
- }
-
- brd->idx = m * MXSER_PORTS_PER_BOARD;
- for (i = 0; i < brd->info->nports; i++)
- tty_register_device(mxvar_sdriver, brd->idx + i,
- NULL);
-
- m++;
- }
-
- retval = pci_register_driver(&mxser_driver);
- if (retval) {
- printk(KERN_ERR "Can't register pci driver\n");
- if (!m) {
- retval = -ENODEV;
- goto err_unr;
- } /* else: we have some ISA cards under control */
- }
-
- pr_debug("Done.\n");
-
- return 0;
-err_unr:
- tty_unregister_driver(mxvar_sdriver);
-err_put:
- put_tty_driver(mxvar_sdriver);
- return retval;
-}
-
-static void __exit mxser_module_exit(void)
-{
- unsigned int i, j;
-
- pr_debug("Unloading module mxser ...\n");
-
- pci_unregister_driver(&mxser_driver);
-
- for (i = 0; i < MXSER_BOARDS; i++) /* ISA remains */
- if (mxser_boards[i].info != NULL)
- for (j = 0; j < mxser_boards[i].info->nports; j++)
- tty_unregister_device(mxvar_sdriver,
- mxser_boards[i].idx + j);
- tty_unregister_driver(mxvar_sdriver);
- put_tty_driver(mxvar_sdriver);
-
- for (i = 0; i < MXSER_BOARDS; i++)
- if (mxser_boards[i].info != NULL)
- mxser_release_res(&mxser_boards[i], NULL, 1);
-
- pr_debug("Done.\n");
-}
-
-module_init(mxser_module_init);
-module_exit(mxser_module_exit);
diff --git a/drivers/char/mxser_new.h b/drivers/char/mxser_new.h
deleted file mode 100644
index d42f7766c652..000000000000
--- a/drivers/char/mxser_new.h
+++ /dev/null
@@ -1,293 +0,0 @@
-#ifndef _MXSER_H
-#define _MXSER_H
-
-/*
- * Semi-public control interfaces
- */
-
-/*
- * MOXA ioctls
- */
-
-#define MOXA 0x400
-#define MOXA_GETDATACOUNT (MOXA + 23)
-#define MOXA_GET_CONF (MOXA + 35)
-#define MOXA_DIAGNOSE (MOXA + 50)
-#define MOXA_CHKPORTENABLE (MOXA + 60)
-#define MOXA_HighSpeedOn (MOXA + 61)
-#define MOXA_GET_MAJOR (MOXA + 63)
-#define MOXA_GET_CUMAJOR (MOXA + 64)
-#define MOXA_GETMSTATUS (MOXA + 65)
-#define MOXA_SET_OP_MODE (MOXA + 66)
-#define MOXA_GET_OP_MODE (MOXA + 67)
-
-#define RS232_MODE 0
-#define RS485_2WIRE_MODE 1
-#define RS422_MODE 2
-#define RS485_4WIRE_MODE 3
-#define OP_MODE_MASK 3
-
-#define MOXA_SDS_RSTICOUNTER (MOXA + 69)
-#define MOXA_ASPP_OQUEUE (MOXA + 70)
-#define MOXA_ASPP_SETBAUD (MOXA + 71)
-#define MOXA_ASPP_GETBAUD (MOXA + 72)
-#define MOXA_ASPP_MON (MOXA + 73)
-#define MOXA_ASPP_LSTATUS (MOXA + 74)
-#define MOXA_ASPP_MON_EXT (MOXA + 75)
-#define MOXA_SET_BAUD_METHOD (MOXA + 76)
-#define MOXA_SET_SPECIAL_BAUD_RATE (MOXA + 77)
-#define MOXA_GET_SPECIAL_BAUD_RATE (MOXA + 78)
-
-/* --------------------------------------------------- */
-
-#define NPPI_NOTIFY_PARITY 0x01
-#define NPPI_NOTIFY_FRAMING 0x02
-#define NPPI_NOTIFY_HW_OVERRUN 0x04
-#define NPPI_NOTIFY_SW_OVERRUN 0x08
-#define NPPI_NOTIFY_BREAK 0x10
-
-#define NPPI_NOTIFY_CTSHOLD 0x01 /* Tx hold by CTS low */
-#define NPPI_NOTIFY_DSRHOLD 0x02 /* Tx hold by DSR low */
-#define NPPI_NOTIFY_XOFFHOLD 0x08 /* Tx hold by Xoff received */
-#define NPPI_NOTIFY_XOFFXENT 0x10 /* Xoff Sent */
-
-/* follow just for Moxa Must chip define. */
-/* */
-/* when LCR register (offset 0x03) write following value, */
-/* the Must chip will enter enchance mode. And write value */
-/* on EFR (offset 0x02) bit 6,7 to change bank. */
-#define MOXA_MUST_ENTER_ENCHANCE 0xBF
-
-/* when enhance mode enable, access on general bank register */
-#define MOXA_MUST_GDL_REGISTER 0x07
-#define MOXA_MUST_GDL_MASK 0x7F
-#define MOXA_MUST_GDL_HAS_BAD_DATA 0x80
-
-#define MOXA_MUST_LSR_RERR 0x80 /* error in receive FIFO */
-/* enchance register bank select and enchance mode setting register */
-/* when LCR register equal to 0xBF */
-#define MOXA_MUST_EFR_REGISTER 0x02
-/* enchance mode enable */
-#define MOXA_MUST_EFR_EFRB_ENABLE 0x10
-/* enchance reister bank set 0, 1, 2 */
-#define MOXA_MUST_EFR_BANK0 0x00
-#define MOXA_MUST_EFR_BANK1 0x40
-#define MOXA_MUST_EFR_BANK2 0x80
-#define MOXA_MUST_EFR_BANK3 0xC0
-#define MOXA_MUST_EFR_BANK_MASK 0xC0
-
-/* set XON1 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XON1_REGISTER 0x04
-
-/* set XON2 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XON2_REGISTER 0x05
-
-/* set XOFF1 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XOFF1_REGISTER 0x06
-
-/* set XOFF2 value register, when LCR=0xBF and change to bank0 */
-#define MOXA_MUST_XOFF2_REGISTER 0x07
-
-#define MOXA_MUST_RBRTL_REGISTER 0x04
-#define MOXA_MUST_RBRTH_REGISTER 0x05
-#define MOXA_MUST_RBRTI_REGISTER 0x06
-#define MOXA_MUST_THRTL_REGISTER 0x07
-#define MOXA_MUST_ENUM_REGISTER 0x04
-#define MOXA_MUST_HWID_REGISTER 0x05
-#define MOXA_MUST_ECR_REGISTER 0x06
-#define MOXA_MUST_CSR_REGISTER 0x07
-
-/* good data mode enable */
-#define MOXA_MUST_FCR_GDA_MODE_ENABLE 0x20
-/* only good data put into RxFIFO */
-#define MOXA_MUST_FCR_GDA_ONLY_ENABLE 0x10
-
-/* enable CTS interrupt */
-#define MOXA_MUST_IER_ECTSI 0x80
-/* enable RTS interrupt */
-#define MOXA_MUST_IER_ERTSI 0x40
-/* enable Xon/Xoff interrupt */
-#define MOXA_MUST_IER_XINT 0x20
-/* enable GDA interrupt */
-#define MOXA_MUST_IER_EGDAI 0x10
-
-#define MOXA_MUST_RECV_ISR (UART_IER_RDI | MOXA_MUST_IER_EGDAI)
-
-/* GDA interrupt pending */
-#define MOXA_MUST_IIR_GDA 0x1C
-#define MOXA_MUST_IIR_RDA 0x04
-#define MOXA_MUST_IIR_RTO 0x0C
-#define MOXA_MUST_IIR_LSR 0x06
-
-/* received Xon/Xoff or special interrupt pending */
-#define MOXA_MUST_IIR_XSC 0x10
-
-/* RTS/CTS change state interrupt pending */
-#define MOXA_MUST_IIR_RTSCTS 0x20
-#define MOXA_MUST_IIR_MASK 0x3E
-
-#define MOXA_MUST_MCR_XON_FLAG 0x40
-#define MOXA_MUST_MCR_XON_ANY 0x80
-#define MOXA_MUST_MCR_TX_XON 0x08
-
-/* software flow control on chip mask value */
-#define MOXA_MUST_EFR_SF_MASK 0x0F
-/* send Xon1/Xoff1 */
-#define MOXA_MUST_EFR_SF_TX1 0x08
-/* send Xon2/Xoff2 */
-#define MOXA_MUST_EFR_SF_TX2 0x04
-/* send Xon1,Xon2/Xoff1,Xoff2 */
-#define MOXA_MUST_EFR_SF_TX12 0x0C
-/* don't send Xon/Xoff */
-#define MOXA_MUST_EFR_SF_TX_NO 0x00
-/* Tx software flow control mask */
-#define MOXA_MUST_EFR_SF_TX_MASK 0x0C
-/* don't receive Xon/Xoff */
-#define MOXA_MUST_EFR_SF_RX_NO 0x00
-/* receive Xon1/Xoff1 */
-#define MOXA_MUST_EFR_SF_RX1 0x02
-/* receive Xon2/Xoff2 */
-#define MOXA_MUST_EFR_SF_RX2 0x01
-/* receive Xon1,Xon2/Xoff1,Xoff2 */
-#define MOXA_MUST_EFR_SF_RX12 0x03
-/* Rx software flow control mask */
-#define MOXA_MUST_EFR_SF_RX_MASK 0x03
-
-#define ENABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr |= MOXA_MUST_EFR_EFRB_ENABLE; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define DISABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_EFRB_ENABLE; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define SET_MOXA_MUST_XON1_VALUE(baseio, Value) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_XON1_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define SET_MOXA_MUST_XOFF1_VALUE(baseio, Value) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK0; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_XOFF1_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define SET_MOXA_MUST_FIFO_VALUE(info) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((info)->ioaddr+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (info)->ioaddr+UART_LCR);\
- __efr = inb((info)->ioaddr+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK1; \
- outb(__efr, (info)->ioaddr+MOXA_MUST_EFR_REGISTER); \
- outb((u8)((info)->rx_high_water), (info)->ioaddr+ \
- MOXA_MUST_RBRTH_REGISTER); \
- outb((u8)((info)->rx_trigger), (info)->ioaddr+ \
- MOXA_MUST_RBRTI_REGISTER); \
- outb((u8)((info)->rx_low_water), (info)->ioaddr+ \
- MOXA_MUST_RBRTL_REGISTER); \
- outb(__oldlcr, (info)->ioaddr+UART_LCR); \
-} while (0)
-
-#define SET_MOXA_MUST_ENUM_VALUE(baseio, Value) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK2; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb((u8)(Value), (baseio)+MOXA_MUST_ENUM_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define GET_MOXA_MUST_HARDWARE_ID(baseio, pId) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_BANK_MASK; \
- __efr |= MOXA_MUST_EFR_BANK2; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- *pId = inb((baseio)+MOXA_MUST_HWID_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
- __efr |= MOXA_MUST_EFR_SF_TX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
- __efr |= MOXA_MUST_EFR_SF_RX1; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#define DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \
- u8 __oldlcr, __efr; \
- __oldlcr = inb((baseio)+UART_LCR); \
- outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \
- __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \
- __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \
- outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \
- outb(__oldlcr, (baseio)+UART_LCR); \
-} while (0)
-
-#endif
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 596c7173997b..90c3969012a3 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -695,17 +695,16 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
return;
}
- if (tty->stopped && !tty->flow_stopped &&
- I_IXON(tty) && I_IXANY(tty)) {
- start_tty(tty);
- return;
- }
-
if (I_ISTRIP(tty))
c &= 0x7f;
if (I_IUCLC(tty) && L_IEXTEN(tty))
c=tolower(c);
+ if (tty->stopped && !tty->flow_stopped && I_IXON(tty) &&
+ ((I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty)) ||
+ c == INTR_CHAR(tty) || c == QUIT_CHAR(tty)))
+ start_tty(tty);
+
if (tty->closing) {
if (I_IXON(tty)) {
if (c == START_CHAR(tty))
@@ -769,7 +768,21 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
signal = SIGTSTP;
if (c == SUSP_CHAR(tty)) {
send_signal:
- isig(signal, tty, 0);
+ /*
+ * Flush first, echo the character and then send the signal.
+ * Note that we do not use isig() here because we want
+ * the order to be:
+ * 1) flush, 2) echo, 3) signal
+ */
+ if (!L_NOFLSH(tty)) {
+ n_tty_flush_buffer(tty);
+ if (tty->driver->flush_buffer)
+ tty->driver->flush_buffer(tty);
+ }
+ if (L_ECHO(tty))
+ echo_char(c, tty);
+ if (tty->pgrp)
+ kill_pgrp(tty->pgrp, signal, 1);
return;
}
}
diff --git a/drivers/char/pcmcia/Kconfig b/drivers/char/pcmcia/Kconfig
index f25facd97bb4..00b8a84b0319 100644
--- a/drivers/char/pcmcia/Kconfig
+++ b/drivers/char/pcmcia/Kconfig
@@ -43,5 +43,14 @@ config CARDMAN_4040
(http://www.omnikey.com/), or a current development version of OpenCT
(http://www.opensc.org/).
+config IPWIRELESS
+ tristate "IPWireless 3G UMTS PCMCIA card support"
+ depends on PCMCIA
+ select PPP
+ help
+ This is a driver for the 3G UMTS PCMCIA card from the IPWireless
+ company. In some countries (for example the Czech Republic, with
+ T-Mobile as the ISP) this card is shipped for a service called UMTS 4G.
+
endmenu
diff --git a/drivers/char/pcmcia/Makefile b/drivers/char/pcmcia/Makefile
index 0aae20985d57..be8f287aa398 100644
--- a/drivers/char/pcmcia/Makefile
+++ b/drivers/char/pcmcia/Makefile
@@ -4,6 +4,8 @@
# Makefile for the Linux PCMCIA char device drivers.
#
+obj-y += ipwireless/
+
obj-$(CONFIG_SYNCLINK_CS) += synclink_cs.o
obj-$(CONFIG_CARDMAN_4000) += cm4000_cs.o
obj-$(CONFIG_CARDMAN_4040) += cm4040_cs.o
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 02518da6a386..454d7324ba40 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -308,7 +308,8 @@ static unsigned int calc_baudv(unsigned char fidi)
return (wcrcf / wbrcf);
}
-static unsigned short io_read_num_rec_bytes(ioaddr_t iobase, unsigned short *s)
+static unsigned short io_read_num_rec_bytes(unsigned int iobase,
+ unsigned short *s)
{
unsigned short tmp;
@@ -426,7 +427,7 @@ static struct card_fixup card_fixups[] = {
static void set_cardparameter(struct cm4000_dev *dev)
{
int i;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
u_int8_t stopbits = 0x02; /* ISO default */
DEBUGP(3, dev, "-> set_cardparameter\n");
@@ -459,7 +460,7 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
unsigned short num_bytes_read;
unsigned char pts_reply[4];
ssize_t rc;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
rc = 0;
@@ -610,7 +611,7 @@ exit_setprotocol:
return rc;
}
-static int io_detect_cm4000(ioaddr_t iobase, struct cm4000_dev *dev)
+static int io_detect_cm4000(unsigned int iobase, struct cm4000_dev *dev)
{
/* note: statemachine is assumed to be reset */
@@ -671,7 +672,7 @@ static void terminate_monitor(struct cm4000_dev *dev)
static void monitor_card(unsigned long p)
{
struct cm4000_dev *dev = (struct cm4000_dev *) p;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
unsigned short s;
struct ptsreq ptsreq;
int i, atrc;
@@ -933,7 +934,7 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count,
loff_t *ppos)
{
struct cm4000_dev *dev = filp->private_data;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
ssize_t rc;
int i, j, k;
@@ -1054,7 +1055,7 @@ static ssize_t cmm_write(struct file *filp, const char __user *buf,
size_t count, loff_t *ppos)
{
struct cm4000_dev *dev = (struct cm4000_dev *) filp->private_data;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
unsigned short s;
unsigned char tmp;
unsigned char infolen;
@@ -1408,7 +1409,7 @@ static int cmm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct cm4000_dev *dev = filp->private_data;
- ioaddr_t iobase = dev->p_dev->io.BasePort1;
+ unsigned int iobase = dev->p_dev->io.BasePort1;
struct pcmcia_device *link;
int size;
int rc;
diff --git a/drivers/char/pcmcia/ipwireless/Makefile b/drivers/char/pcmcia/ipwireless/Makefile
new file mode 100644
index 000000000000..b71eb593643d
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/Makefile
@@ -0,0 +1,10 @@
+#
+# drivers/char/pcmcia/ipwireless/Makefile
+#
+# Makefile for the IPWireless driver
+#
+
+obj-$(CONFIG_IPWIRELESS) += ipwireless.o
+
+ipwireless-objs := hardware.o main.o network.o tty.o
+
diff --git a/drivers/char/pcmcia/ipwireless/hardware.c b/drivers/char/pcmcia/ipwireless/hardware.c
new file mode 100644
index 000000000000..1f978ff87fa8
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/hardware.c
@@ -0,0 +1,1787 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "hardware.h"
+#include "setup_protocol.h"
+#include "network.h"
+#include "main.h"
+
+static void ipw_send_setup_packet(struct ipw_hardware *hw);
+static void handle_received_SETUP_packet(struct ipw_hardware *ipw,
+ unsigned int address,
+ unsigned char *data, int len,
+ int is_last);
+static void ipwireless_setup_timer(unsigned long data);
+static void handle_received_CTRL_packet(struct ipw_hardware *hw,
+ unsigned int channel_idx, unsigned char *data, int len);
+
+/*#define TIMING_DIAGNOSTICS*/
+
+#ifdef TIMING_DIAGNOSTICS
+
+static struct timing_stats {
+ unsigned long last_report_time;
+ unsigned long read_time;
+ unsigned long write_time;
+ unsigned long read_bytes;
+ unsigned long write_bytes;
+ unsigned long start_time;
+} timing_stats;
+
+/* defined below; forward declaration for end_read_timing()/end_write_timing() */
+static void report_timing(void);
+
+static void start_timing(void)
+{
+ timing_stats.start_time = jiffies;
+}
+
+static void end_read_timing(unsigned length)
+{
+ timing_stats.read_time += (jiffies - timing_stats.start_time);
+ timing_stats.read_bytes += length + 2;
+ report_timing();
+}
+
+static void end_write_timing(unsigned length)
+{
+ timing_stats.write_time += (jiffies - timing_stats.start_time);
+ timing_stats.write_bytes += length + 2;
+ report_timing();
+}
+
+static void report_timing(void)
+{
+ unsigned long since = jiffies - timing_stats.last_report_time;
+
+ /* If it's been more than one second... */
+ if (since >= HZ) {
+ int first = (timing_stats.last_report_time == 0);
+
+ timing_stats.last_report_time = jiffies;
+ if (!first)
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": %u us elapsed - read %lu bytes in %u us, "
+ "wrote %lu bytes in %u us\n",
+ jiffies_to_usecs(since),
+ timing_stats.read_bytes,
+ jiffies_to_usecs(timing_stats.read_time),
+ timing_stats.write_bytes,
+ jiffies_to_usecs(timing_stats.write_time));
+
+ timing_stats.read_time = 0;
+ timing_stats.write_time = 0;
+ timing_stats.read_bytes = 0;
+ timing_stats.write_bytes = 0;
+ }
+}
+#else
+static void start_timing(void) { }
+static void end_read_timing(unsigned length) { }
+static void end_write_timing(unsigned length) { }
+#endif
+
+/* Imported IPW definitions */
+
+#define LL_MTU_V1 318
+#define LL_MTU_V2 250
+#define LL_MTU_MAX (LL_MTU_V1 > LL_MTU_V2 ? LL_MTU_V1 : LL_MTU_V2)
+
+#define PRIO_DATA 2
+#define PRIO_CTRL 1
+#define PRIO_SETUP 0
+
+/* Addresses */
+#define ADDR_SETUP_PROT 0
+
+/* Protocol ids */
+enum {
+ /* Identifier for the Com Data protocol */
+ TL_PROTOCOLID_COM_DATA = 0,
+
+ /* Identifier for the Com Control protocol */
+ TL_PROTOCOLID_COM_CTRL = 1,
+
+ /* Identifier for the Setup protocol */
+ TL_PROTOCOLID_SETUP = 2
+};
+
+/* Number of bytes in the first NL packet header (cannot do
+ * sizeof(struct nl_first_packet_header) since it's a bitfield) */
+#define NL_FIRST_PACKET_HEADER_SIZE 3
+
+/* Number of bytes in each following NL packet header (cannot do
+ * sizeof(struct nl_packet_header) since it's a bitfield) */
+#define NL_FOLLOWING_PACKET_HEADER_SIZE 1
+
+struct nl_first_packet_header {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned char packet_rank:2;
+ unsigned char address:3;
+ unsigned char protocol:3;
+#else
+ unsigned char protocol:3;
+ unsigned char address:3;
+ unsigned char packet_rank:2;
+#endif
+ unsigned char length_lsb;
+ unsigned char length_msb;
+};
+
+struct nl_packet_header {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned char packet_rank:2;
+ unsigned char address:3;
+ unsigned char protocol:3;
+#else
+ unsigned char protocol:3;
+ unsigned char address:3;
+ unsigned char packet_rank:2;
+#endif
+};
+
+/* Value of 'packet_rank' above */
+#define NL_INTERMEDIATE_PACKET 0x0
+#define NL_LAST_PACKET 0x1
+#define NL_FIRST_PACKET 0x2
+
+union nl_packet {
+ /* Network packet header of the first packet (a special case) */
+ struct nl_first_packet_header hdr_first;
+ /* Network packet header of the following packets (if any) */
+ struct nl_packet_header hdr;
+ /* Complete network packet (header + data) */
+ unsigned char rawpkt[LL_MTU_MAX];
+} __attribute__ ((__packed__));
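
A minimal sketch of how the first fragment of a DATA packet could be framed with the header above. It is illustrative only and not code from this patch: the helper name is invented, the combination of the FIRST/LAST rank flags and the meaning of the two length bytes are assumptions, and the "channel index + 1" addressing mirrors the dest_addr comment in struct ipw_tx_packet further down.

static void example_fill_first_header(union nl_packet *pkt,
				      unsigned int channel_idx,
				      unsigned int payload_len,
				      int is_last_fragment)
{
	/* Hypothetical helper, for illustration only */
	pkt->hdr_first.protocol = TL_PROTOCOLID_COM_DATA;
	pkt->hdr_first.address = channel_idx + 1;	/* address 0 is ADDR_SETUP_PROT */
	pkt->hdr_first.packet_rank = NL_FIRST_PACKET;
	if (is_last_fragment)
		pkt->hdr_first.packet_rank |= NL_LAST_PACKET;	/* assumed: rank flags combine */
	pkt->hdr_first.length_lsb = payload_len & 0xff;
	pkt->hdr_first.length_msb = (payload_len >> 8) & 0xff;
	/* Payload bytes would follow at pkt->rawpkt + NL_FIRST_PACKET_HEADER_SIZE */
}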
+
+#define HW_VERSION_UNKNOWN -1
+#define HW_VERSION_1 1
+#define HW_VERSION_2 2
+
+/* IPW I/O ports */
+#define IOIER 0x00 /* Interrupt Enable Register */
+#define IOIR 0x02 /* Interrupt Source/ACK register */
+#define IODCR 0x04 /* Data Control Register */
+#define IODRR 0x06 /* Data Read Register */
+#define IODWR 0x08 /* Data Write Register */
+#define IOESR 0x0A /* Embedded Driver Status Register */
+#define IORXR 0x0C /* Rx Fifo Register (Host to Embedded) */
+#define IOTXR 0x0E /* Tx Fifo Register (Embedded to Host) */
+
+/* I/O ports and bit definitions for version 1 of the hardware */
+
+/* IER bits*/
+#define IER_RXENABLED 0x1
+#define IER_TXENABLED 0x2
+
+/* ISR bits */
+#define IR_RXINTR 0x1
+#define IR_TXINTR 0x2
+
+/* DCR bits */
+#define DCR_RXDONE 0x1
+#define DCR_TXDONE 0x2
+#define DCR_RXRESET 0x4
+#define DCR_TXRESET 0x8
+
+/* I/O ports and bit definitions for version 2 of the hardware */
+
+struct MEMCCR {
+ unsigned short reg_config_option; /* PCCOR: Configuration Option Register */
+ unsigned short reg_config_and_status; /* PCCSR: Configuration and Status Register */
+ unsigned short reg_pin_replacement; /* PCPRR: Pin Replacement Register */
+ unsigned short reg_socket_and_copy; /* PCSCR: Socket and Copy Register */
+ unsigned short reg_ext_status; /* PCESR: Extended Status Register */
+ unsigned short reg_io_base; /* PCIOB: I/O Base Register */
+};
+
+struct MEMINFREG {
+ unsigned short memreg_tx_old; /* TX Register (R/W) */
+ unsigned short pad1;
+ unsigned short memreg_rx_done; /* RXDone Register (R/W) */
+ unsigned short pad2;
+ unsigned short memreg_rx; /* RX Register (R/W) */
+ unsigned short pad3;
+ unsigned short memreg_pc_interrupt_ack; /* PC intr Ack Register (W) */
+ unsigned short pad4;
+ unsigned long memreg_card_present;/* Mask for Host to check (R) for
+ * CARD_PRESENT_VALUE */
+ unsigned short memreg_tx_new; /* TX2 (new) Register (R/W) */
+};
+
+#define IODMADPR 0x00 /* DMA Data Port Register (R/W) */
+
+#define CARD_PRESENT_VALUE (0xBEEFCAFEUL)
+
+#define MEMTX_TX 0x0001
+#define MEMRX_RX 0x0001
+#define MEMRX_RX_DONE 0x0001
+#define MEMRX_PCINTACKK 0x0001
+#define MEMRX_MEMSPURIOUSINT 0x0001
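
A hedged illustration of how the V2 memory window above might be used: the host can check that the card is still inserted by comparing the read-only memreg_card_present word with CARD_PRESENT_VALUE. The helper below is a sketch under that assumption, not code taken from this patch, and assumes readl() access to the __iomem mapping.

static int example_card_is_present(const struct MEMINFREG __iomem *regs)
{
	/* memreg_card_present is read-only for the host (see struct MEMINFREG) */
	return readl(&regs->memreg_card_present) == CARD_PRESENT_VALUE;
}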
+
+#define NL_NUM_OF_PRIORITIES 3
+#define NL_NUM_OF_PROTOCOLS 3
+#define NL_NUM_OF_ADDRESSES NO_OF_IPW_CHANNELS
+
+struct ipw_hardware {
+ unsigned int base_port;
+ short hw_version;
+ unsigned short ll_mtu;
+ spinlock_t spinlock;
+
+ int initializing;
+ int init_loops;
+ struct timer_list setup_timer;
+
+ int tx_ready;
+ struct list_head tx_queue[NL_NUM_OF_PRIORITIES];
+ /* True if any packets are queued for transmission */
+ int tx_queued;
+
+ int rx_bytes_queued;
+ struct list_head rx_queue;
+ /* Pool of rx_packet structures that are not currently used. */
+ struct list_head rx_pool;
+ int rx_pool_size;
+ /* True if reception of data is blocked while userspace processes it. */
+ int blocking_rx;
+ /* True if there is RX data ready on the hardware. */
+ int rx_ready;
+ unsigned short last_memtx_serial;
+ /*
+ * Newer versions of the V2 card firmware send serial numbers in the
+ * MemTX register. 'serial_number_detected' is set true when we detect
+ * a non-zero serial number (indicating the new firmware). Thereafter,
+ * the driver can safely ignore the Timer Recovery re-sends to avoid
+ * out-of-sync problems.
+ */
+ int serial_number_detected;
+ struct work_struct work_rx;
+
+ /* True if we are to send the set-up data to the hardware. */
+ int to_setup;
+
+ /* Card has been removed */
+ int removed;
+ /* Saved irq value when we disable the interrupt. */
+ int irq;
+ /* True if this driver is shutting down. */
+ int shutting_down;
+ /* Modem control lines */
+ unsigned int control_lines[NL_NUM_OF_ADDRESSES];
+ struct ipw_rx_packet *packet_assembler[NL_NUM_OF_ADDRESSES];
+
+ struct tasklet_struct tasklet;
+
+ /* The handle for the network layer, used to send events to it. */
+ struct ipw_network *network;
+ struct MEMINFREG __iomem *memory_info_regs;
+ struct MEMCCR __iomem *memregs_CCR;
+ void (*reboot_callback) (void *data);
+ void *reboot_callback_data;
+
+ unsigned short __iomem *memreg_tx;
+};
+
+/*
+ * Packet info structure for tx packets.
+ * Note: not all the fields defined here are required for all protocols
+ */
+struct ipw_tx_packet {
+ struct list_head queue;
+ /* channel idx + 1 */
+ unsigned char dest_addr;
+ /* SETUP, CTRL or DATA */
+ unsigned char protocol;
+ /* Length of data block, which starts at the end of this structure */
+ unsigned short length;
+ /* Sending state */
+ /* Offset of where we've sent up to so far */
+ unsigned long offset;
+ /* Count of packet fragments, starting at 0 */
+ int fragment_count;
+
+ /* Called after the packet is sent and before it is freed */
+ void (*packet_callback) (void *cb_data, unsigned int packet_length);
+ void *callback_data;
+};
+
+/* Signals from DTE */
+#define COMCTRL_RTS 0
+#define COMCTRL_DTR 1
+
+/* Signals from DCE */
+#define COMCTRL_CTS 2
+#define COMCTRL_DCD 3
+#define COMCTRL_DSR 4
+#define COMCTRL_RI 5
+
+struct ipw_control_packet_body {
+ /* DTE signal or DCE signal */
+ unsigned char sig_no;
+ /* 0: clear (de-assert) the signal, 1: set (assert) it */
+ unsigned char value;
+} __attribute__ ((__packed__));
+
+struct ipw_control_packet {
+ struct ipw_tx_packet header;
+ struct ipw_control_packet_body body;
+};
+
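+/* The received payload is stored immediately after this structure. */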
+struct ipw_rx_packet {
+ struct list_head queue;
+ unsigned int capacity;
+ unsigned int length;
+ unsigned int protocol;
+ unsigned int channel_idx;
+};
+
+#ifdef IPWIRELESS_STATE_DEBUG
+int ipwireless_dump_hardware_state(char *p, size_t limit,
+ struct ipw_hardware *hw)
+{
+ return snprintf(p, limit,
+ "debug: initializing=%d\n"
+ "debug: tx_ready=%d\n"
+ "debug: tx_queued=%d\n"
+ "debug: rx_ready=%d\n"
+ "debug: rx_bytes_queued=%d\n"
+ "debug: blocking_rx=%d\n"
+ "debug: removed=%d\n"
+ "debug: hardware.shutting_down=%d\n"
+ "debug: to_setup=%d\n",
+ hw->initializing,
+ hw->tx_ready,
+ hw->tx_queued,
+ hw->rx_ready,
+ hw->rx_bytes_queued,
+ hw->blocking_rx,
+ hw->removed,
+ hw->shutting_down,
+ hw->to_setup);
+}
+#endif
+
+static char *data_type(const unsigned char *buf, unsigned length)
+{
+ struct nl_packet_header *hdr = (struct nl_packet_header *) buf;
+
+ if (length == 0)
+ return " ";
+
+ if (hdr->packet_rank & NL_FIRST_PACKET) {
+ switch (hdr->protocol) {
+ case TL_PROTOCOLID_COM_DATA: return "DATA ";
+ case TL_PROTOCOLID_COM_CTRL: return "CTRL ";
+ case TL_PROTOCOLID_SETUP: return "SETUP";
+ default: return "???? ";
+ }
+ } else
+ return " ";
+}
+
+#define DUMP_MAX_BYTES 64
+
+static void dump_data_bytes(const char *type, const unsigned char *data,
+ unsigned length)
+{
+ char prefix[56];
+
+ sprintf(prefix, IPWIRELESS_PCCARD_NAME ": %s %s ",
+ type, data_type(data, length));
+ print_hex_dump_bytes(prefix, 0, (void *)data,
+ length < DUMP_MAX_BYTES ? length : DUMP_MAX_BYTES);
+}
+
+static int do_send_fragment(struct ipw_hardware *hw, const unsigned char *data,
+ unsigned length)
+{
+ int i;
+ unsigned long flags;
+
+ start_timing();
+
+ if (length == 0)
+ return 0;
+
+ if (length > hw->ll_mtu)
+ return -1;
+
+ if (ipwireless_debug)
+ dump_data_bytes("send", data, length);
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+
+ if (hw->hw_version == HW_VERSION_1) {
+ outw((unsigned short) length, hw->base_port + IODWR);
+
+ for (i = 0; i < length; i += 2) {
+ unsigned short d = data[i];
+ __le16 raw_data;
+
+ if (likely(i + 1 < length))
+ d |= data[i + 1] << 8;
+ raw_data = cpu_to_le16(d);
+ outw(raw_data, hw->base_port + IODWR);
+ }
+
+ outw(DCR_TXDONE, hw->base_port + IODCR);
+ } else if (hw->hw_version == HW_VERSION_2) {
+ outw((unsigned short) length, hw->base_port + IODMADPR);
+
+ for (i = 0; i < length; i += 2) {
+ unsigned short d = data[i];
+ __le16 raw_data;
+
+ if ((i + 1 < length))
+ d |= data[i + 1] << 8;
+ raw_data = cpu_to_le16(d);
+ outw(raw_data, hw->base_port + IODMADPR);
+ }
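+ /*
+ * Pad so that, counting the initial length word, an even number
+ * of 16-bit words has been written to the DMA port, keeping the
+ * transfer a multiple of 32 bits.
+ */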
+ while ((i & 3) != 2) {
+ outw((unsigned short) 0xDEAD, hw->base_port + IODMADPR);
+ i += 2;
+ }
+ writew(MEMRX_RX, &hw->memory_info_regs->memreg_rx);
+ }
+
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+
+ end_write_timing(length);
+
+ return 0;
+}
+
+static int do_send_packet(struct ipw_hardware *hw, struct ipw_tx_packet *packet)
+{
+ unsigned short fragment_data_len;
+ unsigned short data_left = packet->length - packet->offset;
+ unsigned short header_size;
+ union nl_packet pkt;
+
+ header_size =
+ (packet->fragment_count == 0)
+ ? NL_FIRST_PACKET_HEADER_SIZE
+ : NL_FOLLOWING_PACKET_HEADER_SIZE;
+ fragment_data_len = hw->ll_mtu - header_size;
+ if (data_left < fragment_data_len)
+ fragment_data_len = data_left;
+
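+ /*
+ * hdr_first and hdr overlay the same leading bitfields in the union,
+ * so filling in hdr_first covers both cases; for follow-on fragments
+ * the two length bytes are simply not transmitted.
+ */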
+ pkt.hdr_first.protocol = packet->protocol;
+ pkt.hdr_first.address = packet->dest_addr;
+ pkt.hdr_first.packet_rank = 0;
+
+ /* First packet? */
+ if (packet->fragment_count == 0) {
+ pkt.hdr_first.packet_rank |= NL_FIRST_PACKET;
+ pkt.hdr_first.length_lsb = (unsigned char) packet->length;
+ pkt.hdr_first.length_msb =
+ (unsigned char) (packet->length >> 8);
+ }
+
+ memcpy(pkt.rawpkt + header_size,
+ ((unsigned char *) packet) + sizeof(struct ipw_tx_packet) +
+ packet->offset, fragment_data_len);
+ packet->offset += fragment_data_len;
+ packet->fragment_count++;
+
+ /* Last packet? (May also be first packet.) */
+ if (packet->offset == packet->length)
+ pkt.hdr_first.packet_rank |= NL_LAST_PACKET;
+ do_send_fragment(hw, pkt.rawpkt, header_size + fragment_data_len);
+
+ /* If this packet has unsent data, then re-queue it. */
+ if (packet->offset < packet->length) {
+ /*
+ * Re-queue it at the head of the highest priority queue so
+ * it goes before all other packets
+ */
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+ list_add(&packet->queue, &hw->tx_queue[0]);
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ } else {
+ if (packet->packet_callback)
+ packet->packet_callback(packet->callback_data,
+ packet->length);
+ kfree(packet);
+ }
+
+ return 0;
+}
+
+static void ipw_setup_hardware(struct ipw_hardware *hw)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+ if (hw->hw_version == HW_VERSION_1) {
+ /* Reset RX FIFO */
+ outw(DCR_RXRESET, hw->base_port + IODCR);
+ /* SB: Reset TX FIFO */
+ outw(DCR_TXRESET, hw->base_port + IODCR);
+
+ /* Enable TX and RX interrupts. */
+ outw(IER_TXENABLED | IER_RXENABLED, hw->base_port + IOIER);
+ } else {
+ /*
+ * Set INTRACK bit (bit 0), which means we must explicitly
+ * acknowledge interrupts by clearing bit 2 of reg_config_and_status.
+ */
+ unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status);
+
+ csr |= 1;
+ writew(csr, &hw->memregs_CCR->reg_config_and_status);
+ }
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+}
+
+/*
+ * If 'packet' is NULL, then this function allocates a new packet, setting its
+ * length to 0 and ensuring it has the specified minimum amount of free space.
+ *
+ * If 'packet' is not NULL, then this function enlarges it if it doesn't
+ * have the specified minimum amount of free space.
+ *
+ */
+static struct ipw_rx_packet *pool_allocate(struct ipw_hardware *hw,
+ struct ipw_rx_packet *packet,
+ int minimum_free_space)
+{
+
+ if (!packet) {
+ unsigned long flags;
+
+ /*
+ * If this is the first fragment, then we will need to fetch a
+ * packet to put it in.
+ */
+ spin_lock_irqsave(&hw->spinlock, flags);
+ /* If we have one in our pool, then pull it out. */
+ if (!list_empty(&hw->rx_pool)) {
+ packet = list_first_entry(&hw->rx_pool,
+ struct ipw_rx_packet, queue);
+ list_del(&packet->queue);
+ hw->rx_pool_size--;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ } else {
+ /* Otherwise allocate a new one. */
+ static int min_capacity = 256;
+ int new_capacity;
+
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ new_capacity =
+ minimum_free_space > min_capacity
+ ? minimum_free_space
+ : min_capacity;
+ packet = kmalloc(sizeof(struct ipw_rx_packet)
+ + new_capacity, GFP_ATOMIC);
+ if (!packet)
+ return NULL;
+ packet->capacity = new_capacity;
+ }
+ packet->length = 0;
+ }
+
+ /*
+ * If this packet does not have sufficient capacity for the data we
+ * want to add, then make it bigger.
+ */
+ if (packet->length + minimum_free_space > packet->capacity) {
+ struct ipw_rx_packet *old_packet = packet;
+
+ packet = kmalloc(sizeof(struct ipw_rx_packet) +
+ old_packet->length + minimum_free_space,
+ GFP_ATOMIC);
+ if (!packet)
+ return NULL;
+ memcpy(packet, old_packet,
+ sizeof(struct ipw_rx_packet)
+ + old_packet->length);
+ packet->capacity = old_packet->length + minimum_free_space;
+ kfree(old_packet);
+ }
+
+ return packet;
+}
+
+static void pool_free(struct ipw_hardware *hw, struct ipw_rx_packet *packet)
+{
+ if (hw->rx_pool_size > 6)
+ kfree(packet);
+ else {
+ hw->rx_pool_size++;
+ list_add_tail(&packet->queue, &hw->rx_pool);
+ }
+}
+
+static void queue_received_packet(struct ipw_hardware *hw,
+ unsigned int protocol, unsigned int address,
+ unsigned char *data, int length, int is_last)
+{
+ unsigned int channel_idx = address - 1;
+ struct ipw_rx_packet *packet = NULL;
+ unsigned long flags;
+
+ /* Discard packet if channel index is out of range. */
+ if (channel_idx >= NL_NUM_OF_ADDRESSES) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": data packet has bad address %u\n", address);
+ return;
+ }
+
+ /*
+ * ->packet_assembler is safe to touch unlocked; this is the only place it is used
+ */
+ if (protocol == TL_PROTOCOLID_COM_DATA) {
+ struct ipw_rx_packet **assem =
+ &hw->packet_assembler[channel_idx];
+
+ /*
+ * Create a new packet, or, if the assembler already contains one,
+ * enlarge it by 'length' bytes.
+ */
+ (*assem) = pool_allocate(hw, *assem, length);
+ if (!(*assem)) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": no memory for incoming data packet, dropped!\n");
+ return;
+ }
+ (*assem)->protocol = protocol;
+ (*assem)->channel_idx = channel_idx;
+
+ /* Append this packet data onto existing data. */
+ memcpy((unsigned char *)(*assem) +
+ sizeof(struct ipw_rx_packet)
+ + (*assem)->length, data, length);
+ (*assem)->length += length;
+ if (is_last) {
+ packet = *assem;
+ *assem = NULL;
+ /* Count queued DATA bytes only */
+ spin_lock_irqsave(&hw->spinlock, flags);
+ hw->rx_bytes_queued += packet->length;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ }
+ } else {
+ /* If it's a CTRL packet, don't assemble, just queue it. */
+ packet = pool_allocate(hw, NULL, length);
+ if (!packet) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": no memory for incoming ctrl packet, dropped!\n");
+ return;
+ }
+ packet->protocol = protocol;
+ packet->channel_idx = channel_idx;
+ memcpy((unsigned char *)packet + sizeof(struct ipw_rx_packet),
+ data, length);
+ packet->length = length;
+ }
+
+ /*
+ * If this is the last packet, then send the assembled packet on to the
+ * network layer.
+ */
+ if (packet) {
+ spin_lock_irqsave(&hw->spinlock, flags);
+ list_add_tail(&packet->queue, &hw->rx_queue);
+ /* Block reception of incoming packets if queue is full. */
+ hw->blocking_rx =
+ hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE;
+
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ schedule_work(&hw->work_rx);
+ }
+}
+
+/*
+ * Workqueue callback
+ */
+static void ipw_receive_data_work(struct work_struct *work_rx)
+{
+ struct ipw_hardware *hw =
+ container_of(work_rx, struct ipw_hardware, work_rx);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+ while (!list_empty(&hw->rx_queue)) {
+ struct ipw_rx_packet *packet =
+ list_first_entry(&hw->rx_queue,
+ struct ipw_rx_packet, queue);
+
+ if (hw->shutting_down)
+ break;
+ list_del(&packet->queue);
+
+ /*
+ * Note: ipwireless_network_packet_received must be called in a
+ * process context (i.e. via schedule_work) because the tty
+ * output code can sleep in the tty_flip_buffer_push call.
+ */
+ if (packet->protocol == TL_PROTOCOLID_COM_DATA) {
+ if (hw->network != NULL) {
+ /* If the network hasn't been disconnected. */
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ /*
+ * This must run unlocked due to tty processing
+ * and mutex locking
+ */
+ ipwireless_network_packet_received(
+ hw->network,
+ packet->channel_idx,
+ (unsigned char *)packet
+ + sizeof(struct ipw_rx_packet),
+ packet->length);
+ spin_lock_irqsave(&hw->spinlock, flags);
+ }
+ /* Count queued DATA bytes only */
+ hw->rx_bytes_queued -= packet->length;
+ } else {
+ /*
+ * This is safe to be called locked, callchain does
+ * not block
+ */
+ handle_received_CTRL_packet(hw, packet->channel_idx,
+ (unsigned char *)packet
+ + sizeof(struct ipw_rx_packet),
+ packet->length);
+ }
+ pool_free(hw, packet);
+ /*
+ * Unblock reception of incoming packets if queue is no longer
+ * full.
+ */
+ hw->blocking_rx =
+ hw->rx_bytes_queued >= IPWIRELESS_RX_QUEUE_SIZE;
+ if (hw->shutting_down)
+ break;
+ }
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+}
+
+static void handle_received_CTRL_packet(struct ipw_hardware *hw,
+ unsigned int channel_idx,
+ unsigned char *data, int len)
+{
+ struct ipw_control_packet_body *body =
+ (struct ipw_control_packet_body *) data;
+ unsigned int changed_mask;
+
+ if (len != sizeof(struct ipw_control_packet_body)) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": control packet was %d bytes - wrong size!\n",
+ len);
+ return;
+ }
+
+ switch (body->sig_no) {
+ case COMCTRL_CTS:
+ changed_mask = IPW_CONTROL_LINE_CTS;
+ break;
+ case COMCTRL_DCD:
+ changed_mask = IPW_CONTROL_LINE_DCD;
+ break;
+ case COMCTRL_DSR:
+ changed_mask = IPW_CONTROL_LINE_DSR;
+ break;
+ case COMCTRL_RI:
+ changed_mask = IPW_CONTROL_LINE_RI;
+ break;
+ default:
+ changed_mask = 0;
+ }
+
+ if (changed_mask != 0) {
+ if (body->value)
+ hw->control_lines[channel_idx] |= changed_mask;
+ else
+ hw->control_lines[channel_idx] &= ~changed_mask;
+ if (hw->network)
+ ipwireless_network_notify_control_line_change(
+ hw->network,
+ channel_idx,
+ hw->control_lines[channel_idx],
+ changed_mask);
+ }
+}
+
+static void handle_received_packet(struct ipw_hardware *hw,
+ union nl_packet *packet,
+ unsigned short len)
+{
+ unsigned int protocol = packet->hdr.protocol;
+ unsigned int address = packet->hdr.address;
+ unsigned int header_length;
+ unsigned char *data;
+ unsigned int data_len;
+ int is_last = packet->hdr.packet_rank & NL_LAST_PACKET;
+
+ if (packet->hdr.packet_rank & NL_FIRST_PACKET)
+ header_length = NL_FIRST_PACKET_HEADER_SIZE;
+ else
+ header_length = NL_FOLLOWING_PACKET_HEADER_SIZE;
+
+ data = packet->rawpkt + header_length;
+ data_len = len - header_length;
+ switch (protocol) {
+ case TL_PROTOCOLID_COM_DATA:
+ case TL_PROTOCOLID_COM_CTRL:
+ queue_received_packet(hw, protocol, address, data, data_len,
+ is_last);
+ break;
+ case TL_PROTOCOLID_SETUP:
+ handle_received_SETUP_packet(hw, address, data, data_len,
+ is_last);
+ break;
+ }
+}
+
+static void acknowledge_data_read(struct ipw_hardware *hw)
+{
+ if (hw->hw_version == HW_VERSION_1)
+ outw(DCR_RXDONE, hw->base_port + IODCR);
+ else
+ writew(MEMRX_PCINTACKK,
+ &hw->memory_info_regs->memreg_pc_interrupt_ack);
+}
+
+/*
+ * Retrieve a packet from the IPW hardware.
+ */
+static void do_receive_packet(struct ipw_hardware *hw)
+{
+ unsigned len;
+ unsigned int i;
+ unsigned char pkt[LL_MTU_MAX];
+
+ start_timing();
+
+ if (hw->hw_version == HW_VERSION_1) {
+ len = inw(hw->base_port + IODRR);
+ if (len > hw->ll_mtu) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": received a packet of %u bytes - "
+ "longer than the MTU!\n", len);
+ outw(DCR_RXDONE | DCR_RXRESET, hw->base_port + IODCR);
+ return;
+ }
+
+ for (i = 0; i < len; i += 2) {
+ __le16 raw_data = inw(hw->base_port + IODRR);
+ unsigned short data = le16_to_cpu(raw_data);
+
+ pkt[i] = (unsigned char) data;
+ pkt[i + 1] = (unsigned char) (data >> 8);
+ }
+ } else {
+ len = inw(hw->base_port + IODMADPR);
+ if (len > hw->ll_mtu) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": received a packet of %u bytes - "
+ "longer than the MTU!\n", len);
+ writew(MEMRX_PCINTACKK,
+ &hw->memory_info_regs->memreg_pc_interrupt_ack);
+ return;
+ }
+
+ for (i = 0; i < len; i += 2) {
+ __le16 raw_data = inw(hw->base_port + IODMADPR);
+ unsigned short data = le16_to_cpu(raw_data);
+
+ pkt[i] = (unsigned char) data;
+ pkt[i + 1] = (unsigned char) (data >> 8);
+ }
+
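+ /*
+ * Discard padding words so that, counting the length word, an
+ * even number of 16-bit words has been read from the DMA port,
+ * mirroring the padding done on the transmit side.
+ */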
+ while ((i & 3) != 2) {
+ inw(hw->base_port + IODMADPR);
+ i += 2;
+ }
+ }
+
+ acknowledge_data_read(hw);
+
+ if (ipwireless_debug)
+ dump_data_bytes("recv", pkt, len);
+
+ handle_received_packet(hw, (union nl_packet *) pkt, len);
+
+ end_read_timing(len);
+}
+
+static int get_current_packet_priority(struct ipw_hardware *hw)
+{
+ /*
+ * If we're initializing, don't send anything of higher priority than
+ * PRIO_SETUP. The network layer therefore need not care about
+ * hardware initialization - any of its packets will simply be queued
+ * until setup is complete.
+ */
+ return (hw->to_setup || hw->initializing
+ ? PRIO_SETUP + 1 :
+ NL_NUM_OF_PRIORITIES);
+}
+
+/*
+ * Return 1 if something has been received from the hardware.
+ */
+static int get_packets_from_hw(struct ipw_hardware *hw)
+{
+ int received = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+ while (hw->rx_ready && !hw->blocking_rx) {
+ received = 1;
+ hw->rx_ready--;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+
+ do_receive_packet(hw);
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+ }
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+
+ return received;
+}
+
+/*
+ * Send a pending packet of at most the given priority; SETUP data is
+ * prioritized until the hardware is fully set up.
+ *
+ * return 1 if more packets can be sent
+ */
+static int send_pending_packet(struct ipw_hardware *hw, int priority_limit)
+{
+ int more_to_send = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+ if (hw->tx_queued && hw->tx_ready != 0) {
+ int priority;
+ struct ipw_tx_packet *packet = NULL;
+
+ hw->tx_ready--;
+
+ /* Pick a packet */
+ for (priority = 0; priority < priority_limit; priority++) {
+ if (!list_empty(&hw->tx_queue[priority])) {
+ packet = list_first_entry(
+ &hw->tx_queue[priority],
+ struct ipw_tx_packet,
+ queue);
+
+ list_del(&packet->queue);
+
+ break;
+ }
+ }
+ if (!packet) {
+ hw->tx_queued = 0;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+
+ /* Send */
+ do_send_packet(hw, packet);
+
+ /* Check if more to send */
+ spin_lock_irqsave(&hw->spinlock, flags);
+ for (priority = 0; priority < priority_limit; priority++)
+ if (!list_empty(&hw->tx_queue[priority])) {
+ more_to_send = 1;
+ break;
+ }
+
+ if (!more_to_send)
+ hw->tx_queued = 0;
+ }
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+
+ return more_to_send;
+}
+
+/*
+ * Send and receive all queued packets.
+ */
+static void ipwireless_do_tasklet(unsigned long hw_)
+{
+ struct ipw_hardware *hw = (struct ipw_hardware *) hw_;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+ if (hw->shutting_down) {
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ return;
+ }
+
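+ /*
+ * hw->to_setup: 1 = setup requested by the setup timer, 2 = setup
+ * data sent, 0 = no setup pending / setup complete (see
+ * __handle_setup_get_version_rsp), -1 = setup failed for lack of
+ * memory.
+ */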
+ if (hw->to_setup == 1) {
+ /*
+ * Initial setup data sent to hardware
+ */
+ hw->to_setup = 2;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+
+ ipw_setup_hardware(hw);
+ ipw_send_setup_packet(hw);
+
+ send_pending_packet(hw, PRIO_SETUP + 1);
+ get_packets_from_hw(hw);
+ } else {
+ int priority_limit = get_current_packet_priority(hw);
+ int again;
+
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+
+ do {
+ again = send_pending_packet(hw, priority_limit);
+ again |= get_packets_from_hw(hw);
+ } while (again);
+ }
+}
+
+/*
+ * return true if the card is physically present.
+ */
+static int is_card_present(struct ipw_hardware *hw)
+{
+ if (hw->hw_version == HW_VERSION_1)
+ return inw(hw->base_port + IOIR) != 0xFFFF;
+ else
+ return readl(&hw->memory_info_regs->memreg_card_present) ==
+ CARD_PRESENT_VALUE;
+}
+
+static irqreturn_t ipwireless_handle_v1_interrupt(int irq,
+ struct ipw_hardware *hw)
+{
+ unsigned short irqn;
+
+ irqn = inw(hw->base_port + IOIR);
+
+ /* Check if card is present */
+ if (irqn == 0xFFFF)
+ return IRQ_NONE;
+ else if (irqn != 0) {
+ unsigned short ack = 0;
+ unsigned long flags;
+
+ /* Transmit complete. */
+ if (irqn & IR_TXINTR) {
+ ack |= IR_TXINTR;
+ spin_lock_irqsave(&hw->spinlock, flags);
+ hw->tx_ready++;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ }
+ /* Received data */
+ if (irqn & IR_RXINTR) {
+ ack |= IR_RXINTR;
+ spin_lock_irqsave(&hw->spinlock, flags);
+ hw->rx_ready++;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ }
+ if (ack != 0) {
+ outw(ack, hw->base_port + IOIR);
+ tasklet_schedule(&hw->tasklet);
+ }
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static void acknowledge_pcmcia_interrupt(struct ipw_hardware *hw)
+{
+ unsigned short csr = readw(&hw->memregs_CCR->reg_config_and_status);
+
+ csr &= 0xfffd;
+ writew(csr, &hw->memregs_CCR->reg_config_and_status);
+}
+
+static irqreturn_t ipwireless_handle_v2_v3_interrupt(int irq,
+ struct ipw_hardware *hw)
+{
+ int tx = 0;
+ int rx = 0;
+ int rx_repeat = 0;
+ int try_mem_tx_old;
+ unsigned long flags;
+
+ do {
+
+ unsigned short memtx = readw(hw->memreg_tx);
+ unsigned short memtx_serial;
+ unsigned short memrxdone =
+ readw(&hw->memory_info_regs->memreg_rx_done);
+
+ try_mem_tx_old = 0;
+
+ /* Check whether the interrupt was generated by the ipwireless card */
+ if (!(memtx & MEMTX_TX) && !(memrxdone & MEMRX_RX_DONE)) {
+
+ /* Check if the card uses the memreg_tx_old register */
+ if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
+ memtx = readw(&hw->memory_info_regs->memreg_tx_old);
+ if (memtx & MEMTX_TX) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": Using memreg_tx_old\n");
+ hw->memreg_tx =
+ &hw->memory_info_regs->memreg_tx_old;
+ } else {
+ return IRQ_NONE;
+ }
+ } else {
+ return IRQ_NONE;
+ }
+ }
+
+ /*
+ * See if the card is physically present. Note that while it is
+ * powering up, it appears not to be present.
+ */
+ if (!is_card_present(hw)) {
+ acknowledge_pcmcia_interrupt(hw);
+ return IRQ_HANDLED;
+ }
+
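+ /*
+ * Bit 0 of memreg_tx (MEMTX_TX) means the card has data for us
+ * to read; the upper byte carries the firmware's packet serial
+ * number, used below to filter 'Timer Recovery' duplicates.
+ */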
+ memtx_serial = memtx & (unsigned short) 0xff00;
+ if (memtx & MEMTX_TX) {
+ writew(memtx_serial, hw->memreg_tx);
+
+ if (hw->serial_number_detected) {
+ if (memtx_serial != hw->last_memtx_serial) {
+ hw->last_memtx_serial = memtx_serial;
+ spin_lock_irqsave(&hw->spinlock, flags);
+ hw->rx_ready++;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ rx = 1;
+ } else
+ /* Ignore 'Timer Recovery' duplicates. */
+ rx_repeat = 1;
+ } else {
+ /*
+ * If a non-zero serial number is seen, then enable
+ * serial number checking.
+ */
+ if (memtx_serial != 0) {
+ hw->serial_number_detected = 1;
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
+ ": memreg_tx serial num detected\n");
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+ hw->rx_ready++;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ }
+ rx = 1;
+ }
+ }
+ if (memrxdone & MEMRX_RX_DONE) {
+ writew(0, &hw->memory_info_regs->memreg_rx_done);
+ spin_lock_irqsave(&hw->spinlock, flags);
+ hw->tx_ready++;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ tx = 1;
+ }
+ if (tx)
+ writew(MEMRX_PCINTACKK,
+ &hw->memory_info_regs->memreg_pc_interrupt_ack);
+
+ acknowledge_pcmcia_interrupt(hw);
+
+ if (tx || rx)
+ tasklet_schedule(&hw->tasklet);
+ else if (!rx_repeat) {
+ if (hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
+ if (hw->serial_number_detected)
+ printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
+ ": spurious interrupt - new_tx mode\n");
+ else {
+ printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
+ ": no valid memreg_tx value - "
+ "switching to the old memreg_tx\n");
+ hw->memreg_tx =
+ &hw->memory_info_regs->memreg_tx_old;
+ try_mem_tx_old = 1;
+ }
+ } else
+ printk(KERN_WARNING IPWIRELESS_PCCARD_NAME
+ ": spurious interrupt - old_tx mode\n");
+ }
+
+ } while (try_mem_tx_old == 1);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t ipwireless_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+ struct ipw_hardware *hw = dev_id;
+
+ if (hw->hw_version == HW_VERSION_1)
+ return ipwireless_handle_v1_interrupt(irq, hw);
+ else
+ return ipwireless_handle_v2_v3_interrupt(irq, hw);
+}
+
+static void flush_packets_to_hw(struct ipw_hardware *hw)
+{
+ int priority_limit;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+ priority_limit = get_current_packet_priority(hw);
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+
+ while (send_pending_packet(hw, priority_limit));
+}
+
+static void send_packet(struct ipw_hardware *hw, int priority,
+ struct ipw_tx_packet *packet)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+ list_add_tail(&packet->queue, &hw->tx_queue[priority]);
+ hw->tx_queued = 1;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+
+ flush_packets_to_hw(hw);
+}
+
+/* Create a data packet; uses an atomic allocation (GFP_ATOMIC) */
+static void *alloc_data_packet(int data_size,
+ unsigned char dest_addr,
+ unsigned char protocol)
+{
+ struct ipw_tx_packet *packet = kzalloc(
+ sizeof(struct ipw_tx_packet) + data_size,
+ GFP_ATOMIC);
+
+ if (!packet)
+ return NULL;
+
+ INIT_LIST_HEAD(&packet->queue);
+ packet->dest_addr = dest_addr;
+ packet->protocol = protocol;
+ packet->length = data_size;
+
+ return packet;
+}
+
+static void *alloc_ctrl_packet(int header_size,
+ unsigned char dest_addr,
+ unsigned char protocol,
+ unsigned char sig_no)
+{
+ /*
+ * sig_no is located right after the ipw_tx_packet struct in every
+ * CTRL or SETUP packet, so we can use ipw_control_packet as a
+ * common struct.
+ */
+ struct ipw_control_packet *packet = kzalloc(header_size, GFP_ATOMIC);
+
+ if (!packet)
+ return NULL;
+
+ INIT_LIST_HEAD(&packet->header.queue);
+ packet->header.dest_addr = dest_addr;
+ packet->header.protocol = protocol;
+ packet->header.length = header_size - sizeof(struct ipw_tx_packet);
+ packet->body.sig_no = sig_no;
+
+ return packet;
+}
+
+int ipwireless_send_packet(struct ipw_hardware *hw, unsigned int channel_idx,
+ unsigned char *data, unsigned int length,
+ void (*callback) (void *cb, unsigned int length),
+ void *callback_data)
+{
+ struct ipw_tx_packet *packet;
+
+ packet = alloc_data_packet(length,
+ (unsigned char) (channel_idx + 1),
+ TL_PROTOCOLID_COM_DATA);
+ if (!packet)
+ return -ENOMEM;
+ packet->packet_callback = callback;
+ packet->callback_data = callback_data;
+ memcpy((unsigned char *) packet +
+ sizeof(struct ipw_tx_packet), data, length);
+
+ send_packet(hw, PRIO_DATA, packet);
+ return 0;
+}
+
+static int set_control_line(struct ipw_hardware *hw, int prio,
+ unsigned int channel_idx, int line, int state)
+{
+ struct ipw_control_packet *packet;
+ int protocolid = TL_PROTOCOLID_COM_CTRL;
+
+ if (prio == PRIO_SETUP)
+ protocolid = TL_PROTOCOLID_SETUP;
+
+ packet = alloc_ctrl_packet(sizeof(struct ipw_control_packet),
+ (unsigned char) (channel_idx + 1),
+ protocolid, line);
+ if (!packet)
+ return -ENOMEM;
+ packet->header.length = sizeof(struct ipw_control_packet_body);
+ packet->body.value = (unsigned char) (state == 0 ? 0 : 1);
+ send_packet(hw, prio, &packet->header);
+ return 0;
+}
+
+
+static int set_DTR(struct ipw_hardware *hw, int priority,
+ unsigned int channel_idx, int state)
+{
+ if (state != 0)
+ hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_DTR;
+ else
+ hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_DTR;
+
+ return set_control_line(hw, priority, channel_idx, COMCTRL_DTR, state);
+}
+
+static int set_RTS(struct ipw_hardware *hw, int priority,
+ unsigned int channel_idx, int state)
+{
+ if (state != 0)
+ hw->control_lines[channel_idx] |= IPW_CONTROL_LINE_RTS;
+ else
+ hw->control_lines[channel_idx] &= ~IPW_CONTROL_LINE_RTS;
+
+ return set_control_line(hw, priority, channel_idx, COMCTRL_RTS, state);
+}
+
+int ipwireless_set_DTR(struct ipw_hardware *hw, unsigned int channel_idx,
+ int state)
+{
+ return set_DTR(hw, PRIO_CTRL, channel_idx, state);
+}
+
+int ipwireless_set_RTS(struct ipw_hardware *hw, unsigned int channel_idx,
+ int state)
+{
+ return set_RTS(hw, PRIO_CTRL, channel_idx, state);
+}
+
+struct ipw_setup_get_version_query_packet {
+ struct ipw_tx_packet header;
+ struct tl_setup_get_version_qry body;
+};
+
+struct ipw_setup_config_packet {
+ struct ipw_tx_packet header;
+ struct tl_setup_config_msg body;
+};
+
+struct ipw_setup_config_done_packet {
+ struct ipw_tx_packet header;
+ struct tl_setup_config_done_msg body;
+};
+
+struct ipw_setup_open_packet {
+ struct ipw_tx_packet header;
+ struct tl_setup_open_msg body;
+};
+
+struct ipw_setup_info_packet {
+ struct ipw_tx_packet header;
+ struct tl_setup_info_msg body;
+};
+
+struct ipw_setup_reboot_msg_ack {
+ struct ipw_tx_packet header;
+ struct TlSetupRebootMsgAck body;
+};
+
+/* This handles the actual initialization of the card */
+static void __handle_setup_get_version_rsp(struct ipw_hardware *hw)
+{
+ struct ipw_setup_config_packet *config_packet;
+ struct ipw_setup_config_done_packet *config_done_packet;
+ struct ipw_setup_open_packet *open_packet;
+ struct ipw_setup_info_packet *info_packet;
+ int port;
+ unsigned int channel_idx;
+
+ /* generate config packet */
+ for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) {
+ config_packet = alloc_ctrl_packet(
+ sizeof(struct ipw_setup_config_packet),
+ ADDR_SETUP_PROT,
+ TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_CONFIG_MSG);
+ if (!config_packet)
+ goto exit_nomem;
+ config_packet->header.length = sizeof(struct tl_setup_config_msg);
+ config_packet->body.port_no = port;
+ config_packet->body.prio_data = PRIO_DATA;
+ config_packet->body.prio_ctrl = PRIO_CTRL;
+ send_packet(hw, PRIO_SETUP, &config_packet->header);
+ }
+ config_done_packet = alloc_ctrl_packet(
+ sizeof(struct ipw_setup_config_done_packet),
+ ADDR_SETUP_PROT,
+ TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_CONFIG_DONE_MSG);
+ if (!config_done_packet)
+ goto exit_nomem;
+ config_done_packet->header.length = sizeof(struct tl_setup_config_done_msg);
+ send_packet(hw, PRIO_SETUP, &config_done_packet->header);
+
+ /* generate open packet */
+ for (port = 1; port <= NL_NUM_OF_ADDRESSES; port++) {
+ open_packet = alloc_ctrl_packet(
+ sizeof(struct ipw_setup_open_packet),
+ ADDR_SETUP_PROT,
+ TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_OPEN_MSG);
+ if (!open_packet)
+ goto exit_nomem;
+ open_packet->header.length = sizeof(struct tl_setup_open_msg);
+ open_packet->body.port_no = port;
+ send_packet(hw, PRIO_SETUP, &open_packet->header);
+ }
+ for (channel_idx = 0;
+ channel_idx < NL_NUM_OF_ADDRESSES; channel_idx++) {
+ int ret;
+
+ ret = set_DTR(hw, PRIO_SETUP, channel_idx,
+ (hw->control_lines[channel_idx] &
+ IPW_CONTROL_LINE_DTR) != 0);
+ if (ret) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": error setting DTR (%d)\n", ret);
+ return;
+ }
+
+ ret = set_RTS(hw, PRIO_SETUP, channel_idx,
+ (hw->control_lines[channel_idx] &
+ IPW_CONTROL_LINE_RTS) != 0);
+ if (ret) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": error setting RTS (%d)\n", ret);
+ return;
+ }
+ }
+ /*
+ * For NDIS we assume that we are using sync PPP frames; for COM, async.
+ * This driver uses NDIS mode too. We don't bother with translation
+ * from async -> sync PPP.
+ */
+ info_packet = alloc_ctrl_packet(sizeof(struct ipw_setup_info_packet),
+ ADDR_SETUP_PROT,
+ TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_INFO_MSG);
+ if (!info_packet)
+ goto exit_nomem;
+ info_packet->header.length = sizeof(struct tl_setup_info_msg);
+ info_packet->body.driver_type = NDISWAN_DRIVER;
+ info_packet->body.major_version = NDISWAN_DRIVER_MAJOR_VERSION;
+ info_packet->body.minor_version = NDISWAN_DRIVER_MINOR_VERSION;
+ send_packet(hw, PRIO_SETUP, &info_packet->header);
+
+ /* Initialization is now complete, so we clear the 'to_setup' flag */
+ hw->to_setup = 0;
+
+ return;
+
+exit_nomem:
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": not enough memory to alloc control packet\n");
+ hw->to_setup = -1;
+}
+
+static void handle_setup_get_version_rsp(struct ipw_hardware *hw,
+ unsigned char vers_no)
+{
+ del_timer(&hw->setup_timer);
+ hw->initializing = 0;
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": card is ready.\n");
+
+ if (vers_no == TL_SETUP_VERSION)
+ __handle_setup_get_version_rsp(hw);
+ else
+ printk(KERN_ERR
+ IPWIRELESS_PCCARD_NAME
+ ": invalid hardware version no %u\n",
+ (unsigned int) vers_no);
+}
+
+static void ipw_send_setup_packet(struct ipw_hardware *hw)
+{
+ struct ipw_setup_get_version_query_packet *ver_packet;
+
+ ver_packet = alloc_ctrl_packet(
+ sizeof(struct ipw_setup_get_version_query_packet),
+ ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_GET_VERSION_QRY);
+ if (!ver_packet)
+ return;
+ ver_packet->header.length = sizeof(struct tl_setup_get_version_qry);
+
+ /*
+ * Response is handled in handle_received_SETUP_packet
+ */
+ send_packet(hw, PRIO_SETUP, &ver_packet->header);
+}
+
+static void handle_received_SETUP_packet(struct ipw_hardware *hw,
+ unsigned int address,
+ unsigned char *data, int len,
+ int is_last)
+{
+ union ipw_setup_rx_msg *rx_msg = (union ipw_setup_rx_msg *) data;
+
+ if (address != ADDR_SETUP_PROT) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": setup packet has bad address %d\n", address);
+ return;
+ }
+
+ switch (rx_msg->sig_no) {
+ case TL_SETUP_SIGNO_GET_VERSION_RSP:
+ if (hw->to_setup)
+ handle_setup_get_version_rsp(hw,
+ rx_msg->version_rsp_msg.version);
+ break;
+
+ case TL_SETUP_SIGNO_OPEN_MSG:
+ if (ipwireless_debug) {
+ unsigned int channel_idx = rx_msg->open_msg.port_no - 1;
+
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": OPEN_MSG [channel %u] reply received\n",
+ channel_idx);
+ }
+ break;
+
+ case TL_SETUP_SIGNO_INFO_MSG_ACK:
+ if (ipwireless_debug)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
+ ": card successfully configured as NDISWAN\n");
+ break;
+
+ case TL_SETUP_SIGNO_REBOOT_MSG:
+ if (hw->to_setup)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
+ ": Setup not completed - ignoring reboot msg\n");
+ else {
+ struct ipw_setup_reboot_msg_ack *packet;
+
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
+ ": Acknowledging REBOOT message\n");
+ packet = alloc_ctrl_packet(
+ sizeof(struct ipw_setup_reboot_msg_ack),
+ ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_REBOOT_MSG_ACK);
+ if (!packet)
+ break;
+ packet->header.length =
+ sizeof(struct TlSetupRebootMsgAck);
+ send_packet(hw, PRIO_SETUP, &packet->header);
+ if (hw->reboot_callback)
+ hw->reboot_callback(hw->reboot_callback_data);
+ }
+ break;
+
+ default:
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": unknown setup message %u received\n",
+ (unsigned int) rx_msg->sig_no);
+ }
+}
+
+static void do_close_hardware(struct ipw_hardware *hw)
+{
+ unsigned int irqn;
+
+ if (hw->hw_version == HW_VERSION_1) {
+ /* Disable TX and RX interrupts. */
+ outw(0, hw->base_port + IOIER);
+
+ /* Acknowledge any outstanding interrupt requests */
+ irqn = inw(hw->base_port + IOIR);
+ if (irqn & IR_TXINTR)
+ outw(IR_TXINTR, hw->base_port + IOIR);
+ if (irqn & IR_RXINTR)
+ outw(IR_RXINTR, hw->base_port + IOIR);
+
+ synchronize_irq(hw->irq);
+ }
+}
+
+struct ipw_hardware *ipwireless_hardware_create(void)
+{
+ int i;
+ struct ipw_hardware *hw =
+ kzalloc(sizeof(struct ipw_hardware), GFP_KERNEL);
+
+ if (!hw)
+ return NULL;
+
+ hw->irq = -1;
+ hw->initializing = 1;
+ hw->tx_ready = 1;
+ hw->rx_bytes_queued = 0;
+ hw->rx_pool_size = 0;
+ hw->last_memtx_serial = (unsigned short) 0xffff;
+ for (i = 0; i < NL_NUM_OF_PRIORITIES; i++)
+ INIT_LIST_HEAD(&hw->tx_queue[i]);
+
+ INIT_LIST_HEAD(&hw->rx_queue);
+ INIT_LIST_HEAD(&hw->rx_pool);
+ spin_lock_init(&hw->spinlock);
+ tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw);
+ INIT_WORK(&hw->work_rx, ipw_receive_data_work);
+ setup_timer(&hw->setup_timer, ipwireless_setup_timer,
+ (unsigned long) hw);
+
+ return hw;
+}
+
+void ipwireless_init_hardware_v1(struct ipw_hardware *hw,
+ unsigned int base_port,
+ void __iomem *attr_memory,
+ void __iomem *common_memory,
+ int is_v2_card,
+ void (*reboot_callback) (void *data),
+ void *reboot_callback_data)
+{
+ if (hw->removed) {
+ hw->removed = 0;
+ enable_irq(hw->irq);
+ }
+ hw->base_port = base_port;
+ hw->hw_version = is_v2_card ? HW_VERSION_2 : HW_VERSION_1;
+ hw->ll_mtu = hw->hw_version == HW_VERSION_1 ? LL_MTU_V1 : LL_MTU_V2;
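+ /* memregs_CCR sits 0x200 16-bit words (0x400 bytes) into attribute memory. */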
+ hw->memregs_CCR = (struct MEMCCR __iomem *)
+ ((unsigned short __iomem *) attr_memory + 0x200);
+ hw->memory_info_regs = (struct MEMINFREG __iomem *) common_memory;
+ hw->memreg_tx = &hw->memory_info_regs->memreg_tx_new;
+ hw->reboot_callback = reboot_callback;
+ hw->reboot_callback_data = reboot_callback_data;
+}
+
+void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw)
+{
+ hw->initializing = 1;
+ hw->init_loops = 0;
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": waiting for card to start up...\n");
+ ipwireless_setup_timer((unsigned long) hw);
+}
+
+static void ipwireless_setup_timer(unsigned long data)
+{
+ struct ipw_hardware *hw = (struct ipw_hardware *) data;
+
+ hw->init_loops++;
+
+ if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY &&
+ hw->hw_version == HW_VERSION_2 &&
+ hw->memreg_tx == &hw->memory_info_regs->memreg_tx_new) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": failed to startup using TX2, trying TX\n");
+
+ hw->memreg_tx = &hw->memory_info_regs->memreg_tx_old;
+ hw->init_loops = 0;
+ }
+ /* Give up after a certain number of retries */
+ if (hw->init_loops == TL_SETUP_MAX_VERSION_QRY) {
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": card failed to start up!\n");
+ hw->initializing = 0;
+ } else {
+ /* Do not attempt to write to the board if it is not present. */
+ if (is_card_present(hw)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->spinlock, flags);
+ hw->to_setup = 1;
+ hw->tx_ready = 1;
+ spin_unlock_irqrestore(&hw->spinlock, flags);
+ tasklet_schedule(&hw->tasklet);
+ }
+
+ mod_timer(&hw->setup_timer,
+ jiffies + msecs_to_jiffies(TL_SETUP_VERSION_QRY_TMO));
+ }
+}
+
+/*
+ * Stop any interrupts from executing so that, once this function returns,
+ * other layers of the driver can be sure they won't get any more callbacks.
+ * It must therefore be called from process context.
+ */
+void ipwireless_stop_interrupts(struct ipw_hardware *hw)
+{
+ if (!hw->shutting_down) {
+ /* Tell everyone we are going down. */
+ hw->shutting_down = 1;
+ del_timer(&hw->setup_timer);
+
+ /* Prevent the hardware from sending any more interrupts */
+ do_close_hardware(hw);
+ }
+}
+
+void ipwireless_hardware_free(struct ipw_hardware *hw)
+{
+ int i;
+ struct ipw_rx_packet *rp, *rq;
+ struct ipw_tx_packet *tp, *tq;
+
+ ipwireless_stop_interrupts(hw);
+
+ flush_scheduled_work();
+
+ for (i = 0; i < NL_NUM_OF_ADDRESSES; i++)
+ if (hw->packet_assembler[i] != NULL)
+ kfree(hw->packet_assembler[i]);
+
+ for (i = 0; i < NL_NUM_OF_PRIORITIES; i++)
+ list_for_each_entry_safe(tp, tq, &hw->tx_queue[i], queue) {
+ list_del(&tp->queue);
+ kfree(tp);
+ }
+
+ list_for_each_entry_safe(rp, rq, &hw->rx_queue, queue) {
+ list_del(&rp->queue);
+ kfree(rp);
+ }
+
+ list_for_each_entry_safe(rp, rq, &hw->rx_pool, queue) {
+ list_del(&rp->queue);
+ kfree(rp);
+ }
+ kfree(hw);
+}
+
+/*
+ * Associate the specified network with this hardware, so that the network
+ * will receive events from the hardware.
+ */
+void ipwireless_associate_network(struct ipw_hardware *hw,
+ struct ipw_network *network)
+{
+ hw->network = network;
+}
diff --git a/drivers/char/pcmcia/ipwireless/hardware.h b/drivers/char/pcmcia/ipwireless/hardware.h
new file mode 100644
index 000000000000..c83190ffb0e7
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/hardware.h
@@ -0,0 +1,64 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#ifndef _IPWIRELESS_CS_HARDWARE_H_
+#define _IPWIRELESS_CS_HARDWARE_H_
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#define IPW_CONTROL_LINE_CTS 0x0001
+#define IPW_CONTROL_LINE_DCD 0x0002
+#define IPW_CONTROL_LINE_DSR 0x0004
+#define IPW_CONTROL_LINE_RI 0x0008
+#define IPW_CONTROL_LINE_DTR 0x0010
+#define IPW_CONTROL_LINE_RTS 0x0020
+
+struct ipw_hardware;
+struct ipw_network;
+
+struct ipw_hardware *ipwireless_hardware_create(void);
+void ipwireless_hardware_free(struct ipw_hardware *hw);
+irqreturn_t ipwireless_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+int ipwireless_set_DTR(struct ipw_hardware *hw, unsigned int channel_idx,
+ int state);
+int ipwireless_set_RTS(struct ipw_hardware *hw, unsigned int channel_idx,
+ int state);
+int ipwireless_send_packet(struct ipw_hardware *hw,
+ unsigned int channel_idx,
+ unsigned char *data,
+ unsigned int length,
+ void (*packet_sent_callback) (void *cb,
+ unsigned int length),
+ void *sent_cb_data);
+void ipwireless_associate_network(struct ipw_hardware *hw,
+ struct ipw_network *net);
+void ipwireless_stop_interrupts(struct ipw_hardware *hw);
+void ipwireless_init_hardware_v1(struct ipw_hardware *hw,
+ unsigned int base_port,
+ void __iomem *attr_memory,
+ void __iomem *common_memory,
+ int is_v2_card,
+ void (*reboot_cb) (void *data),
+ void *reboot_cb_data);
+void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw);
+void ipwireless_sleep(unsigned int tenths);
+int ipwireless_dump_hardware_state(char *p, size_t limit,
+ struct ipw_hardware *hw);
+
+#endif
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
new file mode 100644
index 000000000000..00c7f8407e3e
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -0,0 +1,501 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#include "hardware.h"
+#include "network.h"
+#include "main.h"
+#include "tty.h"
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <pcmcia/version.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/device_id.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/cs.h>
+
+static struct pcmcia_device_id ipw_ids[] = {
+ PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0100),
+ PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0200),
+ PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, ipw_ids);
+
+static void ipwireless_detach(struct pcmcia_device *link);
+
+/*
+ * Module params
+ */
+/* Debug mode: more verbose, print sent/recv bytes */
+int ipwireless_debug;
+int ipwireless_loopback;
+int ipwireless_out_queue = 1;
+
+module_param_named(debug, ipwireless_debug, int, 0);
+module_param_named(loopback, ipwireless_loopback, int, 0);
+module_param_named(out_queue, ipwireless_out_queue, int, 0);
+MODULE_PARM_DESC(debug, "switch on debug messages [0]");
+MODULE_PARM_DESC(loopback,
+ "debug: enable ras_raw channel [0]");
+MODULE_PARM_DESC(out_queue, "debug: set size of outgoing queue [1]");
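+/*
+ * Example (assuming the module is built as ipwireless.ko):
+ *   modprobe ipwireless debug=1 out_queue=2
+ * enables verbose debugging and allows two outgoing packets to be queued
+ * at the hardware layer.
+ */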
+
+/* Executes in process context. */
+static void signalled_reboot_work(struct work_struct *work_reboot)
+{
+ struct ipw_dev *ipw = container_of(work_reboot, struct ipw_dev,
+ work_reboot);
+ struct pcmcia_device *link = ipw->link;
+ int ret = pccard_reset_card(link->socket);
+
+ if (ret != CS_SUCCESS)
+ cs_error(link, ResetCard, ret);
+}
+
+static void signalled_reboot_callback(void *callback_data)
+{
+ struct ipw_dev *ipw = (struct ipw_dev *) callback_data;
+
+ /* Delegate to process context. */
+ schedule_work(&ipw->work_reboot);
+}
+
+static int config_ipwireless(struct ipw_dev *ipw)
+{
+ struct pcmcia_device *link = ipw->link;
+ int ret;
+ config_info_t conf;
+ tuple_t tuple;
+ unsigned short buf[64];
+ cisparse_t parse;
+ unsigned short cor_value;
+ win_req_t request_attr_memory;
+ win_req_t request_common_memory;
+ memreq_t memreq_attr_memory;
+ memreq_t memreq_common_memory;
+
+ ipw->is_v2_card = 0;
+
+ tuple.Attributes = 0;
+ tuple.TupleData = (cisdata_t *) buf;
+ tuple.TupleDataMax = sizeof(buf);
+ tuple.TupleOffset = 0;
+
+ tuple.DesiredTuple = RETURN_FIRST_TUPLE;
+
+ ret = pcmcia_get_first_tuple(link, &tuple);
+
+ while (ret == 0) {
+ ret = pcmcia_get_tuple_data(link, &tuple);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, GetTupleData, ret);
+ goto exit0;
+ }
+ ret = pcmcia_get_next_tuple(link, &tuple);
+ }
+
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+
+ ret = pcmcia_get_first_tuple(link, &tuple);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, GetFirstTuple, ret);
+ goto exit0;
+ }
+
+ ret = pcmcia_get_tuple_data(link, &tuple);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, GetTupleData, ret);
+ goto exit0;
+ }
+
+ ret = pcmcia_parse_tuple(link, &tuple, &parse);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, ParseTuple, ret);
+ goto exit0;
+ }
+
+ link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+ link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
+ link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
+ link->io.IOAddrLines = 16;
+
+ link->irq.IRQInfo1 = parse.cftable_entry.irq.IRQInfo1;
+
+ /* 0x40 causes it to generate level mode interrupts. */
+ /* 0x04 enables IREQ pin. */
+ cor_value = parse.cftable_entry.index | 0x44;
+ link->conf.ConfigIndex = cor_value;
+
+ /* IRQ and I/O settings */
+ tuple.DesiredTuple = CISTPL_CONFIG;
+
+ ret = pcmcia_get_first_tuple(link, &tuple);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, GetFirstTuple, ret);
+ goto exit0;
+ }
+
+ ret = pcmcia_get_tuple_data(link, &tuple);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, GetTupleData, ret);
+ goto exit0;
+ }
+
+ ret = pcmcia_parse_tuple(link, &tuple, &parse);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, ParseTuple, ret);
+ goto exit0;
+ }
+ link->conf.Attributes = CONF_ENABLE_IRQ;
+ link->conf.ConfigBase = parse.config.base;
+ link->conf.Present = parse.config.rmask[0];
+ link->conf.IntType = INT_MEMORY_AND_IO;
+
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING | IRQ_HANDLE_PRESENT;
+ link->irq.Handler = ipwireless_interrupt;
+ link->irq.Instance = ipw->hardware;
+
+ ret = pcmcia_request_io(link, &link->io);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, RequestIO, ret);
+ goto exit0;
+ }
+
+ /* memory settings */
+
+ tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+
+ ret = pcmcia_get_first_tuple(link, &tuple);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, GetFirstTuple, ret);
+ goto exit1;
+ }
+
+ ret = pcmcia_get_tuple_data(link, &tuple);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, GetTupleData, ret);
+ goto exit1;
+ }
+
+ ret = pcmcia_parse_tuple(link, &tuple, &parse);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, ParseTuple, ret);
+ goto exit1;
+ }
+
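+ /*
+ * Only cards that expose a memory window get the shared-memory
+ * registers mapped; a 0x100-byte window identifies the V2/V3
+ * hardware.
+ */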
+ if (parse.cftable_entry.mem.nwin > 0) {
+ request_common_memory.Attributes =
+ WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+ request_common_memory.Base =
+ parse.cftable_entry.mem.win[0].host_addr;
+ request_common_memory.Size = parse.cftable_entry.mem.win[0].len;
+ if (request_common_memory.Size < 0x1000)
+ request_common_memory.Size = 0x1000;
+ request_common_memory.AccessSpeed = 0;
+
+ ret = pcmcia_request_window(&link, &request_common_memory,
+ &ipw->handle_common_memory);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, RequestWindow, ret);
+ goto exit1;
+ }
+
+ memreq_common_memory.CardOffset =
+ parse.cftable_entry.mem.win[0].card_addr;
+ memreq_common_memory.Page = 0;
+
+ ret = pcmcia_map_mem_page(ipw->handle_common_memory,
+ &memreq_common_memory);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, MapMemPage, ret);
+ goto exit1;
+ }
+
+ ipw->is_v2_card =
+ parse.cftable_entry.mem.win[0].len == 0x100;
+
+ ipw->common_memory = ioremap(request_common_memory.Base,
+ request_common_memory.Size);
+
+ request_attr_memory.Attributes =
+ WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
+ request_attr_memory.Base = 0;
+ request_attr_memory.Size = 0; /* this used to be 0x1000 */
+ request_attr_memory.AccessSpeed = 0;
+
+ ret = pcmcia_request_window(&link, &request_attr_memory,
+ &ipw->handle_attr_memory);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, RequestWindow, ret);
+ goto exit2;
+ }
+
+ memreq_attr_memory.CardOffset = 0;
+ memreq_attr_memory.Page = 0;
+
+ ret = pcmcia_map_mem_page(ipw->handle_attr_memory,
+ &memreq_attr_memory);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, MapMemPage, ret);
+ goto exit2;
+ }
+
+ ipw->attr_memory = ioremap(request_attr_memory.Base,
+ request_attr_memory.Size);
+ }
+
+ INIT_WORK(&ipw->work_reboot, signalled_reboot_work);
+
+ ipwireless_init_hardware_v1(ipw->hardware, link->io.BasePort1,
+ ipw->attr_memory, ipw->common_memory,
+ ipw->is_v2_card, signalled_reboot_callback,
+ ipw);
+
+ ret = pcmcia_request_irq(link, &link->irq);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, RequestIRQ, ret);
+ goto exit3;
+ }
+
+ /* Look up current Vcc */
+
+ ret = pcmcia_get_configuration_info(link, &conf);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, GetConfigurationInfo, ret);
+ goto exit4;
+ }
+
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME ": Card type %s\n",
+ ipw->is_v2_card ? "V2/V3" : "V1");
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": I/O ports 0x%04x-0x%04x, irq %d\n",
+ (unsigned int) link->io.BasePort1,
+ (unsigned int) (link->io.BasePort1 +
+ link->io.NumPorts1 - 1),
+ (unsigned int) link->irq.AssignedIRQ);
+ if (ipw->attr_memory && ipw->common_memory)
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": attr memory 0x%08lx-0x%08lx, "
+ "common memory 0x%08lx-0x%08lx\n",
+ request_attr_memory.Base,
+ request_attr_memory.Base
+ + request_attr_memory.Size - 1,
+ request_common_memory.Base,
+ request_common_memory.Base
+ + request_common_memory.Size - 1);
+
+ ipw->network = ipwireless_network_create(ipw->hardware);
+ if (!ipw->network)
+ goto exit3;
+
+ ipw->tty = ipwireless_tty_create(ipw->hardware, ipw->network,
+ ipw->nodes);
+ if (!ipw->tty)
+ goto exit3;
+
+ ipwireless_init_hardware_v2_v3(ipw->hardware);
+
+ /*
+ * Do the RequestConfiguration last, because it enables interrupts.
+ * Then we don't get any interrupts before we're ready for them.
+ */
+ ret = pcmcia_request_configuration(link, &link->conf);
+
+ if (ret != CS_SUCCESS) {
+ cs_error(link, RequestConfiguration, ret);
+ goto exit4;
+ }
+
+ link->dev_node = &ipw->nodes[0];
+
+ return 0;
+
+exit4:
+ pcmcia_disable_device(link);
+exit3:
+ if (ipw->attr_memory) {
+ iounmap(ipw->attr_memory);
+ pcmcia_release_window(ipw->handle_attr_memory);
+ pcmcia_disable_device(link);
+ }
+exit2:
+ if (ipw->common_memory) {
+ iounmap(ipw->common_memory);
+ pcmcia_release_window(ipw->handle_common_memory);
+ }
+exit1:
+ pcmcia_disable_device(link);
+exit0:
+ return -1;
+}
+
+static void release_ipwireless(struct ipw_dev *ipw)
+{
+ struct pcmcia_device *link = ipw->link;
+
+ pcmcia_disable_device(link);
+
+ if (ipw->common_memory)
+ iounmap(ipw->common_memory);
+ if (ipw->attr_memory)
+ iounmap(ipw->attr_memory);
+ if (ipw->common_memory)
+ pcmcia_release_window(ipw->handle_common_memory);
+ if (ipw->attr_memory)
+ pcmcia_release_window(ipw->handle_attr_memory);
+ pcmcia_disable_device(link);
+}
+
+/*
+ * ipwireless_attach() creates an "instance" of the driver, allocating
+ * local data structures for one device (one interface). The device
+ * is registered with Card Services.
+ *
+ * The pcmcia_device structure is initialized, but we don't actually
+ * configure the card at this point -- we wait until we receive a
+ * card insertion event.
+ */
+static int ipwireless_attach(struct pcmcia_device *link)
+{
+ struct ipw_dev *ipw;
+ int ret;
+
+ ipw = kzalloc(sizeof(struct ipw_dev), GFP_KERNEL);
+ if (!ipw)
+ return -ENOMEM;
+
+ ipw->link = link;
+ link->priv = ipw;
+ link->irq.Instance = ipw;
+
+ /* Link this device into our device list. */
+ link->dev_node = &ipw->nodes[0];
+
+ ipw->hardware = ipwireless_hardware_create();
+ if (!ipw->hardware) {
+ kfree(ipw);
+ return -ENOMEM;
+ }
+ /* Configure the card now; there is no separate RegisterClient step */
+
+ ret = config_ipwireless(ipw);
+
+ if (ret != 0) {
+ cs_error(link, RegisterClient, ret);
+ ipwireless_detach(link);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * This deletes a driver "instance". The device is de-registered with
+ * Card Services. If it has been released, all local data structures
+ * are freed. Otherwise, the structures will be freed when the device
+ * is released.
+ */
+static void ipwireless_detach(struct pcmcia_device *link)
+{
+ struct ipw_dev *ipw = link->priv;
+
+ release_ipwireless(ipw);
+
+ /* Break the link with Card Services */
+ if (link)
+ pcmcia_disable_device(link);
+
+ if (ipw->tty != NULL)
+ ipwireless_tty_free(ipw->tty);
+ if (ipw->network != NULL)
+ ipwireless_network_free(ipw->network);
+ if (ipw->hardware != NULL)
+ ipwireless_hardware_free(ipw->hardware);
+ kfree(ipw);
+}
+
+static struct pcmcia_driver me = {
+ .owner = THIS_MODULE,
+ .probe = ipwireless_attach,
+ .remove = ipwireless_detach,
+ .drv = { .name = IPWIRELESS_PCCARD_NAME },
+ .id_table = ipw_ids
+};
+
+/*
+ * Module insertion: initialise the module and register the driver
+ * with the PCMCIA core.
+ */
+static int __init init_ipwireless(void)
+{
+ int ret;
+
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME " "
+ IPWIRELESS_PCMCIA_VERSION " by " IPWIRELESS_PCMCIA_AUTHOR "\n");
+
+ ret = ipwireless_tty_init();
+ if (ret != 0)
+ return ret;
+
+ ret = pcmcia_register_driver(&me);
+ if (ret != 0)
+ ipwireless_tty_release();
+
+ return ret;
+}
+
+/*
+ * Module removal
+ */
+static void __exit exit_ipwireless(void)
+{
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME " "
+ IPWIRELESS_PCMCIA_VERSION " removed\n");
+
+ pcmcia_unregister_driver(&me);
+ ipwireless_tty_release();
+}
+
+module_init(init_ipwireless);
+module_exit(exit_ipwireless);
+
+MODULE_AUTHOR(IPWIRELESS_PCMCIA_AUTHOR);
+MODULE_DESCRIPTION(IPWIRELESS_PCCARD_NAME " " IPWIRELESS_PCMCIA_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/pcmcia/ipwireless/main.h b/drivers/char/pcmcia/ipwireless/main.h
new file mode 100644
index 000000000000..1bfdcc8d47d6
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/main.h
@@ -0,0 +1,70 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#ifndef _IPWIRELESS_CS_H_
+#define _IPWIRELESS_CS_H_
+
+#include <linux/sched.h>
+#include <linux/types.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "hardware.h"
+
+#define IPWIRELESS_PCCARD_NAME "ipwireless"
+#define IPWIRELESS_PCMCIA_VERSION "1.1"
+#define IPWIRELESS_PCMCIA_AUTHOR \
+ "Stephen Blackheath, Ben Martel, Jiri Kosina and David Sterba"
+
+#define IPWIRELESS_TX_QUEUE_SIZE 262144
+#define IPWIRELESS_RX_QUEUE_SIZE 262144
+
+#define IPWIRELESS_STATE_DEBUG
+
+struct ipw_hardware;
+struct ipw_network;
+struct ipw_tty;
+
+struct ipw_dev {
+ struct pcmcia_device *link;
+ int is_v2_card;
+ window_handle_t handle_attr_memory;
+ void __iomem *attr_memory;
+ window_handle_t handle_common_memory;
+ void __iomem *common_memory;
+ dev_node_t nodes[2];
+ /* Reference to attribute memory, containing CIS data */
+ void *attribute_memory;
+
+ /* Hardware context */
+ struct ipw_hardware *hardware;
+ /* Network layer context */
+ struct ipw_network *network;
+ /* TTY device context */
+ struct ipw_tty *tty;
+ struct work_struct work_reboot;
+};
+
+/* Module parameters */
+extern int ipwireless_debug;
+extern int ipwireless_loopback;
+extern int ipwireless_out_queue;
+
+#endif
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c
new file mode 100644
index 000000000000..ff35230058d3
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/network.c
@@ -0,0 +1,512 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/skbuff.h>
+
+#include "network.h"
+#include "hardware.h"
+#include "main.h"
+#include "tty.h"
+
+#define MAX_OUTGOING_PACKETS_QUEUED ipwireless_out_queue
+#define MAX_ASSOCIATED_TTYS 2
+
+#define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
+
+struct ipw_network {
+ /* Hardware context, used for calls to hardware layer. */
+ struct ipw_hardware *hardware;
+ /* Context for the kernel's ppp_generic functionality */
+ struct ppp_channel *ppp_channel;
+ /* tty contexts associated with each IPW channel */
+ struct ipw_tty *associated_ttys[NO_OF_IPW_CHANNELS][MAX_ASSOCIATED_TTYS];
+ /* True if ppp needs waking up once we're ready to xmit */
+ int ppp_blocked;
+ /* Number of packets queued up in hardware module. */
+ int outgoing_packets_queued;
+ /* Spinlock to avoid interrupts during shutdown */
+ spinlock_t spinlock;
+ struct mutex close_lock;
+
+ /* PPP ioctl data, not actually used anywhere */
+ unsigned int flags;
+ unsigned int rbits;
+ u32 xaccm[8];
+ u32 raccm;
+ int mru;
+
+ int shutting_down;
+ unsigned int ras_control_lines;
+
+ struct work_struct work_go_online;
+ struct work_struct work_go_offline;
+};
+
+
+#ifdef IPWIRELESS_STATE_DEBUG
+int ipwireless_dump_network_state(char *p, size_t limit,
+ struct ipw_network *network)
+{
+ return snprintf(p, limit,
+ "debug: ppp_blocked=%d\n"
+ "debug: outgoing_packets_queued=%d\n"
+ "debug: network.shutting_down=%d\n",
+ network->ppp_blocked,
+ network->outgoing_packets_queued,
+ network->shutting_down);
+}
+#endif
+
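+/*
+ * Callback from the hardware layer once a queued packet has been sent.
+ * Decrement the outgoing count and, if the PPP core was blocked on us,
+ * wake it up again.
+ */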
+static void notify_packet_sent(void *callback_data, unsigned int packet_length)
+{
+ struct ipw_network *network = callback_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&network->spinlock, flags);
+ network->outgoing_packets_queued--;
+ if (network->ppp_channel != NULL) {
+ if (network->ppp_blocked) {
+ network->ppp_blocked = 0;
+ spin_unlock_irqrestore(&network->spinlock, flags);
+ ppp_output_wakeup(network->ppp_channel);
+ if (ipwireless_debug)
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": ppp unblocked\n");
+ } else
+ spin_unlock_irqrestore(&network->spinlock, flags);
+ } else
+ spin_unlock_irqrestore(&network->spinlock, flags);
+}
+
+/*
+ * Called by the ppp system when it has a packet to send to the hardware.
+ */
+static int ipwireless_ppp_start_xmit(struct ppp_channel *ppp_channel,
+ struct sk_buff *skb)
+{
+ struct ipw_network *network = ppp_channel->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&network->spinlock, flags);
+ if (network->outgoing_packets_queued < MAX_OUTGOING_PACKETS_QUEUED) {
+ unsigned char *buf;
+ static unsigned char header[] = {
+ PPP_ALLSTATIONS, /* 0xff */
+ PPP_UI, /* 0x03 */
+ };
+ int ret;
+
+ network->outgoing_packets_queued++;
+ spin_unlock_irqrestore(&network->spinlock, flags);
+
+ /*
+ * If we have the requested amount of headroom in the skb we
+ * were handed, then we can add the header efficiently.
+ */
+ if (skb_headroom(skb) >= 2) {
+ memcpy(skb_push(skb, 2), header, 2);
+ ret = ipwireless_send_packet(network->hardware,
+ IPW_CHANNEL_RAS, skb->data,
+ skb->len,
+ notify_packet_sent,
+ network);
+ if (ret == -1) {
+ skb_pull(skb, 2);
+ return 0;
+ }
+ } else {
+ /* Otherwise (rarely) we do it inefficiently. */
+ buf = kmalloc(skb->len + 2, GFP_ATOMIC);
+ if (!buf)
+ return 0;
+ memcpy(buf + 2, skb->data, skb->len);
+ memcpy(buf, header, 2);
+ ret = ipwireless_send_packet(network->hardware,
+ IPW_CHANNEL_RAS, buf,
+ skb->len + 2,
+ notify_packet_sent,
+ network);
+ kfree(buf);
+ if (ret == -1)
+ return 0;
+ }
+ kfree_skb(skb);
+ return 1;
+ } else {
+ /*
+ * Otherwise reject the packet, and flag that the ppp system
+ * needs to be unblocked once we are ready to send.
+ */
+ network->ppp_blocked = 1;
+ spin_unlock_irqrestore(&network->spinlock, flags);
+ return 0;
+ }
+}
+
+/* Handle an ioctl call that has come in via ppp (a copy of ppp_async_ioctl()). */
+static int ipwireless_ppp_ioctl(struct ppp_channel *ppp_channel,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ipw_network *network = ppp_channel->private;
+ int err, val;
+ u32 accm[8];
+ int __user *user_arg = (int __user *) arg;
+
+ err = -EFAULT;
+ switch (cmd) {
+ case PPPIOCGFLAGS:
+ val = network->flags | network->rbits;
+ if (put_user(val, user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSFLAGS:
+ if (get_user(val, user_arg))
+ break;
+ network->flags = val & ~SC_RCV_BITS;
+ network->rbits = val & SC_RCV_BITS;
+ err = 0;
+ break;
+
+ case PPPIOCGASYNCMAP:
+ if (put_user(network->xaccm[0], user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSASYNCMAP:
+ if (get_user(network->xaccm[0], user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGRASYNCMAP:
+ if (put_user(network->raccm, user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSRASYNCMAP:
+ if (get_user(network->raccm, user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCGXASYNCMAP:
+ if (copy_to_user((void __user *) arg, network->xaccm,
+ sizeof(network->xaccm)))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSXASYNCMAP:
+ if (copy_from_user(accm, (void __user *) arg, sizeof(accm)))
+ break;
+ accm[2] &= ~0x40000000U; /* can't escape 0x5e */
+ accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
+ memcpy(network->xaccm, accm, sizeof(network->xaccm));
+ err = 0;
+ break;
+
+ case PPPIOCGMRU:
+ if (put_user(network->mru, user_arg))
+ break;
+ err = 0;
+ break;
+
+ case PPPIOCSMRU:
+ if (get_user(val, user_arg))
+ break;
+ if (val < PPP_MRU)
+ val = PPP_MRU;
+ network->mru = val;
+ err = 0;
+ break;
+
+ default:
+ err = -ENOTTY;
+ }
+
+ return err;
+}
+
+static struct ppp_channel_ops ipwireless_ppp_channel_ops = {
+ .start_xmit = ipwireless_ppp_start_xmit,
+ .ioctl = ipwireless_ppp_ioctl
+};
+
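+/*
+ * Work item scheduled when going online: allocate and register a PPP
+ * channel with default async parameters, unless one is already registered.
+ */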
+static void do_go_online(struct work_struct *work_go_online)
+{
+ struct ipw_network *network =
+ container_of(work_go_online, struct ipw_network,
+ work_go_online);
+ unsigned long flags;
+
+ spin_lock_irqsave(&network->spinlock, flags);
+ if (!network->ppp_channel) {
+ struct ppp_channel *channel;
+
+ spin_unlock_irqrestore(&network->spinlock, flags);
+ channel = kzalloc(sizeof(struct ppp_channel), GFP_KERNEL);
+ if (!channel) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": unable to allocate PPP channel\n");
+ return;
+ }
+ channel->private = network;
+ channel->mtu = 16384; /* Wild guess */
+ channel->hdrlen = 2;
+ channel->ops = &ipwireless_ppp_channel_ops;
+
+ network->flags = 0;
+ network->rbits = 0;
+ network->mru = PPP_MRU;
+ memset(network->xaccm, 0, sizeof(network->xaccm));
+ network->xaccm[0] = ~0U;
+ network->xaccm[3] = 0x60000000U;
+ network->raccm = ~0U;
+ ppp_register_channel(channel);
+ spin_lock_irqsave(&network->spinlock, flags);
+ network->ppp_channel = channel;
+ }
+ spin_unlock_irqrestore(&network->spinlock, flags);
+}
+
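+/*
+ * Work item scheduled when going offline: unregister the PPP channel,
+ * if one is registered.
+ */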
+static void do_go_offline(struct work_struct *work_go_offline)
+{
+ struct ipw_network *network =
+ container_of(work_go_offline, struct ipw_network,
+ work_go_offline);
+ unsigned long flags;
+
+ mutex_lock(&network->close_lock);
+ spin_lock_irqsave(&network->spinlock, flags);
+ if (network->ppp_channel != NULL) {
+ struct ppp_channel *channel = network->ppp_channel;
+
+ network->ppp_channel = NULL;
+ spin_unlock_irqrestore(&network->spinlock, flags);
+ mutex_unlock(&network->close_lock);
+ ppp_unregister_channel(channel);
+ } else {
+ spin_unlock_irqrestore(&network->spinlock, flags);
+ mutex_unlock(&network->close_lock);
+ }
+}
+
+void ipwireless_network_notify_control_line_change(struct ipw_network *network,
+ unsigned int channel_idx,
+ unsigned int control_lines,
+ unsigned int changed_mask)
+{
+ int i;
+
+ if (channel_idx == IPW_CHANNEL_RAS)
+ network->ras_control_lines = control_lines;
+
+ for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) {
+ struct ipw_tty *tty =
+ network->associated_ttys[channel_idx][i];
+
+ /*
+ * Pass the control line change on to every tty currently
+ * associated with this channel.
+ */
+ if (tty)
+ ipwireless_tty_notify_control_line_change(tty,
+ channel_idx,
+ control_lines,
+ changed_mask);
+ }
+}
+
+/*
+ * Some firmware versions stuff packets with 0xff 0x03 (PPP: ALLSTATIONS, UI)
+ * bytes. These are required on packets we send, but are not always present
+ * on packets we receive.
+ */
+static struct sk_buff *ipw_packet_received_skb(unsigned char *data,
+ unsigned int length)
+{
+ struct sk_buff *skb;
+
+ if (length > 2 && data[0] == PPP_ALLSTATIONS && data[1] == PPP_UI) {
+ length -= 2;
+ data += 2;
+ }
+
+ skb = dev_alloc_skb(length + 4);
+ skb_reserve(skb, 2);
+ memcpy(skb_put(skb, length), data, length);
+
+ return skb;
+}
+
+void ipwireless_network_packet_received(struct ipw_network *network,
+ unsigned int channel_idx,
+ unsigned char *data,
+ unsigned int length)
+{
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < MAX_ASSOCIATED_TTYS; i++) {
+ struct ipw_tty *tty = network->associated_ttys[channel_idx][i];
+
+ /*
+ * If it's associated with a tty (other than the RAS channel
+ * when we're online), then send the data to that tty. The RAS
+ * channel's data is handled above - it always goes through
+ * ppp_generic.
+ */
+ if (tty && channel_idx == IPW_CHANNEL_RAS
+ && (network->ras_control_lines &
+ IPW_CONTROL_LINE_DCD) != 0
+ && ipwireless_tty_is_modem(tty)) {
+ /*
+ * If data came in on the RAS channel and this tty is
+ * the modem tty, and we are online, then we send it to
+ * the PPP layer.
+ */
+ mutex_lock(&network->close_lock);
+ spin_lock_irqsave(&network->spinlock, flags);
+ if (network->ppp_channel != NULL) {
+ struct sk_buff *skb;
+
+ spin_unlock_irqrestore(&network->spinlock,
+ flags);
+
+ /* Send the data to the ppp_generic module. */
+ skb = ipw_packet_received_skb(data, length);
+ ppp_input(network->ppp_channel, skb);
+ } else
+ spin_unlock_irqrestore(&network->spinlock,
+ flags);
+ mutex_unlock(&network->close_lock);
+ }
+ /* Otherwise we send it out the tty. */
+ else
+ ipwireless_tty_received(tty, data, length);
+ }
+}
+
+struct ipw_network *ipwireless_network_create(struct ipw_hardware *hw)
+{
+ struct ipw_network *network =
+ kzalloc(sizeof(struct ipw_network), GFP_ATOMIC);
+
+ if (!network)
+ return NULL;
+
+ spin_lock_init(&network->spinlock);
+ mutex_init(&network->close_lock);
+
+ network->hardware = hw;
+
+ INIT_WORK(&network->work_go_online, do_go_online);
+ INIT_WORK(&network->work_go_offline, do_go_offline);
+
+ ipwireless_associate_network(hw, network);
+
+ return network;
+}
+
+void ipwireless_network_free(struct ipw_network *network)
+{
+ network->shutting_down = 1;
+
+ ipwireless_ppp_close(network);
+ flush_scheduled_work();
+
+ ipwireless_stop_interrupts(network->hardware);
+ ipwireless_associate_network(network->hardware, NULL);
+
+ kfree(network);
+}
+
+void ipwireless_associate_network_tty(struct ipw_network *network,
+ unsigned int channel_idx,
+ struct ipw_tty *tty)
+{
+ int i;
+
+ for (i = 0; i < MAX_ASSOCIATED_TTYS; i++)
+ if (network->associated_ttys[channel_idx][i] == NULL) {
+ network->associated_ttys[channel_idx][i] = tty;
+ break;
+ }
+}
+
+void ipwireless_disassociate_network_ttys(struct ipw_network *network,
+ unsigned int channel_idx)
+{
+ int i;
+
+ for (i = 0; i < MAX_ASSOCIATED_TTYS; i++)
+ network->associated_ttys[channel_idx][i] = NULL;
+}
+
+void ipwireless_ppp_open(struct ipw_network *network)
+{
+ if (ipwireless_debug)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": online\n");
+ schedule_work(&network->work_go_online);
+}
+
+void ipwireless_ppp_close(struct ipw_network *network)
+{
+ /* Disconnect from the wireless network. */
+ if (ipwireless_debug)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME ": offline\n");
+ schedule_work(&network->work_go_offline);
+}
+
+int ipwireless_ppp_channel_index(struct ipw_network *network)
+{
+ int ret = -1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&network->spinlock, flags);
+ if (network->ppp_channel != NULL)
+ ret = ppp_channel_index(network->ppp_channel);
+ spin_unlock_irqrestore(&network->spinlock, flags);
+
+ return ret;
+}
+
+int ipwireless_ppp_unit_number(struct ipw_network *network)
+{
+ int ret = -1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&network->spinlock, flags);
+ if (network->ppp_channel != NULL)
+ ret = ppp_unit_number(network->ppp_channel);
+ spin_unlock_irqrestore(&network->spinlock, flags);
+
+ return ret;
+}
diff --git a/drivers/char/pcmcia/ipwireless/network.h b/drivers/char/pcmcia/ipwireless/network.h
new file mode 100644
index 000000000000..b0e1e952fd14
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/network.h
@@ -0,0 +1,55 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#ifndef _IPWIRELESS_CS_NETWORK_H_
+#define _IPWIRELESS_CS_NETWORK_H_
+
+#include <linux/types.h>
+
+struct ipw_network;
+struct ipw_tty;
+struct ipw_hardware;
+
+/* Definitions of the different channels on the PCMCIA UE */
+#define IPW_CHANNEL_RAS 0
+#define IPW_CHANNEL_DIALLER 1
+#define IPW_CHANNEL_CONSOLE 2
+#define NO_OF_IPW_CHANNELS 5
+
+void ipwireless_network_notify_control_line_change(struct ipw_network *net,
+ unsigned int channel_idx, unsigned int control_lines,
+ unsigned int changed_mask);
+void ipwireless_network_packet_received(struct ipw_network *net,
+ unsigned int channel_idx, unsigned char *data,
+ unsigned int length);
+struct ipw_network *ipwireless_network_create(struct ipw_hardware *hw);
+void ipwireless_network_free(struct ipw_network *net);
+void ipwireless_associate_network_tty(struct ipw_network *net,
+ unsigned int channel_idx, struct ipw_tty *tty);
+void ipwireless_disassociate_network_ttys(struct ipw_network *net,
+ unsigned int channel_idx);
+
+void ipwireless_ppp_open(struct ipw_network *net);
+
+void ipwireless_ppp_close(struct ipw_network *net);
+int ipwireless_ppp_channel_index(struct ipw_network *net);
+int ipwireless_ppp_unit_number(struct ipw_network *net);
+
+int ipwireless_dump_network_state(char *p, size_t limit,
+ struct ipw_network *net);
+
+#endif
diff --git a/drivers/char/pcmcia/ipwireless/setup_protocol.h b/drivers/char/pcmcia/ipwireless/setup_protocol.h
new file mode 100644
index 000000000000..9d6bcc77c73c
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/setup_protocol.h
@@ -0,0 +1,108 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#ifndef _IPWIRELESS_CS_SETUP_PROTOCOL_H_
+#define _IPWIRELESS_CS_SETUP_PROTOCOL_H_
+
+/* Version of the setup protocol and transport protocols */
+#define TL_SETUP_VERSION 1
+
+#define TL_SETUP_VERSION_QRY_TMO 1000
+#define TL_SETUP_MAX_VERSION_QRY 30
+
+/* Message numbers 0-9 are obsolete and must not be reused! */
+#define TL_SETUP_SIGNO_GET_VERSION_QRY 10
+#define TL_SETUP_SIGNO_GET_VERSION_RSP 11
+#define TL_SETUP_SIGNO_CONFIG_MSG 12
+#define TL_SETUP_SIGNO_CONFIG_DONE_MSG 13
+#define TL_SETUP_SIGNO_OPEN_MSG 14
+#define TL_SETUP_SIGNO_CLOSE_MSG 15
+
+#define TL_SETUP_SIGNO_INFO_MSG 20
+#define TL_SETUP_SIGNO_INFO_MSG_ACK 21
+
+#define TL_SETUP_SIGNO_REBOOT_MSG 22
+#define TL_SETUP_SIGNO_REBOOT_MSG_ACK 23
+
+/* Synchronous start-messages */
+struct tl_setup_get_version_qry {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_GET_VERSION_QRY */
+} __attribute__ ((__packed__));
+
+struct tl_setup_get_version_rsp {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_GET_VERSION_RSP */
+ unsigned char version; /* TL_SETUP_VERSION */
+} __attribute__ ((__packed__));
+
+struct tl_setup_config_msg {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_CONFIG_MSG */
+ unsigned char port_no;
+ unsigned char prio_data;
+ unsigned char prio_ctrl;
+} __attribute__ ((__packed__));
+
+struct tl_setup_config_done_msg {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_CONFIG_DONE_MSG */
+} __attribute__ ((__packed__));
+
+/* Asynchronous messages */
+struct tl_setup_open_msg {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_OPEN_MSG */
+ unsigned char port_no;
+} __attribute__ ((__packed__));
+
+struct tl_setup_close_msg {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_CLOSE_MSG */
+ unsigned char port_no;
+} __attribute__ ((__packed__));
+
+/* Driver type - for use in tl_setup_info_msg.driver_type */
+#define COMM_DRIVER 0
+#define NDISWAN_DRIVER 1
+#define NDISWAN_DRIVER_MAJOR_VERSION 2
+#define NDISWAN_DRIVER_MINOR_VERSION 0
+
+/*
+ * It should not matter when this message arrives, as we simply store the
+ * results and send the ACK.
+ */
+struct tl_setup_info_msg {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_INFO_MSG */
+ unsigned char driver_type;
+ unsigned char major_version;
+ unsigned char minor_version;
+} __attribute__ ((__packed__));
+
+struct tl_setup_info_msgAck {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_INFO_MSG_ACK */
+} __attribute__ ((__packed__));
+
+struct TlSetupRebootMsgAck {
+ unsigned char sig_no; /* TL_SETUP_SIGNO_REBOOT_MSG_ACK */
+} __attribute__ ((__packed__));
+
+/* Define a union of all the messages that the driver can receive from the card. */
+union ipw_setup_rx_msg {
+ unsigned char sig_no;
+ struct tl_setup_get_version_rsp version_rsp_msg;
+ struct tl_setup_open_msg open_msg;
+ struct tl_setup_close_msg close_msg;
+ struct tl_setup_info_msg InfoMsg;
+ struct tl_setup_info_msgAck info_msg_ack;
+} __attribute__ ((__packed__));
+
+#endif /* _IPWIRELESS_CS_SETUP_PROTOCOL_H_ */
diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
new file mode 100644
index 000000000000..42f3815c5ce3
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/tty.c
@@ -0,0 +1,688 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/sched.h>
+#include <linux/serial.h>
+#include <linux/slab.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+
+#include "tty.h"
+#include "network.h"
+#include "hardware.h"
+#include "main.h"
+
+#define IPWIRELESS_PCMCIA_START (0)
+#define IPWIRELESS_PCMCIA_MINORS (24)
+#define IPWIRELESS_PCMCIA_MINOR_RANGE (8)
+
+#define TTYTYPE_MODEM (0)
+#define TTYTYPE_MONITOR (1)
+#define TTYTYPE_RAS_RAW (2)
+
+struct ipw_tty {
+ int index;
+ struct ipw_hardware *hardware;
+ unsigned int channel_idx;
+ unsigned int secondary_channel_idx;
+ int tty_type;
+ struct ipw_network *network;
+ struct tty_struct *linux_tty;
+ int open_count;
+ unsigned int control_lines;
+ struct mutex ipw_tty_mutex;
+ int tx_bytes_queued;
+ int closing;
+};
+
+static struct ipw_tty *ttys[IPWIRELESS_PCMCIA_MINORS];
+
+static struct tty_driver *ipw_tty_driver;
+
+static char *tty_type_name(int tty_type)
+{
+ static char *channel_names[] = {
+ "modem",
+ "monitor",
+ "RAS-raw"
+ };
+
+ return channel_names[tty_type];
+}
+
+static void report_registering(struct ipw_tty *tty)
+{
+ char *iftype = tty_type_name(tty->tty_type);
+
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": registering %s device ttyIPWp%d\n", iftype, tty->index);
+}
+
+static void report_deregistering(struct ipw_tty *tty)
+{
+ char *iftype = tty_type_name(tty->tty_type);
+
+ printk(KERN_INFO IPWIRELESS_PCCARD_NAME
+ ": deregistering %s device ttyIPWp%d\n", iftype,
+ tty->index);
+}
+
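+/*
+ * Map a tty minor number onto its ipw_tty. Returns NULL if the minor is
+ * out of range, or refers to the RAS-raw range while loopback mode is off.
+ */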
+static struct ipw_tty *get_tty(int minor)
+{
+ if (minor < ipw_tty_driver->minor_start
+ || minor >= ipw_tty_driver->minor_start +
+ IPWIRELESS_PCMCIA_MINORS)
+ return NULL;
+ else {
+ int minor_offset = minor - ipw_tty_driver->minor_start;
+
+ /*
+ * The 'ras_raw' channel is only available when 'loopback' mode
+ * is enabled.
+ * Its minor numbers start at 16 (_RANGE * _RAS_RAW).
+ */
+ if (!ipwireless_loopback &&
+ minor_offset >=
+ IPWIRELESS_PCMCIA_MINOR_RANGE * TTYTYPE_RAS_RAW)
+ return NULL;
+
+ return ttys[minor_offset];
+ }
+}
+
+static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
+{
+ int minor = linux_tty->index;
+ struct ipw_tty *tty = get_tty(minor);
+
+ if (!tty)
+ return -ENODEV;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+
+ if (tty->closing) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -ENODEV;
+ }
+ if (tty->open_count == 0)
+ tty->tx_bytes_queued = 0;
+
+ tty->open_count++;
+
+ tty->linux_tty = linux_tty;
+ linux_tty->driver_data = tty;
+ linux_tty->low_latency = 1;
+
+ if (tty->tty_type == TTYTYPE_MODEM)
+ ipwireless_ppp_open(tty->network);
+
+ mutex_unlock(&tty->ipw_tty_mutex);
+
+ return 0;
+}
+
+static void do_ipw_close(struct ipw_tty *tty)
+{
+ tty->open_count--;
+
+ if (tty->open_count == 0) {
+ struct tty_struct *linux_tty = tty->linux_tty;
+
+ if (linux_tty != NULL) {
+ tty->linux_tty = NULL;
+ linux_tty->driver_data = NULL;
+
+ if (tty->tty_type == TTYTYPE_MODEM)
+ ipwireless_ppp_close(tty->network);
+ }
+ }
+}
+
+static void ipw_hangup(struct tty_struct *linux_tty)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+
+ if (!tty)
+ return;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+ if (tty->open_count == 0) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+
+ do_ipw_close(tty);
+
+ mutex_unlock(&tty->ipw_tty_mutex);
+}
+
+static void ipw_close(struct tty_struct *linux_tty, struct file *filp)
+{
+ ipw_hangup(linux_tty);
+}
+
+/* Take data received from the hardware and send it out to the tty. */
+void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
+ unsigned int length)
+{
+ struct tty_struct *linux_tty;
+ int work = 0;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+ linux_tty = tty->linux_tty;
+ if (linux_tty == NULL) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+
+ if (!tty->open_count) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return;
+ }
+ mutex_unlock(&tty->ipw_tty_mutex);
+
+ work = tty_insert_flip_string(linux_tty, data, length);
+
+ if (work != length)
+ printk(KERN_DEBUG IPWIRELESS_PCCARD_NAME
+ ": %d chars not inserted to flip buffer!\n",
+ length - work);
+
+ /*
+ * This may sleep if ->low_latency is set
+ */
+ if (work)
+ tty_flip_buffer_push(linux_tty);
+}
+
+static void ipw_write_packet_sent_callback(void *callback_data,
+ unsigned int packet_length)
+{
+ struct ipw_tty *tty = callback_data;
+
+ /*
+ * Packet has been sent, so we subtract the number of bytes from our
+ * tally of outstanding TX bytes.
+ */
+ tty->tx_bytes_queued -= packet_length;
+}
+
+static int ipw_write(struct tty_struct *linux_tty,
+ const unsigned char *buf, int count)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+ int room, ret;
+
+ if (!tty)
+ return -ENODEV;
+
+ mutex_lock(&tty->ipw_tty_mutex);
+ if (!tty->open_count) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return -EINVAL;
+ }
+
+ room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
+ if (room < 0)
+ room = 0;
+ /* Don't allow caller to write any more than we have room for */
+ if (count > room)
+ count = room;
+
+ if (count == 0) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return 0;
+ }
+
+ ret = ipwireless_send_packet(tty->hardware, IPW_CHANNEL_RAS,
+ (unsigned char *) buf, count,
+ ipw_write_packet_sent_callback, tty);
+ if (ret == -1) {
+ mutex_unlock(&tty->ipw_tty_mutex);
+ return 0;
+ }
+
+ tty->tx_bytes_queued += count;
+ mutex_unlock(&tty->ipw_tty_mutex);
+
+ return count;
+}
+
+static int ipw_write_room(struct tty_struct *linux_tty)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+ int room;
+
+ if (!tty)
+ return -ENODEV;
+
+ if (!tty->open_count)
+ return -EINVAL;
+
+ room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
+ if (room < 0)
+ room = 0;
+
+ return room;
+}
+
+static int ipwireless_get_serial_info(struct ipw_tty *tty,
+ struct serial_struct __user *retinfo)
+{
+ struct serial_struct tmp;
+
+ if (!retinfo)
+ return -EFAULT;
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.type = PORT_UNKNOWN;
+ tmp.line = tty->index;
+ tmp.port = 0;
+ tmp.irq = 0;
+ tmp.flags = 0;
+ tmp.baud_base = 115200;
+ tmp.close_delay = 0;
+ tmp.closing_wait = 0;
+ tmp.custom_divisor = 0;
+ tmp.hub6 = 0;
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+
+ if (!tty)
+ return -ENODEV;
+
+ if (!tty->open_count)
+ return -EINVAL;
+
+ return tty->tx_bytes_queued;
+}
+
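+/* Translate the hardware control-line state into TIOCM_* flags. */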
+static int get_control_lines(struct ipw_tty *tty)
+{
+ unsigned int my = tty->control_lines;
+ unsigned int out = 0;
+
+ if (my & IPW_CONTROL_LINE_RTS)
+ out |= TIOCM_RTS;
+ if (my & IPW_CONTROL_LINE_DTR)
+ out |= TIOCM_DTR;
+ if (my & IPW_CONTROL_LINE_CTS)
+ out |= TIOCM_CTS;
+ if (my & IPW_CONTROL_LINE_DSR)
+ out |= TIOCM_DSR;
+ if (my & IPW_CONTROL_LINE_DCD)
+ out |= TIOCM_CD;
+
+ return out;
+}
+
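+/*
+ * Apply the TIOCM_* set/clear masks to the hardware, mirroring RTS/DTR
+ * changes onto the secondary channel when one is configured.
+ */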
+static int set_control_lines(struct ipw_tty *tty, unsigned int set,
+ unsigned int clear)
+{
+ int ret;
+
+ if (set & TIOCM_RTS) {
+ ret = ipwireless_set_RTS(tty->hardware, tty->channel_idx, 1);
+ if (ret)
+ return ret;
+ if (tty->secondary_channel_idx != -1) {
+ ret = ipwireless_set_RTS(tty->hardware,
+ tty->secondary_channel_idx, 1);
+ if (ret)
+ return ret;
+ }
+ }
+ if (set & TIOCM_DTR) {
+ ret = ipwireless_set_DTR(tty->hardware, tty->channel_idx, 1);
+ if (ret)
+ return ret;
+ if (tty->secondary_channel_idx != -1) {
+ ret = ipwireless_set_DTR(tty->hardware,
+ tty->secondary_channel_idx, 1);
+ if (ret)
+ return ret;
+ }
+ }
+ if (clear & TIOCM_RTS) {
+ ret = ipwireless_set_RTS(tty->hardware, tty->channel_idx, 0);
+ if (tty->secondary_channel_idx != -1) {
+ ret = ipwireless_set_RTS(tty->hardware,
+ tty->secondary_channel_idx, 0);
+ if (ret)
+ return ret;
+ }
+ }
+ if (clear & TIOCM_DTR) {
+ ret = ipwireless_set_DTR(tty->hardware, tty->channel_idx, 0);
+ if (tty->secondary_channel_idx != -1) {
+ ret = ipwireless_set_DTR(tty->hardware,
+ tty->secondary_channel_idx, 0);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+
+ if (!tty)
+ return -ENODEV;
+
+ if (!tty->open_count)
+ return -EINVAL;
+
+ return get_control_lines(tty);
+}
+
+static int
+ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+
+ if (!tty)
+ return -ENODEV;
+
+ if (!tty->open_count)
+ return -EINVAL;
+
+ return set_control_lines(tty, set, clear);
+}
+
+static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ipw_tty *tty = linux_tty->driver_data;
+
+ if (!tty)
+ return -ENODEV;
+
+ if (!tty->open_count)
+ return -EINVAL;
+
+ switch (cmd) {
+ case TIOCGSERIAL:
+ return ipwireless_get_serial_info(tty, (void __user *) arg);
+
+ case TIOCSSERIAL:
+ return 0; /* Keeps the PCMCIA scripts happy. */
+ }
+
+ if (tty->tty_type == TTYTYPE_MODEM) {
+ switch (cmd) {
+ case PPPIOCGCHAN:
+ {
+ int chan = ipwireless_ppp_channel_index(
+ tty->network);
+
+ if (chan < 0)
+ return -ENODEV;
+ if (put_user(chan, (int __user *) arg))
+ return -EFAULT;
+ }
+ return 0;
+
+ case PPPIOCGUNIT:
+ {
+ int unit = ipwireless_ppp_unit_number(
+ tty->network);
+
+ if (unit < 0)
+ return -ENODEV;
+ if (put_user(unit, (int __user *) arg))
+ return -EFAULT;
+ }
+ return 0;
+
+ case TCGETS:
+ case TCGETA:
+ return n_tty_ioctl(linux_tty, file, cmd, arg);
+
+ case TCFLSH:
+ return n_tty_ioctl(linux_tty, file, cmd, arg);
+
+ case FIONREAD:
+ {
+ int val = 0;
+
+ if (put_user(val, (int __user *) arg))
+ return -EFAULT;
+ }
+ return 0;
+ }
+ }
+
+ return -ENOIOCTLCMD;
+}
+
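+/*
+ * Allocate one tty at minor 'j', register its device node and associate it
+ * with its primary (and optional secondary) hardware channel.
+ */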
+static int add_tty(dev_node_t *nodesp, int j,
+ struct ipw_hardware *hardware,
+ struct ipw_network *network, int channel_idx,
+ int secondary_channel_idx, int tty_type)
+{
+ ttys[j] = kzalloc(sizeof(struct ipw_tty), GFP_KERNEL);
+ if (!ttys[j])
+ return -ENOMEM;
+ ttys[j]->index = j;
+ ttys[j]->hardware = hardware;
+ ttys[j]->channel_idx = channel_idx;
+ ttys[j]->secondary_channel_idx = secondary_channel_idx;
+ ttys[j]->network = network;
+ ttys[j]->tty_type = tty_type;
+ mutex_init(&ttys[j]->ipw_tty_mutex);
+
+ tty_register_device(ipw_tty_driver, j, NULL);
+ ipwireless_associate_network_tty(network, channel_idx, ttys[j]);
+
+ if (secondary_channel_idx != -1)
+ ipwireless_associate_network_tty(network,
+ secondary_channel_idx,
+ ttys[j]);
+ if (nodesp != NULL) {
+ sprintf(nodesp->dev_name, "ttyIPWp%d", j);
+ nodesp->major = ipw_tty_driver->major;
+ nodesp->minor = j + ipw_tty_driver->minor_start;
+ }
+ if (get_tty(j + ipw_tty_driver->minor_start) == ttys[j])
+ report_registering(ttys[j]);
+ return 0;
+}
+
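+/*
+ * Create the modem, monitor and RAS-raw ttys for one card, using the first
+ * free slot in each minor range (the RAS-raw tty is only reachable when
+ * loopback mode is enabled).
+ */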
+struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hardware,
+ struct ipw_network *network,
+ dev_node_t *nodes)
+{
+ int i, j;
+
+ for (i = 0; i < IPWIRELESS_PCMCIA_MINOR_RANGE; i++) {
+ int allfree = 1;
+
+ for (j = i; j < IPWIRELESS_PCMCIA_MINORS;
+ j += IPWIRELESS_PCMCIA_MINOR_RANGE)
+ if (ttys[j] != NULL) {
+ allfree = 0;
+ break;
+ }
+
+ if (allfree) {
+ j = i;
+
+ if (add_tty(&nodes[0], j, hardware, network,
+ IPW_CHANNEL_DIALLER, IPW_CHANNEL_RAS,
+ TTYTYPE_MODEM))
+ return NULL;
+
+ j += IPWIRELESS_PCMCIA_MINOR_RANGE;
+ if (add_tty(&nodes[1], j, hardware, network,
+ IPW_CHANNEL_DIALLER, -1,
+ TTYTYPE_MONITOR))
+ return NULL;
+
+ j += IPWIRELESS_PCMCIA_MINOR_RANGE;
+ if (add_tty(NULL, j, hardware, network,
+ IPW_CHANNEL_RAS, -1,
+ TTYTYPE_RAS_RAW))
+ return NULL;
+
+ nodes[0].next = &nodes[1];
+ nodes[1].next = NULL;
+
+ return ttys[i];
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Must be called before ipwireless_network_free().
+ */
+void ipwireless_tty_free(struct ipw_tty *tty)
+{
+ int j;
+ struct ipw_network *network = ttys[tty->index]->network;
+
+ for (j = tty->index; j < IPWIRELESS_PCMCIA_MINORS;
+ j += IPWIRELESS_PCMCIA_MINOR_RANGE) {
+ struct ipw_tty *ttyj = ttys[j];
+
+ if (ttyj) {
+ mutex_lock(&ttyj->ipw_tty_mutex);
+ if (get_tty(j + ipw_tty_driver->minor_start) == ttyj)
+ report_deregistering(ttyj);
+ ttyj->closing = 1;
+ if (ttyj->linux_tty != NULL) {
+ mutex_unlock(&ttyj->ipw_tty_mutex);
+ tty_hangup(ttyj->linux_tty);
+ /* Wait till the tty_hangup has completed */
+ flush_scheduled_work();
+ mutex_lock(&ttyj->ipw_tty_mutex);
+ }
+ while (ttyj->open_count)
+ do_ipw_close(ttyj);
+ ipwireless_disassociate_network_ttys(network,
+ ttyj->channel_idx);
+ tty_unregister_device(ipw_tty_driver, j);
+ ttys[j] = NULL;
+ mutex_unlock(&ttyj->ipw_tty_mutex);
+ kfree(ttyj);
+ }
+ }
+}
+
+static struct tty_operations tty_ops = {
+ .open = ipw_open,
+ .close = ipw_close,
+ .hangup = ipw_hangup,
+ .write = ipw_write,
+ .write_room = ipw_write_room,
+ .ioctl = ipw_ioctl,
+ .chars_in_buffer = ipw_chars_in_buffer,
+ .tiocmget = ipw_tiocmget,
+ .tiocmset = ipw_tiocmset,
+};
+
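+/* Allocate and register the tty driver shared by all ipwireless minors. */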
+int ipwireless_tty_init(void)
+{
+ int result;
+
+ ipw_tty_driver = alloc_tty_driver(IPWIRELESS_PCMCIA_MINORS);
+ if (!ipw_tty_driver)
+ return -ENOMEM;
+
+ ipw_tty_driver->owner = THIS_MODULE;
+ ipw_tty_driver->driver_name = IPWIRELESS_PCCARD_NAME;
+ ipw_tty_driver->name = "ttyIPWp";
+ ipw_tty_driver->major = 0;
+ ipw_tty_driver->minor_start = IPWIRELESS_PCMCIA_START;
+ ipw_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ ipw_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ ipw_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ ipw_tty_driver->init_termios = tty_std_termios;
+ ipw_tty_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ ipw_tty_driver->init_termios.c_ispeed = 9600;
+ ipw_tty_driver->init_termios.c_ospeed = 9600;
+ tty_set_operations(ipw_tty_driver, &tty_ops);
+ result = tty_register_driver(ipw_tty_driver);
+ if (result) {
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": failed to register tty driver\n");
+ put_tty_driver(ipw_tty_driver);
+ return result;
+ }
+
+ return 0;
+}
+
+void ipwireless_tty_release(void)
+{
+ int ret;
+
+ ret = tty_unregister_driver(ipw_tty_driver);
+ put_tty_driver(ipw_tty_driver);
+ if (ret != 0)
+ printk(KERN_ERR IPWIRELESS_PCCARD_NAME
+ ": tty_unregister_driver failed with code %d\n", ret);
+}
+
+int ipwireless_tty_is_modem(struct ipw_tty *tty)
+{
+ return tty->tty_type == TTYTYPE_MODEM;
+}
+
+void
+ipwireless_tty_notify_control_line_change(struct ipw_tty *tty,
+ unsigned int channel_idx,
+ unsigned int control_lines,
+ unsigned int changed_mask)
+{
+ unsigned int old_control_lines = tty->control_lines;
+
+ tty->control_lines = (tty->control_lines & ~changed_mask)
+ | (control_lines & changed_mask);
+
+ /*
+ * If DCD is de-asserted, we close the tty so pppd can tell that we
+ * have gone offline.
+ */
+ if ((old_control_lines & IPW_CONTROL_LINE_DCD)
+ && !(tty->control_lines & IPW_CONTROL_LINE_DCD)
+ && tty->linux_tty) {
+ tty_hangup(tty->linux_tty);
+ }
+}
+
diff --git a/drivers/char/pcmcia/ipwireless/tty.h b/drivers/char/pcmcia/ipwireless/tty.h
new file mode 100644
index 000000000000..b0deb9168b6b
--- /dev/null
+++ b/drivers/char/pcmcia/ipwireless/tty.h
@@ -0,0 +1,48 @@
+/*
+ * IPWireless 3G PCMCIA Network Driver
+ *
+ * Original code
+ * by Stephen Blackheath <stephen@blacksapphire.com>,
+ * Ben Martel <benm@symmetric.co.nz>
+ *
+ * Copyrighted as follows:
+ * Copyright (C) 2004 by Symmetric Systems Ltd (NZ)
+ *
+ * Various driver changes and rewrites, port to new kernels
+ * Copyright (C) 2006-2007 Jiri Kosina
+ *
+ * Misc code cleanups and updates
+ * Copyright (C) 2007 David Sterba
+ */
+
+#ifndef _IPWIRELESS_CS_TTY_H_
+#define _IPWIRELESS_CS_TTY_H_
+
+#include <linux/types.h>
+#include <linux/sched.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+struct ipw_tty;
+struct ipw_network;
+struct ipw_hardware;
+
+int ipwireless_tty_init(void);
+void ipwireless_tty_release(void);
+
+struct ipw_tty *ipwireless_tty_create(struct ipw_hardware *hw,
+ struct ipw_network *net,
+ dev_node_t *nodes);
+void ipwireless_tty_free(struct ipw_tty *tty);
+void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
+ unsigned int length);
+int ipwireless_tty_is_modem(struct ipw_tty *tty);
+void ipwireless_tty_notify_control_line_change(struct ipw_tty *tty,
+ unsigned int channel_idx,
+ unsigned int control_lines,
+ unsigned int changed_mask);
+
+#endif
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 8caff0ca80ff..279ff5005cec 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -57,6 +57,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ioctl.h>
+#include <linux/synclink.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -87,8 +88,6 @@
#include <asm/uaccess.h>
-#include "linux/synclink.h"
-
static MGSL_PARAMS default_params = {
MGSL_MODE_HDLC, /* unsigned long mode */
0, /* unsigned char loopback; */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c511a831f0c0..f43c89f7c449 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1039,6 +1039,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
p += bytes;
add_entropy_words(r, buf, (bytes + 3) / 4);
+ cond_resched();
}
return 0;
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c
index 102ece4c4e0e..8fc4fe4e38f1 100644
--- a/drivers/char/riscom8.c
+++ b/drivers/char/riscom8.c
@@ -47,6 +47,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/tty_flip.h>
+#include <linux/spinlock.h>
#include <asm/uaccess.h>
@@ -77,10 +78,10 @@
ASYNC_SPD_HI | ASYNC_SPEED_VHI | ASYNC_SESSION_LOCKOUT | \
ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP)
-#define RS_EVENT_WRITE_WAKEUP 0
-
static struct tty_driver *riscom_driver;
+static DEFINE_SPINLOCK(riscom_lock);
+
static struct riscom_board rc_board[RC_NBOARD] = {
{
.base = RC_IOBASE1,
@@ -217,13 +218,14 @@ static void __init rc_init_CD180(struct riscom_board const * bp)
{
unsigned long flags;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
rc_out(bp, RC_CTOUT, 0); /* Clear timeout */
rc_wait_CCR(bp); /* Wait for CCR ready */
rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */
- sti();
+ spin_unlock_irqrestore(&riscom_lock, flags);
msleep(50); /* Delay 0.05 sec */
- cli();
+ spin_lock_irqsave(&riscom_lock, flags);
rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */
rc_out(bp, CD180_GICR, 0); /* Clear all bits */
rc_out(bp, CD180_PILR1, RC_ACK_MINT); /* Prio for modem intr */
@@ -234,7 +236,7 @@ static void __init rc_init_CD180(struct riscom_board const * bp)
rc_out(bp, CD180_PPRH, (RC_OSCFREQ/(1000000/RISCOM_TPS)) >> 8);
rc_out(bp, CD180_PPRL, (RC_OSCFREQ/(1000000/RISCOM_TPS)) & 0xff);
- restore_flags(flags);
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
/* Main probing routine, also sets irq. */
@@ -310,12 +312,6 @@ out_release:
*
*/
-static inline void rc_mark_event(struct riscom_port * port, int event)
-{
- set_bit(event, &port->event);
- schedule_work(&port->tqueue);
-}
-
static inline struct riscom_port * rc_get_port(struct riscom_board const * bp,
unsigned char const * what)
{
@@ -482,7 +478,7 @@ static inline void rc_transmit(struct riscom_board const * bp)
rc_out(bp, CD180_IER, port->IER);
}
if (port->xmit_cnt <= port->wakeup_chars)
- rc_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
}
static inline void rc_check_modem(struct riscom_board const * bp)
@@ -501,7 +497,7 @@ static inline void rc_check_modem(struct riscom_board const * bp)
if (rc_in(bp, CD180_MSVR) & MSVR_CD)
wake_up_interruptible(&port->open_wait);
else
- schedule_work(&port->tqueue_hangup);
+ tty_hangup(tty);
}
#ifdef RISCOM_BRAIN_DAMAGED_CTS
@@ -510,7 +506,7 @@ static inline void rc_check_modem(struct riscom_board const * bp)
tty->hw_stopped = 0;
port->IER |= IER_TXRDY;
if (port->xmit_cnt <= port->wakeup_chars)
- rc_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
} else {
tty->hw_stopped = 1;
port->IER &= ~IER_TXRDY;
@@ -522,7 +518,7 @@ static inline void rc_check_modem(struct riscom_board const * bp)
tty->hw_stopped = 0;
port->IER |= IER_TXRDY;
if (port->xmit_cnt <= port->wakeup_chars)
- rc_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
} else {
tty->hw_stopped = 1;
port->IER &= ~IER_TXRDY;
@@ -812,9 +808,9 @@ static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port)
}
port->xmit_buf = (unsigned char *) tmp;
}
-
- save_flags(flags); cli();
-
+
+ spin_lock_irqsave(&riscom_lock, flags);
+
if (port->tty)
clear_bit(TTY_IO_ERROR, &port->tty->flags);
@@ -825,7 +821,7 @@ static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port)
rc_change_speed(bp, port);
port->flags |= ASYNC_INITIALIZED;
- restore_flags(flags);
+ spin_unlock_irqrestore(&riscom_lock, flags);
return 0;
}
@@ -901,6 +897,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
int retval;
int do_clocal = 0;
int CD;
+ unsigned long flags;
/*
* If the device is in the middle of being closed, then block
@@ -936,19 +933,26 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
*/
retval = 0;
add_wait_queue(&port->open_wait, &wait);
- cli();
+
+ spin_lock_irqsave(&riscom_lock, flags);
+
if (!tty_hung_up_p(filp))
port->count--;
- sti();
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
+
port->blocked_open++;
while (1) {
- cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
rc_out(bp, CD180_CAR, port_No(port));
CD = rc_in(bp, CD180_MSVR) & MSVR_CD;
rc_out(bp, CD180_MSVR, MSVR_RTS);
bp->DTR &= ~(1u << port_No(port));
rc_out(bp, RC_DTR, bp->DTR);
- sti();
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
+
set_current_state(TASK_INTERRUPTIBLE);
if (tty_hung_up_p(filp) ||
!(port->flags & ASYNC_INITIALIZED)) {
@@ -1020,8 +1024,9 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
if (!port || rc_paranoia_check(port, tty->name, "close"))
return;
-
- save_flags(flags); cli();
+
+ spin_lock_irqsave(&riscom_lock, flags);
+
if (tty_hung_up_p(filp))
goto out;
@@ -1078,7 +1083,6 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
tty_ldisc_flush(tty);
tty->closing = 0;
- port->event = 0;
port->tty = NULL;
if (port->blocked_open) {
if (port->close_delay) {
@@ -1088,7 +1092,9 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
}
port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
wake_up_interruptible(&port->close_wait);
-out: restore_flags(flags);
+
+out:
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static int rc_write(struct tty_struct * tty,
@@ -1107,34 +1113,33 @@ static int rc_write(struct tty_struct * tty,
if (!tty || !port->xmit_buf)
return 0;
- save_flags(flags);
while (1) {
- cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
c = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt - 1,
SERIAL_XMIT_SIZE - port->xmit_head));
- if (c <= 0) {
- restore_flags(flags);
- break;
- }
+ if (c <= 0)
+ break; /* lock continues to be held */
memcpy(port->xmit_buf + port->xmit_head, buf, c);
port->xmit_head = (port->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
port->xmit_cnt += c;
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
buf += c;
count -= c;
total += c;
}
- cli();
if (port->xmit_cnt && !tty->stopped && !tty->hw_stopped &&
!(port->IER & IER_TXRDY)) {
port->IER |= IER_TXRDY;
rc_out(bp, CD180_CAR, port_No(port));
rc_out(bp, CD180_IER, port->IER);
}
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
return total;
}
@@ -1150,7 +1155,7 @@ static void rc_put_char(struct tty_struct * tty, unsigned char ch)
if (!tty || !port->xmit_buf)
return;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
goto out;
@@ -1158,7 +1163,9 @@ static void rc_put_char(struct tty_struct * tty, unsigned char ch)
port->xmit_buf[port->xmit_head++] = ch;
port->xmit_head &= SERIAL_XMIT_SIZE - 1;
port->xmit_cnt++;
-out: restore_flags(flags);
+
+out:
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static void rc_flush_chars(struct tty_struct * tty)
@@ -1173,11 +1180,13 @@ static void rc_flush_chars(struct tty_struct * tty)
!port->xmit_buf)
return;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->IER |= IER_TXRDY;
rc_out(port_Board(port), CD180_CAR, port_No(port));
rc_out(port_Board(port), CD180_IER, port->IER);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static int rc_write_room(struct tty_struct * tty)
@@ -1212,9 +1221,11 @@ static void rc_flush_buffer(struct tty_struct *tty)
if (rc_paranoia_check(port, tty->name, "rc_flush_buffer"))
return;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
tty_wakeup(tty);
}
@@ -1231,11 +1242,15 @@ static int rc_tiocmget(struct tty_struct *tty, struct file *file)
return -ENODEV;
bp = port_Board(port);
- save_flags(flags); cli();
+
+ spin_lock_irqsave(&riscom_lock, flags);
+
rc_out(bp, CD180_CAR, port_No(port));
status = rc_in(bp, CD180_MSVR);
result = rc_in(bp, RC_RI) & (1u << port_No(port)) ? 0 : TIOCM_RNG;
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
+
result |= ((status & MSVR_RTS) ? TIOCM_RTS : 0)
| ((status & MSVR_DTR) ? TIOCM_DTR : 0)
| ((status & MSVR_CD) ? TIOCM_CAR : 0)
@@ -1256,7 +1271,8 @@ static int rc_tiocmset(struct tty_struct *tty, struct file *file,
bp = port_Board(port);
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
if (set & TIOCM_RTS)
port->MSVR |= MSVR_RTS;
if (set & TIOCM_DTR)
@@ -1270,7 +1286,9 @@ static int rc_tiocmset(struct tty_struct *tty, struct file *file,
rc_out(bp, CD180_CAR, port_No(port));
rc_out(bp, CD180_MSVR, port->MSVR);
rc_out(bp, RC_DTR, bp->DTR);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
+
return 0;
}
@@ -1279,7 +1297,8 @@ static inline void rc_send_break(struct riscom_port * port, unsigned long length
struct riscom_board *bp = port_Board(port);
unsigned long flags;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->break_length = RISCOM_TPS / HZ * length;
port->COR2 |= COR2_ETC;
port->IER |= IER_TXRDY;
@@ -1289,7 +1308,8 @@ static inline void rc_send_break(struct riscom_port * port, unsigned long length
rc_wait_CCR(bp);
rc_out(bp, CD180_CCR, CCR_CORCHG2);
rc_wait_CCR(bp);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static inline int rc_set_serial_info(struct riscom_port * port,
@@ -1298,7 +1318,6 @@ static inline int rc_set_serial_info(struct riscom_port * port,
struct serial_struct tmp;
struct riscom_board *bp = port_Board(port);
int change_speed;
- unsigned long flags;
if (copy_from_user(&tmp, newinfo, sizeof(tmp)))
return -EFAULT;
@@ -1332,9 +1351,11 @@ static inline int rc_set_serial_info(struct riscom_port * port,
port->closing_wait = tmp.closing_wait;
}
if (change_speed) {
- save_flags(flags); cli();
+ unsigned long flags;
+
+ spin_lock_irqsave(&riscom_lock, flags);
rc_change_speed(bp, port);
- restore_flags(flags);
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
return 0;
}
@@ -1414,17 +1435,19 @@ static void rc_throttle(struct tty_struct * tty)
return;
bp = port_Board(port);
-
- save_flags(flags); cli();
+
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->MSVR &= ~MSVR_RTS;
rc_out(bp, CD180_CAR, port_No(port));
- if (I_IXOFF(tty)) {
+ if (I_IXOFF(tty)) {
rc_wait_CCR(bp);
rc_out(bp, CD180_CCR, CCR_SSCH2);
rc_wait_CCR(bp);
}
rc_out(bp, CD180_MSVR, port->MSVR);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static void rc_unthrottle(struct tty_struct * tty)
@@ -1438,7 +1461,8 @@ static void rc_unthrottle(struct tty_struct * tty)
bp = port_Board(port);
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->MSVR |= MSVR_RTS;
rc_out(bp, CD180_CAR, port_No(port));
if (I_IXOFF(tty)) {
@@ -1447,7 +1471,8 @@ static void rc_unthrottle(struct tty_struct * tty)
rc_wait_CCR(bp);
}
rc_out(bp, CD180_MSVR, port->MSVR);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static void rc_stop(struct tty_struct * tty)
@@ -1461,11 +1486,13 @@ static void rc_stop(struct tty_struct * tty)
bp = port_Board(port);
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
port->IER &= ~IER_TXRDY;
rc_out(bp, CD180_CAR, port_No(port));
rc_out(bp, CD180_IER, port->IER);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static void rc_start(struct tty_struct * tty)
@@ -1479,32 +1506,15 @@ static void rc_start(struct tty_struct * tty)
bp = port_Board(port);
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) {
port->IER |= IER_TXRDY;
rc_out(bp, CD180_CAR, port_No(port));
rc_out(bp, CD180_IER, port->IER);
}
- restore_flags(flags);
-}
-/*
- * This routine is called from the work queue when the interrupt
- * routine has signalled that a hangup has occurred. The path of
- * hangup processing is:
- *
- * serial interrupt routine -> (workqueue) ->
- * do_rc_hangup() -> tty->hangup() -> rc_hangup()
- *
- */
-static void do_rc_hangup(struct work_struct *ugly_api)
-{
- struct riscom_port *port = container_of(ugly_api, struct riscom_port, tqueue_hangup);
- struct tty_struct *tty;
-
- tty = port->tty;
- if (tty)
- tty_hangup(tty); /* FIXME: module removal race still here */
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
static void rc_hangup(struct tty_struct * tty)
@@ -1518,7 +1528,6 @@ static void rc_hangup(struct tty_struct * tty)
bp = port_Board(port);
rc_shutdown_port(bp, port);
- port->event = 0;
port->count = 0;
port->flags &= ~ASYNC_NORMAL_ACTIVE;
port->tty = NULL;
@@ -1537,9 +1546,9 @@ static void rc_set_termios(struct tty_struct * tty, struct ktermios * old_termio
tty->termios->c_iflag == old_termios->c_iflag)
return;
- save_flags(flags); cli();
+ spin_lock_irqsave(&riscom_lock, flags);
rc_change_speed(port_Board(port), port);
- restore_flags(flags);
+ spin_unlock_irqrestore(&riscom_lock, flags);
if ((old_termios->c_cflag & CRTSCTS) &&
!(tty->termios->c_cflag & CRTSCTS)) {
@@ -1548,18 +1557,6 @@ static void rc_set_termios(struct tty_struct * tty, struct ktermios * old_termio
}
}
-static void do_softint(struct work_struct *ugly_api)
-{
- struct riscom_port *port = container_of(ugly_api, struct riscom_port, tqueue);
- struct tty_struct *tty;
-
- if(!(tty = port->tty))
- return;
-
- if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &port->event))
- tty_wakeup(tty);
-}
-
static const struct tty_operations riscom_ops = {
.open = rc_open,
.close = rc_close,
@@ -1580,7 +1577,7 @@ static const struct tty_operations riscom_ops = {
.tiocmset = rc_tiocmset,
};
-static inline int rc_init_drivers(void)
+static int __init rc_init_drivers(void)
{
int error;
int i;
@@ -1612,8 +1609,6 @@ static inline int rc_init_drivers(void)
memset(rc_port, 0, sizeof(rc_port));
for (i = 0; i < RC_NPORT * RC_NBOARD; i++) {
rc_port[i].magic = RISCOM8_MAGIC;
- INIT_WORK(&rc_port[i].tqueue, do_softint);
- INIT_WORK(&rc_port[i].tqueue_hangup, do_rc_hangup);
rc_port[i].close_delay = 50 * HZ/100;
rc_port[i].closing_wait = 3000 * HZ/100;
init_waitqueue_head(&rc_port[i].open_wait);
@@ -1627,11 +1622,12 @@ static void rc_release_drivers(void)
{
unsigned long flags;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&riscom_lock, flags);
+
tty_unregister_driver(riscom_driver);
put_tty_driver(riscom_driver);
- restore_flags(flags);
+
+ spin_unlock_irqrestore(&riscom_lock, flags);
}
#ifndef MODULE
diff --git a/drivers/char/riscom8.h b/drivers/char/riscom8.h
index 9cc1313d5e67..cdfdf4394477 100644
--- a/drivers/char/riscom8.h
+++ b/drivers/char/riscom8.h
@@ -71,7 +71,6 @@ struct riscom_port {
struct tty_struct * tty;
int count;
int blocked_open;
- unsigned long event; /* long req'd for set_bit --RR */
int timeout;
int close_delay;
unsigned char * xmit_buf;
@@ -81,8 +80,6 @@ struct riscom_port {
int xmit_cnt;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
- struct work_struct tqueue;
- struct work_struct tqueue_hangup;
short wakeup_chars;
short break_length;
unsigned short closing_wait;
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
index d83419c3857e..68c289fe2dc2 100644
--- a/drivers/char/rocket.c
+++ b/drivers/char/rocket.c
@@ -40,12 +40,6 @@
*/
/****** Defines ******/
-#ifdef PCI_NUM_RESOURCES
-#define PCI_BASE_ADDRESS(dev, r) ((dev)->resource[r].start)
-#else
-#define PCI_BASE_ADDRESS(dev, r) ((dev)->base_address[r])
-#endif
-
#define ROCKET_PARANOIA_CHECK
#define ROCKET_DISABLE_SIMUSAGE
@@ -305,8 +299,8 @@ static inline int rocket_paranoia_check(struct r_port *info,
if (!info)
return 1;
if (info->magic != RPORT_MAGIC) {
- printk(KERN_INFO "Warning: bad magic number for rocketport struct in %s\n",
- routine);
+ printk(KERN_WARNING "Warning: bad magic number for rocketport "
+ "struct in %s\n", routine);
return 1;
}
#endif
@@ -328,7 +322,7 @@ static void rp_do_receive(struct r_port *info,
ToRecv = sGetRxCnt(cp);
#ifdef ROCKET_DEBUG_INTR
- printk(KERN_INFO "rp_do_receive(%d)...", ToRecv);
+ printk(KERN_INFO "rp_do_receive(%d)...\n", ToRecv);
#endif
if (ToRecv == 0)
return;
@@ -341,7 +335,7 @@ static void rp_do_receive(struct r_port *info,
if (ChanStatus & (RXFOVERFL | RXBREAK | RXFRAME | RXPARITY)) {
if (!(ChanStatus & STATMODE)) {
#ifdef ROCKET_DEBUG_RECEIVE
- printk(KERN_INFO "Entering STATMODE...");
+ printk(KERN_INFO "Entering STATMODE...\n");
#endif
ChanStatus |= STATMODE;
sEnRxStatusMode(cp);
@@ -355,15 +349,15 @@ static void rp_do_receive(struct r_port *info,
*/
if (ChanStatus & STATMODE) {
#ifdef ROCKET_DEBUG_RECEIVE
- printk(KERN_INFO "Ignore %x, read %x...", info->ignore_status_mask,
- info->read_status_mask);
+ printk(KERN_INFO "Ignore %x, read %x...\n",
+ info->ignore_status_mask, info->read_status_mask);
#endif
while (ToRecv) {
char flag;
CharNStat = sInW(sGetTxRxDataIO(cp));
#ifdef ROCKET_DEBUG_RECEIVE
- printk(KERN_INFO "%x...", CharNStat);
+ printk(KERN_INFO "%x...\n", CharNStat);
#endif
if (CharNStat & STMBREAKH)
CharNStat &= ~(STMFRAMEH | STMPARITYH);
@@ -435,12 +429,13 @@ static void rp_do_transmit(struct r_port *info)
unsigned long flags;
#ifdef ROCKET_DEBUG_INTR
- printk(KERN_INFO "rp_do_transmit ");
+ printk(KERN_DEBUG "%s\n", __func__);
#endif
if (!info)
return;
if (!info->tty) {
- printk(KERN_INFO "rp: WARNING rp_do_transmit called with info->tty==NULL\n");
+ printk(KERN_WARNING "rp: WARNING %s called with "
+ "info->tty==NULL\n", __func__);
clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
return;
}
@@ -464,7 +459,7 @@ static void rp_do_transmit(struct r_port *info)
info->xmit_cnt -= c;
info->xmit_fifo_room -= c;
#ifdef ROCKET_DEBUG_INTR
- printk(KERN_INFO "tx %d chars...", c);
+ printk(KERN_INFO "tx %d chars...\n", c);
#endif
}
@@ -481,7 +476,7 @@ static void rp_do_transmit(struct r_port *info)
spin_unlock_irqrestore(&info->slock, flags);
#ifdef ROCKET_DEBUG_INTR
- printk(KERN_INFO "(%d,%d,%d,%d)...", info->xmit_cnt, info->xmit_head,
+ printk(KERN_DEBUG "(%d,%d,%d,%d)...\n", info->xmit_cnt, info->xmit_head,
info->xmit_tail, info->xmit_fifo_room);
#endif
}
@@ -501,11 +496,13 @@ static void rp_handle_port(struct r_port *info)
return;
if ((info->flags & ROCKET_INITIALIZED) == 0) {
- printk(KERN_INFO "rp: WARNING: rp_handle_port called with info->flags & NOT_INIT\n");
+ printk(KERN_WARNING "rp: WARNING: rp_handle_port called with "
+ "info->flags & NOT_INIT\n");
return;
}
if (!info->tty) {
- printk(KERN_INFO "rp: WARNING: rp_handle_port called with info->tty==NULL\n");
+ printk(KERN_WARNING "rp: WARNING: rp_handle_port called with "
+ "info->tty==NULL\n");
return;
}
cp = &info->channel;
@@ -513,7 +510,7 @@ static void rp_handle_port(struct r_port *info)
IntMask = sGetChanIntID(cp) & info->intmask;
#ifdef ROCKET_DEBUG_INTR
- printk(KERN_INFO "rp_interrupt %02x...", IntMask);
+ printk(KERN_INFO "rp_interrupt %02x...\n", IntMask);
#endif
ChanStatus = sGetChanStatus(cp);
if (IntMask & RXF_TRIG) { /* Rx FIFO trigger level */
@@ -521,7 +518,7 @@ static void rp_handle_port(struct r_port *info)
}
if (IntMask & DELTA_CD) { /* CD change */
#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_INTR) || defined(ROCKET_DEBUG_HANGUP))
- printk(KERN_INFO "ttyR%d CD now %s...", info->line,
+ printk(KERN_INFO "ttyR%d CD now %s...\n", info->line,
(ChanStatus & CD_ACT) ? "on" : "off");
#endif
if (!(ChanStatus & CD_ACT) && info->cd_status) {
@@ -638,7 +635,8 @@ static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
/* Get a r_port struct for the port, fill it in and save it globally, indexed by line number */
info = kzalloc(sizeof (struct r_port), GFP_KERNEL);
if (!info) {
- printk(KERN_INFO "Couldn't allocate info struct for line #%d\n", line);
+ printk(KERN_ERR "Couldn't allocate info struct for line #%d\n",
+ line);
return;
}
@@ -668,7 +666,8 @@ static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR;
if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) {
- printk(KERN_INFO "RocketPort sInitChan(%d, %d, %d) failed!\n", board, aiop, chan);
+ printk(KERN_ERR "RocketPort sInitChan(%d, %d, %d) failed!\n",
+ board, aiop, chan);
kfree(info);
return;
}
@@ -976,7 +975,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
CHANNEL_t *cp;
unsigned long page;
- line = TTY_GET_LINE(tty);
+ line = tty->index;
if ((line < 0) || (line >= MAX_RP_PORTS) || ((info = rp_table[line]) == NULL))
return -ENXIO;
@@ -1007,7 +1006,8 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
atomic_inc(&rp_num_ports_open);
#ifdef ROCKET_DEBUG_OPEN
- printk(KERN_INFO "rocket mod++ = %d...", atomic_read(&rp_num_ports_open));
+ printk(KERN_INFO "rocket mod++ = %d...\n",
+ atomic_read(&rp_num_ports_open));
#endif
}
#ifdef ROCKET_DEBUG_OPEN
@@ -1103,13 +1103,13 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
* one, we've got real problems, since it means the
* serial port won't be shutdown.
*/
- printk(KERN_INFO "rp_close: bad serial port count; tty->count is 1, "
- "info->count is %d\n", info->count);
+ printk(KERN_WARNING "rp_close: bad serial port count; "
+ "tty->count is 1, info->count is %d\n", info->count);
info->count = 1;
}
if (--info->count < 0) {
- printk(KERN_INFO "rp_close: bad serial port count for ttyR%d: %d\n",
- info->line, info->count);
+ printk(KERN_WARNING "rp_close: bad serial port count for "
+ "ttyR%d: %d\n", info->line, info->count);
info->count = 0;
}
if (info->count) {
@@ -1160,8 +1160,7 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
if (C_HUPCL(tty))
sClrDTR(cp);
- if (TTY_DRIVER_FLUSH_BUFFER_EXISTS(tty))
- TTY_DRIVER_FLUSH_BUFFER(tty);
+ rp_flush_buffer(tty);
tty_ldisc_flush(tty);
@@ -1184,7 +1183,8 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
atomic_dec(&rp_num_ports_open);
#ifdef ROCKET_DEBUG_OPEN
- printk(KERN_INFO "rocket mod-- = %d...", atomic_read(&rp_num_ports_open));
+ printk(KERN_INFO "rocket mod-- = %d...\n",
+ atomic_read(&rp_num_ports_open));
printk(KERN_INFO "rp_close ttyR%d complete shutdown\n", info->line);
#endif
@@ -1569,9 +1569,9 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
orig_jiffies = jiffies;
#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
- printk(KERN_INFO "In RP_wait_until_sent(%d) (jiff=%lu)...", timeout,
+ printk(KERN_INFO "In RP_wait_until_sent(%d) (jiff=%lu)...\n", timeout,
jiffies);
- printk(KERN_INFO "cps=%d...", info->cps);
+ printk(KERN_INFO "cps=%d...\n", info->cps);
#endif
while (1) {
txcnt = sGetTxCnt(cp);
@@ -1592,7 +1592,8 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
if (check_time == 0)
check_time = 1;
#ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
- printk(KERN_INFO "txcnt = %d (jiff=%lu,check=%d)...", txcnt, jiffies, check_time);
+ printk(KERN_INFO "txcnt = %d (jiff=%lu,check=%d)...\n", txcnt,
+ jiffies, check_time);
#endif
msleep_interruptible(jiffies_to_msecs(check_time));
if (signal_pending(current))
@@ -1616,7 +1617,7 @@ static void rp_hangup(struct tty_struct *tty)
return;
#if (defined(ROCKET_DEBUG_OPEN) || defined(ROCKET_DEBUG_HANGUP))
- printk(KERN_INFO "rp_hangup of ttyR%d...", info->line);
+ printk(KERN_INFO "rp_hangup of ttyR%d...\n", info->line);
#endif
rp_flush_buffer(tty);
if (info->flags & ROCKET_CLOSING)
@@ -1664,7 +1665,7 @@ static void rp_put_char(struct tty_struct *tty, unsigned char ch)
mutex_lock(&info->write_mtx);
#ifdef ROCKET_DEBUG_WRITE
- printk(KERN_INFO "rp_put_char %c...", ch);
+ printk(KERN_INFO "rp_put_char %c...\n", ch);
#endif
spin_lock_irqsave(&info->slock, flags);
@@ -1709,7 +1710,7 @@ static int rp_write(struct tty_struct *tty,
return -ERESTARTSYS;
#ifdef ROCKET_DEBUG_WRITE
- printk(KERN_INFO "rp_write %d chars...", count);
+ printk(KERN_INFO "rp_write %d chars...\n", count);
#endif
cp = &info->channel;
@@ -1798,7 +1799,7 @@ static int rp_write_room(struct tty_struct *tty)
if (ret < 0)
ret = 0;
#ifdef ROCKET_DEBUG_WRITE
- printk(KERN_INFO "rp_write_room returns %d...", ret);
+ printk(KERN_INFO "rp_write_room returns %d...\n", ret);
#endif
return ret;
}
@@ -1818,7 +1819,7 @@ static int rp_chars_in_buffer(struct tty_struct *tty)
cp = &info->channel;
#ifdef ROCKET_DEBUG_WRITE
- printk(KERN_INFO "rp_chars_in_buffer returns %d...", info->xmit_cnt);
+ printk(KERN_INFO "rp_chars_in_buffer returns %d...\n", info->xmit_cnt);
#endif
return info->xmit_cnt;
}
@@ -2161,14 +2162,11 @@ static __init int register_PCI(int i, struct pci_dev *dev)
for (aiop = 0; aiop < max_num_aiops; aiop++)
ctlp->AiopNumChan[aiop] = ports_per_aiop;
- printk("Comtrol PCI controller #%d ID 0x%x found in bus:slot:fn %s at address %04lx, "
- "%d AIOP(s) (%s)\n", i, dev->device, pci_name(dev),
- rcktpt_io_addr[i], num_aiops, rocketModel[i].modelString);
- printk(KERN_INFO "Installing %s, creating /dev/ttyR%d - %ld\n",
- rocketModel[i].modelString,
- rocketModel[i].startingPortNumber,
- rocketModel[i].startingPortNumber +
- rocketModel[i].numPorts - 1);
+ dev_info(&dev->dev, "comtrol PCI controller #%d found at "
+ "address %04lx, %d AIOP(s) (%s), creating ttyR%d - %ld\n",
+ i, rcktpt_io_addr[i], num_aiops, rocketModel[i].modelString,
+ rocketModel[i].startingPortNumber,
+ rocketModel[i].startingPortNumber + rocketModel[i].numPorts-1);
if (num_aiops <= 0) {
rcktpt_io_addr[i] = 0;
@@ -2191,10 +2189,10 @@ static __init int register_PCI(int i, struct pci_dev *dev)
num_chan = ports_per_aiop;
for (chan = 0; chan < num_chan; chan++)
sPCIModemReset(ctlp, chan, 1);
- mdelay(500);
+ msleep(500);
for (chan = 0; chan < num_chan; chan++)
sPCIModemReset(ctlp, chan, 0);
- mdelay(500);
+ msleep(500);
rmSpeakerReset(ctlp, rocketModel[i].model);
}
return (1);
@@ -2240,7 +2238,9 @@ static int __init init_ISA(int i)
/* Reserve the IO region */
if (!request_region(rcktpt_io_addr[i], 64, "Comtrol RocketPort")) {
- printk(KERN_INFO "Unable to reserve IO region for configured ISA RocketPort at address 0x%lx, board not installed...\n", rcktpt_io_addr[i]);
+ printk(KERN_ERR "Unable to reserve IO region for configured "
+ "ISA RocketPort at address 0x%lx, board not "
+ "installed...\n", rcktpt_io_addr[i]);
rcktpt_io_addr[i] = 0;
return (0);
}
@@ -2309,10 +2309,10 @@ static int __init init_ISA(int i)
total_num_chan = num_chan;
for (chan = 0; chan < num_chan; chan++)
sModemReset(ctlp, chan, 1);
- mdelay(500);
+ msleep(500);
for (chan = 0; chan < num_chan; chan++)
sModemReset(ctlp, chan, 0);
- mdelay(500);
+ msleep(500);
strcpy(rocketModel[i].modelString, "RocketModem ISA");
} else {
strcpy(rocketModel[i].modelString, "RocketPort ISA");
@@ -2480,7 +2480,7 @@ static void rp_cleanup_module(void)
retval = tty_unregister_driver(rocket_driver);
if (retval)
- printk(KERN_INFO "Error %d while trying to unregister "
+ printk(KERN_ERR "Error %d while trying to unregister "
"rocketport driver\n", -retval);
for (i = 0; i < MAX_RP_PORTS; i++)
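The rocket.c hunks above converge on a few general conventions rather than on driver-specific behaviour. A minimal standalone sketch of those conventions, using a hypothetical my_board structure that is not part of the patch: severity-appropriate log levels, dev_info()/dev_err() bound to the emitting device, and msleep() instead of mdelay() where the caller may sleep.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/pci.h>

/* Hypothetical per-board state, used only for this sketch. */
struct my_board {
        struct pci_dev *pdev;
        unsigned long io_addr;
};

static int my_board_reset(struct my_board *bd)
{
        if (!bd->io_addr) {
                /* A real failure: report it at error severity, not KERN_INFO. */
                dev_err(&bd->pdev->dev, "no I/O region configured\n");
                return -ENODEV;
        }

        /* Informational message tied to the device that emitted it. */
        dev_info(&bd->pdev->dev, "resetting modems at %#lx\n", bd->io_addr);

        /*
         * This path runs from probe (process context), so sleeping is
         * allowed: msleep() yields the CPU instead of busy-waiting for
         * 500 ms the way mdelay(500) would.
         */
        msleep(500);
        return 0;
}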
diff --git a/drivers/char/rocket_int.h b/drivers/char/rocket_int.h
index 55b8f2d71a96..f3a75791b811 100644
--- a/drivers/char/rocket_int.h
+++ b/drivers/char/rocket_int.h
@@ -42,7 +42,7 @@ typedef unsigned int DWordIO_t;
static inline void sOutB(unsigned short port, unsigned char value)
{
#ifdef ROCKET_DEBUG_IO
- printk("sOutB(%x, %x)...", port, value);
+ printk(KERN_DEBUG "sOutB(%x, %x)...\n", port, value);
#endif
outb_p(value, port);
}
@@ -50,7 +50,7 @@ static inline void sOutB(unsigned short port, unsigned char value)
static inline void sOutW(unsigned short port, unsigned short value)
{
#ifdef ROCKET_DEBUG_IO
- printk("sOutW(%x, %x)...", port, value);
+ printk(KERN_DEBUG "sOutW(%x, %x)...\n", port, value);
#endif
outw_p(value, port);
}
@@ -58,7 +58,7 @@ static inline void sOutW(unsigned short port, unsigned short value)
static inline void sOutDW(unsigned short port, unsigned long value)
{
#ifdef ROCKET_DEBUG_IO
- printk("sOutDW(%x, %lx)...", port, value);
+ printk(KERN_DEBUG "sOutDW(%x, %lx)...\n", port, value);
#endif
outl_p(cpu_to_le32(value), port);
}
@@ -105,12 +105,6 @@ static inline unsigned short sInW(unsigned short port)
#define AIOPID_NULL -1 /* no AIOP or channel exists */
#define AIOPID_0001 0x0001 /* AIOP release 1 */
-#define NULLDEV -1 /* identifies non-existant device */
-#define NULLCTL -1 /* identifies non-existant controller */
-#define NULLCTLPTR (CONTROLLER_T *)0 /* identifies non-existant controller */
-#define NULLAIOP -1 /* identifies non-existant AIOP */
-#define NULLCHAN -1 /* identifies non-existant channel */
-
/************************************************************************
Global Register Offsets - Direct Access - Fixed values
************************************************************************/
@@ -1187,9 +1181,6 @@ struct r_port {
#define ROCKET_CLOSING 0x40000000 /* Serial port is closing */
#define ROCKET_NORMAL_ACTIVE 0x20000000 /* Normal port is active */
-/* tty subtypes */
-#define SERIAL_TYPE_NORMAL 1
-
/*
* Assigned major numbers for the Comtrol Rocketport
*/
@@ -1240,12 +1231,3 @@ struct r_port {
/* Compact PCI device */
#define PCI_DEVICE_ID_CRP16INTF 0x0903 /* Rocketport Compact PCI 16 port w/external I/F */
-#define TTY_GET_LINE(t) t->index
-#define TTY_DRIVER_MINOR_START(t) t->driver->minor_start
-#define TTY_DRIVER_SUBTYPE(t) t->driver->subtype
-#define TTY_DRIVER_NAME(t) t->driver->name
-#define TTY_DRIVER_NAME_BASE(t) t->driver->name_base
-#define TTY_DRIVER_FLUSH_BUFFER_EXISTS(t) t->driver->flush_buffer
-#define TTY_DRIVER_FLUSH_BUFFER(t) t->driver->flush_buffer(t)
-
-
diff --git a/drivers/char/ser_a2232.c b/drivers/char/ser_a2232.c
index 3c869145bfdc..4ba3aec9e1cd 100644
--- a/drivers/char/ser_a2232.c
+++ b/drivers/char/ser_a2232.c
@@ -653,7 +653,7 @@ static void a2232_init_portstructs(void)
port->gs.closing_wait = 30 * HZ;
port->gs.rd = &a2232_real_driver;
#ifdef NEW_WRITE_LOCKING
- init_MUTEX(&(port->gs.port_write_mutex));
+ mutex_init(&(port->gs.port_write_mutex));
#endif
init_waitqueue_head(&port->gs.open_wait);
init_waitqueue_head(&port->gs.close_wait);
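The single ser_a2232.c hunk is the recurring semaphore-as-mutex cleanup. A tiny sketch of the replacement API, with an invented my_port structure standing in for the generic serial port:

#include <linux/mutex.h>

/* Hypothetical port structure; only the mutex member matters here. */
struct my_port {
        struct mutex write_mutex;       /* was a semaphore set up with init_MUTEX() */
};

static void my_port_setup(struct my_port *port)
{
        /* Runtime init for a mutex embedded in another object. */
        mutex_init(&port->write_mutex);
}

static void my_port_write(struct my_port *port)
{
        mutex_lock(&port->write_mutex);
        /* ... push data into the transmit buffer ... */
        mutex_unlock(&port->write_mutex);
}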
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index f1497cecffd8..df8cd0ca97eb 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -90,8 +90,6 @@
#define STD_COM_FLAGS (0)
-#define SERIAL_TYPE_NORMAL 1
-
static struct tty_driver *cy_serial_driver;
extern int serial_console;
static struct cyclades_port *serial_console_info = NULL;
@@ -359,18 +357,6 @@ static void cy_start(struct tty_struct *tty)
local_irq_restore(flags);
} /* cy_start */
-/*
- * This routine is used by the interrupt handler to schedule
- * processing in the software interrupt portion of the driver
- * (also known as the "bottom half"). This can be called any
- * number of times for any channel without harm.
- */
-static inline void cy_sched_event(struct cyclades_port *info, int event)
-{
- info->event |= 1 << event; /* remember what kind of event and who */
- schedule_work(&info->tqueue);
-} /* cy_sched_event */
-
/* The real interrupt service routines are called
whenever the card wants its hand held--chars
received, out buffer empty, modem change, etc.
@@ -485,10 +471,12 @@ static irqreturn_t cd2401_modem_interrupt(int irq, void *dev_id)
&& (info->flags & ASYNC_CHECK_CD)) {
if (mdm_status & CyDCD) {
/* CP('!'); */
- cy_sched_event(info, Cy_EVENT_OPEN_WAKEUP);
+ wake_up_interruptible(&info->open_wait);
} else {
/* CP('@'); */
- cy_sched_event(info, Cy_EVENT_HANGUP);
+ tty_hangup(info->tty);
+ wake_up_interruptible(&info->open_wait);
+ info->flags &= ~ASYNC_NORMAL_ACTIVE;
}
}
if ((mdm_change & CyCTS)
@@ -498,8 +486,7 @@ static irqreturn_t cd2401_modem_interrupt(int irq, void *dev_id)
/* !!! cy_start isn't used because... */
info->tty->stopped = 0;
base_addr[CyIER] |= CyTxMpty;
- cy_sched_event(info,
- Cy_EVENT_WRITE_WAKEUP);
+ tty_wakeup(info->tty);
}
} else {
if (!(mdm_status & CyCTS)) {
@@ -545,9 +532,6 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
info->last_active = jiffies;
if (info->tty == 0) {
base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
- if (info->xmit_cnt < WAKEUP_CHARS) {
- cy_sched_event(info, Cy_EVENT_WRITE_WAKEUP);
- }
base_addr[CyTEOIR] = CyNOTRANS;
return IRQ_HANDLED;
}
@@ -629,9 +613,9 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
}
}
- if (info->xmit_cnt < WAKEUP_CHARS) {
- cy_sched_event(info, Cy_EVENT_WRITE_WAKEUP);
- }
+ if (info->xmit_cnt < WAKEUP_CHARS)
+ tty_wakeup(info->tty);
+
base_addr[CyTEOIR] = (char_count != saved_cnt) ? 0 : CyNOTRANS;
return IRQ_HANDLED;
} /* cy_tx_interrupt */
@@ -692,49 +676,6 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
} /* cy_rx_interrupt */
-/*
- * This routine is used to handle the "bottom half" processing for the
- * serial driver, known also the "software interrupt" processing.
- * This processing is done at the kernel interrupt level, after the
- * cy#/_interrupt() has returned, BUT WITH INTERRUPTS TURNED ON. This
- * is where time-consuming activities which can not be done in the
- * interrupt driver proper are done; the interrupt driver schedules
- * them using cy_sched_event(), and they get done here.
- *
- * This is done through one level of indirection--the task queue.
- * When a hardware interrupt service routine wants service by the
- * driver's bottom half, it enqueues the appropriate tq_struct (one
- * per port) to the keventd work queue and sets a request flag
- * that the work queue be processed.
- *
- * Although this may seem unwieldy, it gives the system a way to
- * pass an argument (in this case the pointer to the cyclades_port
- * structure) to the bottom half of the driver. Previous kernels
- * had to poll every port to see if that port needed servicing.
- */
-static void do_softint(struct work_struct *ugly_api)
-{
- struct cyclades_port *info =
- container_of(ugly_api, struct cyclades_port, tqueue);
- struct tty_struct *tty;
-
- tty = info->tty;
- if (!tty)
- return;
-
- if (test_and_clear_bit(Cy_EVENT_HANGUP, &info->event)) {
- tty_hangup(info->tty);
- wake_up_interruptible(&info->open_wait);
- info->flags &= ~ASYNC_NORMAL_ACTIVE;
- }
- if (test_and_clear_bit(Cy_EVENT_OPEN_WAKEUP, &info->event)) {
- wake_up_interruptible(&info->open_wait);
- }
- if (test_and_clear_bit(Cy_EVENT_WRITE_WAKEUP, &info->event)) {
- tty_wakeup(tty);
- }
-} /* do_softint */
-
/* This is called whenever a port becomes active;
interrupts are enabled and DTR & RTS are turned on.
*/
@@ -1745,7 +1686,6 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
if (tty->driver->flush_buffer)
tty->driver->flush_buffer(tty);
tty_ldisc_flush(tty);
- info->event = 0;
info->tty = NULL;
if (info->blocked_open) {
if (info->close_delay) {
@@ -2236,7 +2176,6 @@ static int __init serial167_init(void)
info->rco = baud_co[DefSpeed] >> 5; /* Rx CO */
info->close_delay = 0;
info->x_char = 0;
- info->event = 0;
info->count = 0;
#ifdef SERIAL_DEBUG_COUNT
printk("cyc: %d: setting count to 0\n",
@@ -2245,7 +2184,6 @@ static int __init serial167_init(void)
info->blocked_open = 0;
info->default_threshold = 0;
info->default_timeout = 0;
- INIT_WORK(&info->tqueue, do_softint);
init_waitqueue_head(&info->open_wait);
init_waitqueue_head(&info->close_wait);
/* info->session */
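serial167.c drops its event-bit bottom half entirely. A compressed sketch of the before/after shape of that conversion, with hypothetical names (the specialix and stallion hunks below apply the same idea): tty_wakeup() and tty_hangup() are called straight from the interrupt handler, which is safe because both only wake waiters or schedule further work.

#include <linux/types.h>
#include <linux/tty.h>
#include <linux/interrupt.h>
#include <linux/wait.h>

/* Hypothetical per-port state used only in this sketch. */
struct my_port {
        struct tty_struct *tty;
        wait_queue_head_t open_wait;
        bool carrier;
};

/* Stand-in for reading the DCD line from the hypothetical hardware. */
static bool my_read_carrier(struct my_port *port)
{
        return port->carrier;
}

/* Modem-status interrupt: act on the change immediately. */
static irqreturn_t my_modem_isr(int irq, void *dev_id)
{
        struct my_port *port = dev_id;

        if (my_read_carrier(port)) {
                /* Carrier came up: let blocked openers proceed. */
                wake_up_interruptible(&port->open_wait);
        } else if (port->tty) {
                /*
                 * Carrier dropped: tty_hangup() only schedules the hangup
                 * work, so it is safe to call from interrupt context.
                 */
                tty_hangup(port->tty);
        }
        return IRQ_HANDLED;
}

/* Transmit interrupt: wake writers once the FIFO has drained enough. */
static irqreturn_t my_tx_isr(int irq, void *dev_id)
{
        struct my_port *port = dev_id;

        if (port->tty)
                tty_wakeup(port->tty);  /* replaces the old WRITE_WAKEUP event bit */
        return IRQ_HANDLED;
}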
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 455855631aef..c0e08c7bca2f 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -178,9 +178,6 @@ static int sx_poll = HZ;
ASYNC_SPD_HI | ASYNC_SPEED_VHI | ASYNC_SESSION_LOCKOUT | \
ASYNC_PGRP_LOCKOUT | ASYNC_CALLOUT_NOHUP)
-#undef RS_EVENT_WRITE_WAKEUP
-#define RS_EVENT_WRITE_WAKEUP 0
-
static struct tty_driver *specialix_driver;
static struct specialix_board sx_board[SX_NBOARD] = {
@@ -602,17 +599,6 @@ static int sx_probe(struct specialix_board *bp)
* Interrupt processing routines.
* */
-static inline void sx_mark_event(struct specialix_port * port, int event)
-{
- func_enter();
-
- set_bit(event, &port->event);
- schedule_work(&port->tqueue);
-
- func_exit();
-}
-
-
static inline struct specialix_port * sx_get_port(struct specialix_board * bp,
unsigned char const * what)
{
@@ -809,7 +795,7 @@ static inline void sx_transmit(struct specialix_board * bp)
sx_out(bp, CD186x_IER, port->IER);
}
if (port->xmit_cnt <= port->wakeup_chars)
- sx_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
func_exit();
}
@@ -839,7 +825,7 @@ static inline void sx_check_modem(struct specialix_board * bp)
wake_up_interruptible(&port->open_wait);
} else {
dprintk (SX_DEBUG_SIGNALS, "Sending HUP.\n");
- schedule_work(&port->tqueue_hangup);
+ tty_hangup(tty);
}
}
@@ -849,7 +835,7 @@ static inline void sx_check_modem(struct specialix_board * bp)
tty->hw_stopped = 0;
port->IER |= IER_TXRDY;
if (port->xmit_cnt <= port->wakeup_chars)
- sx_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
} else {
tty->hw_stopped = 1;
port->IER &= ~IER_TXRDY;
@@ -861,7 +847,7 @@ static inline void sx_check_modem(struct specialix_board * bp)
tty->hw_stopped = 0;
port->IER |= IER_TXRDY;
if (port->xmit_cnt <= port->wakeup_chars)
- sx_mark_event(port, RS_EVENT_WRITE_WAKEUP);
+ tty_wakeup(tty);
} else {
tty->hw_stopped = 1;
port->IER &= ~IER_TXRDY;
@@ -1618,7 +1604,6 @@ static void sx_close(struct tty_struct * tty, struct file * filp)
tty_ldisc_flush(tty);
spin_lock_irqsave(&port->lock, flags);
tty->closing = 0;
- port->event = 0;
port->tty = NULL;
spin_unlock_irqrestore(&port->lock, flags);
if (port->blocked_open) {
@@ -2235,32 +2220,6 @@ static void sx_start(struct tty_struct * tty)
func_exit();
}
-
-/*
- * This routine is called from the work-queue when the interrupt
- * routine has signalled that a hangup has occurred. The path of
- * hangup processing is:
- *
- * serial interrupt routine -> (workqueue) ->
- * do_sx_hangup() -> tty->hangup() -> sx_hangup()
- *
- */
-static void do_sx_hangup(struct work_struct *work)
-{
- struct specialix_port *port =
- container_of(work, struct specialix_port, tqueue_hangup);
- struct tty_struct *tty;
-
- func_enter();
-
- tty = port->tty;
- if (tty)
- tty_hangup(tty); /* FIXME: module removal race here */
-
- func_exit();
-}
-
-
static void sx_hangup(struct tty_struct * tty)
{
struct specialix_port *port = (struct specialix_port *)tty->driver_data;
@@ -2278,7 +2237,6 @@ static void sx_hangup(struct tty_struct * tty)
sx_shutdown_port(bp, port);
spin_lock_irqsave(&port->lock, flags);
- port->event = 0;
bp->count -= port->count;
if (bp->count < 0) {
printk(KERN_ERR "sx%d: sx_hangup: bad board count: %d port: %d\n",
@@ -2320,26 +2278,6 @@ static void sx_set_termios(struct tty_struct * tty, struct ktermios * old_termio
}
}
-
-static void do_softint(struct work_struct *work)
-{
- struct specialix_port *port =
- container_of(work, struct specialix_port, tqueue);
- struct tty_struct *tty;
-
- func_enter();
-
- if(!(tty = port->tty)) {
- func_exit();
- return;
- }
-
- if (test_and_clear_bit(RS_EVENT_WRITE_WAKEUP, &port->event))
- tty_wakeup(tty);
-
- func_exit();
-}
-
static const struct tty_operations sx_ops = {
.open = sx_open,
.close = sx_close,
@@ -2397,8 +2335,6 @@ static int sx_init_drivers(void)
memset(sx_port, 0, sizeof(sx_port));
for (i = 0; i < SX_NPORT * SX_NBOARD; i++) {
sx_port[i].magic = SPECIALIX_MAGIC;
- INIT_WORK(&sx_port[i].tqueue, do_softint);
- INIT_WORK(&sx_port[i].tqueue_hangup, do_sx_hangup);
sx_port[i].close_delay = 50 * HZ/100;
sx_port[i].closing_wait = 3000 * HZ/100;
init_waitqueue_head(&sx_port[i].open_wait);
diff --git a/drivers/char/specialix_io8.h b/drivers/char/specialix_io8.h
index 895bd90de363..3f2f85bdf516 100644
--- a/drivers/char/specialix_io8.h
+++ b/drivers/char/specialix_io8.h
@@ -112,7 +112,6 @@ struct specialix_port {
struct tty_struct * tty;
int count;
int blocked_open;
- ulong event;
int timeout;
int close_delay;
unsigned char * xmit_buf;
@@ -122,8 +121,6 @@ struct specialix_port {
int xmit_cnt;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
- struct work_struct tqueue;
- struct work_struct tqueue_hangup;
short wakeup_chars;
short break_length;
unsigned short closing_wait;
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index 45758d5b56ef..feac54e32a12 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -145,8 +145,7 @@ static struct stlbrd *stl_brds[STL_MAXBRDS];
*/
#define ASYI_TXBUSY 1
#define ASYI_TXLOW 2
-#define ASYI_DCDCHANGE 3
-#define ASYI_TXFLOWED 4
+#define ASYI_TXFLOWED 3
/*
* Define an array of board names as printable strings. Handy for
@@ -610,6 +609,23 @@ static const struct file_operations stl_fsiomem = {
static struct class *stallion_class;
+static void stl_cd_change(struct stlport *portp)
+{
+ unsigned int oldsigs = portp->sigs;
+
+ if (!portp->tty)
+ return;
+
+ portp->sigs = stl_getsignals(portp);
+
+ if ((portp->sigs & TIOCM_CD) && ((oldsigs & TIOCM_CD) == 0))
+ wake_up_interruptible(&portp->open_wait);
+
+ if ((oldsigs & TIOCM_CD) && ((portp->sigs & TIOCM_CD) == 0))
+ if (portp->flags & ASYNC_CHECK_CD)
+ tty_hangup(portp->tty);
+}
+
/*
* Check for any arguments passed in on the module load command line.
*/
@@ -1771,41 +1787,6 @@ static int stl_echpci64intr(struct stlbrd *brdp)
/*****************************************************************************/
/*
- * Service an off-level request for some channel.
- */
-static void stl_offintr(struct work_struct *work)
-{
- struct stlport *portp = container_of(work, struct stlport, tqueue);
- struct tty_struct *tty;
- unsigned int oldsigs;
-
- pr_debug("stl_offintr(portp=%p)\n", portp);
-
- if (portp == NULL)
- return;
-
- tty = portp->tty;
- if (tty == NULL)
- return;
-
- if (test_bit(ASYI_TXLOW, &portp->istate))
- tty_wakeup(tty);
-
- if (test_bit(ASYI_DCDCHANGE, &portp->istate)) {
- clear_bit(ASYI_DCDCHANGE, &portp->istate);
- oldsigs = portp->sigs;
- portp->sigs = stl_getsignals(portp);
- if ((portp->sigs & TIOCM_CD) && ((oldsigs & TIOCM_CD) == 0))
- wake_up_interruptible(&portp->open_wait);
- if ((oldsigs & TIOCM_CD) && ((portp->sigs & TIOCM_CD) == 0))
- if (portp->flags & ASYNC_CHECK_CD)
- tty_hangup(tty); /* FIXME: module removal race here - AKPM */
- }
-}
-
-/*****************************************************************************/
-
-/*
* Initialize all the ports on a panel.
*/
@@ -1840,7 +1821,6 @@ static int __devinit stl_initports(struct stlbrd *brdp, struct stlpanel *panelp)
portp->baud_base = STL_BAUDBASE;
portp->close_delay = STL_CLOSEDELAY;
portp->closing_wait = 30 * HZ;
- INIT_WORK(&portp->tqueue, stl_offintr);
init_waitqueue_head(&portp->open_wait);
init_waitqueue_head(&portp->close_wait);
portp->stats.brd = portp->brdnr;
@@ -3530,7 +3510,8 @@ static void stl_cd1400txisr(struct stlpanel *panelp, int ioaddr)
if ((len == 0) || ((len < STL_TXBUFLOW) &&
(test_bit(ASYI_TXLOW, &portp->istate) == 0))) {
set_bit(ASYI_TXLOW, &portp->istate);
- schedule_work(&portp->tqueue);
+ if (portp->tty)
+ tty_wakeup(portp->tty);
}
if (len == 0) {
@@ -3546,7 +3527,8 @@ static void stl_cd1400txisr(struct stlpanel *panelp, int ioaddr)
} else {
len = min(len, CD1400_TXFIFOSIZE);
portp->stats.txtotal += len;
- stlen = min(len, ((portp->tx.buf + STL_TXBUFSIZE) - tail));
+ stlen = min_t(unsigned int, len,
+ (portp->tx.buf + STL_TXBUFSIZE) - tail);
outb((TDR + portp->uartaddr), ioaddr);
outsb((ioaddr + EREG_DATA), tail, stlen);
len -= stlen;
@@ -3599,7 +3581,7 @@ static void stl_cd1400rxisr(struct stlpanel *panelp, int ioaddr)
outb((RDCR + portp->uartaddr), ioaddr);
len = inb(ioaddr + EREG_DATA);
if (tty == NULL || (buflen = tty_buffer_request_room(tty, len)) == 0) {
- len = min(len, sizeof(stl_unwanted));
+ len = min_t(unsigned int, len, sizeof(stl_unwanted));
outb((RDSR + portp->uartaddr), ioaddr);
insb((ioaddr + EREG_DATA), &stl_unwanted[0], len);
portp->stats.rxlost += len;
@@ -3692,8 +3674,7 @@ static void stl_cd1400mdmisr(struct stlpanel *panelp, int ioaddr)
outb((MISR + portp->uartaddr), ioaddr);
misr = inb(ioaddr + EREG_DATA);
if (misr & MISR_DCD) {
- set_bit(ASYI_DCDCHANGE, &portp->istate);
- schedule_work(&portp->tqueue);
+ stl_cd_change(portp);
portp->stats.modem++;
}
@@ -4447,7 +4428,8 @@ static void stl_sc26198txisr(struct stlport *portp)
if ((len == 0) || ((len < STL_TXBUFLOW) &&
(test_bit(ASYI_TXLOW, &portp->istate) == 0))) {
set_bit(ASYI_TXLOW, &portp->istate);
- schedule_work(&portp->tqueue);
+ if (portp->tty)
+ tty_wakeup(portp->tty);
}
if (len == 0) {
@@ -4465,7 +4447,8 @@ static void stl_sc26198txisr(struct stlport *portp)
} else {
len = min(len, SC26198_TXFIFOSIZE);
portp->stats.txtotal += len;
- stlen = min(len, ((portp->tx.buf + STL_TXBUFSIZE) - tail));
+ stlen = min_t(unsigned int, len,
+ (portp->tx.buf + STL_TXBUFSIZE) - tail);
outb(GTXFIFO, (ioaddr + XP_ADDR));
outsb((ioaddr + XP_DATA), tail, stlen);
len -= stlen;
@@ -4506,7 +4489,7 @@ static void stl_sc26198rxisr(struct stlport *portp, unsigned int iack)
if ((iack & IVR_TYPEMASK) == IVR_RXDATA) {
if (tty == NULL || (buflen = tty_buffer_request_room(tty, len)) == 0) {
- len = min(len, sizeof(stl_unwanted));
+ len = min_t(unsigned int, len, sizeof(stl_unwanted));
outb(GRXFIFO, (ioaddr + XP_ADDR));
insb((ioaddr + XP_DATA), &stl_unwanted[0], len);
portp->stats.rxlost += len;
@@ -4647,8 +4630,7 @@ static void stl_sc26198otherisr(struct stlport *portp, unsigned int iack)
case CIR_SUBCOS:
ipr = stl_sc26198getreg(portp, IPR);
if (ipr & IPR_DCDCHANGE) {
- set_bit(ASYI_DCDCHANGE, &portp->istate);
- schedule_work(&portp->tqueue);
+ stl_cd_change(portp);
portp->stats.modem++;
}
break;
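Besides folding DCD handling into stl_cd_change(), the stallion.c hunks switch several min() calls to min_t(). A small sketch, with invented buffer names, of why: the wrap-around limit is a pointer difference (a signed ptrdiff_t) while the length is unsigned int, and min() rejects mixed types, so min_t() states the comparison type explicitly.

#include <linux/kernel.h>
#include <linux/types.h>

#define MY_TXBUFSIZE 4096       /* hypothetical ring-buffer size */

static unsigned int my_contig_chunk(char *buf, char *tail, unsigned int len)
{
        /*
         * (buf + MY_TXBUFSIZE) - tail is a ptrdiff_t; len is unsigned int.
         * min() would trip its type check, so min_t() names the type both
         * operands are converted to before comparing.
         */
        return min_t(unsigned int, len, (buf + MY_TXBUFSIZE) - tail);
}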
diff --git a/drivers/char/sx.h b/drivers/char/sx.h
index 70d9783c7323..87c2defdead7 100644
--- a/drivers/char/sx.h
+++ b/drivers/char/sx.h
@@ -88,8 +88,6 @@ struct vpd_prom {
#define IS_CF_BOARD(board) (board->flags & (SX_CFISA_BOARD | SX_CFPCI_BOARD))
-#define SERIAL_TYPE_NORMAL 1
-
/* The SI processor clock is required to calculate the cc_int_count register
value for the SI cards. */
#define SI_PROCESSOR_CLOCK 25000000
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index d010ed95ed3b..ddc74d1f4f1b 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -85,6 +85,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/ioctl.h>
+#include <linux/synclink.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -110,8 +111,6 @@
#include <asm/uaccess.h>
-#include "linux/synclink.h"
-
#define RCLRVALUE 0xffff
static MGSL_PARAMS default_params = {
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 64e835f62438..1f954acf2bac 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -73,6 +73,7 @@
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/hdlc.h>
+#include <linux/synclink.h>
#include <asm/system.h>
#include <asm/io.h>
@@ -81,8 +82,6 @@
#include <asm/types.h>
#include <asm/uaccess.h>
-#include "linux/synclink.h"
-
#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_GT_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
#else
@@ -2040,37 +2039,41 @@ static void bh_transmit(struct slgt_info *info)
tty_wakeup(tty);
}
-static void dsr_change(struct slgt_info *info)
+static void dsr_change(struct slgt_info *info, unsigned short status)
{
- get_signals(info);
+ if (status & BIT3) {
+ info->signals |= SerialSignal_DSR;
+ info->input_signal_events.dsr_up++;
+ } else {
+ info->signals &= ~SerialSignal_DSR;
+ info->input_signal_events.dsr_down++;
+ }
DBGISR(("dsr_change %s signals=%04X\n", info->device_name, info->signals));
if ((info->dsr_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
slgt_irq_off(info, IRQ_DSR);
return;
}
info->icount.dsr++;
- if (info->signals & SerialSignal_DSR)
- info->input_signal_events.dsr_up++;
- else
- info->input_signal_events.dsr_down++;
wake_up_interruptible(&info->status_event_wait_q);
wake_up_interruptible(&info->event_wait_q);
info->pending_bh |= BH_STATUS;
}
-static void cts_change(struct slgt_info *info)
+static void cts_change(struct slgt_info *info, unsigned short status)
{
- get_signals(info);
+ if (status & BIT2) {
+ info->signals |= SerialSignal_CTS;
+ info->input_signal_events.cts_up++;
+ } else {
+ info->signals &= ~SerialSignal_CTS;
+ info->input_signal_events.cts_down++;
+ }
DBGISR(("cts_change %s signals=%04X\n", info->device_name, info->signals));
if ((info->cts_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
slgt_irq_off(info, IRQ_CTS);
return;
}
info->icount.cts++;
- if (info->signals & SerialSignal_CTS)
- info->input_signal_events.cts_up++;
- else
- info->input_signal_events.cts_down++;
wake_up_interruptible(&info->status_event_wait_q);
wake_up_interruptible(&info->event_wait_q);
info->pending_bh |= BH_STATUS;
@@ -2091,20 +2094,21 @@ static void cts_change(struct slgt_info *info)
}
}
-static void dcd_change(struct slgt_info *info)
+static void dcd_change(struct slgt_info *info, unsigned short status)
{
- get_signals(info);
+ if (status & BIT1) {
+ info->signals |= SerialSignal_DCD;
+ info->input_signal_events.dcd_up++;
+ } else {
+ info->signals &= ~SerialSignal_DCD;
+ info->input_signal_events.dcd_down++;
+ }
DBGISR(("dcd_change %s signals=%04X\n", info->device_name, info->signals));
if ((info->dcd_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
slgt_irq_off(info, IRQ_DCD);
return;
}
info->icount.dcd++;
- if (info->signals & SerialSignal_DCD) {
- info->input_signal_events.dcd_up++;
- } else {
- info->input_signal_events.dcd_down++;
- }
#if SYNCLINK_GENERIC_HDLC
if (info->netcount) {
if (info->signals & SerialSignal_DCD)
@@ -2127,20 +2131,21 @@ static void dcd_change(struct slgt_info *info)
}
}
-static void ri_change(struct slgt_info *info)
+static void ri_change(struct slgt_info *info, unsigned short status)
{
- get_signals(info);
+ if (status & BIT0) {
+ info->signals |= SerialSignal_RI;
+ info->input_signal_events.ri_up++;
+ } else {
+ info->signals &= ~SerialSignal_RI;
+ info->input_signal_events.ri_down++;
+ }
DBGISR(("ri_change %s signals=%04X\n", info->device_name, info->signals));
if ((info->ri_chkcount)++ == IO_PIN_SHUTDOWN_LIMIT) {
slgt_irq_off(info, IRQ_RI);
return;
}
- info->icount.dcd++;
- if (info->signals & SerialSignal_RI) {
- info->input_signal_events.ri_up++;
- } else {
- info->input_signal_events.ri_down++;
- }
+ info->icount.rng++;
wake_up_interruptible(&info->status_event_wait_q);
wake_up_interruptible(&info->event_wait_q);
info->pending_bh |= BH_STATUS;
@@ -2191,13 +2196,13 @@ static void isr_serial(struct slgt_info *info)
}
if (status & IRQ_DSR)
- dsr_change(info);
+ dsr_change(info, status);
if (status & IRQ_CTS)
- cts_change(info);
+ cts_change(info, status);
if (status & IRQ_DCD)
- dcd_change(info);
+ dcd_change(info, status);
if (status & IRQ_RI)
- ri_change(info);
+ ri_change(info, status);
}
static void isr_rdma(struct slgt_info *info)
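The synclink_gt.c hunks stop re-reading the signal lines with get_signals() and instead decode the level that the interrupt status word already latched (the RI hunk also starts counting icount.rng rather than icount.dcd). A reduced sketch of that pattern with a hypothetical bit layout:

#include <linux/types.h>

/* Hypothetical status bit and signal flag, mirroring the shape of the change. */
#define MY_STATUS_DSR   (1 << 3)
#define MY_SIGNAL_DSR   0x01

struct my_info {
        unsigned char signals;
        unsigned int dsr_up, dsr_down, dsr_count;
};

static void my_dsr_change(struct my_info *info, unsigned short status)
{
        /* Decode the latched level from the status word itself. */
        if (status & MY_STATUS_DSR) {
                info->signals |= MY_SIGNAL_DSR;
                info->dsr_up++;
        } else {
                info->signals &= ~MY_SIGNAL_DSR;
                info->dsr_down++;
        }
        info->dsr_count++;      /* bump the counter that matches this line */
}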
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index c63013b2fc36..f3e7807f78d9 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -66,6 +66,7 @@
#include <linux/termios.h>
#include <linux/workqueue.h>
#include <linux/hdlc.h>
+#include <linux/synclink.h>
#if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINKMP_MODULE))
#define SYNCLINK_GENERIC_HDLC 1
@@ -80,8 +81,6 @@
#include <asm/uaccess.h>
-#include "linux/synclink.h"
-
static MGSL_PARAMS default_params = {
MGSL_MODE_HDLC, /* unsigned long mode */
0, /* unsigned char loopback; */
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index c88424a0c89b..a5d8bcb40000 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1031,18 +1031,13 @@ void tpm_remove_hardware(struct device *dev)
spin_unlock(&driver_lock);
- dev_set_drvdata(dev, NULL);
misc_deregister(&chip->vendor.miscdev);
- kfree(chip->vendor.miscdev.name);
sysfs_remove_group(&dev->kobj, chip->vendor.attr_group);
tpm_bios_log_teardown(chip->bios_dir);
- clear_bit(chip->dev_num, dev_mask);
-
- kfree(chip);
-
- put_device(dev);
+ /* write it this way to be explicit (chip->dev == dev) */
+ put_device(chip->dev);
}
EXPORT_SYMBOL_GPL(tpm_remove_hardware);
@@ -1083,6 +1078,26 @@ int tpm_pm_resume(struct device *dev)
EXPORT_SYMBOL_GPL(tpm_pm_resume);
/*
+ * Once all references to the platform device are down to 0,
+ * release all allocated structures.
+ * If the vendor provided a release function, call it too.
+ */
+static void tpm_dev_release(struct device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip->vendor.release)
+ chip->vendor.release(dev);
+
+ chip->release(dev);
+
+ clear_bit(chip->dev_num, dev_mask);
+ kfree(chip->vendor.miscdev.name);
+ kfree(chip);
+}
+
+/*
* Called from tpm_<specific>.c probe function only for devices
* the driver has determined it should claim. Prior to calling
* this function the specific probe function has called pci_enable_device
@@ -1136,23 +1151,21 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
chip->vendor.miscdev.parent = dev;
chip->dev = get_device(dev);
+ chip->release = dev->release;
+ dev->release = tpm_dev_release;
+ dev_set_drvdata(dev, chip);
if (misc_register(&chip->vendor.miscdev)) {
dev_err(chip->dev,
"unable to misc_register %s, minor %d\n",
chip->vendor.miscdev.name,
chip->vendor.miscdev.minor);
- put_device(dev);
- clear_bit(chip->dev_num, dev_mask);
- kfree(chip);
- kfree(devname);
+ put_device(chip->dev);
return NULL;
}
spin_lock(&driver_lock);
- dev_set_drvdata(dev, chip);
-
list_add(&chip->list, &tpm_chip_list);
spin_unlock(&driver_lock);
@@ -1160,10 +1173,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev, const struct tpm_vend
if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
list_del(&chip->list);
misc_deregister(&chip->vendor.miscdev);
- put_device(dev);
- clear_bit(chip->dev_num, dev_mask);
- kfree(chip);
- kfree(devname);
+ put_device(chip->dev);
return NULL;
}
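The tpm.c rework hangs teardown off the struct device release callback: the chip is freed only when the last reference is dropped, and the original release hook is saved so it can still run. A skeleton sketch of that pattern with made-up names (the real patch also chains a vendor-supplied release):

#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical chip structure for this sketch only. */
struct my_chip {
        struct device *dev;
        void (*saved_release)(struct device *dev);
};

/* Runs when the last reference to the underlying device is dropped. */
static void my_dev_release(struct device *dev)
{
        struct my_chip *chip = dev_get_drvdata(dev);
        void (*chained)(struct device *) = chip->saved_release;

        kfree(chip);
        if (chained)
                chained(dev);   /* chain to the bus-provided release */
}

static struct my_chip *my_register(struct device *dev)
{
        struct my_chip *chip = kzalloc(sizeof(*chip), GFP_KERNEL);

        if (!chip)
                return NULL;

        chip->dev = get_device(dev);            /* hold our own reference */
        chip->saved_release = dev->release;     /* remember the original hook */
        dev->release = my_dev_release;
        dev_set_drvdata(dev, chip);
        return chip;
}

static void my_unregister(struct my_chip *chip)
{
        /* The last put triggers my_dev_release() and frees the chip. */
        put_device(chip->dev);
}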
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index d15ccddc92eb..e885148b4cfb 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -74,6 +74,7 @@ struct tpm_vendor_specific {
int (*send) (struct tpm_chip *, u8 *, size_t);
void (*cancel) (struct tpm_chip *);
u8 (*status) (struct tpm_chip *);
+ void (*release) (struct device *);
struct miscdevice miscdev;
struct attribute_group *attr_group;
struct list_head list;
@@ -106,6 +107,7 @@ struct tpm_chip {
struct dentry **bios_dir;
struct list_head list;
+ void (*release) (struct device *);
};
#define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 967002a5a1e5..726ee8a0277f 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -611,7 +611,7 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
}
}
-static struct pnp_driver tpm_inf_pnp = {
+static struct pnp_driver tpm_inf_pnp_driver = {
.name = "tpm_inf_pnp",
.driver = {
.owner = THIS_MODULE,
@@ -625,12 +625,12 @@ static struct pnp_driver tpm_inf_pnp = {
static int __init init_inf(void)
{
- return pnp_register_driver(&tpm_inf_pnp);
+ return pnp_register_driver(&tpm_inf_pnp_driver);
}
static void __exit cleanup_inf(void)
{
- pnp_unregister_driver(&tpm_inf_pnp);
+ pnp_unregister_driver(&tpm_inf_pnp_driver);
}
module_init(init_inf);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index f36fecd3fd26..79c86c47947f 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(tty_mutex);
extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */
extern int pty_limit; /* Config limit on Unix98 ptys */
static DEFINE_IDR(allocated_ptys);
-static DECLARE_MUTEX(allocated_ptys_lock);
+static DEFINE_MUTEX(allocated_ptys_lock);
static int ptmx_open(struct inode *, struct file *);
#endif
@@ -2571,9 +2571,9 @@ static void release_dev(struct file * filp)
#ifdef CONFIG_UNIX98_PTYS
/* Make this pty number available for reallocation */
if (devpts) {
- down(&allocated_ptys_lock);
+ mutex_lock(&allocated_ptys_lock);
idr_remove(&allocated_ptys, idx);
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
}
#endif
@@ -2737,24 +2737,24 @@ static int ptmx_open(struct inode * inode, struct file * filp)
nonseekable_open(inode, filp);
/* find a device that is not in use. */
- down(&allocated_ptys_lock);
+ mutex_lock(&allocated_ptys_lock);
if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
return -ENOMEM;
}
idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
if (idr_ret < 0) {
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
if (idr_ret == -EAGAIN)
return -ENOMEM;
return -EIO;
}
if (index >= pty_limit) {
idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
return -EIO;
}
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
mutex_lock(&tty_mutex);
retval = init_dev(ptm_driver, index, &tty);
@@ -2781,9 +2781,9 @@ out1:
release_dev(filp);
return retval;
out:
- down(&allocated_ptys_lock);
+ mutex_lock(&allocated_ptys_lock);
idr_remove(&allocated_ptys, index);
- up(&allocated_ptys_lock);
+ mutex_unlock(&allocated_ptys_lock);
return retval;
}
#endif
@@ -3721,7 +3721,6 @@ static void initialize_tty_struct(struct tty_struct *tty)
tty->buf.head = tty->buf.tail = NULL;
tty_buffer_init(tty);
INIT_DELAYED_WORK(&tty->buf.work, flush_to_ldisc);
- init_MUTEX(&tty->buf.pty_sem);
mutex_init(&tty->termios_mutex);
init_waitqueue_head(&tty->write_wait);
init_waitqueue_head(&tty->read_wait);
@@ -4048,10 +4047,6 @@ void __init console_init(void)
}
}
-#ifdef CONFIG_VT
-extern int vty_init(void);
-#endif
-
static int __init tty_class_init(void)
{
tty_class = class_create(THIS_MODULE, "tty");
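The tty_io.c hunks convert the pty-index lock from a semaphore declared with DECLARE_MUTEX() to a real mutex. A stripped-down sketch of the resulting idiom around an IDR, using a hypothetical index allocator and the same period-appropriate idr_pre_get()/idr_get_new() calls:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDR(my_idx_idr);
static DEFINE_MUTEX(my_idx_lock);       /* was a DECLARE_MUTEX() semaphore */

/* Allocate a free index, as ptmx_open() does for pty numbers. */
static int my_alloc_index(void)
{
        int idx, err;

        mutex_lock(&my_idx_lock);
        if (!idr_pre_get(&my_idx_idr, GFP_KERNEL)) {
                mutex_unlock(&my_idx_lock);
                return -ENOMEM;
        }
        err = idr_get_new(&my_idx_idr, NULL, &idx);
        mutex_unlock(&my_idx_lock);

        return err ? err : idx;
}

static void my_free_index(int idx)
{
        mutex_lock(&my_idx_lock);
        idr_remove(&my_idx_idr, idx);
        mutex_unlock(&my_idx_lock);
}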
diff --git a/drivers/char/vt.c b/drivers/char/vt.c
index 7a5badfb7d84..367be9175061 100644
--- a/drivers/char/vt.c
+++ b/drivers/char/vt.c
@@ -2400,13 +2400,15 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
{
struct vc_data *vc = vc_cons[fg_console].d;
unsigned char c;
- static unsigned long printing;
+ static DEFINE_SPINLOCK(printing_lock);
const ushort *start;
ushort cnt = 0;
ushort myx;
/* console busy or not yet initialized */
- if (!printable || test_and_set_bit(0, &printing))
+ if (!printable)
+ return;
+ if (!spin_trylock(&printing_lock))
return;
if (kmsg_redirect && vc_cons_allocated(kmsg_redirect - 1))
@@ -2481,7 +2483,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
notify_update(vc);
quit:
- clear_bit(0, &printing);
+ spin_unlock(&printing_lock);
}
static struct tty_driver *vt_console_device(struct console *c, int *index)
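vt_console_print() replaces its hand-rolled test_and_set_bit() busy flag with a static spinlock and spin_trylock(), keeping the non-blocking bail-out while using a proper lock type. A compact sketch of that shape with a hypothetical print routine:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_print_lock);

/* Hypothetical console-output routine; may be entered re-entrantly. */
static void my_console_print(const char *buf, unsigned int count)
{
        /* Busy (or re-entered): drop the output rather than block. */
        if (!spin_trylock(&my_print_lock))
                return;

        /* ... emit 'count' bytes from 'buf' to the screen ... */

        spin_unlock(&my_print_lock);
}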
diff --git a/drivers/char/xilinx_hwicap/Makefile b/drivers/char/xilinx_hwicap/Makefile
new file mode 100644
index 000000000000..5491cbc42f43
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Xilinx OPB hwicap driver
+#
+
+obj-$(CONFIG_XILINX_HWICAP) += xilinx_hwicap_m.o
+
+xilinx_hwicap_m-y := xilinx_hwicap.o fifo_icap.o buffer_icap.o
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.c b/drivers/char/xilinx_hwicap/buffer_icap.c
new file mode 100644
index 000000000000..dfea2bde162b
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/buffer_icap.c
@@ -0,0 +1,380 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2003-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include "buffer_icap.h"
+
+/* Indicates how many bytes will fit in a buffer. (1 BRAM) */
+#define XHI_MAX_BUFFER_BYTES 2048
+#define XHI_MAX_BUFFER_INTS (XHI_MAX_BUFFER_BYTES >> 2)
+
+/* File access and error constants */
+#define XHI_DEVICE_READ_ERROR -1
+#define XHI_DEVICE_WRITE_ERROR -2
+#define XHI_BUFFER_OVERFLOW_ERROR -3
+
+#define XHI_DEVICE_READ 0x1
+#define XHI_DEVICE_WRITE 0x0
+
+/* Constants for checking transfer status */
+#define XHI_CYCLE_DONE 0
+#define XHI_CYCLE_EXECUTING 1
+
+/* buffer_icap register offsets */
+
+/* Size of transfer, read & write */
+#define XHI_SIZE_REG_OFFSET 0x800L
+/* offset into bram, read & write */
+#define XHI_BRAM_OFFSET_REG_OFFSET 0x804L
+/* Read not Configure, direction of transfer. Write only */
+#define XHI_RNC_REG_OFFSET 0x808L
+/* Indicates transfer complete. Read only */
+#define XHI_STATUS_REG_OFFSET 0x80CL
+
+/* Constants for setting the RNC register */
+#define XHI_CONFIGURE 0x0UL
+#define XHI_READBACK 0x1UL
+
+/* Constants for the Done register */
+#define XHI_NOT_FINISHED 0x0UL
+#define XHI_FINISHED 0x1UL
+
+#define XHI_BUFFER_START 0
+
+/**
+ * buffer_icap_get_status: Get the contents of the status register.
+ * @parameter base_address: is the base address of the device
+ *
+ * The status register contains the ICAP status and the done bit.
+ *
+ * D8 - cfgerr
+ * D7 - dalign
+ * D6 - rip
+ * D5 - in_abort_l
+ * D4 - Always 1
+ * D3 - Always 1
+ * D2 - Always 1
+ * D1 - Always 1
+ * D0 - Done bit
+ **/
+static inline u32 buffer_icap_get_status(void __iomem *base_address)
+{
+ return in_be32(base_address + XHI_STATUS_REG_OFFSET);
+}
+
+/**
+ * buffer_icap_get_bram: Reads data from the storage buffer bram.
+ * @parameter base_address: contains the base address of the component.
+ * @parameter offset: The word offset from which the data should be read.
+ *
+ * A bram is used as a configuration memory cache. One frame of data can
+ * be stored in this "storage buffer".
+ **/
+static inline u32 buffer_icap_get_bram(void __iomem *base_address,
+ u32 offset)
+{
+ return in_be32(base_address + (offset << 2));
+}
+
+/**
+ * buffer_icap_busy: Return true if the icap device is busy
+ * @parameter base_address: is the base address of the device
+ *
+ * This queries the low order bit of the status register, which
+ * indicates whether the current configuration or readback operation
+ * has completed.
+ **/
+static inline bool buffer_icap_busy(void __iomem *base_address)
+{
+ return (buffer_icap_get_status(base_address) & 1) == XHI_NOT_FINISHED;
+}
+
+/**
+ * buffer_icap_done: Return true if the icap device is not busy
+ * @parameter base_address: is the base address of the device
+ *
+ * This queries the low order bit of the status register, which
+ * indicates whether the current configuration or readback operation
+ * has completed.
+ **/
+static inline bool buffer_icap_done(void __iomem *base_address)
+{
+ return (buffer_icap_get_status(base_address) & 1) == XHI_FINISHED;
+}
+
+/**
+ * buffer_icap_set_size: Set the size register.
+ * @parameter base_address: is the base address of the device
+ * @parameter data: The size in bytes.
+ *
+ * The size register holds the number of 8 bit bytes to transfer between
+ * bram and the icap (or icap to bram).
+ **/
+static inline void buffer_icap_set_size(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_SIZE_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_offset: Set the bram offset register.
+ * @parameter base_address: contains the base address of the device.
+ * @parameter data: is the value to be written to the data register.
+ *
+ * The bram offset register holds the starting bram address to transfer
+ * data from during configuration or write data to during readback.
+ **/
+static inline void buffer_icap_set_offset(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_BRAM_OFFSET_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_rnc: Set the RNC (Readback not Configure) register.
+ * @parameter base_address: contains the base address of the device.
+ * @parameter data: is the value to be written to the data register.
+ *
+ * The RNC register determines the direction of the data transfer. It
+ * controls whether a configuration or readback takes place. Writing to
+ * this register initiates the transfer. A value of 1 initiates a
+ * readback while writing a value of 0 initiates a configuration.
+ **/
+static inline void buffer_icap_set_rnc(void __iomem *base_address,
+ u32 data)
+{
+ out_be32(base_address + XHI_RNC_REG_OFFSET, data);
+}
+
+/**
+ * buffer_icap_set_bram: Write data to the storage buffer bram.
+ * @parameter base_address: contains the base address of the component.
+ * @parameter offset: The word offset at which the data should be written.
+ * @parameter data: The value to be written to the bram offset.
+ *
+ * A bram is used as a configuration memory cache. One frame of data can
+ * be stored in this "storage buffer".
+ **/
+static inline void buffer_icap_set_bram(void __iomem *base_address,
+ u32 offset, u32 data)
+{
+ out_be32(base_address + (offset << 2), data);
+}
+
+/**
+ * buffer_icap_device_read: Transfer bytes from ICAP to the storage buffer.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter offset: The storage buffer start address.
+ * @parameter count: The number of words (32 bit) to read from the
+ * device (ICAP).
+ **/
+static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
+ u32 offset, u32 count)
+{
+
+ s32 retries = 0;
+ void __iomem *base_address = drvdata->base_address;
+
+ if (buffer_icap_busy(base_address))
+ return -EBUSY;
+
+ if ((offset + count) > XHI_MAX_BUFFER_INTS)
+ return -EINVAL;
+
+ /* setSize count*4 to get bytes. */
+ buffer_icap_set_size(base_address, (count << 2));
+ buffer_icap_set_offset(base_address, offset);
+ buffer_icap_set_rnc(base_address, XHI_READBACK);
+
+ while (buffer_icap_busy(base_address)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EBUSY;
+ }
+ return 0;
+
+};
+
+/**
+ * buffer_icap_device_write: Transfer words from the storage buffer to the ICAP.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter offset: The storage buffer start address.
+ * @parameter count: The number of words (32 bit) to write to the
+ * device (ICAP).
+ **/
+static int buffer_icap_device_write(struct hwicap_drvdata *drvdata,
+ u32 offset, u32 count)
+{
+
+ s32 retries = 0;
+ void __iomem *base_address = drvdata->base_address;
+
+ if (buffer_icap_busy(base_address))
+ return -EBUSY;
+
+ if ((offset + count) > XHI_MAX_BUFFER_INTS)
+ return -EINVAL;
+
+ /* setSize count*4 to get bytes. */
+ buffer_icap_set_size(base_address, count << 2);
+ buffer_icap_set_offset(base_address, offset);
+ buffer_icap_set_rnc(base_address, XHI_CONFIGURE);
+
+ while (buffer_icap_busy(base_address)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EBUSY;
+ }
+ return 0;
+
+};
+
+/**
+ * buffer_icap_reset: Reset the logic of the icap device.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * Writing to the status register resets the ICAP logic in an internal
+ * version of the core. For the version of the core published in EDK,
+ * this is a noop.
+ **/
+void buffer_icap_reset(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_STATUS_REG_OFFSET, 0xFEFE);
+}
+
+/**
+ * buffer_icap_set_configuration: Load a partial bitstream from system memory.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter data: Kernel address of the partial bitstream.
+ * @parameter size: the size of the partial bitstream in 32 bit words.
+ **/
+int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size)
+{
+ int status;
+ s32 buffer_count = 0;
+ s32 num_writes = 0;
+ bool dirty = 0;
+ u32 i;
+ void __iomem *base_address = drvdata->base_address;
+
+ /* Loop through all the data */
+ for (i = 0, buffer_count = 0; i < size; i++) {
+
+ /* Copy data to bram */
+ buffer_icap_set_bram(base_address, buffer_count, data[i]);
+ dirty = 1;
+
+ if (buffer_count < XHI_MAX_BUFFER_INTS - 1) {
+ buffer_count++;
+ continue;
+ }
+
+ /* Write data to ICAP */
+ status = buffer_icap_device_write(
+ drvdata,
+ XHI_BUFFER_START,
+ XHI_MAX_BUFFER_INTS);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ return status;
+ }
+
+ buffer_count = 0;
+ num_writes++;
+ dirty = 0;
+ }
+
+ /* Write unwritten data to ICAP */
+ if (dirty) {
+ /* Write data to ICAP */
+ status = buffer_icap_device_write(drvdata, XHI_BUFFER_START,
+ buffer_count);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ }
+ return status;
+ }
+
+ return 0;
+};
+
+/**
+ * buffer_icap_get_configuration: Read configuration data from the device.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter data: Address of the data representing the partial bitstream
+ * @parameter size: the size of the partial bitstream in 32 bit words.
+ **/
+int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size)
+{
+ int status;
+ s32 buffer_count = 0;
+ s32 read_count = 0;
+ u32 i;
+ void __iomem *base_address = drvdata->base_address;
+
+ /* Loop through all the data */
+ for (i = 0, buffer_count = XHI_MAX_BUFFER_INTS; i < size; i++) {
+ if (buffer_count == XHI_MAX_BUFFER_INTS) {
+ u32 words_remaining = size - i;
+ u32 words_to_read =
+ words_remaining <
+ XHI_MAX_BUFFER_INTS ? words_remaining :
+ XHI_MAX_BUFFER_INTS;
+
+ /* Read data from ICAP */
+ status = buffer_icap_device_read(
+ drvdata,
+ XHI_BUFFER_START,
+ words_to_read);
+ if (status != 0) {
+ /* abort. */
+ buffer_icap_reset(drvdata);
+ return status;
+ }
+
+ buffer_count = 0;
+ read_count++;
+ }
+
+ /* Copy data from bram */
+ data[i] = buffer_icap_get_bram(base_address, buffer_count);
+ buffer_count++;
+ }
+
+ return 0;
+};
diff --git a/drivers/char/xilinx_hwicap/buffer_icap.h b/drivers/char/xilinx_hwicap/buffer_icap.h
new file mode 100644
index 000000000000..03184959fa00
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/buffer_icap.h
@@ -0,0 +1,57 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2003-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_BUFFER_ICAP_H_ /* prevent circular inclusions */
+#define XILINX_BUFFER_ICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include "xilinx_hwicap.h"
+
+void buffer_icap_reset(struct hwicap_drvdata *drvdata);
+
+/* Loads a partial bitstream from system memory. */
+int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 Size);
+
+/* Reads a partial bitstream back from the device into system memory. */
+int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 Size);
+
+#endif
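As a usage note, and assuming a struct hwicap_drvdata already set up elsewhere in this series, the two entry points declared here take a kernel buffer of 32-bit configuration words; an illustrative round trip might look like:

#include "buffer_icap.h"

/* Illustrative only: push 'nwords' of bitstream data, then read them back. */
static int my_icap_roundtrip(struct hwicap_drvdata *drvdata,
                             u32 *words, u32 nwords)
{
        int status;

        status = buffer_icap_set_configuration(drvdata, words, nwords);
        if (status)
                return status;

        return buffer_icap_get_configuration(drvdata, words, nwords);
}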
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.c b/drivers/char/xilinx_hwicap/fifo_icap.c
new file mode 100644
index 000000000000..0988314694a6
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/fifo_icap.c
@@ -0,0 +1,381 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#include "fifo_icap.h"
+
+/* Register offsets for the XHwIcap device. */
+#define XHI_GIER_OFFSET 0x1C /* Device Global Interrupt Enable Reg */
+#define XHI_IPISR_OFFSET 0x20 /* Interrupt Status Register */
+#define XHI_IPIER_OFFSET 0x28 /* Interrupt Enable Register */
+#define XHI_WF_OFFSET 0x100 /* Write FIFO */
+#define XHI_RF_OFFSET 0x104 /* Read FIFO */
+#define XHI_SZ_OFFSET 0x108 /* Size Register */
+#define XHI_CR_OFFSET 0x10C /* Control Register */
+#define XHI_SR_OFFSET 0x110 /* Status Register */
+#define XHI_WFV_OFFSET 0x114 /* Write FIFO Vacancy Register */
+#define XHI_RFO_OFFSET 0x118 /* Read FIFO Occupancy Register */
+
+/* Device Global Interrupt Enable Register (GIER) bit definitions */
+
+#define XHI_GIER_GIE_MASK 0x80000000 /* Global Interrupt enable Mask */
+
+/**
+ * HwIcap Device Interrupt Status/Enable Registers
+ *
+ * Interrupt Status Register (IPISR) : This register holds the
+ * interrupt status flags for the device. These bits are toggled on
+ * write.
+ *
+ * Interrupt Enable Register (IPIER) : This register is used to enable
+ * interrupt sources for the device.
+ * Writing a '1' to a bit enables the corresponding interrupt.
+ * Writing a '0' to a bit disables the corresponding interrupt.
+ *
+ * IPISR/IPIER registers have the same bit definitions and are only defined
+ * once.
+ */
+#define XHI_IPIXR_RFULL_MASK 0x00000008 /* Read FIFO Full */
+#define XHI_IPIXR_WEMPTY_MASK 0x00000004 /* Write FIFO Empty */
+#define XHI_IPIXR_RDP_MASK 0x00000002 /* Read FIFO half full */
+#define XHI_IPIXR_WRP_MASK 0x00000001 /* Write FIFO half full */
+#define XHI_IPIXR_ALL_MASK 0x0000000F /* Mask of all interrupts */
+
+/* Control Register (CR) */
+#define XHI_CR_SW_RESET_MASK 0x00000008 /* SW Reset Mask */
+#define XHI_CR_FIFO_CLR_MASK 0x00000004 /* FIFO Clear Mask */
+#define XHI_CR_READ_MASK 0x00000002 /* Read from ICAP to FIFO */
+#define XHI_CR_WRITE_MASK 0x00000001 /* Write from FIFO to ICAP */
+
+/* Status Register (SR) */
+#define XHI_SR_CFGERR_N_MASK 0x00000100 /* Config Error Mask */
+#define XHI_SR_DALIGN_MASK 0x00000080 /* Data Alignment Mask */
+#define XHI_SR_RIP_MASK 0x00000040 /* Read back Mask */
+#define XHI_SR_IN_ABORT_N_MASK 0x00000020 /* Select Map Abort Mask */
+#define XHI_SR_DONE_MASK 0x00000001 /* Done bit Mask */
+
+
+#define XHI_WFO_MAX_VACANCY 1024 /* Max Write FIFO Vacancy, in words */
+#define XHI_RFO_MAX_OCCUPANCY 256 /* Max Read FIFO Occupancy, in words */
+/* The maximum amount we can request from fifo_icap_get_configuration
+   at once, in words. */
+#define XHI_MAX_READ_TRANSACTION_WORDS 0xFFF
+
+
+/**
+ * fifo_icap_fifo_write: Write data to the write FIFO.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter data: the 32-bit value to be written to the FIFO.
+ *
+ * This function will silently fail if the fifo is full.
+ **/
+static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata,
+ u32 data)
+{
+ dev_dbg(drvdata->dev, "fifo_write: %x\n", data);
+ out_be32(drvdata->base_address + XHI_WF_OFFSET, data);
+}
+
+/**
+ * fifo_icap_fifo_read: Read data from the Read FIFO.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * This function will silently fail if the fifo is empty.
+ **/
+static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata)
+{
+ u32 data = in_be32(drvdata->base_address + XHI_RF_OFFSET);
+ dev_dbg(drvdata->dev, "fifo_read: %x\n", data);
+ return data;
+}
+
+/**
+ * fifo_icap_set_read_size: Set the size register.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter data: the size of the following read transaction, in words.
+ **/
+static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
+ u32 data)
+{
+ out_be32(drvdata->base_address + XHI_SZ_OFFSET, data);
+}
+
+/**
+ * fifo_icap_start_config: Initiate a configuration (write) to the device.
+ * @parameter drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_WRITE_MASK);
+ dev_dbg(drvdata->dev, "configuration started\n");
+}
+
+/**
+ * fifo_icap_start_readback: Initiate a readback from the device.
+ * @parameter drvdata: a pointer to the drvdata.
+ **/
+static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
+{
+ out_be32(drvdata->base_address + XHI_CR_OFFSET, XHI_CR_READ_MASK);
+ dev_dbg(drvdata->dev, "readback started\n");
+}
+
+/**
+ * fifo_icap_busy: Return true if the ICAP is still processing a transaction.
+ * @parameter drvdata: a pointer to the drvdata.
+ **/
+static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
+{
+ u32 status = in_be32(drvdata->base_address + XHI_SR_OFFSET);
+ dev_dbg(drvdata->dev, "Getting status = %x\n", status);
+ return (status & XHI_SR_DONE_MASK) ? 0 : 1;
+}
+
+/**
+ * fifo_icap_write_fifo_vacancy: Query the write fifo available space.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely pushed into the write fifo.
+ **/
+static inline u32 fifo_icap_write_fifo_vacancy(
+ struct hwicap_drvdata *drvdata)
+{
+ return in_be32(drvdata->base_address + XHI_WFV_OFFSET);
+}
+
+/**
+ * fifo_icap_read_fifo_occupancy: Query the read fifo available data.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * Return the number of words that can be safely read from the read fifo.
+ **/
+static inline u32 fifo_icap_read_fifo_occupancy(
+ struct hwicap_drvdata *drvdata)
+{
+ return in_be32(drvdata->base_address + XHI_RFO_OFFSET);
+}
+
+/**
+ * fifo_icap_set_configuration: Send configuration data to the ICAP.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter frame_buffer: a pointer to the data to be written to the
+ * ICAP device.
+ * @parameter num_words: the number of words (32 bit) to write to the ICAP
+ * device.
+ *
+ * This function writes the given user data to the Write FIFO in
+ * polled mode and starts the transfer of the data to
+ * the ICAP device.
+ **/
+int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata,
+ u32 *frame_buffer, u32 num_words)
+{
+
+ u32 write_fifo_vacancy = 0;
+ u32 retries = 0;
+ u32 remaining_words;
+
+ dev_dbg(drvdata->dev, "fifo_set_configuration\n");
+
+ /*
+ * Check if the ICAP device is Busy with the last Read/Write
+ */
+ if (fifo_icap_busy(drvdata))
+ return -EBUSY;
+
+ /*
+ * Set up the buffer pointer and the words to be transferred.
+ */
+ remaining_words = num_words;
+
+ while (remaining_words > 0) {
+ /*
+ * Wait until there is space in the write fifo.
+ */
+ while (write_fifo_vacancy == 0) {
+ write_fifo_vacancy =
+ fifo_icap_write_fifo_vacancy(drvdata);
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EIO;
+ }
+
+ /*
+ * Write data into the Write FIFO.
+ */
+ while ((write_fifo_vacancy != 0) &&
+ (remaining_words > 0)) {
+ fifo_icap_fifo_write(drvdata, *frame_buffer);
+
+ remaining_words--;
+ write_fifo_vacancy--;
+ frame_buffer++;
+ }
+ /* Start pushing whatever is in the FIFO into the ICAP. */
+ fifo_icap_start_config(drvdata);
+ }
+
+ /* Wait until the write has finished. */
+ while (fifo_icap_busy(drvdata)) {
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ break;
+ }
+
+ dev_dbg(drvdata->dev, "done fifo_set_configuration\n");
+
+ /*
+ * If the requested number of words has not been written to
+ * the device then indicate failure.
+ */
+ if (remaining_words != 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * fifo_icap_get_configuration: Read configuration data from the device.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter frame_buffer: Address of the data representing the partial bitstream
+ * @parameter num_words: the size of the partial bitstream in 32 bit words.
+ *
+ * This function reads the specified number of words from the ICAP device in
+ * the polled mode.
+ */
+int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata,
+ u32 *frame_buffer, u32 num_words)
+{
+
+ u32 read_fifo_occupancy = 0;
+ u32 retries = 0;
+ u32 *data = frame_buffer;
+ u32 remaining_words;
+ u32 words_to_read;
+
+ dev_dbg(drvdata->dev, "fifo_get_configuration\n");
+
+ /*
+ * Check if the ICAP device is Busy with the last Write/Read
+ */
+ if (fifo_icap_busy(drvdata))
+ return -EBUSY;
+
+ remaining_words = num_words;
+
+ while (remaining_words > 0) {
+ words_to_read = remaining_words;
+ /* The hardware has a limit on the number of words
+ that can be read at one time. */
+ if (words_to_read > XHI_MAX_READ_TRANSACTION_WORDS)
+ words_to_read = XHI_MAX_READ_TRANSACTION_WORDS;
+
+ remaining_words -= words_to_read;
+
+ fifo_icap_set_read_size(drvdata, words_to_read);
+ fifo_icap_start_readback(drvdata);
+
+ while (words_to_read > 0) {
+ /* Wait until we have some data in the fifo. */
+ while (read_fifo_occupancy == 0) {
+ read_fifo_occupancy =
+ fifo_icap_read_fifo_occupancy(drvdata);
+ retries++;
+ if (retries > XHI_MAX_RETRIES)
+ return -EIO;
+ }
+
+ if (read_fifo_occupancy > words_to_read)
+ read_fifo_occupancy = words_to_read;
+
+ words_to_read -= read_fifo_occupancy;
+
+ /* Read the data from the Read FIFO. */
+ while (read_fifo_occupancy != 0) {
+ *data++ = fifo_icap_fifo_read(drvdata);
+ read_fifo_occupancy--;
+ }
+ }
+ }
+
+ dev_dbg(drvdata->dev, "done fifo_get_configuration\n");
+
+ return 0;
+}
+
+/**
+ * fifo_icap_reset: Reset the logic of the icap device.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * This function forces the software reset of the complete HWICAP device.
+ * All the registers will return to the default value and the FIFO is also
+ * flushed as a part of this software reset.
+ */
+void fifo_icap_reset(struct hwicap_drvdata *drvdata)
+{
+ u32 reg_data;
+ /*
+ * Reset the device by setting/clearing the RESET bit in the
+ * Control Register.
+ */
+ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data | XHI_CR_SW_RESET_MASK);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data & (~XHI_CR_SW_RESET_MASK));
+
+}
+
+/**
+ * fifo_icap_flush_fifo: This function flushes the FIFOs in the device.
+ * @parameter drvdata: a pointer to the drvdata.
+ */
+void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata)
+{
+ u32 reg_data;
+ /*
+ * Flush the FIFO by setting/clearing the FIFO Clear bit in the
+ * Control Register.
+ */
+ reg_data = in_be32(drvdata->base_address + XHI_CR_OFFSET);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data | XHI_CR_FIFO_CLR_MASK);
+
+ out_be32(drvdata->base_address + XHI_CR_OFFSET,
+ reg_data & (~XHI_CR_FIFO_CLR_MASK));
+}
+
diff --git a/drivers/char/xilinx_hwicap/fifo_icap.h b/drivers/char/xilinx_hwicap/fifo_icap.h
new file mode 100644
index 000000000000..4d3068dd0405
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/fifo_icap.h
@@ -0,0 +1,62 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_FIFO_ICAP_H_ /* prevent circular inclusions */
+#define XILINX_FIFO_ICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include "xilinx_hwicap.h"
+
+/* Reads integers from the device into the storage buffer. */
+int fifo_icap_get_configuration(
+ struct hwicap_drvdata *drvdata,
+ u32 *FrameBuffer,
+ u32 NumWords);
+
+/* Writes integers to the device from the storage buffer. */
+int fifo_icap_set_configuration(
+ struct hwicap_drvdata *drvdata,
+ u32 *FrameBuffer,
+ u32 NumWords);
+
+void fifo_icap_reset(struct hwicap_drvdata *drvdata);
+void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata);
+
+#endif
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
new file mode 100644
index 000000000000..24f6aef0fd3c
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -0,0 +1,904 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2002 Xilinx Inc., Systems Engineering Group
+ * (c) Copyright 2004 Xilinx Inc., Systems Engineering Group
+ * (c) Copyright 2007-2008 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+/*
+ * This is the code behind /dev/xilinx_icap -- it allows a user-space
+ * application to use the Xilinx ICAP subsystem.
+ *
+ * The following operations are possible:
+ *
+ * open open the port and initialize for access.
+ * release release port
+ * write Write a bitstream to the configuration processor.
+ * read Read a data stream from the configuration processor.
+ *
+ * After being opened, the port is initialized and accessed to avoid a
+ * corrupted first read which may occur with some hardware. The port
+ * is left in a desynched state, requiring that a synch sequence be
+ * transmitted before any valid configuration data. A user will have
+ * exclusive access to the device while it remains open, and the state
+ * of the ICAP cannot be guaranteed after the device is closed. Note
+ * that a complete reset of the core and the state of the ICAP cannot
+ * be performed on many versions of the cores, hence users of this
+ * device should avoid making inconsistent accesses to the device. In
+ * particular, accessing the read interface, without first generating
+ * a write containing a readback packet can leave the ICAP in an
+ * inaccessible state.
+ *
+ * Note that in order to use the read interface, it is first necessary
+ * to write a request packet to the write interface; i.e., it is not
+ * possible to simply read back the bitstream (or any configuration
+ * bits) from a device without specifically requesting them first.
+ * The code to craft such packets is intended to be part of the
+ * user-space application code that uses this device. The simplest
+ * way to use this interface is simply:
+ *
+ * cp foo.bit /dev/xilinx_icap
+ *
+ * Note that unless foo.bit is an appropriately constructed partial
+ * bitstream, this has a high likelihood of overwriting the design
+ * currently programmed in the FPGA.
+ */
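+
+/*
+ * As a rough sketch (hypothetical application code, not part of this
+ * driver), a user-space readback would first write a request packet and
+ * only then read, e.g.:
+ *
+ *	int fd = open("/dev/xilinx_icap", O_RDWR);
+ *	write(fd, request, request_len);
+ *	read(fd, response, response_len);
+ *	close(fd);
+ *
+ * where "request" is a readback request packet crafted by the
+ * application as described above, and "response" receives the
+ * requested configuration data.
+ */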
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/proc_fs.h>
+#include <asm/semaphore.h>
+#include <linux/sysctl.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_OF
+/* For open firmware. */
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#endif
+
+#include "xilinx_hwicap.h"
+#include "buffer_icap.h"
+#include "fifo_icap.h"
+
+#define DRIVER_NAME "xilinx_icap"
+
+#define HWICAP_REGS (0x10000)
+
+/* dynamically allocate device number */
+static int xhwicap_major;
+static int xhwicap_minor;
+#define HWICAP_DEVICES 1
+
+module_param(xhwicap_major, int, S_IRUGO);
+module_param(xhwicap_minor, int, S_IRUGO);
+
+/* An array of flags; an entry is set to true when the corresponding device is registered. */
+static bool probed_devices[HWICAP_DEVICES];
+
+static struct class *icap_class;
+
+#define UNIMPLEMENTED 0xFFFF
+
+static const struct config_registers v2_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = 11,
+ .KEY = 12,
+ .CBC = 13,
+ .IDCODE = 14,
+ .AXSS = UNIMPLEMENTED,
+ .C0R_1 = UNIMPLEMENTED,
+ .CSOB = UNIMPLEMENTED,
+ .WBSTAR = UNIMPLEMENTED,
+ .TIMER = UNIMPLEMENTED,
+ .BOOTSTS = UNIMPLEMENTED,
+ .CTL_1 = UNIMPLEMENTED,
+};
+
+static const struct config_registers v4_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = UNIMPLEMENTED,
+ .KEY = UNIMPLEMENTED,
+ .CBC = 11,
+ .IDCODE = 12,
+ .AXSS = 13,
+ .C0R_1 = UNIMPLEMENTED,
+ .CSOB = UNIMPLEMENTED,
+ .WBSTAR = UNIMPLEMENTED,
+ .TIMER = UNIMPLEMENTED,
+ .BOOTSTS = UNIMPLEMENTED,
+ .CTL_1 = UNIMPLEMENTED,
+};
+static const struct config_registers v5_config_registers = {
+ .CRC = 0,
+ .FAR = 1,
+ .FDRI = 2,
+ .FDRO = 3,
+ .CMD = 4,
+ .CTL = 5,
+ .MASK = 6,
+ .STAT = 7,
+ .LOUT = 8,
+ .COR = 9,
+ .MFWR = 10,
+ .FLR = UNIMPLEMENTED,
+ .KEY = UNIMPLEMENTED,
+ .CBC = 11,
+ .IDCODE = 12,
+ .AXSS = 13,
+ .C0R_1 = 14,
+ .CSOB = 15,
+ .WBSTAR = 16,
+ .TIMER = 17,
+ .BOOTSTS = 18,
+ .CTL_1 = 19,
+};
+
+/**
+ * hwicap_command_desync: Send a DESYNC command to the ICAP port.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * This command desynchronizes the ICAP. After this command, a
+ * bitstream containing a NULL packet, followed by a SYNCH packet, is
+ * required before the ICAP will recognize commands.
+ */
+int hwicap_command_desync(struct hwicap_drvdata *drvdata)
+{
+ u32 buffer[4];
+ u32 index = 0;
+
+ /*
+ * Create the data to be written to the ICAP.
+ */
+ buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
+ buffer[index++] = XHI_CMD_DESYNCH;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data present
+ * in the FIFO to the ICAP device.
+ */
+ return drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+}
+
+/**
+ * hwicap_command_capture: Send a CAPTURE command to the ICAP port.
+ * @parameter drvdata: a pointer to the drvdata.
+ *
+ * This command captures all of the flip flop states so they will be
+ * available during readback. One can use this command instead of
+ * enabling the CAPTURE block in the design.
+ */
+int hwicap_command_capture(struct hwicap_drvdata *drvdata)
+{
+ u32 buffer[7];
+ u32 index = 0;
+
+ /*
+ * Create the data to be written to the ICAP.
+ */
+ buffer[index++] = XHI_DUMMY_PACKET;
+ buffer[index++] = XHI_SYNC_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
+ buffer[index++] = XHI_CMD_GCAPTURE;
+ buffer[index++] = XHI_DUMMY_PACKET;
+ buffer[index++] = XHI_DUMMY_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data
+ * present in the FIFO to the ICAP device.
+ */
+ return drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+
+}
+
+/**
+ * hwicap_get_configuration_register: Query a configuration register.
+ * @parameter drvdata: a pointer to the drvdata.
+ * @parameter reg: a constant which represents the configuration
+ * register value to be returned.
+ * Examples: XHI_IDCODE, XHI_FLR.
+ * @parameter RegData: returns the value of the register.
+ *
+ * Sends a query packet to the ICAP and then receives the response.
+ * The ICAP is left in the synched state.
+ */
+int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
+ u32 reg, u32 *RegData)
+{
+ int status;
+ u32 buffer[6];
+ u32 index = 0;
+
+ /*
+ * Create the data to be written to the ICAP.
+ */
+ buffer[index++] = XHI_DUMMY_PACKET;
+ buffer[index++] = XHI_SYNC_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = hwicap_type_1_read(reg) | 1;
+ buffer[index++] = XHI_NOOP_PACKET;
+ buffer[index++] = XHI_NOOP_PACKET;
+
+ /*
+ * Write the data to the FIFO and initiate the transfer of data present
+ * in the FIFO to the ICAP device.
+ */
+ status = drvdata->config->set_configuration(drvdata,
+ &buffer[0], index);
+ if (status)
+ return status;
+
+ /*
+ * Read the configuration register
+ */
+ status = drvdata->config->get_configuration(drvdata, RegData, 1);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
+{
+ int status;
+ u32 idcode;
+
+ dev_dbg(drvdata->dev, "initializing\n");
+
+ /* Abort any current transaction, to make sure we have the
+ * ICAP in a good state. */
+ dev_dbg(drvdata->dev, "Reset...\n");
+ drvdata->config->reset(drvdata);
+
+ dev_dbg(drvdata->dev, "Desync...\n");
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ return status;
+
+ /* Attempt to read the IDCODE from ICAP. This
+ * may not be returned correctly, due to the design of the
+ * hardware.
+ */
+ dev_dbg(drvdata->dev, "Reading IDCODE...\n");
+ status = hwicap_get_configuration_register(
+ drvdata, drvdata->config_regs->IDCODE, &idcode);
+ dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode);
+ if (status)
+ return status;
+
+ dev_dbg(drvdata->dev, "Desync...\n");
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static ssize_t
+hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ ssize_t bytes_to_read = 0;
+ u32 *kbuf;
+ u32 words;
+ u32 bytes_remaining;
+ int status;
+
+ if (down_interruptible(&drvdata->sem))
+ return -ERESTARTSYS;
+
+ if (drvdata->read_buffer_in_use) {
+ /* If there are leftover bytes in the buffer, just */
+ /* return them and don't try to read more from the */
+ /* ICAP device. */
+ bytes_to_read =
+ (count < drvdata->read_buffer_in_use) ? count :
+ drvdata->read_buffer_in_use;
+
+ /* Return the data currently in the read buffer. */
+ if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) {
+ status = -EFAULT;
+ goto error;
+ }
+ drvdata->read_buffer_in_use -= bytes_to_read;
+ memcpy(drvdata->read_buffer + bytes_to_read,
+ drvdata->read_buffer, 4 - bytes_to_read);
+ } else {
+ /* Get new data from the ICAP, and return what was requested. */
+ kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
+ if (!kbuf) {
+ status = -ENOMEM;
+ goto error;
+ }
+
+ /* The ICAP device is only able to read complete */
+ /* words. If a number of bytes that do not correspond */
+ /* to complete words is requested, then we read enough */
+ /* words to get the required number of bytes, and then */
+ /* save the remaining bytes for the next read. */
+
+ /* Determine the number of words to read, rounding up */
+ /* if necessary. */
+ words = ((count + 3) >> 2);
+ bytes_to_read = words << 2;
+
+ if (bytes_to_read > PAGE_SIZE)
+ bytes_to_read = PAGE_SIZE;
+
+ /* Ensure we only read a complete number of words. */
+ bytes_remaining = bytes_to_read & 3;
+ bytes_to_read &= ~3;
+ words = bytes_to_read >> 2;
+
+ status = drvdata->config->get_configuration(drvdata,
+ kbuf, words);
+
+ /* If we didn't read correctly, then bail out. */
+ if (status) {
+ free_page((unsigned long)kbuf);
+ goto error;
+ }
+
+ /* If we fail to return the data to the user, then bail out. */
+ if (copy_to_user(buf, kbuf, bytes_to_read)) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ memcpy(kbuf, drvdata->read_buffer, bytes_remaining);
+ drvdata->read_buffer_in_use = bytes_remaining;
+ free_page((unsigned long)kbuf);
+ }
+ status = bytes_to_read;
+ error:
+ up(&drvdata->sem);
+ return status;
+}
+
+static ssize_t
+hwicap_write(struct file *file, const char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ ssize_t written = 0;
+ ssize_t left = count;
+ u32 *kbuf;
+ ssize_t len;
+ ssize_t status;
+
+ if (down_interruptible(&drvdata->sem))
+ return -ERESTARTSYS;
+
+ left += drvdata->write_buffer_in_use;
+
+ /* Only write multiples of 4 bytes. */
+ if (left < 4) {
+ status = 0;
+ goto error;
+ }
+
+ kbuf = (u32 *) __get_free_page(GFP_KERNEL);
+ if (!kbuf) {
+ status = -ENOMEM;
+ goto error;
+ }
+
+ while (left > 3) {
+ /* only write multiples of 4 bytes, so there might */
+ /* be as many as 3 bytes left (at the end). */
+ len = left;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ len &= ~3;
+
+ if (drvdata->write_buffer_in_use) {
+ memcpy(kbuf, drvdata->write_buffer,
+ drvdata->write_buffer_in_use);
+ if (copy_from_user(
+ (((char *)kbuf) + (drvdata->write_buffer_in_use)),
+ buf + written,
+ len - (drvdata->write_buffer_in_use))) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ } else {
+ if (copy_from_user(kbuf, buf + written, len)) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ }
+
+ status = drvdata->config->set_configuration(drvdata,
+ kbuf, len >> 2);
+
+ if (status) {
+ free_page((unsigned long)kbuf);
+ status = -EFAULT;
+ goto error;
+ }
+ if (drvdata->write_buffer_in_use) {
+ len -= drvdata->write_buffer_in_use;
+ left -= drvdata->write_buffer_in_use;
+ drvdata->write_buffer_in_use = 0;
+ }
+ written += len;
+ left -= len;
+ }
+ if ((left > 0) && (left < 4)) {
+ if (!copy_from_user(drvdata->write_buffer,
+ buf + written, left)) {
+ drvdata->write_buffer_in_use = left;
+ written += left;
+ left = 0;
+ }
+ }
+
+ free_page((unsigned long)kbuf);
+ status = written;
+ error:
+ up(&drvdata->sem);
+ return status;
+}
+
+static int hwicap_open(struct inode *inode, struct file *file)
+{
+ struct hwicap_drvdata *drvdata;
+ int status;
+
+ drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev);
+
+ if (down_interruptible(&drvdata->sem))
+ return -ERESTARTSYS;
+
+ if (drvdata->is_open) {
+ status = -EBUSY;
+ goto error;
+ }
+
+ status = hwicap_initialize_hwicap(drvdata);
+ if (status) {
+ dev_err(drvdata->dev, "Failed to open file");
+ goto error;
+ }
+
+ file->private_data = drvdata;
+ drvdata->write_buffer_in_use = 0;
+ drvdata->read_buffer_in_use = 0;
+ drvdata->is_open = 1;
+
+ error:
+ up(&drvdata->sem);
+ return status;
+}
+
+static int hwicap_release(struct inode *inode, struct file *file)
+{
+ struct hwicap_drvdata *drvdata = file->private_data;
+ int i;
+ int status = 0;
+
+ if (down_interruptible(&drvdata->sem))
+ return -ERESTARTSYS;
+
+ if (drvdata->write_buffer_in_use) {
+ /* Flush write buffer. */
+ for (i = drvdata->write_buffer_in_use; i < 4; i++)
+ drvdata->write_buffer[i] = 0;
+
+ status = drvdata->config->set_configuration(drvdata,
+ (u32 *) drvdata->write_buffer, 1);
+ if (status)
+ goto error;
+ }
+
+ status = hwicap_command_desync(drvdata);
+ if (status)
+ goto error;
+
+ error:
+ drvdata->is_open = 0;
+ up(&drvdata->sem);
+ return status;
+}
+
+static struct file_operations hwicap_fops = {
+ .owner = THIS_MODULE,
+ .write = hwicap_write,
+ .read = hwicap_read,
+ .open = hwicap_open,
+ .release = hwicap_release,
+};
+
+static int __devinit hwicap_setup(struct device *dev, int id,
+ const struct resource *regs_res,
+ const struct hwicap_driver_config *config,
+ const struct config_registers *config_regs)
+{
+ dev_t devt;
+ struct hwicap_drvdata *drvdata = NULL;
+ int retval = 0;
+
+ dev_info(dev, "Xilinx icap port driver\n");
+
+ if (id < 0) {
+ for (id = 0; id < HWICAP_DEVICES; id++)
+ if (!probed_devices[id])
+ break;
+ }
+ if (id < 0 || id >= HWICAP_DEVICES) {
+ dev_err(dev, "%s%i too large\n", DRIVER_NAME, id);
+ return -EINVAL;
+ }
+ if (probed_devices[id]) {
+ dev_err(dev, "cannot assign to %s%i; it is already in use\n",
+ DRIVER_NAME, id);
+ return -EBUSY;
+ }
+
+ probed_devices[id] = 1;
+
+ devt = MKDEV(xhwicap_major, xhwicap_minor + id);
+
+ drvdata = kmalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ dev_err(dev, "Couldn't allocate device private record\n");
+ return -ENOMEM;
+ }
+ memset((void *)drvdata, 0, sizeof(struct hwicap_drvdata));
+ dev_set_drvdata(dev, (void *)drvdata);
+
+ if (!regs_res) {
+ dev_err(dev, "Couldn't get registers resource\n");
+ retval = -EFAULT;
+ goto failed1;
+ }
+
+ drvdata->mem_start = regs_res->start;
+ drvdata->mem_end = regs_res->end;
+ drvdata->mem_size = regs_res->end - regs_res->start + 1;
+
+ if (!request_mem_region(drvdata->mem_start,
+ drvdata->mem_size, DRIVER_NAME)) {
+ dev_err(dev, "Couldn't lock memory region at %p\n",
+ (void *)regs_res->start);
+ retval = -EBUSY;
+ goto failed1;
+ }
+
+ drvdata->devt = devt;
+ drvdata->dev = dev;
+ drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size);
+ if (!drvdata->base_address) {
+ dev_err(dev, "ioremap() failed\n");
+ goto failed2;
+ }
+
+ drvdata->config = config;
+ drvdata->config_regs = config_regs;
+
+ init_MUTEX(&drvdata->sem);
+ drvdata->is_open = 0;
+
+ dev_info(dev, "ioremap %lx to %p with size %x\n",
+ (unsigned long int)drvdata->mem_start,
+ drvdata->base_address, drvdata->mem_size);
+
+ cdev_init(&drvdata->cdev, &hwicap_fops);
+ drvdata->cdev.owner = THIS_MODULE;
+ retval = cdev_add(&drvdata->cdev, devt, 1);
+ if (retval) {
+ dev_err(dev, "cdev_add() failed\n");
+ goto failed3;
+ }
+ /* devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */
+ class_device_create(icap_class, NULL, devt, NULL, DRIVER_NAME);
+ return 0; /* success */
+
+ failed3:
+ iounmap(drvdata->base_address);
+
+ failed2:
+ release_mem_region(regs_res->start, drvdata->mem_size);
+
+ failed1:
+ kfree(drvdata);
+
+ return retval;
+}
+
+static struct hwicap_driver_config buffer_icap_config = {
+ .get_configuration = buffer_icap_get_configuration,
+ .set_configuration = buffer_icap_set_configuration,
+ .reset = buffer_icap_reset,
+};
+
+static struct hwicap_driver_config fifo_icap_config = {
+ .get_configuration = fifo_icap_get_configuration,
+ .set_configuration = fifo_icap_set_configuration,
+ .reset = fifo_icap_reset,
+};
+
+static int __devexit hwicap_remove(struct device *dev)
+{
+ struct hwicap_drvdata *drvdata;
+
+ drvdata = (struct hwicap_drvdata *)dev_get_drvdata(dev);
+
+ if (!drvdata)
+ return 0;
+
+ class_device_destroy(icap_class, drvdata->devt);
+ cdev_del(&drvdata->cdev);
+ iounmap(drvdata->base_address);
+ release_mem_region(drvdata->mem_start, drvdata->mem_size);
+ kfree(drvdata);
+ dev_set_drvdata(dev, NULL);
+ probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0;
+
+ return 0; /* success */
+}
+
+static int __devinit hwicap_drv_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ const struct config_registers *regs;
+ const char *family;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ /* It's most likely that we're using V4 if the family is not
+ specified. */
+ regs = &v4_config_registers;
+ family = pdev->dev.platform_data;
+
+ if (family) {
+ if (!strcmp(family, "virtex2p")) {
+ regs = &v2_config_registers;
+ } else if (!strcmp(family, "virtex4")) {
+ regs = &v4_config_registers;
+ } else if (!strcmp(family, "virtex5")) {
+ regs = &v5_config_registers;
+ }
+ }
+
+ return hwicap_setup(&pdev->dev, pdev->id, res,
+ &buffer_icap_config, regs);
+}
+
+static int __devexit hwicap_drv_remove(struct platform_device *pdev)
+{
+ return hwicap_remove(&pdev->dev);
+}
+
+static struct platform_driver hwicap_platform_driver = {
+ .probe = hwicap_drv_probe,
+ .remove = hwicap_drv_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ },
+};
+
+/* ---------------------------------------------------------------------
+ * OF bus binding
+ */
+
+#if defined(CONFIG_OF)
+static int __devinit
+hwicap_of_probe(struct of_device *op, const struct of_device_id *match)
+{
+ struct resource res;
+ const unsigned int *id;
+ const char *family;
+ int rc;
+ const struct hwicap_driver_config *config = match->data;
+ const struct config_registers *regs;
+
+ dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match);
+
+ rc = of_address_to_resource(op->node, 0, &res);
+ if (rc) {
+ dev_err(&op->dev, "invalid address\n");
+ return rc;
+ }
+
+ id = of_get_property(op->node, "port-number", NULL);
+
+ /* It's most likely that we're using V4 if the family is not
+ specified. */
+ regs = &v4_config_registers;
+ family = of_get_property(op->node, "xlnx,family", NULL);
+
+ if (family) {
+ if (!strcmp(family, "virtex2p")) {
+ regs = &v2_config_registers;
+ } else if (!strcmp(family, "virtex4")) {
+ regs = &v4_config_registers;
+ } else if (!strcmp(family, "virtex5")) {
+ regs = &v5_config_registers;
+ }
+ }
+ return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
+ regs);
+}
+
+static int __devexit hwicap_of_remove(struct of_device *op)
+{
+ return hwicap_remove(&op->dev);
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id __devinit hwicap_of_match[] = {
+ { .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
+ { .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config},
+ {},
+};
+MODULE_DEVICE_TABLE(of, hwicap_of_match);
+
+static struct of_platform_driver hwicap_of_driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .match_table = hwicap_of_match,
+ .probe = hwicap_of_probe,
+ .remove = __devexit_p(hwicap_of_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+/* Registration helpers to keep the number of #ifdefs to a minimum */
+static inline int __devinit hwicap_of_register(void)
+{
+ pr_debug("hwicap: calling of_register_platform_driver()\n");
+ return of_register_platform_driver(&hwicap_of_driver);
+}
+
+static inline void __devexit hwicap_of_unregister(void)
+{
+ of_unregister_platform_driver(&hwicap_of_driver);
+}
+#else /* CONFIG_OF */
+/* CONFIG_OF not enabled; do nothing helpers */
+static inline int __devinit hwicap_of_register(void) { return 0; }
+static inline void __devexit hwicap_of_unregister(void) { }
+#endif /* CONFIG_OF */
+
+static int __devinit hwicap_module_init(void)
+{
+ dev_t devt;
+ int retval;
+
+ icap_class = class_create(THIS_MODULE, "xilinx_config");
+
+ if (xhwicap_major) {
+ devt = MKDEV(xhwicap_major, xhwicap_minor);
+ retval = register_chrdev_region(
+ devt,
+ HWICAP_DEVICES,
+ DRIVER_NAME);
+ if (retval < 0)
+ return retval;
+ } else {
+ retval = alloc_chrdev_region(&devt,
+ xhwicap_minor,
+ HWICAP_DEVICES,
+ DRIVER_NAME);
+ if (retval < 0)
+ return retval;
+ xhwicap_major = MAJOR(devt);
+ }
+
+ retval = platform_driver_register(&hwicap_platform_driver);
+
+ if (retval)
+ goto failed1;
+
+ retval = hwicap_of_register();
+
+ if (retval)
+ goto failed2;
+
+ return retval;
+
+ failed2:
+ platform_driver_unregister(&hwicap_platform_driver);
+
+ failed1:
+ unregister_chrdev_region(devt, HWICAP_DEVICES);
+
+ return retval;
+}
+
+static void __devexit hwicap_module_cleanup(void)
+{
+ dev_t devt = MKDEV(xhwicap_major, xhwicap_minor);
+
+ class_destroy(icap_class);
+
+ platform_driver_unregister(&hwicap_platform_driver);
+
+ hwicap_of_unregister();
+
+ unregister_chrdev_region(devt, HWICAP_DEVICES);
+}
+
+module_init(hwicap_module_init);
+module_exit(hwicap_module_cleanup);
+
+MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group");
+MODULE_DESCRIPTION("Xilinx ICAP Port Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
new file mode 100644
index 000000000000..ae771cac1629
--- /dev/null
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -0,0 +1,193 @@
+/*****************************************************************************
+ *
+ * Author: Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
+ * AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
+ * SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
+ * OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
+ * APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
+ * THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
+ * AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
+ * FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
+ * WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
+ * IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
+ * REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
+ * INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * Xilinx products are not intended for use in life support appliances,
+ * devices, or systems. Use in such applications is expressly prohibited.
+ *
+ * (c) Copyright 2003-2007 Xilinx Inc.
+ * All rights reserved.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************/
+
+#ifndef XILINX_HWICAP_H_ /* prevent circular inclusions */
+#define XILINX_HWICAP_H_ /* by using protection macros */
+
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+
+struct hwicap_drvdata {
+ u32 write_buffer_in_use; /* Always in [0,3] */
+ u8 write_buffer[4];
+ u32 read_buffer_in_use; /* Always in [0,3] */
+ u8 read_buffer[4];
+ u32 mem_start; /* phys. address of the control registers */
+ u32 mem_end; /* phys. address of the control registers */
+ u32 mem_size;
+ void __iomem *base_address;/* virt. address of the control registers */
+
+ struct device *dev;
+ struct cdev cdev; /* Char device structure */
+ dev_t devt;
+
+ const struct hwicap_driver_config *config;
+ const struct config_registers *config_regs;
+ void *private_data;
+ bool is_open;
+ struct semaphore sem;
+};
+
+struct hwicap_driver_config {
+ int (*get_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size);
+ int (*set_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
+ u32 size);
+ void (*reset)(struct hwicap_drvdata *drvdata);
+};
+
+/* Number of times to poll the done register */
+#define XHI_MAX_RETRIES 10
+
+/************ Constant Definitions *************/
+
+#define XHI_PAD_FRAMES 0x1
+
+/* Mask for calculating configuration packet headers */
+#define XHI_WORD_COUNT_MASK_TYPE_1 0x7FFUL
+#define XHI_WORD_COUNT_MASK_TYPE_2 0x1FFFFFUL
+#define XHI_TYPE_MASK 0x7
+#define XHI_REGISTER_MASK 0xF
+#define XHI_OP_MASK 0x3
+
+#define XHI_TYPE_SHIFT 29
+#define XHI_REGISTER_SHIFT 13
+#define XHI_OP_SHIFT 27
+
+#define XHI_TYPE_1 1
+#define XHI_TYPE_2 2
+#define XHI_OP_WRITE 2
+#define XHI_OP_READ 1
+
+/* Address Block Types */
+#define XHI_FAR_CLB_BLOCK 0
+#define XHI_FAR_BRAM_BLOCK 1
+#define XHI_FAR_BRAM_INT_BLOCK 2
+
+struct config_registers {
+ u32 CRC;
+ u32 FAR;
+ u32 FDRI;
+ u32 FDRO;
+ u32 CMD;
+ u32 CTL;
+ u32 MASK;
+ u32 STAT;
+ u32 LOUT;
+ u32 COR;
+ u32 MFWR;
+ u32 FLR;
+ u32 KEY;
+ u32 CBC;
+ u32 IDCODE;
+ u32 AXSS;
+ u32 C0R_1;
+ u32 CSOB;
+ u32 WBSTAR;
+ u32 TIMER;
+ u32 BOOTSTS;
+ u32 CTL_1;
+};
+
+/* Configuration Commands */
+#define XHI_CMD_NULL 0
+#define XHI_CMD_WCFG 1
+#define XHI_CMD_MFW 2
+#define XHI_CMD_DGHIGH 3
+#define XHI_CMD_RCFG 4
+#define XHI_CMD_START 5
+#define XHI_CMD_RCAP 6
+#define XHI_CMD_RCRC 7
+#define XHI_CMD_AGHIGH 8
+#define XHI_CMD_SWITCH 9
+#define XHI_CMD_GRESTORE 10
+#define XHI_CMD_SHUTDOWN 11
+#define XHI_CMD_GCAPTURE 12
+#define XHI_CMD_DESYNCH 13
+#define XHI_CMD_IPROG 15 /* Only in Virtex5 */
+#define XHI_CMD_CRCC 16 /* Only in Virtex5 */
+#define XHI_CMD_LTIMER 17 /* Only in Virtex5 */
+
+/* Packet constants */
+#define XHI_SYNC_PACKET 0xAA995566UL
+#define XHI_DUMMY_PACKET 0xFFFFFFFFUL
+#define XHI_NOOP_PACKET (XHI_TYPE_1 << XHI_TYPE_SHIFT)
+#define XHI_TYPE_2_READ ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \
+ (XHI_OP_READ << XHI_OP_SHIFT))
+
+#define XHI_TYPE_2_WRITE ((XHI_TYPE_2 << XHI_TYPE_SHIFT) | \
+ (XHI_OP_WRITE << XHI_OP_SHIFT))
+
+#define XHI_TYPE2_CNT_MASK 0x07FFFFFF
+
+#define XHI_TYPE_1_PACKET_MAX_WORDS 2047UL
+#define XHI_TYPE_1_HEADER_BYTES 4
+#define XHI_TYPE_2_HEADER_BYTES 8
+
+/* Constant to use for CRC check when CRC has been disabled */
+#define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL
+
+/**
+ * hwicap_type_1_read: Generates a Type 1 read packet header.
+ * @parameter: Register is the address of the register to be read back.
+ *
+ * Generates a Type 1 read packet header, which is used to indirectly
+ * read registers in the configuration logic. This packet must then
+ * be sent through the icap device, and a return packet received with
+ * the information.
+ **/
+static inline u32 hwicap_type_1_read(u32 Register)
+{
+ return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+ (Register << XHI_REGISTER_SHIFT) |
+ (XHI_OP_READ << XHI_OP_SHIFT);
+}
+
+/**
+ * hwicap_type_1_write: Generates a Type 1 write packet header
+ * @parameter: Register is the address of the register to be written.
+ **/
+static inline u32 hwicap_type_1_write(u32 Register)
+{
+ return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
+ (Register << XHI_REGISTER_SHIFT) |
+ (XHI_OP_WRITE << XHI_OP_SHIFT);
+}
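+
+/*
+ * For example, mirroring hwicap_command_desync() in xilinx_hwicap.c, a
+ * one-word write to the CMD register is encoded as
+ *
+ *	buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
+ *	buffer[index++] = XHI_CMD_DESYNCH;
+ *
+ * where the "| 1" sets the Type 1 word count to a single data word.
+ */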
+
+#endif
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 721f86f4f008..c159ae64eeb2 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -9,9 +9,6 @@ config CPU_FREQ
clock speed, you need to either enable a dynamic cpufreq governor
(see below) after boot, or use a userspace tool.
- To compile this driver as a module, choose M here: the
- module will be called cpufreq.
-
For details, take a look at <file:Documentation/cpu-freq>.
If in doubt, say N.
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b730d6709529..64926aa990db 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -287,7 +287,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
if (!l_p_j_ref_freq) {
l_p_j_ref = loops_per_jiffy;
l_p_j_ref_freq = ci->old;
- dprintk("saving %lu as reference value for loops_per_jiffy;"
+ dprintk("saving %lu as reference value for loops_per_jiffy; "
"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
}
if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
@@ -295,7 +295,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
(val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
ci->new);
- dprintk("scaling loops_per_jiffy to %lu"
+ dprintk("scaling loops_per_jiffy to %lu "
"for frequency %u kHz\n", loops_per_jiffy, ci->new);
}
}
@@ -601,6 +601,31 @@ static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
return i;
}
+static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ unsigned int freq = 0;
+ unsigned int ret;
+
+ if (!policy->governor->store_setspeed)
+ return -EINVAL;
+
+ ret = sscanf(buf, "%u", &freq);
+ if (ret != 1)
+ return -EINVAL;
+
+ policy->governor->store_setspeed(policy, freq);
+
+ return count;
+}
+
+static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
+{
+ if (!policy->governor->show_setspeed)
+ return sprintf(buf, "<unsupported>\n");
+
+ return policy->governor->show_setspeed(policy, buf);
+}
#define define_one_ro(_name) \
static struct freq_attr _name = \
@@ -624,6 +649,7 @@ define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);
+define_one_rw(scaling_setspeed);
static struct attribute * default_attrs[] = {
&cpuinfo_min_freq.attr,
@@ -634,6 +660,7 @@ static struct attribute * default_attrs[] = {
&scaling_governor.attr,
&scaling_driver.attr,
&scaling_available_governors.attr,
+ &scaling_setspeed.attr,
NULL
};
@@ -1313,7 +1340,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
struct cpufreq_freqs freqs;
if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
- dprintk("Warning: CPU frequency"
+ dprintk("Warning: CPU frequency "
"is %u, cpufreq assumed %u kHz.\n",
cur_freq, cpu_policy->cur);
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index f8cdde4bf6cd..cb2ac01a41a1 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -65,12 +65,12 @@ static struct notifier_block userspace_cpufreq_notifier_block = {
/**
* cpufreq_set - set the CPU frequency
+ * @policy: pointer to policy struct where freq is being set
* @freq: target frequency in kHz
- * @cpu: CPU for which the frequency is to be set
*
* Sets the CPU frequency to freq.
*/
-static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
+static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
int ret = -EINVAL;
@@ -102,34 +102,11 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
}
-/************************** sysfs interface ************************/
-static ssize_t show_speed (struct cpufreq_policy *policy, char *buf)
+static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
{
- return sprintf (buf, "%u\n", cpu_cur_freq[policy->cpu]);
+ return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]);
}
-static ssize_t
-store_speed (struct cpufreq_policy *policy, const char *buf, size_t count)
-{
- unsigned int freq = 0;
- unsigned int ret;
-
- ret = sscanf (buf, "%u", &freq);
- if (ret != 1)
- return -EINVAL;
-
- cpufreq_set(freq, policy);
-
- return count;
-}
-
-static struct freq_attr freq_attr_scaling_setspeed =
-{
- .attr = { .name = "scaling_setspeed", .mode = 0644 },
- .show = show_speed,
- .store = store_speed,
-};
-
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
unsigned int event)
{
@@ -142,10 +119,6 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
return -EINVAL;
BUG_ON(!policy->cur);
mutex_lock(&userspace_mutex);
- rc = sysfs_create_file (&policy->kobj,
- &freq_attr_scaling_setspeed.attr);
- if (rc)
- goto start_out;
if (cpus_using_userspace_governor == 0) {
cpufreq_register_notifier(
@@ -160,7 +133,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
cpu_cur_freq[cpu] = policy->cur;
cpu_set_freq[cpu] = policy->cur;
dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
-start_out:
+
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_STOP:
@@ -176,7 +149,6 @@ start_out:
cpu_min_freq[cpu] = 0;
cpu_max_freq[cpu] = 0;
cpu_set_freq[cpu] = 0;
- sysfs_remove_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
dprintk("managing cpu %u stopped\n", cpu);
mutex_unlock(&userspace_mutex);
break;
@@ -211,6 +183,8 @@ start_out:
struct cpufreq_governor cpufreq_gov_userspace = {
.name = "userspace",
.governor = cpufreq_governor_userspace,
+ .store_setspeed = cpufreq_set,
+ .show_setspeed = show_speed,
.owner = THIS_MODULE,
};
EXPORT_SYMBOL(cpufreq_gov_userspace);
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 5409f3afb3f8..ae6cd60d5c14 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
static struct cpufreq_frequency_table *show_table[NR_CPUS];
/**
- * show_scaling_governor - show the current policy for the specified CPU
+ * show_available_freqs - show available frequencies for the specified CPU
*/
static ssize_t show_available_freqs (struct cpufreq_policy *policy, char *buf)
{
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 3bed4127d4ad..7dbc4a83c45c 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -1,13 +1,13 @@
config CPU_IDLE
bool "CPU idle PM support"
+ default ACPI
help
CPU idle is a generic framework for supporting software-controlled
idle processor power management. It includes modular cross-platform
governors that can be swapped during runtime.
- If you're using a mobile platform that supports CPU idle PM (e.g.
- an ACPI-capable notebook), you should say Y here.
+ If you're using an ACPI-enabled platform, you should say Y here.
config CPU_IDLE_GOV_LADDER
bool
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d2fabe7863a9..2c4b2d47973e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -12,9 +12,10 @@
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
+#include <linux/ktime.h>
#include "cpuidle.h"
@@ -180,6 +181,44 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
EXPORT_SYMBOL_GPL(cpuidle_disable_device);
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+{
+ ktime_t t1, t2;
+ s64 diff;
+ int ret;
+
+ t1 = ktime_get();
+ local_irq_enable();
+ while (!need_resched())
+ cpu_relax();
+
+ t2 = ktime_get();
+ diff = ktime_to_us(ktime_sub(t2, t1));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ ret = (int) diff;
+ return ret;
+}
+
+static void poll_idle_init(struct cpuidle_device *dev)
+{
+ struct cpuidle_state *state = &dev->states[0];
+
+ cpuidle_set_statedata(state, NULL);
+
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C0 (poll idle)");
+ state->exit_latency = 0;
+ state->target_residency = 0;
+ state->power_usage = -1;
+ state->flags = CPUIDLE_FLAG_POLL | CPUIDLE_FLAG_TIME_VALID;
+ state->enter = poll_idle;
+}
+#else
+static void poll_idle_init(struct cpuidle_device *dev) {}
+#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
+
/**
* cpuidle_register_device - registers a CPU's idle PM feature
* @dev: the cpu
@@ -198,6 +237,8 @@ int cpuidle_register_device(struct cpuidle_device *dev)
mutex_lock(&cpuidle_lock);
+ poll_idle_init(dev);
+
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
if ((ret = cpuidle_add_sysfs(sys_dev))) {
@@ -265,7 +306,10 @@ static struct notifier_block cpuidle_latency_notifier = {
.notifier_call = cpuidle_latency_notify,
};
-#define latency_notifier_init(x) do { register_latency_notifier(x); } while (0)
+static inline void latency_notifier_init(struct notifier_block *n)
+{
+ pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
+}
#else /* CONFIG_SMP */
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index eb666ecae7c9..ba7b9a6b17a1 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>
@@ -81,7 +81,8 @@ static int ladder_select_state(struct cpuidle_device *dev)
/* consider promotion */
if (last_idx < dev->state_count - 1 &&
last_residency > last_state->threshold.promotion_time &&
- dev->states[last_idx + 1].exit_latency <= system_latency_constraint()) {
+ dev->states[last_idx + 1].exit_latency <=
+ pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 299d45c3bdd2..78d77c5dc35c 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -8,7 +8,7 @@
#include <linux/kernel.h>
#include <linux/cpuidle.h>
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
@@ -48,7 +48,7 @@ static int menu_select(struct cpuidle_device *dev)
break;
if (s->target_residency > data->predicted_us)
break;
- if (s->exit_latency > system_latency_constraint())
+ if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
break;
}
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index 17502d6efae7..07f274f853d9 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -88,8 +88,6 @@ static struct dioname names[] =
#undef DIONAME
#undef DIOFBNAME
-#define NUMNAMES (sizeof(names) / sizeof(struct dioname))
-
static const char *unknowndioname
= "unknown DIO board -- please email <linux-m68k@lists.linux-m68k.org>!";
@@ -97,7 +95,7 @@ static const char *dio_getname(int id)
{
/* return pointer to a constant string describing the board with given ID */
unsigned int i;
- for (i = 0; i < NUMNAMES; i++)
+ for (i = 0; i < ARRAY_SIZE(names); i++)
if (names[i].id == id)
return names[i].name;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c46b7c219ee9..a703deffb795 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -5,6 +5,7 @@
menuconfig DMADEVICES
bool "DMA Engine support"
depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+ depends on !HIGHMEM64G
help
DMA engines can do asynchronous data transfers without
involving the host CPU. Currently, this framework can be
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index bcf52df30339..29965231b912 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -473,20 +473,22 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int cpu;
- tx = dev->device_prep_dma_memcpy(chan, len, 0);
- if (!tx)
+ dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
+ dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
+ tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+ if (!tx) {
+ dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+ dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
+ }
tx->ack = 1;
tx->callback = NULL;
- addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
cookie = tx->tx_submit(tx);
cpu = get_cpu();
@@ -517,20 +519,22 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int cpu;
- tx = dev->device_prep_dma_memcpy(chan, len, 0);
- if (!tx)
+ dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
+ dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
+ tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+ if (!tx) {
+ dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
+ dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
+ }
tx->ack = 1;
tx->callback = NULL;
- addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
cookie = tx->tx_submit(tx);
cpu = get_cpu();
@@ -563,20 +567,23 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
{
struct dma_device *dev = chan->device;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int cpu;
- tx = dev->device_prep_dma_memcpy(chan, len, 0);
- if (!tx)
+ dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+ dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+ DMA_FROM_DEVICE);
+ tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
+
+ if (!tx) {
+ dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+ dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
return -ENOMEM;
+ }
tx->ack = 1;
tx->callback = NULL;
- addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
cookie = tx->tx_submit(tx);
cpu = get_cpu();
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index 45e7b4666c7b..dff38accc5c1 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -159,20 +159,6 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
return device->common.chancnt;
}
-static void ioat_set_src(dma_addr_t addr,
- struct dma_async_tx_descriptor *tx,
- int index)
-{
- tx_to_ioat_desc(tx)->src = addr;
-}
-
-static void ioat_set_dest(dma_addr_t addr,
- struct dma_async_tx_descriptor *tx,
- int index)
-{
- tx_to_ioat_desc(tx)->dst = addr;
-}
-
/**
* ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
* descriptors to hw
@@ -415,8 +401,6 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
memset(desc, 0, sizeof(*desc));
dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
- desc_sw->async_tx.tx_set_src = ioat_set_src;
- desc_sw->async_tx.tx_set_dest = ioat_set_dest;
switch (ioat_chan->device->version) {
case IOAT_VER_1_2:
desc_sw->async_tx.tx_submit = ioat1_tx_submit;
@@ -714,8 +698,10 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
struct dma_chan *chan,
+ dma_addr_t dma_dest,
+ dma_addr_t dma_src,
size_t len,
- int int_en)
+ unsigned long flags)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
struct ioat_desc_sw *new;
@@ -726,6 +712,8 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
if (new) {
new->len = len;
+ new->dst = dma_dest;
+ new->src = dma_src;
return &new->async_tx;
} else
return NULL;
@@ -733,8 +721,10 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
struct dma_chan *chan,
+ dma_addr_t dma_dest,
+ dma_addr_t dma_src,
size_t len,
- int int_en)
+ unsigned long flags)
{
struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
struct ioat_desc_sw *new;
@@ -749,6 +739,8 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
if (new) {
new->len = len;
+ new->dst = dma_dest;
+ new->src = dma_src;
return &new->async_tx;
} else
return NULL;
@@ -1045,7 +1037,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
u8 *dest;
struct dma_chan *dma_chan;
struct dma_async_tx_descriptor *tx;
- dma_addr_t addr;
+ dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int err = 0;
@@ -1073,7 +1065,12 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
goto out;
}
- tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0);
+ dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
+ DMA_TO_DEVICE);
+ dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
+ DMA_FROM_DEVICE);
+ tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
+ IOAT_TEST_SIZE, 0);
if (!tx) {
dev_err(&device->pdev->dev,
"Self-test prep failed, disabling\n");
@@ -1082,12 +1079,6 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
}
async_tx_ack(tx);
- addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
- DMA_TO_DEVICE);
- tx->tx_set_src(addr, tx, 0);
- addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
- DMA_FROM_DEVICE);
- tx->tx_set_dest(addr, tx, 0);
tx->callback = ioat_dma_test_callback;
tx->callback_param = (void *)0x8086;
cookie = tx->tx_submit(tx);
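For providers such as ioat, the per-descriptor tx_set_src()/tx_set_dest() hooks go away and the addresses arrive in the prep call itself. The struct dma_device member declaration is not part of this excerpt, so the prototype pair below is a reconstruction of the before/after convention implied by the hunks above:

/* Old convention: prep allocated a descriptor, addresses were set later
 * through tx_set_src()/tx_set_dest(). */
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
	struct dma_chan *chan, size_t len, int int_en);

/* New convention: both bus addresses and a flags word are supplied up
 * front, so the driver can fill its hardware descriptor in one pass. */
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags);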
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e5c62b75f36f..3986d54492bd 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -284,7 +284,7 @@ iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
int slots_per_op)
{
struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
- struct list_head chain = LIST_HEAD_INIT(chain);
+ LIST_HEAD(chain);
int slots_found, retry = 0;
/* start search from the last allocated descriptor
@@ -443,17 +443,6 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
return cookie;
}
-static void
-iop_adma_set_dest(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
-
- /* to do: support transfers lengths > IOP_ADMA_MAX_BYTE_COUNT */
- iop_desc_set_dest_addr(sw_desc->group_head, iop_chan, addr);
-}
-
static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
@@ -486,7 +475,6 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = iop_adma_tx_submit;
- slot->async_tx.tx_set_dest = iop_adma_set_dest;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->async_tx.tx_list);
@@ -547,18 +535,9 @@ iop_adma_prep_dma_interrupt(struct dma_chan *chan)
return sw_desc ? &sw_desc->async_tx : NULL;
}
-static void
-iop_adma_memcpy_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
- iop_desc_set_memcpy_src_addr(grp_start, addr);
-}
-
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
+iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -576,11 +555,12 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_memcpy(grp_start, int_en);
+ iop_desc_init_memcpy(grp_start, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
+ iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
+ iop_desc_set_memcpy_src_addr(grp_start, dma_src);
sw_desc->unmap_src_cnt = 1;
sw_desc->unmap_len = len;
- sw_desc->async_tx.tx_set_src = iop_adma_memcpy_set_src;
}
spin_unlock_bh(&iop_chan->lock);
@@ -588,8 +568,8 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, size_t len, int int_en)
}
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
- int int_en)
+iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
+ int value, size_t len, unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -607,9 +587,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_memset(grp_start, int_en);
+ iop_desc_init_memset(grp_start, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
iop_desc_set_block_fill_val(grp_start, value);
+ iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
sw_desc->unmap_src_cnt = 1;
sw_desc->unmap_len = len;
}
@@ -618,19 +599,10 @@ iop_adma_prep_dma_memset(struct dma_chan *chan, int value, size_t len,
return sw_desc ? &sw_desc->async_tx : NULL;
}
-static void
-iop_adma_xor_set_src(dma_addr_t addr, struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
- iop_desc_set_xor_src_addr(grp_start, index, addr);
-}
-
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
- int int_en)
+iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
+ unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -641,39 +613,32 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, unsigned int src_cnt, size_t len,
BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
dev_dbg(iop_chan->device->common.dev,
- "%s src_cnt: %d len: %u int_en: %d\n",
- __FUNCTION__, src_cnt, len, int_en);
+ "%s src_cnt: %d len: %u flags: %lx\n",
+ __FUNCTION__, src_cnt, len, flags);
spin_lock_bh(&iop_chan->lock);
slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_xor(grp_start, src_cnt, int_en);
+ iop_desc_init_xor(grp_start, src_cnt, flags);
iop_desc_set_byte_count(grp_start, iop_chan, len);
+ iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
- sw_desc->async_tx.tx_set_src = iop_adma_xor_set_src;
+ while (src_cnt--)
+ iop_desc_set_xor_src_addr(grp_start, src_cnt,
+ dma_src[src_cnt]);
}
spin_unlock_bh(&iop_chan->lock);
return sw_desc ? &sw_desc->async_tx : NULL;
}
-static void
-iop_adma_xor_zero_sum_set_src(dma_addr_t addr,
- struct dma_async_tx_descriptor *tx,
- int index)
-{
- struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
- struct iop_adma_desc_slot *grp_start = sw_desc->group_head;
-
- iop_desc_set_zero_sum_src_addr(grp_start, index, addr);
-}
-
static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
- size_t len, u32 *result, int int_en)
+iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
+ unsigned int src_cnt, size_t len, u32 *result,
+ unsigned long flags)
{
struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -690,14 +655,16 @@ iop_adma_prep_dma_zero_sum(struct dma_chan *chan, unsigned int src_cnt,
sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
if (sw_desc) {
grp_start = sw_desc->group_head;
- iop_desc_init_zero_sum(grp_start, src_cnt, int_en);
+ iop_desc_init_zero_sum(grp_start, src_cnt, flags);
iop_desc_set_zero_sum_byte_count(grp_start, len);
grp_start->xor_check_result = result;
pr_debug("\t%s: grp_start->xor_check_result: %p\n",
__FUNCTION__, grp_start->xor_check_result);
sw_desc->unmap_src_cnt = src_cnt;
sw_desc->unmap_len = len;
- sw_desc->async_tx.tx_set_src = iop_adma_xor_zero_sum_set_src;
+ while (src_cnt--)
+ iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
+ dma_src[src_cnt]);
}
spin_unlock_bh(&iop_chan->lock);
@@ -882,13 +849,12 @@ static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
goto out;
}
- tx = iop_adma_prep_dma_memcpy(dma_chan, IOP_ADMA_TEST_SIZE, 1);
dest_dma = dma_map_single(dma_chan->device->dev, dest,
IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
- iop_adma_set_dest(dest_dma, tx, 0);
src_dma = dma_map_single(dma_chan->device->dev, src,
IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
- iop_adma_memcpy_set_src(src_dma, tx, 0);
+ tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
+ IOP_ADMA_TEST_SIZE, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -929,6 +895,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
struct page *dest;
struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
+ dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
dma_addr_t dma_addr, dest_dma;
struct dma_async_tx_descriptor *tx;
struct dma_chan *dma_chan;
@@ -981,17 +948,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
}
/* test xor */
- tx = iop_adma_prep_dma_xor(dma_chan, IOP_ADMA_NUM_SRC_TEST,
- PAGE_SIZE, 1);
dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- iop_adma_set_dest(dest_dma, tx, 0);
-
- for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
- dma_addr = dma_map_page(dma_chan->device->dev, xor_srcs[i], 0,
- PAGE_SIZE, DMA_TO_DEVICE);
- iop_adma_xor_set_src(dma_addr, tx, i);
- }
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
+ dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+ tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
+ IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -1032,13 +995,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
zero_sum_result = 1;
- tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
- PAGE_SIZE, &zero_sum_result, 1);
- for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
- dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
- 0, PAGE_SIZE, DMA_TO_DEVICE);
- iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
- }
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+ zero_sum_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+ IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &zero_sum_result, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -1060,10 +1023,9 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
}
/* test memset */
- tx = iop_adma_prep_dma_memset(dma_chan, 0, PAGE_SIZE, 1);
dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
- iop_adma_set_dest(dma_addr, tx, 0);
+ tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
@@ -1089,13 +1051,13 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
/* test for non-zero parity sum */
zero_sum_result = 0;
- tx = iop_adma_prep_dma_zero_sum(dma_chan, IOP_ADMA_NUM_SRC_TEST + 1,
- PAGE_SIZE, &zero_sum_result, 1);
- for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++) {
- dma_addr = dma_map_page(dma_chan->device->dev, zero_sum_srcs[i],
- 0, PAGE_SIZE, DMA_TO_DEVICE);
- iop_adma_xor_zero_sum_set_src(dma_addr, tx, i);
- }
+ for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
+ dma_srcs[i] = dma_map_page(dma_chan->device->dev,
+ zero_sum_srcs[i], 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
+ IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+ &zero_sum_result, 1);
cookie = iop_adma_tx_submit(tx);
iop_adma_issue_pending(dma_chan);
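The same rework applies to the multi-source operations: instead of an indexed tx_set_src() callback per source, the caller maps every source up front and hands the whole dma_addr_t array to a single prep call, matching the iop_adma_prep_dma_xor() signature above. A hedged sketch mirroring the xor self-test hunks (the fixed-size array and the function name are illustrative assumptions):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static struct dma_async_tx_descriptor *
sketch_prep_xor(struct dma_chan *chan, struct page *dest,
		struct page **srcs, int src_cnt, size_t len)
{
	struct dma_device *dev = chan->device;
	dma_addr_t dma_dest, dma_srcs[8];	/* assumes src_cnt <= 8 */
	int i;

	dma_dest = dma_map_page(dev->dev, dest, 0, len, DMA_FROM_DEVICE);
	for (i = 0; i < src_cnt; i++)
		dma_srcs[i] = dma_map_page(dev->dev, srcs[i], 0, len,
					   DMA_TO_DEVICE);

	/* one call now carries destination, source array, count and flags */
	return dev->device_prep_dma_xor(chan, dma_dest, dma_srcs,
					src_cnt, len, 0);
}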
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 98b6b4fb4257..2b382990fe58 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -97,7 +97,7 @@ config EDAC_I82975X
config EDAC_I3000
tristate "Intel 3000/3010"
- depends on EDAC_MM_EDAC && PCI && X86_32
+ depends on EDAC_MM_EDAC && PCI && X86
help
Support for error detection and correction on the Intel
3000 and 3010 server chipsets.
@@ -123,6 +123,20 @@ config EDAC_I5000
Support for error detection and correction on the Intel
Greekcreek/Blackford chipsets.
+config EDAC_MPC85XX
+ tristate "Freescale MPC85xx"
+ depends on EDAC_MM_EDAC && FSL_SOC && MPC85xx
+ help
+ Support for error detection and correction on the Freescale
+ MPC8560, MPC8540, MPC8548
+
+config EDAC_MV64X60
+ tristate "Marvell MV64x60"
+ depends on EDAC_MM_EDAC && MV64X60
+ help
+ Support for error detection and correction on the Marvell
+ MV64360 and MV64460 chipsets.
+
config EDAC_PASEMI
tristate "PA Semi PWRficient"
depends on EDAC_MM_EDAC && PCI
@@ -131,5 +145,12 @@ config EDAC_PASEMI
Support for error detection and correction on PA Semi
PWRficient.
+config EDAC_CELL
+ tristate "Cell Broadband Engine memory controller"
+ depends on EDAC_MM_EDAC && PPC_CELL_NATIVE
+ help
+ Support for error detection and correction on the
+ Cell Broadband Engine internal memory controller
+ on platforms without a hypervisor.
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 02c09f0ff157..83807731d4a9 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -28,4 +28,7 @@ obj-$(CONFIG_EDAC_I3000) += i3000_edac.o
obj-$(CONFIG_EDAC_I82860) += i82860_edac.o
obj-$(CONFIG_EDAC_R82600) += r82600_edac.o
obj-$(CONFIG_EDAC_PASEMI) += pasemi_edac.o
+obj-$(CONFIG_EDAC_MPC85XX) += mpc85xx_edac.o
+obj-$(CONFIG_EDAC_MV64X60) += mv64x60_edac.o
+obj-$(CONFIG_EDAC_CELL) += cell_edac.o
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
new file mode 100644
index 000000000000..b54112ffd282
--- /dev/null
+++ b/drivers/edac/cell_edac.c
@@ -0,0 +1,258 @@
+/*
+ * Cell MIC driver for ECC counting
+ *
+ * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License.
+ */
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/stop_machine.h>
+#include <linux/io.h>
+#include <asm/machdep.h>
+#include <asm/cell-regs.h>
+
+#include "edac_core.h"
+
+struct cell_edac_priv
+{
+ struct cbe_mic_tm_regs __iomem *regs;
+ int node;
+ int chanmask;
+#ifdef DEBUG
+ u64 prev_fir;
+#endif
+};
+
+static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
+{
+ struct cell_edac_priv *priv = mci->pvt_info;
+ struct csrow_info *csrow = &mci->csrows[0];
+ unsigned long address, pfn, offset;
+
+ dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016lx\n",
+ priv->node, chan, ar);
+
+ /* Address decoding is likely a bit bogus, needs double-checking */
+ address = (ar & 0xffffffffe0000000ul) >> 29;
+ if (priv->chanmask == 0x3)
+ address = (address << 1) | chan;
+ pfn = address >> PAGE_SHIFT;
+ offset = address & ~PAGE_MASK;
+
+ /* TODO: Decoding of the error address */
+ edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
+ 0, 0, chan, "");
+}
+
+static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
+{
+ struct cell_edac_priv *priv = mci->pvt_info;
+ struct csrow_info *csrow = &mci->csrows[0];
+ unsigned long address, pfn, offset;
+
+ dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016lx\n",
+ priv->node, chan, ar);
+
+ /* Address decoding is likely a bit bogus, needs double-checking */
+ address = (ar & 0xffffffffe0000000ul) >> 29;
+ if (priv->chanmask == 0x3)
+ address = (address << 1) | chan;
+ pfn = address >> PAGE_SHIFT;
+ offset = address & ~PAGE_MASK;
+
+ /* TODO: Decoding of the error address */
+ edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
+}
+
+static void cell_edac_check(struct mem_ctl_info *mci)
+{
+ struct cell_edac_priv *priv = mci->pvt_info;
+ u64 fir, addreg, clear = 0;
+
+ fir = in_be64(&priv->regs->mic_fir);
+#ifdef DEBUG
+ if (fir != priv->prev_fir) {
+ dev_dbg(mci->dev, "fir change : 0x%016lx\n", fir);
+ priv->prev_fir = fir;
+ }
+#endif
+ if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_SINGLE_0_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
+ clear |= CBE_MIC_FIR_ECC_SINGLE_0_RESET;
+ cell_edac_count_ce(mci, 0, addreg);
+ }
+ if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_SINGLE_1_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
+ clear |= CBE_MIC_FIR_ECC_SINGLE_1_RESET;
+ cell_edac_count_ce(mci, 1, addreg);
+ }
+ if ((priv->chanmask & 0x1) && (fir & CBE_MIC_FIR_ECC_MULTI_0_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_0);
+ clear |= CBE_MIC_FIR_ECC_MULTI_0_RESET;
+ cell_edac_count_ue(mci, 0, addreg);
+ }
+ if ((priv->chanmask & 0x2) && (fir & CBE_MIC_FIR_ECC_MULTI_1_ERR)) {
+ addreg = in_be64(&priv->regs->mic_df_ecc_address_1);
+ clear |= CBE_MIC_FIR_ECC_MULTI_1_RESET;
+ cell_edac_count_ue(mci, 1, addreg);
+ }
+
+ /* The procedure for clearing FIR bits is a bit ... weird */
+ if (clear) {
+ fir &= ~(CBE_MIC_FIR_ECC_ERR_MASK | CBE_MIC_FIR_ECC_SET_MASK);
+ fir |= CBE_MIC_FIR_ECC_RESET_MASK;
+ fir &= ~clear;
+ out_be64(&priv->regs->mic_fir, fir);
+ (void)in_be64(&priv->regs->mic_fir);
+
+ mb(); /* sync up */
+#ifdef DEBUG
+ fir = in_be64(&priv->regs->mic_fir);
+ dev_dbg(mci->dev, "fir clear : 0x%016lx\n", fir);
+#endif
+ }
+}
+
+static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
+{
+ struct csrow_info *csrow = &mci->csrows[0];
+ struct cell_edac_priv *priv = mci->pvt_info;
+ struct device_node *np;
+
+ for (np = NULL;
+ (np = of_find_node_by_name(np, "memory")) != NULL;) {
+ struct resource r;
+
+ /* We "know" that the Cell firmware only creates one entry
+ * in the "memory" nodes. If that changes, this code will
+ * need to be adapted.
+ */
+ if (of_address_to_resource(np, 0, &r))
+ continue;
+ if (of_node_to_nid(np) != priv->node)
+ continue;
+ csrow->first_page = r.start >> PAGE_SHIFT;
+ csrow->nr_pages = (r.end - r.start + 1) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ csrow->mtype = MEM_XDR;
+ csrow->edac_mode = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ dev_dbg(mci->dev,
+ "Initialized on node %d, chanmask=0x%x,"
+ " first_page=0x%lx, nr_pages=0x%x\n",
+ priv->node, priv->chanmask,
+ csrow->first_page, csrow->nr_pages);
+ break;
+ }
+}
+
+static int __devinit cell_edac_probe(struct platform_device *pdev)
+{
+ struct cbe_mic_tm_regs __iomem *regs;
+ struct mem_ctl_info *mci;
+ struct cell_edac_priv *priv;
+ u64 reg;
+ int rc, chanmask;
+
+ regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
+ if (regs == NULL)
+ return -ENODEV;
+
+ /* Get channel population */
+ reg = in_be64(&regs->mic_mnt_cfg);
+ dev_dbg(&pdev->dev, "MIC_MNT_CFG = 0x%016lx\n", reg);
+ chanmask = 0;
+ if (reg & CBE_MIC_MNT_CFG_CHAN_0_POP)
+ chanmask |= 0x1;
+ if (reg & CBE_MIC_MNT_CFG_CHAN_1_POP)
+ chanmask |= 0x2;
+ if (chanmask == 0) {
+ dev_warn(&pdev->dev,
+ "Yuck ! No channel populated ? Aborting !\n");
+ return -ENODEV;
+ }
+ dev_dbg(&pdev->dev, "Initial FIR = 0x%016lx\n",
+ in_be64(&regs->mic_fir));
+
+ /* Allocate & init EDAC MC data structure */
+ mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
+ chanmask == 3 ? 2 : 1, pdev->id);
+ if (mci == NULL)
+ return -ENOMEM;
+ priv = mci->pvt_info;
+ priv->regs = regs;
+ priv->node = pdev->id;
+ priv->chanmask = chanmask;
+ mci->dev = &pdev->dev;
+ mci->mtype_cap = MEM_FLAG_XDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED;
+ mci->mod_name = "cell_edac";
+ mci->ctl_name = "MIC";
+ mci->dev_name = pdev->dev.bus_id;
+ mci->edac_check = cell_edac_check;
+ cell_edac_init_csrows(mci);
+
+ /* Register with EDAC core */
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to register with EDAC core\n");
+ edac_mc_free(mci);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int __devexit cell_edac_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = edac_mc_del_mc(&pdev->dev);
+ if (mci)
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct platform_driver cell_edac_driver = {
+ .driver = {
+ .name = "cbe-mic",
+ .owner = THIS_MODULE,
+ },
+ .probe = cell_edac_probe,
+ .remove = cell_edac_remove,
+};
+
+static int __init cell_edac_init(void)
+{
+ /* Sanity check registers data structure */
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_df_ecc_address_0) != 0xf8);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_df_ecc_address_1) != 0x1b8);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_df_config) != 0x218);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_fir) != 0x230);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_mnt_cfg) != 0x210);
+ BUILD_BUG_ON(offsetof(struct cbe_mic_tm_regs,
+ mic_exc) != 0x208);
+
+ return platform_driver_register(&cell_edac_driver);
+}
+
+static void __exit cell_edac_exit(void)
+{
+ platform_driver_unregister(&cell_edac_driver);
+}
+
+module_init(cell_edac_init);
+module_exit(cell_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
+MODULE_DESCRIPTION("ECC counting for Cell MIC");
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 2d23e304f5ec..a9aa845dbe74 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -136,6 +136,7 @@ enum mem_type {
MEM_DDR2, /* DDR2 RAM */
MEM_FB_DDR2, /* fully buffered DDR2 */
MEM_RDDR2, /* Registered DDR2 RAM */
+ MEM_XDR, /* Rambus XDR */
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
@@ -152,6 +153,7 @@ enum mem_type {
#define MEM_FLAG_DDR2 BIT(MEM_DDR2)
#define MEM_FLAG_FB_DDR2 BIT(MEM_FB_DDR2)
#define MEM_FLAG_RDDR2 BIT(MEM_RDDR2)
+#define MEM_FLAG_XDR BIT(MEM_XDR)
/* chipset Error Detection and Correction capabilities and mode */
enum edac_type {
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index f3690a697cf9..b9552bc03dea 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -155,6 +155,10 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
dev_ctl->instances = dev_inst;
dev_ctl->pvt_info = pvt;
+ /* Default logging of CEs and UEs */
+ dev_ctl->log_ce = 1;
+ dev_ctl->log_ue = 1;
+
/* Name of this edac device */
snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name);
@@ -436,7 +440,7 @@ static void edac_device_workq_function(struct work_struct *work_req)
*/
if (edac_dev->poll_msec == 1000)
queue_delayed_work(edac_workqueue, &edac_dev->work,
- round_jiffies(edac_dev->delay));
+ round_jiffies_relative(edac_dev->delay));
else
queue_delayed_work(edac_workqueue, &edac_dev->work,
edac_dev->delay);
@@ -468,7 +472,7 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
*/
if (edac_dev->poll_msec == 1000)
queue_delayed_work(edac_workqueue, &edac_dev->work,
- round_jiffies(edac_dev->delay));
+ round_jiffies_relative(edac_dev->delay));
else
queue_delayed_work(edac_workqueue, &edac_dev->work,
edac_dev->delay);
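Both edac_device.c hunks (and the matching edac_pci.c hunk further down) switch to round_jiffies_relative() because the value handed to queue_delayed_work() is a relative delay; round_jiffies() rounds an absolute jiffies timestamp and would skew a relative one. A minimal sketch of the corrected rearm pattern (function and parameter names are illustrative):

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/timer.h>

static void sketch_rearm_poll(struct workqueue_struct *wq,
			      struct delayed_work *work, unsigned int msec)
{
	unsigned long delay = msecs_to_jiffies(msec);

	/* batch the common 1-second pollers onto whole-second boundaries */
	if (msec == 1000)
		delay = round_jiffies_relative(delay);

	queue_delayed_work(wq, work, delay);
}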
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 9aac88027fb3..021d18795145 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -73,7 +73,8 @@ static const char *mem_types[] = {
[MEM_RMBS] = "RMBS",
[MEM_DDR2] = "Unbuffered-DDR2",
[MEM_FB_DDR2] = "FullyBuffered-DDR2",
- [MEM_RDDR2] = "Registered-DDR2"
+ [MEM_RDDR2] = "Registered-DDR2",
+ [MEM_XDR] = "XDR"
};
static const char *dev_types[] = {
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index e0b47b74ec45..32be43576a8e 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -246,7 +246,7 @@ static void edac_pci_workq_function(struct work_struct *work_req)
/* if we are on a one second period, then use round */
msec = edac_pci_get_poll_msec();
if (msec == 1000)
- delay = round_jiffies(msecs_to_jiffies(msec));
+ delay = round_jiffies_relative(msecs_to_jiffies(msec));
else
delay = msecs_to_jiffies(msec);
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 5b075da99145..71c3195d3704 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -558,8 +558,10 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
debugf4("PCI STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
- /* check the status reg for errors */
- if (status) {
+ /* check the status reg for errors on boards NOT marked as broken
+ * if broken, we cannot trust any of the status bits
+ */
+ if (status && !dev->broken_parity_status) {
if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
edac_printk(KERN_CRIT, EDAC_PCI,
"Signaled System Error on %s\n",
@@ -593,8 +595,10 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev->dev.bus_id);
- /* check the secondary status reg for errors */
- if (status) {
+ /* check the secondary status reg for errors,
+ * on NOT broken boards
+ */
+ if (status && !dev->broken_parity_status) {
if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) {
edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
"Signaled System Error on %s\n",
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index e895f9f887ab..5d4292811c14 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -14,6 +14,7 @@
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
+#include <linux/edac.h>
#include "edac_core.h"
#define I3000_REVISION "1.1"
@@ -30,105 +31,139 @@
#define I3000_MCHBAR_MASK 0xffffc000
#define I3000_MMR_WINDOW_SIZE 16384
-#define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b)
- *
- * 7:1 reserved
- * 0 bit 32 of address
- */
-#define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b)
- *
- * 31:7 address
- * 6:1 reserved
- * 0 Error channel 0/1
- */
-#define I3000_DEAP_GRAIN (1 << 7)
-#define I3000_DEAP_PFN(edeap, deap) ((((edeap) & 1) << (32 - PAGE_SHIFT)) | \
- ((deap) >> PAGE_SHIFT))
-#define I3000_DEAP_OFFSET(deap) ((deap) & ~(I3000_DEAP_GRAIN-1) & ~PAGE_MASK)
-#define I3000_DEAP_CHANNEL(deap) ((deap) & 1)
-
-#define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
- *
- * 7:0 DRAM ECC Syndrome
- */
-
-#define I3000_ERRSTS 0xc8 /* Error Status Register (16b)
- *
- * 15:12 reserved
- * 11 MCH Thermal Sensor Event for SMI/SCI/SERR
- * 10 reserved
- * 9 LOCK to non-DRAM Memory Flag (LCKF)
- * 8 Received Refresh Timeout Flag (RRTOF)
- * 7:2 reserved
- * 1 Multiple-bit DRAM ECC Error Flag (DMERR)
- * 0 Single-bit DRAM ECC Error Flag (DSERR)
- */
+#define I3000_EDEAP 0x70 /* Extended DRAM Error Address Pointer (8b)
+ *
+ * 7:1 reserved
+ * 0 bit 32 of address
+ */
+#define I3000_DEAP 0x58 /* DRAM Error Address Pointer (32b)
+ *
+ * 31:7 address
+ * 6:1 reserved
+ * 0 Error channel 0/1
+ */
+#define I3000_DEAP_GRAIN (1 << 7)
+
+/*
+ * Helper functions to decode the DEAP/EDEAP hardware registers.
+ *
+ * The type promotion here is deliberate; we're deriving an
+ * unsigned long pfn and offset from hardware regs which are u8/u32.
+ */
+
+static inline unsigned long deap_pfn(u8 edeap, u32 deap)
+{
+ deap >>= PAGE_SHIFT;
+ deap |= (edeap & 1) << (32 - PAGE_SHIFT);
+ return deap;
+}
+
+static inline unsigned long deap_offset(u32 deap)
+{
+ return deap & ~(I3000_DEAP_GRAIN - 1) & ~PAGE_MASK;
+}
+
+static inline int deap_channel(u32 deap)
+{
+ return deap & 1;
+}
+
+#define I3000_DERRSYN 0x5c /* DRAM Error Syndrome (8b)
+ *
+ * 7:0 DRAM ECC Syndrome
+ */
+
+#define I3000_ERRSTS 0xc8 /* Error Status Register (16b)
+ *
+ * 15:12 reserved
+ * 11 MCH Thermal Sensor Event
+ * for SMI/SCI/SERR
+ * 10 reserved
+ * 9 LOCK to non-DRAM Memory Flag (LCKF)
+ * 8 Received Refresh Timeout Flag (RRTOF)
+ * 7:2 reserved
+ * 1 Multi-bit DRAM ECC Error Flag (DMERR)
+ * 0 Single-bit DRAM ECC Error Flag (DSERR)
+ */
#define I3000_ERRSTS_BITS 0x0b03 /* bits which indicate errors */
#define I3000_ERRSTS_UE 0x0002
#define I3000_ERRSTS_CE 0x0001
-#define I3000_ERRCMD 0xca /* Error Command (16b)
- *
- * 15:12 reserved
- * 11 SERR on MCH Thermal Sensor Event (TSESERR)
- * 10 reserved
- * 9 SERR on LOCK to non-DRAM Memory (LCKERR)
- * 8 SERR on DRAM Refresh Timeout (DRTOERR)
- * 7:2 reserved
- * 1 SERR Multiple-Bit DRAM ECC Error (DMERR)
- * 0 SERR on Single-Bit ECC Error (DSERR)
- */
+#define I3000_ERRCMD 0xca /* Error Command (16b)
+ *
+ * 15:12 reserved
+ * 11 SERR on MCH Thermal Sensor Event
+ * (TSESERR)
+ * 10 reserved
+ * 9 SERR on LOCK to non-DRAM Memory
+ * (LCKERR)
+ * 8 SERR on DRAM Refresh Timeout
+ * (DRTOERR)
+ * 7:2 reserved
+ * 1 SERR Multi-Bit DRAM ECC Error
+ * (DMERR)
+ * 0 SERR on Single-Bit ECC Error
+ * (DSERR)
+ */
/* Intel MMIO register space - device 0 function 0 - MMR space */
#define I3000_DRB_SHIFT 25 /* 32MiB grain */
-#define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4)
- *
- * 7:0 Channel 0 DRAM Rank Boundary Address
- */
-#define I3000_C1DRB 0x180 /* Channel 1 DRAM Rank Boundary (8b x 4)
- *
- * 7:0 Channel 1 DRAM Rank Boundary Address
- */
-
-#define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2)
- *
- * 7 reserved
- * 6:4 DRAM odd Rank Attribute
- * 3 reserved
- * 2:0 DRAM even Rank Attribute
- *
- * Each attribute defines the page
- * size of the corresponding rank:
- * 000: unpopulated
- * 001: reserved
- * 010: 4 KB
- * 011: 8 KB
- * 100: 16 KB
- * Others: reserved
- */
-#define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */
-#define ODD_RANK_ATTRIB(dra) (((dra) & 0x70) >> 4)
-#define EVEN_RANK_ATTRIB(dra) ((dra) & 0x07)
-
-#define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b)
- *
- * 31:30 reserved
- * 29 Initialization Complete (IC)
- * 28:11 reserved
- * 10:8 Refresh Mode Select (RMS)
- * 7 reserved
- * 6:4 Mode Select (SMS)
- * 3:2 reserved
- * 1:0 DRAM Type (DT)
- */
-
-#define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b)
- *
- * 31 Enhanced Addressing Enable (ENHADE)
- * 30:0 reserved
- */
+#define I3000_C0DRB 0x100 /* Channel 0 DRAM Rank Boundary (8b x 4)
+ *
+ * 7:0 Channel 0 DRAM Rank Boundary Address
+ */
+#define I3000_C1DRB 0x180 /* Channel 1 DRAM Rank Boundary (8b x 4)
+ *
+ * 7:0 Channel 1 DRAM Rank Boundary Address
+ */
+
+#define I3000_C0DRA 0x108 /* Channel 0 DRAM Rank Attribute (8b x 2)
+ *
+ * 7 reserved
+ * 6:4 DRAM odd Rank Attribute
+ * 3 reserved
+ * 2:0 DRAM even Rank Attribute
+ *
+ * Each attribute defines the page
+ * size of the corresponding rank:
+ * 000: unpopulated
+ * 001: reserved
+ * 010: 4 KB
+ * 011: 8 KB
+ * 100: 16 KB
+ * Others: reserved
+ */
+#define I3000_C1DRA 0x188 /* Channel 1 DRAM Rank Attribute (8b x 2) */
+
+static inline unsigned char odd_rank_attrib(unsigned char dra)
+{
+ return (dra & 0x70) >> 4;
+}
+
+static inline unsigned char even_rank_attrib(unsigned char dra)
+{
+ return dra & 0x07;
+}
+
+#define I3000_C0DRC0 0x120 /* DRAM Controller Mode 0 (32b)
+ *
+ * 31:30 reserved
+ * 29 Initialization Complete (IC)
+ * 28:11 reserved
+ * 10:8 Refresh Mode Select (RMS)
+ * 7 reserved
+ * 6:4 Mode Select (SMS)
+ * 3:2 reserved
+ * 1:0 DRAM Type (DT)
+ */
+
+#define I3000_C0DRC1 0x124 /* DRAM Controller Mode 1 (32b)
+ *
+ * 31 Enhanced Addressing Enable (ENHADE)
+ * 30:0 reserved
+ */
enum i3000p_chips {
I3000 = 0,
@@ -187,7 +222,8 @@ static void i3000_get_error_info(struct mem_ctl_info *mci,
pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
}
- /* Clear any error bits.
+ /*
+ * Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
@@ -198,8 +234,8 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
struct i3000_error_info *info,
int handle_errors)
{
- int row, multi_chan;
- int pfn, offset, channel;
+ int row, multi_chan, channel;
+ unsigned long pfn, offset;
multi_chan = mci->csrows[0].nr_channels - 1;
@@ -214,9 +250,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
info->errsts = info->errsts2;
}
- pfn = I3000_DEAP_PFN(info->edeap, info->deap);
- offset = I3000_DEAP_OFFSET(info->deap);
- channel = I3000_DEAP_CHANNEL(info->deap);
+ pfn = deap_pfn(info->edeap, info->deap);
+ offset = deap_offset(info->deap);
+ channel = deap_channel(info->deap);
row = edac_mc_find_csrow_by_page(mci, pfn);
@@ -245,16 +281,18 @@ static int i3000_is_interleaved(const unsigned char *c0dra,
{
int i;
- /* If the channels aren't populated identically then
+ /*
+ * If the channels aren't populated identically then
* we're not interleaved.
*/
for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
- if (ODD_RANK_ATTRIB(c0dra[i]) != ODD_RANK_ATTRIB(c1dra[i]) ||
- EVEN_RANK_ATTRIB(c0dra[i]) !=
- EVEN_RANK_ATTRIB(c1dra[i]))
+ if (odd_rank_attrib(c0dra[i]) != odd_rank_attrib(c1dra[i]) ||
+ even_rank_attrib(c0dra[i]) !=
+ even_rank_attrib(c1dra[i]))
return 0;
- /* If the rank boundaries for the two channels are different
+ /*
+ * If the rank boundaries for the two channels are different
* then we're not interleaved.
*/
for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
@@ -288,6 +326,15 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
return -ENODEV;
}
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_NMI:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_POLL;
+ break;
+ }
+
c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */
c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */
c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */
@@ -300,7 +347,8 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
iounmap(window);
- /* Figure out how many channels we have.
+ /*
+ * Figure out how many channels we have.
*
* If we have what the datasheet calls "asymmetric channels"
* (essentially the same as what was called "virtual single
@@ -363,7 +411,8 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
csrow->edac_mode = EDAC_UNKNOWN;
}
- /* Clear any error bits.
+ /*
+ * Clear any error bits.
* (Yes, we really clear bits by writing 1 to them.)
*/
pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
@@ -390,7 +439,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
debugf3("MC: %s(): success\n", __func__);
return 0;
- fail:
+fail:
if (mci)
edac_mc_free(mci);
@@ -409,7 +458,7 @@ static int __devinit i3000_init_one(struct pci_dev *pdev,
return -EIO;
rc = i3000_probe1(pdev, ent->driver_data);
- if (mci_pdev == NULL)
+ if (!mci_pdev)
mci_pdev = pci_dev_get(pdev);
return rc;
@@ -424,7 +473,8 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
if (i3000_pci)
edac_pci_release_generic_ctl(i3000_pci);
- if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
+ mci = edac_mc_del_mc(&pdev->dev);
+ if (!mci)
return;
edac_mc_free(mci);
@@ -457,7 +507,7 @@ static int __init i3000_init(void)
if (pci_rc < 0)
goto fail0;
- if (mci_pdev == NULL) {
+ if (!mci_pdev) {
i3000_registered = 0;
mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_3000_HB, NULL);
@@ -504,3 +554,6 @@ module_exit(i3000_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
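The deap_pfn()/deap_offset()/deap_channel() helpers introduced above splice address bit 32 from EDEAP onto the 32-bit DEAP pointer and mask the offset to the 128-byte DEAP grain within the page. A standalone worked example with made-up register values, assuming 4 KiB pages (userspace sketch, not driver code):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12			/* assumption for illustration */
#define DEAP_GRAIN	(1 << 7)		/* 128-byte error granularity */

int main(void)
{
	uint8_t edeap = 0x01;		/* bit 0 holds address bit 32 */
	uint32_t deap = 0x12345680;	/* bits 31:7 address, bit 0 channel */
	unsigned long pfn, offset;

	pfn = (deap >> PAGE_SHIFT) |
	      ((unsigned long)(edeap & 1) << (32 - PAGE_SHIFT));
	offset = (deap & ~(uint32_t)(DEAP_GRAIN - 1)) &
		 ((1u << PAGE_SHIFT) - 1);

	printf("pfn=0x%lx offset=0x%lx channel=%u\n",
	       pfn, offset, deap & 1u);
	return 0;
}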
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
new file mode 100644
index 000000000000..065732ddf40c
--- /dev/null
+++ b/drivers/edac/mpc85xx_edac.c
@@ -0,0 +1,1043 @@
+/*
+ * Freescale MPC85xx Memory Controller kernel module
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/edac.h>
+
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <asm/mpc85xx.h>
+#include "edac_module.h"
+#include "edac_core.h"
+#include "mpc85xx_edac.h"
+
+static int edac_dev_idx;
+static int edac_pci_idx;
+static int edac_mc_idx;
+
+static u32 orig_ddr_err_disable;
+static u32 orig_ddr_err_sbe;
+
+/*
+ * PCI Err defines
+ */
+#ifdef CONFIG_PCI
+static u32 orig_pci_err_cap_dr;
+static u32 orig_pci_err_en;
+#endif
+
+static u32 orig_l2_err_disable;
+static u32 orig_hid1;
+
+static const char *mpc85xx_ctl_name = "MPC85xx";
+
+/************************ MC SYSFS parts ***********************************/
+
+static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_DATA_ERR_INJECT_HI));
+}
+
+static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_DATA_ERR_INJECT_LO));
+}
+
+static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT));
+}
+
+static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = {
+ {
+ .attr = {
+ .name = "inject_data_hi",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_mc_inject_data_hi_show,
+ .store = mpc85xx_mc_inject_data_hi_store},
+ {
+ .attr = {
+ .name = "inject_data_lo",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_mc_inject_data_lo_show,
+ .store = mpc85xx_mc_inject_data_lo_store},
+ {
+ .attr = {
+ .name = "inject_ctrl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_mc_inject_ctrl_show,
+ .store = mpc85xx_mc_inject_ctrl_store},
+
+ /* End of list */
+ {
+ .attr = {.name = NULL}
+ }
+};
+
+static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
+{
+ mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes;
+}
+
+/**************************** PCI Err device ***************************/
+#ifdef CONFIG_PCI
+
+static void mpc85xx_pci_check(struct edac_pci_ctl_info *pci)
+{
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
+
+ /* master aborts can happen during PCI config cycles */
+ if (!(err_detect & ~(PCI_EDE_MULTI_ERR | PCI_EDE_MST_ABRT))) {
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
+ return;
+ }
+
+ printk(KERN_ERR "PCI error(s) detected\n");
+ printk(KERN_ERR "PCI/X ERR_DR register: %#08x\n", err_detect);
+
+ printk(KERN_ERR "PCI/X ERR_ATTRIB register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ATTRIB));
+ printk(KERN_ERR "PCI/X ERR_ADDR register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_ADDR));
+ printk(KERN_ERR "PCI/X ERR_EXT_ADDR register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EXT_ADDR));
+ printk(KERN_ERR "PCI/X ERR_DL register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DL));
+ printk(KERN_ERR "PCI/X ERR_DH register: %#08x\n",
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DH));
+
+ /* clear error bits */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, err_detect);
+
+ if (err_detect & PCI_EDE_PERR_MASK)
+ edac_pci_handle_pe(pci, pci->ctl_name);
+
+ if ((err_detect & ~PCI_EDE_MULTI_ERR) & ~PCI_EDE_PERR_MASK)
+ edac_pci_handle_npe(pci, pci->ctl_name);
+}
+
+static irqreturn_t mpc85xx_pci_isr(int irq, void *dev_id)
+{
+ struct edac_pci_ctl_info *pci = dev_id;
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR);
+
+ if (!err_detect)
+ return IRQ_NONE;
+
+ mpc85xx_pci_check(pci);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mpc85xx_pci_err_probe(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci;
+ struct mpc85xx_pci_pdata *pdata;
+ struct resource *r;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mpc85xx_pci_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mpc85xx_pci_err");
+ if (!pci)
+ return -ENOMEM;
+
+ pdata = pci->pvt_info;
+ pdata->name = "mpc85xx_pci_err";
+ pdata->irq = NO_IRQ;
+ platform_set_drvdata(pdev, pci);
+ pci->dev = &pdev->dev;
+ pci->mod_name = EDAC_MOD_STR;
+ pci->ctl_name = pdata->name;
+ pci->dev_name = pdev->dev.bus_id;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ pci->edac_check = mpc85xx_pci_check;
+
+ pdata->edac_idx = edac_pci_idx++;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "PCI err regs\n", __func__);
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, r->start,
+ r->end - r->start + 1, pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->pci_vbase = devm_ioremap(&pdev->dev, r->start,
+ r->end - r->start + 1);
+ if (!pdata->pci_vbase) {
+ printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ orig_pci_err_cap_dr =
+ in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR);
+
+ /* PCI master abort is expected during config cycles */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, 0x40);
+
+ orig_pci_err_en = in_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN);
+
+ /* disable master abort reporting */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, ~0x40);
+
+ /* clear error bits */
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0);
+
+ if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+ debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev, pdata->irq,
+ mpc85xx_pci_isr, IRQF_DISABLED,
+ "[EDAC] PCI err", pci);
+ if (res < 0) {
+ printk(KERN_ERR
+ "%s: Unable to requiest irq %d for "
+ "MPC85xx PCI err\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&pdev->dev, mpc85xx_pci_err_probe);
+ debugf3("%s(): success\n", __func__);
+ printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n");
+
+ return 0;
+
+err2:
+ edac_pci_del_device(&pdev->dev);
+err:
+ edac_pci_free_ctl_info(pci);
+ devres_release_group(&pdev->dev, mpc85xx_pci_err_probe);
+ return res;
+}
+
+static int mpc85xx_pci_err_remove(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
+ struct mpc85xx_pci_pdata *pdata = pci->pvt_info;
+
+ debugf0("%s()\n", __func__);
+
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR,
+ orig_pci_err_cap_dr);
+
+ out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_EN, orig_pci_err_en);
+
+ edac_pci_del_device(pci->dev);
+
+ if (edac_op_state == EDAC_OPSTATE_INT)
+ irq_dispose_mapping(pdata->irq);
+
+ edac_pci_free_ctl_info(pci);
+
+ return 0;
+}
+
+static struct platform_driver mpc85xx_pci_err_driver = {
+ .probe = mpc85xx_pci_err_probe,
+ .remove = __devexit_p(mpc85xx_pci_err_remove),
+ .driver = {
+ .name = "mpc85xx_pci_err",
+ }
+};
+
+#endif /* CONFIG_PCI */
+
+/**************************** L2 Err device ***************************/
+
+/************************ L2 SYSFS parts ***********************************/
+
+static ssize_t mpc85xx_l2_inject_data_hi_show(struct edac_device_ctl_info
+ *edac_dev, char *data)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI));
+}
+
+static ssize_t mpc85xx_l2_inject_data_lo_show(struct edac_device_ctl_info
+ *edac_dev, char *data)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO));
+}
+
+static ssize_t mpc85xx_l2_inject_ctrl_show(struct edac_device_ctl_info
+ *edac_dev, char *data)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ return sprintf(data, "0x%08x",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL));
+}
+
+static ssize_t mpc85xx_l2_inject_data_hi_store(struct edac_device_ctl_info
+ *edac_dev, const char *data,
+ size_t count)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJHI,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_l2_inject_data_lo_store(struct edac_device_ctl_info
+ *edac_dev, const char *data,
+ size_t count)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJLO,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static ssize_t mpc85xx_l2_inject_ctrl_store(struct edac_device_ctl_info
+ *edac_dev, const char *data,
+ size_t count)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ if (isdigit(*data)) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINJCTL,
+ simple_strtoul(data, NULL, 0));
+ return count;
+ }
+ return 0;
+}
+
+static struct edac_dev_sysfs_attribute mpc85xx_l2_sysfs_attributes[] = {
+ {
+ .attr = {
+ .name = "inject_data_hi",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_l2_inject_data_hi_show,
+ .store = mpc85xx_l2_inject_data_hi_store},
+ {
+ .attr = {
+ .name = "inject_data_lo",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_l2_inject_data_lo_show,
+ .store = mpc85xx_l2_inject_data_lo_store},
+ {
+ .attr = {
+ .name = "inject_ctrl",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = mpc85xx_l2_inject_ctrl_show,
+ .store = mpc85xx_l2_inject_ctrl_store},
+
+ /* End of list */
+ {
+ .attr = {.name = NULL}
+ }
+};
+
+static void mpc85xx_set_l2_sysfs_attributes(struct edac_device_ctl_info
+ *edac_dev)
+{
+ edac_dev->sysfs_attributes = mpc85xx_l2_sysfs_attributes;
+}
+
+/***************************** L2 ops ***********************************/
+
+static void mpc85xx_l2_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
+
+ if (!(err_detect & L2_EDE_MASK))
+ return;
+
+ printk(KERN_ERR "ECC Error in CPU L2 cache\n");
+ printk(KERN_ERR "L2 Error Detect Register: 0x%08x\n", err_detect);
+ printk(KERN_ERR "L2 Error Capture Data High Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATAHI));
+ printk(KERN_ERR "L2 Error Capture Data Lo Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTDATALO));
+ printk(KERN_ERR "L2 Error Syndrome Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_CAPTECC));
+ printk(KERN_ERR "L2 Error Attributes Capture Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRATTR));
+ printk(KERN_ERR "L2 Error Address Capture Register: 0x%08x\n",
+ in_be32(pdata->l2_vbase + MPC85XX_L2_ERRADDR));
+
+ /* clear error detect register */
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, err_detect);
+
+ if (err_detect & L2_EDE_CE_MASK)
+ edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
+
+ if (err_detect & L2_EDE_UE_MASK)
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mpc85xx_l2_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET);
+
+ if (!(err_detect & L2_EDE_MASK))
+ return IRQ_NONE;
+
+ mpc85xx_l2_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mpc85xx_l2_err_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct mpc85xx_l2_pdata *pdata;
+ struct resource r;
+ int res;
+
+ if (!devres_open_group(&op->dev, mpc85xx_l2_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+ "cpu", 1, "L", 1, 2, NULL, 0,
+ edac_dev_idx);
+ if (!edac_dev) {
+ devres_release_group(&op->dev, mpc85xx_l2_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = edac_dev->pvt_info;
+ pdata->name = "mpc85xx_l2_err";
+ pdata->irq = NO_IRQ;
+ edac_dev->dev = &op->dev;
+ dev_set_drvdata(edac_dev->dev, edac_dev);
+ edac_dev->ctl_name = pdata->name;
+ edac_dev->dev_name = pdata->name;
+
+ res = of_address_to_resource(op->node, 0, &r);
+ if (res) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "L2 err regs\n", __func__);
+ goto err;
+ }
+
+ /* we only need the error registers */
+ r.start += 0xe00;
+
+ if (!devm_request_mem_region(&op->dev, r.start,
+ r.end - r.start + 1, pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->l2_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
+ if (!pdata->l2_vbase) {
+ printk(KERN_ERR "%s: Unable to setup L2 err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDET, ~0);
+
+ orig_l2_err_disable = in_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS);
+
+ /* clear the err_dis */
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, 0);
+
+ edac_dev->mod_name = EDAC_MOD_STR;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = mpc85xx_l2_check;
+
+ mpc85xx_set_l2_sysfs_attributes(edac_dev);
+
+ pdata->edac_idx = edac_dev_idx++;
+
+ if (edac_device_add_device(edac_dev) > 0) {
+ debugf3("%s(): failed edac_device_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = irq_of_parse_and_map(op->node, 0);
+ res = devm_request_irq(&op->dev, pdata->irq,
+ mpc85xx_l2_isr, IRQF_DISABLED,
+ "[EDAC] L2 err", edac_dev);
+ if (res < 0) {
+ printk(KERN_ERR
+ "%s: Unable to requiest irq %d for "
+ "MPC85xx L2 err\n", __func__, pdata->irq);
+ irq_dispose_mapping(pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for L2 Err\n",
+ pdata->irq);
+
+ edac_dev->op_state = OP_RUNNING_INTERRUPT;
+
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, L2_EIE_MASK);
+ }
+
+ devres_remove_group(&op->dev, mpc85xx_l2_err_probe);
+
+ debugf3("%s(): success\n", __func__);
+ printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n");
+
+ return 0;
+
+err2:
+ edac_device_del_device(&op->dev);
+err:
+ devres_release_group(&op->dev, mpc85xx_l2_err_probe);
+ edac_device_free_ctl_info(edac_dev);
+ return res;
+}
+
+static int mpc85xx_l2_err_remove(struct of_device *op)
+{
+ struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev);
+ struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info;
+
+ debugf0("%s()\n", __func__);
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0);
+ irq_dispose_mapping(pdata->irq);
+ }
+
+ out_be32(pdata->l2_vbase + MPC85XX_L2_ERRDIS, orig_l2_err_disable);
+ edac_device_del_device(&op->dev);
+ edac_device_free_ctl_info(edac_dev);
+ return 0;
+}
+
+static struct of_device_id mpc85xx_l2_err_of_match[] = {
+ {
+ .compatible = "fsl,8540-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8541-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8544-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8548-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8555-l2-cache-controller",
+ },
+ {
+ .compatible = "fsl,8568-l2-cache-controller",
+ },
+ {},
+};
+
+static struct of_platform_driver mpc85xx_l2_err_driver = {
+ .owner = THIS_MODULE,
+ .name = "mpc85xx_l2_err",
+ .match_table = mpc85xx_l2_err_of_match,
+ .probe = mpc85xx_l2_err_probe,
+ .remove = mpc85xx_l2_err_remove,
+ .driver = {
+ .name = "mpc85xx_l2_err",
+ .owner = THIS_MODULE,
+ },
+};
+
+/**************************** MC Err device ***************************/
+
+static void mpc85xx_mc_check(struct mem_ctl_info *mci)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ struct csrow_info *csrow;
+ u32 err_detect;
+ u32 syndrome;
+ u32 err_addr;
+ u32 pfn;
+ int row_index;
+
+ err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
+ if (!err_detect)
+ return;
+
+ mpc85xx_mc_printk(mci, KERN_ERR, "Err Detect Register: %#8.8x\n",
+ err_detect);
+
+ /* no more processing if not ECC bit errors */
+ if (!(err_detect & (DDR_EDE_SBE | DDR_EDE_MBE))) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
+ return;
+ }
+
+ syndrome = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ECC);
+ err_addr = in_be32(pdata->mc_vbase + MPC85XX_MC_CAPTURE_ADDRESS);
+ pfn = err_addr >> PAGE_SHIFT;
+
+ for (row_index = 0; row_index < mci->nr_csrows; row_index++) {
+ csrow = &mci->csrows[row_index];
+ if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page))
+ break;
+ }
+
+ mpc85xx_mc_printk(mci, KERN_ERR, "Capture Data High: %#8.8x\n",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_CAPTURE_DATA_HI));
+ mpc85xx_mc_printk(mci, KERN_ERR, "Capture Data Low: %#8.8x\n",
+ in_be32(pdata->mc_vbase +
+ MPC85XX_MC_CAPTURE_DATA_LO));
+ mpc85xx_mc_printk(mci, KERN_ERR, "syndrome: %#8.8x\n", syndrome);
+ mpc85xx_mc_printk(mci, KERN_ERR, "err addr: %#8.8x\n", err_addr);
+ mpc85xx_mc_printk(mci, KERN_ERR, "PFN: %#8.8x\n", pfn);
+
+ /* we are out of range */
+ if (row_index == mci->nr_csrows)
+ mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
+
+ if (err_detect & DDR_EDE_SBE)
+ edac_mc_handle_ce(mci, pfn, err_addr & PAGE_MASK,
+ syndrome, row_index, 0, mci->ctl_name);
+
+ if (err_detect & DDR_EDE_MBE)
+ edac_mc_handle_ue(mci, pfn, err_addr & PAGE_MASK,
+ row_index, mci->ctl_name);
+
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
+}
+
+static irqreturn_t mpc85xx_mc_isr(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ u32 err_detect;
+
+ err_detect = in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT);
+ if (!err_detect)
+ return IRQ_NONE;
+
+ mpc85xx_mc_check(mci);
+
+ return IRQ_HANDLED;
+}
+
+static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
+{
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+ struct csrow_info *csrow;
+ u32 sdram_ctl;
+ u32 sdtype;
+ enum mem_type mtype;
+ u32 cs_bnds;
+ int index;
+
+ sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
+
+ sdtype = sdram_ctl & DSC_SDTYPE_MASK;
+ if (sdram_ctl & DSC_RD_EN) {
+ switch (sdtype) {
+ case DSC_SDTYPE_DDR:
+ mtype = MEM_RDDR;
+ break;
+ case DSC_SDTYPE_DDR2:
+ mtype = MEM_RDDR2;
+ break;
+ default:
+ mtype = MEM_UNKNOWN;
+ break;
+ }
+ } else {
+ switch (sdtype) {
+ case DSC_SDTYPE_DDR:
+ mtype = MEM_DDR;
+ break;
+ case DSC_SDTYPE_DDR2:
+ mtype = MEM_DDR2;
+ break;
+ default:
+ mtype = MEM_UNKNOWN;
+ break;
+ }
+ }
+
+ for (index = 0; index < mci->nr_csrows; index++) {
+ u32 start;
+ u32 end;
+
+ csrow = &mci->csrows[index];
+ cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
+ (index * MPC85XX_MC_CS_BNDS_OFS));
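+ /* The bounds register packs this chip select's start and end
+  * addresses as physical address bits 31:20 (1MB granularity);
+  * zero-sized windows are skipped below as unpopulated.
+  */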
+ start = (cs_bnds & 0xfff0000) << 4;
+ end = ((cs_bnds & 0xfff) << 20);
+ if (start)
+ start |= 0xfffff;
+ if (end)
+ end |= 0xfffff;
+
+ if (start == end)
+ continue; /* not populated */
+
+ csrow->first_page = start >> PAGE_SHIFT;
+ csrow->last_page = end >> PAGE_SHIFT;
+ csrow->nr_pages = csrow->last_page + 1 - csrow->first_page;
+ csrow->grain = 8;
+ csrow->mtype = mtype;
+ csrow->dtype = DEV_UNKNOWN;
+ if (sdram_ctl & DSC_X32_EN)
+ csrow->dtype = DEV_X32;
+ csrow->edac_mode = EDAC_SECDED;
+ }
+}
+
+static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct mem_ctl_info *mci;
+ struct mpc85xx_mc_pdata *pdata;
+ struct resource r;
+ u32 sdram_ctl;
+ int res;
+
+ if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
+ if (!mci) {
+ devres_release_group(&op->dev, mpc85xx_mc_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = mci->pvt_info;
+ pdata->name = "mpc85xx_mc_err";
+ pdata->irq = NO_IRQ;
+ mci->dev = &op->dev;
+ pdata->edac_idx = edac_mc_idx++;
+ dev_set_drvdata(mci->dev, mci);
+ mci->ctl_name = pdata->name;
+ mci->dev_name = pdata->name;
+
+ res = of_address_to_resource(op->node, 0, &r);
+ if (res) {
+ printk(KERN_ERR "%s: Unable to get resource for MC err regs\n",
+ __func__);
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&op->dev, r.start,
+ r.end - r.start + 1, pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->mc_vbase = devm_ioremap(&op->dev, r.start, r.end - r.start + 1);
+ if (!pdata->mc_vbase) {
+ printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ sdram_ctl = in_be32(pdata->mc_vbase + MPC85XX_MC_DDR_SDRAM_CFG);
+ if (!(sdram_ctl & DSC_ECC_EN)) {
+ /* no ECC */
+ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
+ res = -ENODEV;
+ goto err;
+ }
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 |
+ MEM_FLAG_DDR | MEM_FLAG_DDR2;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = MPC85XX_REVISION;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ mci->edac_check = mpc85xx_mc_check;
+
+ mci->ctl_page_to_phys = NULL;
+
+ mci->scrub_mode = SCRUB_SW_SRC;
+
+ mpc85xx_set_mc_sysfs_attributes(mci);
+
+ mpc85xx_init_csrows(mci);
+
+#ifdef CONFIG_EDAC_DEBUG
+ edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
+#endif
+
+ /* store the original error disable bits */
+ orig_ddr_err_disable =
+ in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE, 0);
+
+ /* clear all error bits */
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0);
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN,
+ DDR_EIE_MBEE | DDR_EIE_SBEE);
+
+ /* store the original error management threshold */
+ orig_ddr_err_sbe = in_be32(pdata->mc_vbase +
+ MPC85XX_MC_ERR_SBE) & 0xff0000;
+
+ /* set threshold to 1 error per interrupt */
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, 0x10000);
+
+ /* register interrupts */
+ pdata->irq = irq_of_parse_and_map(op->node, 0);
+ res = devm_request_irq(&op->dev, pdata->irq,
+ mpc85xx_mc_isr, IRQF_DISABLED,
+ "[EDAC] MC err", mci);
+ if (res < 0) {
+ printk(KERN_ERR "%s: Unable to request irq %d for "
+ "MPC85xx DRAM ERR\n", __func__, pdata->irq);
+ irq_dispose_mapping(pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&op->dev, mpc85xx_mc_err_probe);
+ debugf3("%s(): success\n", __func__);
+ printk(KERN_INFO EDAC_MOD_STR " MC err registered\n");
+
+ return 0;
+
+err2:
+ edac_mc_del_mc(&op->dev);
+err:
+ devres_release_group(&op->dev, mpc85xx_mc_err_probe);
+ edac_mc_free(mci);
+ return res;
+}
+
+static int mpc85xx_mc_err_remove(struct of_device *op)
+{
+ struct mem_ctl_info *mci = dev_get_drvdata(&op->dev);
+ struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
+
+ debugf0("%s()\n", __func__);
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0);
+ irq_dispose_mapping(pdata->irq);
+ }
+
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE,
+ orig_ddr_err_disable);
+ out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe);
+
+ edac_mc_del_mc(&op->dev);
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct of_device_id mpc85xx_mc_err_of_match[] = {
+ {
+ .compatible = "fsl,8540-memory-controller",
+ },
+ {
+ .compatible = "fsl,8541-memory-controller",
+ },
+ {
+ .compatible = "fsl,8544-memory-controller",
+ },
+ {
+ .compatible = "fsl,8548-memory-controller",
+ },
+ {
+ .compatible = "fsl,8555-memory-controller",
+ },
+ {
+ .compatible = "fsl,8568-memory-controller",
+ },
+ {},
+};
+
+static struct of_platform_driver mpc85xx_mc_err_driver = {
+ .owner = THIS_MODULE,
+ .name = "mpc85xx_mc_err",
+ .match_table = mpc85xx_mc_err_of_match,
+ .probe = mpc85xx_mc_err_probe,
+ .remove = mpc85xx_mc_err_remove,
+ .driver = {
+ .name = "mpc85xx_mc_err",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mpc85xx_mc_init(void)
+{
+ int res = 0;
+
+ printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
+ "(C) 2006 Montavista Software\n");
+
+ /* make sure error reporting method is sane */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_INT:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_INT;
+ break;
+ }
+
+ res = of_register_platform_driver(&mpc85xx_mc_err_driver);
+ if (res)
+ printk(KERN_WARNING EDAC_MOD_STR "MC fails to register\n");
+
+ res = of_register_platform_driver(&mpc85xx_l2_err_driver);
+ if (res)
+ printk(KERN_WARNING EDAC_MOD_STR "L2 fails to register\n");
+
+#ifdef CONFIG_PCI
+ res = platform_driver_register(&mpc85xx_pci_err_driver);
+ if (res)
+ printk(KERN_WARNING EDAC_MOD_STR "PCI fails to register\n");
+#endif
+
+ /*
+ * need to clear HID1[RFXE] to disable machine check int
+ * so we can catch it
+ */
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ orig_hid1 = mfspr(SPRN_HID1);
+ mtspr(SPRN_HID1, (orig_hid1 & ~0x20000));
+ }
+
+ return 0;
+}
+
+module_init(mpc85xx_mc_init);
+
+static void __exit mpc85xx_mc_exit(void)
+{
+ mtspr(SPRN_HID1, orig_hid1);
+#ifdef CONFIG_PCI
+ platform_driver_unregister(&mpc85xx_pci_err_driver);
+#endif
+ of_unregister_platform_driver(&mpc85xx_l2_err_driver);
+ of_unregister_platform_driver(&mpc85xx_mc_err_driver);
+}
+
+module_exit(mpc85xx_mc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Montavista Software, Inc.");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state,
+ "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h
new file mode 100644
index 000000000000..135b3539a030
--- /dev/null
+++ b/drivers/edac/mpc85xx_edac.h
@@ -0,0 +1,162 @@
+/*
+ * Freescale MPC85xx Memory Controller kernel module
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _MPC85XX_EDAC_H_
+#define _MPC85XX_EDAC_H_
+
+#define MPC85XX_REVISION " Ver: 2.0.0 " __DATE__
+#define EDAC_MOD_STR "MPC85xx_edac"
+
+#define mpc85xx_printk(level, fmt, arg...) \
+ edac_printk(level, "MPC85xx", fmt, ##arg)
+
+#define mpc85xx_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "MPC85xx", fmt, ##arg)
+
+/*
+ * DRAM error defines
+ */
+
+/* DDR_SDRAM_CFG */
+#define MPC85XX_MC_DDR_SDRAM_CFG 0x0110
+#define MPC85XX_MC_CS_BNDS_0 0x0000
+#define MPC85XX_MC_CS_BNDS_1 0x0008
+#define MPC85XX_MC_CS_BNDS_2 0x0010
+#define MPC85XX_MC_CS_BNDS_3 0x0018
+#define MPC85XX_MC_CS_BNDS_OFS 0x0008
+
+#define MPC85XX_MC_DATA_ERR_INJECT_HI 0x0e00
+#define MPC85XX_MC_DATA_ERR_INJECT_LO 0x0e04
+#define MPC85XX_MC_ECC_ERR_INJECT 0x0e08
+#define MPC85XX_MC_CAPTURE_DATA_HI 0x0e20
+#define MPC85XX_MC_CAPTURE_DATA_LO 0x0e24
+#define MPC85XX_MC_CAPTURE_ECC 0x0e28
+#define MPC85XX_MC_ERR_DETECT 0x0e40
+#define MPC85XX_MC_ERR_DISABLE 0x0e44
+#define MPC85XX_MC_ERR_INT_EN 0x0e48
+#define MPC85XX_MC_CAPTURE_ATRIBUTES 0x0e4c
+#define MPC85XX_MC_CAPTURE_ADDRESS 0x0e50
+#define MPC85XX_MC_ERR_SBE 0x0e58
+
+#define DSC_MEM_EN 0x80000000
+#define DSC_ECC_EN 0x20000000
+#define DSC_RD_EN 0x10000000
+
+#define DSC_SDTYPE_MASK 0x07000000
+
+#define DSC_SDTYPE_DDR 0x02000000
+#define DSC_SDTYPE_DDR2 0x03000000
+#define DSC_X32_EN 0x00000020
+
+/* Err_Int_En */
+#define DDR_EIE_MSEE 0x1 /* memory select */
+#define DDR_EIE_SBEE 0x4 /* single-bit ECC error */
+#define DDR_EIE_MBEE 0x8 /* multi-bit ECC error */
+
+/* Err_Detect */
+#define DDR_EDE_MSE 0x1 /* memory select */
+#define DDR_EDE_SBE 0x4 /* single-bit ECC error */
+#define DDR_EDE_MBE 0x8 /* multi-bit ECC error */
+#define DDR_EDE_MME 0x80000000 /* multiple memory errors */
+
+/* Err_Disable */
+#define DDR_EDI_MSED 0x1 /* memory select disable */
+#define DDR_EDI_SBED 0x4 /* single-bit ECC error disable */
+#define DDR_EDI_MBED 0x8 /* multi-bit ECC error disable */
+
+/*
+ * L2 Err defines
+ */
+#define MPC85XX_L2_ERRINJHI 0x0000
+#define MPC85XX_L2_ERRINJLO 0x0004
+#define MPC85XX_L2_ERRINJCTL 0x0008
+#define MPC85XX_L2_CAPTDATAHI 0x0020
+#define MPC85XX_L2_CAPTDATALO 0x0024
+#define MPC85XX_L2_CAPTECC 0x0028
+#define MPC85XX_L2_ERRDET 0x0040
+#define MPC85XX_L2_ERRDIS 0x0044
+#define MPC85XX_L2_ERRINTEN 0x0048
+#define MPC85XX_L2_ERRATTR 0x004c
+#define MPC85XX_L2_ERRADDR 0x0050
+#define MPC85XX_L2_ERRCTL 0x0058
+
+/* Error Interrupt Enable */
+#define L2_EIE_L2CFGINTEN 0x1
+#define L2_EIE_SBECCINTEN 0x4
+#define L2_EIE_MBECCINTEN 0x8
+#define L2_EIE_TPARINTEN 0x10
+#define L2_EIE_MASK (L2_EIE_L2CFGINTEN | L2_EIE_SBECCINTEN | \
+ L2_EIE_MBECCINTEN | L2_EIE_TPARINTEN)
+
+/* Error Detect */
+#define L2_EDE_L2CFGERR 0x1
+#define L2_EDE_SBECCERR 0x4
+#define L2_EDE_MBECCERR 0x8
+#define L2_EDE_TPARERR 0x10
+#define L2_EDE_MULL2ERR 0x80000000
+
+#define L2_EDE_CE_MASK L2_EDE_SBECCERR
+#define L2_EDE_UE_MASK (L2_EDE_L2CFGERR | L2_EDE_MBECCERR | \
+ L2_EDE_TPARERR)
+#define L2_EDE_MASK (L2_EDE_L2CFGERR | L2_EDE_SBECCERR | \
+ L2_EDE_MBECCERR | L2_EDE_TPARERR | L2_EDE_MULL2ERR)
+
+/*
+ * PCI Err defines
+ */
+#define PCI_EDE_TOE 0x00000001
+#define PCI_EDE_SCM 0x00000002
+#define PCI_EDE_IRMSV 0x00000004
+#define PCI_EDE_ORMSV 0x00000008
+#define PCI_EDE_OWMSV 0x00000010
+#define PCI_EDE_TGT_ABRT 0x00000020
+#define PCI_EDE_MST_ABRT 0x00000040
+#define PCI_EDE_TGT_PERR 0x00000080
+#define PCI_EDE_MST_PERR 0x00000100
+#define PCI_EDE_RCVD_SERR 0x00000200
+#define PCI_EDE_ADDR_PERR 0x00000400
+#define PCI_EDE_MULTI_ERR 0x80000000
+
+#define PCI_EDE_PERR_MASK (PCI_EDE_TGT_PERR | PCI_EDE_MST_PERR | \
+ PCI_EDE_ADDR_PERR)
+
+#define MPC85XX_PCI_ERR_DR 0x0000
+#define MPC85XX_PCI_ERR_CAP_DR 0x0004
+#define MPC85XX_PCI_ERR_EN 0x0008
+#define MPC85XX_PCI_ERR_ATTRIB 0x000c
+#define MPC85XX_PCI_ERR_ADDR 0x0010
+#define MPC85XX_PCI_ERR_EXT_ADDR 0x0014
+#define MPC85XX_PCI_ERR_DL 0x0018
+#define MPC85XX_PCI_ERR_DH 0x001c
+#define MPC85XX_PCI_GAS_TIMR 0x0020
+#define MPC85XX_PCI_PCIX_TIMR 0x0024
+
+struct mpc85xx_mc_pdata {
+ char *name;
+ int edac_idx;
+ void __iomem *mc_vbase;
+ int irq;
+};
+
+struct mpc85xx_l2_pdata {
+ char *name;
+ int edac_idx;
+ void __iomem *l2_vbase;
+ int irq;
+};
+
+struct mpc85xx_pci_pdata {
+ char *name;
+ int edac_idx;
+ void __iomem *pci_vbase;
+ int irq;
+};
+
+#endif
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
new file mode 100644
index 000000000000..bf071f140a05
--- /dev/null
+++ b/drivers/edac/mv64x60_edac.c
@@ -0,0 +1,855 @@
+/*
+ * Marvell MV64x60 Memory Controller kernel module for PPC platforms
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/edac.h>
+
+#include "edac_core.h"
+#include "edac_module.h"
+#include "mv64x60_edac.h"
+
+static const char *mv64x60_ctl_name = "MV64x60";
+static int edac_dev_idx;
+static int edac_pci_idx;
+static int edac_mc_idx;
+
+/*********************** PCI err device **********************************/
+#ifdef CONFIG_PCI
+static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
+{
+ struct mv64x60_pci_pdata *pdata = pci->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+ if (!cause)
+ return;
+
+ printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
+ printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+ printk(KERN_ERR "Address Low: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
+ printk(KERN_ERR "Address High: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
+ printk(KERN_ERR "Attribute: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
+ printk(KERN_ERR "Command: 0x%08x\n",
+ in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, ~cause);
+
+ if (cause & MV64X60_PCI_PE_MASK)
+ edac_pci_handle_pe(pci, pci->ctl_name);
+
+ if (!(cause & MV64X60_PCI_PE_MASK))
+ edac_pci_handle_npe(pci, pci->ctl_name);
+}
+
+static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
+{
+ struct edac_pci_ctl_info *pci = dev_id;
+ struct mv64x60_pci_pdata *pdata = pci->pvt_info;
+ u32 val;
+
+ val = in_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
+ if (!val)
+ return IRQ_NONE;
+
+ mv64x60_pci_check(pci);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci;
+ struct mv64x60_pci_pdata *pdata;
+ struct resource *r;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err");
+ if (!pci)
+ return -ENOMEM;
+
+ pdata = pci->pvt_info;
+
+ pdata->pci_hose = pdev->id;
+ pdata->name = "mpc85xx_pci_err";
+ pdata->irq = NO_IRQ;
+ platform_set_drvdata(pdev, pci);
+ pci->dev = &pdev->dev;
+ pci->dev_name = pdev->dev.bus_id;
+ pci->mod_name = EDAC_MOD_STR;
+ pci->ctl_name = pdata->name;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ pci->edac_check = mv64x60_pci_check;
+
+ pdata->edac_idx = edac_pci_idx++;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "PCI err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->pci_vbase = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->pci_vbase) {
+ printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE, 0);
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK, 0);
+ out_le32(pdata->pci_vbase + MV64X60_PCI_ERROR_MASK,
+ MV64X60_PCIx_ERR_MASK_VAL);
+
+ if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
+ debugf3("%s(): failed edac_pci_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_pci_isr,
+ IRQF_DISABLED,
+ "[EDAC] PCI err",
+ pci);
+ if (res < 0) {
+ printk(KERN_ERR "%s: Unable to request irq %d for "
+ "MV64x60 PCI ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+err2:
+ edac_pci_del_device(&pdev->dev);
+err:
+ edac_pci_free_ctl_info(pci);
+ devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
+ return res;
+}
+
+static int mv64x60_pci_err_remove(struct platform_device *pdev)
+{
+ struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_pci_del_device(&pdev->dev);
+
+ edac_pci_free_ctl_info(pci);
+
+ return 0;
+}
+
+static struct platform_driver mv64x60_pci_err_driver = {
+ .probe = mv64x60_pci_err_probe,
+ .remove = __devexit_p(mv64x60_pci_err_remove),
+ .driver = {
+ .name = "mv64x60_pci_err",
+ }
+};
+
+#endif /* CONFIG_PCI */
+
+/*********************** SRAM err device **********************************/
+static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+ if (!cause)
+ return;
+
+ printk(KERN_ERR "Error in internal SRAM\n");
+ printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+ printk(KERN_ERR "Address Low: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
+ printk(KERN_ERR "Address High: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
+ printk(KERN_ERR "Data Low: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
+ printk(KERN_ERR "Data High: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
+ printk(KERN_ERR "Parity: 0x%08x\n",
+ in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
+ out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
+ if (!cause)
+ return IRQ_NONE;
+
+ mv64x60_sram_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct mv64x60_sram_pdata *pdata;
+ struct resource *r;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+ "sram", 1, NULL, 0, 0, NULL, 0,
+ edac_dev_idx);
+ if (!edac_dev) {
+ devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = edac_dev->pvt_info;
+ pdata->name = "mv64x60_sram_err";
+ pdata->irq = NO_IRQ;
+ edac_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, edac_dev);
+ edac_dev->dev_name = pdev->dev.bus_id;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "SRAM err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while request mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->sram_vbase = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->sram_vbase) {
+ printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
+ __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ /* setup SRAM err registers */
+ out_le32(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE, 0);
+
+ edac_dev->mod_name = EDAC_MOD_STR;
+ edac_dev->ctl_name = pdata->name;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = mv64x60_sram_check;
+
+ pdata->edac_idx = edac_dev_idx++;
+
+ if (edac_device_add_device(edac_dev) > 0) {
+ debugf3("%s(): failed edac_device_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_sram_isr,
+ IRQF_DISABLED,
+ "[EDAC] SRAM err",
+ edac_dev);
+ if (res < 0) {
+ printk(KERN_ERR
+ "%s: Unable to request irq %d for "
+ "MV64x60 SRAM ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n",
+ pdata->irq);
+ }
+
+ devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+err2:
+ edac_device_del_device(&pdev->dev);
+err:
+ devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
+ edac_device_free_ctl_info(edac_dev);
+ return res;
+}
+
+static int mv64x60_sram_err_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(edac_dev);
+
+ return 0;
+}
+
+static struct platform_driver mv64x60_sram_err_driver = {
+ .probe = mv64x60_sram_err_probe,
+ .remove = mv64x60_sram_err_remove,
+ .driver = {
+ .name = "mv64x60_sram_err",
+ }
+};
+
+/*********************** CPU err device **********************************/
+static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
+{
+ struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+ MV64x60_CPU_CAUSE_MASK;
+ if (!cause)
+ return;
+
+ printk(KERN_ERR "Error on CPU interface\n");
+ printk(KERN_ERR "Cause register: 0x%08x\n", cause);
+ printk(KERN_ERR "Address Low: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
+ printk(KERN_ERR "Address High: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
+ printk(KERN_ERR "Data Low: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
+ printk(KERN_ERR "Data High: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
+ printk(KERN_ERR "Parity: 0x%08x\n",
+ in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
+
+ edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
+}
+
+static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *edac_dev = dev_id;
+ struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
+ u32 cause;
+
+ cause = in_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
+ MV64x60_CPU_CAUSE_MASK;
+ if (!cause)
+ return IRQ_NONE;
+
+ mv64x60_cpu_check(edac_dev);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev;
+ struct resource *r;
+ struct mv64x60_cpu_pdata *pdata;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
+ "cpu", 1, NULL, 0, 0, NULL, 0,
+ edac_dev_idx);
+ if (!edac_dev) {
+ devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = edac_dev->pvt_info;
+ pdata->name = "mv64x60_cpu_err";
+ pdata->irq = NO_IRQ;
+ edac_dev->dev = &pdev->dev;
+ platform_set_drvdata(pdev, edac_dev);
+ edac_dev->dev_name = pdev->dev.bus_id;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "CPU err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->cpu_vbase[0]) {
+ printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "CPU err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->cpu_vbase[1]) {
+ printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ /* setup CPU err registers */
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE, 0);
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0);
+ out_le32(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK, 0x000000ff);
+
+ edac_dev->mod_name = EDAC_MOD_STR;
+ edac_dev->ctl_name = pdata->name;
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ edac_dev->edac_check = mv64x60_cpu_check;
+
+ pdata->edac_idx = edac_dev_idx++;
+
+ if (edac_device_add_device(edac_dev) > 0) {
+ debugf3("%s(): failed edac_device_add_device()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_cpu_isr,
+ IRQF_DISABLED,
+ "[EDAC] CPU err",
+ edac_dev);
+ if (res < 0) {
+ printk(KERN_ERR
+ "%s: Unable to request irq %d for MV64x60 "
+ "CPU ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR
+ " acquired irq %d for CPU Err\n", pdata->irq);
+ }
+
+ devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+err2:
+ edac_device_del_device(&pdev->dev);
+err:
+ devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
+ edac_device_free_ctl_info(edac_dev);
+ return res;
+}
+
+static int mv64x60_cpu_err_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(edac_dev);
+ return 0;
+}
+
+static struct platform_driver mv64x60_cpu_err_driver = {
+ .probe = mv64x60_cpu_err_probe,
+ .remove = mv64x60_cpu_err_remove,
+ .driver = {
+ .name = "mv64x60_cpu_err",
+ }
+};
+
+/*********************** DRAM err device **********************************/
+
+static void mv64x60_mc_check(struct mem_ctl_info *mci)
+{
+ struct mv64x60_mc_pdata *pdata = mci->pvt_info;
+ u32 reg;
+ u32 err_addr;
+ u32 sdram_ecc;
+ u32 comp_ecc;
+ u32 syndrome;
+
+ reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+ if (!reg)
+ return;
+
+ err_addr = reg & ~0x3;
+ sdram_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
+ comp_ecc = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
+ syndrome = sdram_ecc ^ comp_ecc;
+
+ /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
+ if (!(reg & 0x1))
+ edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, syndrome, 0, 0,
+ mci->ctl_name);
+ else /* 2 bit error, UE */
+ edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, 0, mci->ctl_name);
+
+ /* clear the error */
+ out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
+}
+
+static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
+{
+ struct mem_ctl_info *mci = dev_id;
+ struct mv64x60_mc_pdata *pdata = mci->pvt_info;
+ u32 reg;
+
+ reg = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
+ if (!reg)
+ return IRQ_NONE;
+
+ /* writing 0's to the ECC err addr in check function clears irq */
+ mv64x60_mc_check(mci);
+
+ return IRQ_HANDLED;
+}
+
+static void get_total_mem(struct mv64x60_mc_pdata *pdata)
+{
+ struct device_node *np = NULL;
+ const unsigned int *reg;
+
+ np = of_find_node_by_type(NULL, "memory");
+ if (!np)
+ return;
+
+ reg = of_get_property(np, "reg", NULL);
+
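+ /* assumes the node has a flat <address size> "reg" property with
+  * 32-bit cells, so reg[1] is the total memory size in bytes */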
+ pdata->total_mem = reg[1];
+}
+
+static void mv64x60_init_csrows(struct mem_ctl_info *mci,
+ struct mv64x60_mc_pdata *pdata)
+{
+ struct csrow_info *csrow;
+ u32 devtype;
+ u32 ctl;
+
+ get_total_mem(pdata);
+
+ ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+
+ csrow = &mci->csrows[0];
+ csrow->first_page = 0;
+ csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ csrow->grain = 8;
+
+ csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
+
+ devtype = (ctl >> 20) & 0x3;
+ switch (devtype) {
+ case 0x0:
+ csrow->dtype = DEV_X32;
+ break;
+ case 0x2: /* could be X8 too, but no way to tell */
+ csrow->dtype = DEV_X16;
+ break;
+ case 0x3:
+ csrow->dtype = DEV_X4;
+ break;
+ default:
+ csrow->dtype = DEV_UNKNOWN;
+ break;
+ }
+
+ csrow->edac_mode = EDAC_SECDED;
+}
+
+static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct mv64x60_mc_pdata *pdata;
+ struct resource *r;
+ u32 ctl;
+ int res = 0;
+
+ if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
+ return -ENOMEM;
+
+ mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx);
+ if (!mci) {
+ printk(KERN_ERR "%s: No memory for CPU err\n", __func__);
+ devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
+ return -ENOMEM;
+ }
+
+ pdata = mci->pvt_info;
+ mci->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mci);
+ pdata->name = "mv64x60_mc_err";
+ pdata->irq = NO_IRQ;
+ mci->dev_name = pdev->dev.bus_id;
+ pdata->edac_idx = edac_mc_idx++;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ printk(KERN_ERR "%s: Unable to get resource for "
+ "MC err regs\n", __func__);
+ res = -ENOENT;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev,
+ r->start,
+ r->end - r->start + 1,
+ pdata->name)) {
+ printk(KERN_ERR "%s: Error while requesting mem region\n",
+ __func__);
+ res = -EBUSY;
+ goto err;
+ }
+
+ pdata->mc_vbase = devm_ioremap(&pdev->dev,
+ r->start,
+ r->end - r->start + 1);
+ if (!pdata->mc_vbase) {
+ printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
+ res = -ENOMEM;
+ goto err;
+ }
+
+ ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
+ if (!(ctl & MV64X60_SDRAM_ECC)) {
+ /* Non-ECC RAM? */
+ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
+ res = -ENODEV;
+ goto err;
+ }
+
+ debugf3("%s(): init mci\n", __func__);
+ mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->mod_name = EDAC_MOD_STR;
+ mci->mod_ver = MV64x60_REVISION;
+ mci->ctl_name = mv64x60_ctl_name;
+
+ if (edac_op_state == EDAC_OPSTATE_POLL)
+ mci->edac_check = mv64x60_mc_check;
+
+ mci->ctl_page_to_phys = NULL;
+
+ mci->scrub_mode = SCRUB_SW_SRC;
+
+ mv64x60_init_csrows(mci, pdata);
+
+ /* setup MC registers */
+ out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
+ ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
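+ /* keep the other ECC control bits, but set the byte at bits 23:16
+  * (the error threshold) to 1 */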
+ ctl = (ctl & 0xff00ffff) | 0x10000;
+ out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl);
+
+ if (edac_mc_add_mc(mci)) {
+ debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
+ goto err;
+ }
+
+ if (edac_op_state == EDAC_OPSTATE_INT) {
+ /* acquire interrupt that reports errors */
+ pdata->irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev,
+ pdata->irq,
+ mv64x60_mc_isr,
+ IRQF_DISABLED,
+ "[EDAC] MC err",
+ mci);
+ if (res < 0) {
+ printk(KERN_ERR "%s: Unable to request irq %d for "
+ "MV64x60 DRAM ERR\n", __func__, pdata->irq);
+ res = -ENODEV;
+ goto err2;
+ }
+
+ printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n",
+ pdata->irq);
+ }
+
+ /* get this far and it's successful */
+ debugf3("%s(): success\n", __func__);
+
+ return 0;
+
+err2:
+ edac_mc_del_mc(&pdev->dev);
+err:
+ devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
+ edac_mc_free(mci);
+ return res;
+}
+
+static int mv64x60_mc_err_remove(struct platform_device *pdev)
+{
+ struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+
+ debugf0("%s()\n", __func__);
+
+ edac_mc_del_mc(&pdev->dev);
+ edac_mc_free(mci);
+ return 0;
+}
+
+static struct platform_driver mv64x60_mc_err_driver = {
+ .probe = mv64x60_mc_err_probe,
+ .remove = mv64x60_mc_err_remove,
+ .driver = {
+ .name = "mv64x60_mc_err",
+ }
+};
+
+static int __init mv64x60_edac_init(void)
+{
+ int ret = 0;
+
+ printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
+ printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");
+ /* make sure error reporting method is sane */
+ switch (edac_op_state) {
+ case EDAC_OPSTATE_POLL:
+ case EDAC_OPSTATE_INT:
+ break;
+ default:
+ edac_op_state = EDAC_OPSTATE_INT;
+ break;
+ }
+
+ ret = platform_driver_register(&mv64x60_mc_err_driver);
+ if (ret)
+ printk(KERN_WARNING EDAC_MOD_STR "MC err failed to register\n");
+
+ ret = platform_driver_register(&mv64x60_cpu_err_driver);
+ if (ret)
+ printk(KERN_WARNING EDAC_MOD_STR
+ "CPU err failed to register\n");
+
+ ret = platform_driver_register(&mv64x60_sram_err_driver);
+ if (ret)
+ printk(KERN_WARNING EDAC_MOD_STR
+ "SRAM err failed to register\n");
+
+#ifdef CONFIG_PCI
+ ret = platform_driver_register(&mv64x60_pci_err_driver);
+ if (ret)
+ printk(KERN_WARNING EDAC_MOD_STR
+ "PCI err failed to register\n");
+#endif
+
+ return ret;
+}
+module_init(mv64x60_edac_init);
+
+static void __exit mv64x60_edac_exit(void)
+{
+#ifdef CONFIG_PCI
+ platform_driver_unregister(&mv64x60_pci_err_driver);
+#endif
+ platform_driver_unregister(&mv64x60_sram_err_driver);
+ platform_driver_unregister(&mv64x60_cpu_err_driver);
+ platform_driver_unregister(&mv64x60_mc_err_driver);
+}
+module_exit(mv64x60_edac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Montavista Software, Inc.");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state,
+ "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
diff --git a/drivers/edac/mv64x60_edac.h b/drivers/edac/mv64x60_edac.h
new file mode 100644
index 000000000000..e042e2daa8f4
--- /dev/null
+++ b/drivers/edac/mv64x60_edac.h
@@ -0,0 +1,114 @@
+/*
+ * EDAC defs for Marvell MV64x60 bridge chip
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#ifndef _MV64X60_EDAC_H_
+#define _MV64X60_EDAC_H_
+
+#define MV64x60_REVISION " Ver: 2.0.0 " __DATE__
+#define EDAC_MOD_STR "MV64x60_edac"
+
+#define mv64x60_printk(level, fmt, arg...) \
+ edac_printk(level, "MV64x60", fmt, ##arg)
+
+#define mv64x60_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "MV64x60", fmt, ##arg)
+
+/* CPU Error Report Registers */
+#define MV64x60_CPU_ERR_ADDR_LO 0x00 /* 0x0070 */
+#define MV64x60_CPU_ERR_ADDR_HI 0x08 /* 0x0078 */
+#define MV64x60_CPU_ERR_DATA_LO 0x00 /* 0x0128 */
+#define MV64x60_CPU_ERR_DATA_HI 0x08 /* 0x0130 */
+#define MV64x60_CPU_ERR_PARITY 0x10 /* 0x0138 */
+#define MV64x60_CPU_ERR_CAUSE 0x18 /* 0x0140 */
+#define MV64x60_CPU_ERR_MASK 0x20 /* 0x0148 */
+
+#define MV64x60_CPU_CAUSE_MASK 0x07ffffff
+
+/* SRAM Error Report Registers */
+#define MV64X60_SRAM_ERR_CAUSE 0x08 /* 0x0388 */
+#define MV64X60_SRAM_ERR_ADDR_LO 0x10 /* 0x0390 */
+#define MV64X60_SRAM_ERR_ADDR_HI 0x78 /* 0x03f8 */
+#define MV64X60_SRAM_ERR_DATA_LO 0x18 /* 0x0398 */
+#define MV64X60_SRAM_ERR_DATA_HI 0x20 /* 0x03a0 */
+#define MV64X60_SRAM_ERR_PARITY 0x28 /* 0x03a8 */
+
+/* SDRAM Controller Registers */
+#define MV64X60_SDRAM_CONFIG 0x00 /* 0x1400 */
+#define MV64X60_SDRAM_ERR_DATA_HI 0x40 /* 0x1440 */
+#define MV64X60_SDRAM_ERR_DATA_LO 0x44 /* 0x1444 */
+#define MV64X60_SDRAM_ERR_ECC_RCVD 0x48 /* 0x1448 */
+#define MV64X60_SDRAM_ERR_ECC_CALC 0x4c /* 0x144c */
+#define MV64X60_SDRAM_ERR_ADDR 0x50 /* 0x1450 */
+#define MV64X60_SDRAM_ERR_ECC_CNTL 0x54 /* 0x1454 */
+#define MV64X60_SDRAM_ERR_ECC_ERR_CNT 0x58 /* 0x1458 */
+
+#define MV64X60_SDRAM_REGISTERED 0x20000
+#define MV64X60_SDRAM_ECC 0x40000
+
+#ifdef CONFIG_PCI
+/*
+ * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
+ * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as
+ * well. IOW, don't set bit 0.
+ */
+#define MV64X60_PCIx_ERR_MASK_VAL 0x00a50c24
+
+/* Register offsets from PCIx error address low register */
+#define MV64X60_PCI_ERROR_ADDR_LO 0x00
+#define MV64X60_PCI_ERROR_ADDR_HI 0x04
+#define MV64X60_PCI_ERROR_ATTR 0x08
+#define MV64X60_PCI_ERROR_CMD 0x10
+#define MV64X60_PCI_ERROR_CAUSE 0x18
+#define MV64X60_PCI_ERROR_MASK 0x1c
+
+#define MV64X60_PCI_ERR_SWrPerr 0x0002
+#define MV64X60_PCI_ERR_SRdPerr 0x0004
+#define MV64X60_PCI_ERR_MWrPerr 0x0020
+#define MV64X60_PCI_ERR_MRdPerr 0x0040
+
+#define MV64X60_PCI_PE_MASK (MV64X60_PCI_ERR_SWrPerr | \
+ MV64X60_PCI_ERR_SRdPerr | \
+ MV64X60_PCI_ERR_MWrPerr | \
+ MV64X60_PCI_ERR_MRdPerr)
+
+struct mv64x60_pci_pdata {
+ int pci_hose;
+ void __iomem *pci_vbase;
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+#endif /* CONFIG_PCI */
+
+struct mv64x60_mc_pdata {
+ void __iomem *mc_vbase;
+ int total_mem;
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+struct mv64x60_cpu_pdata {
+ void __iomem *cpu_vbase[2];
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+struct mv64x60_sram_pdata {
+ void __iomem *sram_vbase;
+ char *name;
+ int irq;
+ int edac_idx;
+};
+
+#endif
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 18cdcb3ae1ca..1636806ec55e 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -658,4 +658,5 @@ MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR("Dell Inc.");
MODULE_LICENSE("GPL");
-
+/* Any System or BIOS claiming to be by Dell */
+MODULE_ALIAS("dmi:*:[bs]vnD[Ee][Ll][Ll]*:*");
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 313c99cbdc62..e880d6c8d896 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/device.h>
-#include <linux/autoconf.h>
struct dmi_device_attribute{
struct device_attribute dev_attr;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 9008ed5ef4ce..e0bade732376 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -489,12 +489,3 @@ int dmi_get_year(int field)
return year;
}
-
-/**
- * dmi_get_slot - return dmi_ident[slot]
- * @slot: index into dmi_ident[]
- */
-char *dmi_get_slot(int slot)
-{
- return(dmi_ident[slot]);
-}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
new file mode 100644
index 000000000000..bbd28342e771
--- /dev/null
+++ b/drivers/gpio/Kconfig
@@ -0,0 +1,73 @@
+#
+# GPIO infrastructure and expanders
+#
+
+config HAVE_GPIO_LIB
+ bool
+ help
+ Platforms select gpiolib if they use this infrastructure
+ for all their GPIOs, usually starting with ones integrated
+ into SOC processors.
+
+menu "GPIO Support"
+ depends on HAVE_GPIO_LIB
+
+config DEBUG_GPIO
+ bool "Debug GPIO calls"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here to add some extra checks and diagnostics to GPIO calls.
+ The checks help ensure that GPIOs have been properly initialized
+ before they are used and that sleeping calls are not made from
+ nonsleeping contexts. They can make bitbanged serial protocols
+ slower. The diagnostics help catch the type of setup errors
+ that are most common when setting up new platforms or boards.
+
+# put expanders in the right section, in alphabetical order
+
+comment "I2C GPIO expanders:"
+
+config GPIO_PCA953X
+ tristate "PCA953x I/O ports"
+ depends on I2C
+ help
+ Say yes here to support the PCA9534 (8-bit), PCA9535 (16-bit),
+ PCA9536 (4-bit), PCA9537 (4-bit), PCA9538 (8-bit), and PCA9539
+ (16-bit) I/O ports. These parts are made by NXP and TI.
+
+ This driver can also be built as a module. If so, the module
+ will be called pca953x.
+
+config GPIO_PCF857X
+ tristate "PCF857x, PCA857x, and PCA967x I2C GPIO expanders"
+ depends on I2C
+ help
+ Say yes here to provide access to most "quasi-bidirectional" I2C
+ GPIO expanders used for additional digital outputs or inputs.
+ Most of these parts are from NXP, though TI is a second source for
+ some of them. Compatible models include:
+
+ 8 bits: pcf8574, pcf8574a, pca8574, pca8574a,
+ pca9670, pca9672, pca9674, pca9674a
+
+ 16 bits: pcf8575, pcf8575c, pca8575,
+ pca9671, pca9673, pca9675
+
+ Your board setup code will need to declare the expanders in
+ use, and assign numbers to the GPIOs they expose. Those GPIOs
+ can then be used from drivers and other kernel code, just like
+ other GPIOs, but only accessible from task contexts.
+
+ This driver provides an in-kernel interface to those GPIOs using
+ platform-neutral GPIO calls.
+
+comment "SPI GPIO expanders:"
+
+config GPIO_MCP23S08
+ tristate "Microchip MCP23S08 I/O expander"
+ depends on SPI_MASTER
+ help
+ SPI driver for Microchip MCP23S08 I/O expander. This provides
+ a GPIO interface supporting inputs and outputs.
+
+endmenu
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
new file mode 100644
index 000000000000..fdde9923cf33
--- /dev/null
+++ b/drivers/gpio/Makefile
@@ -0,0 +1,9 @@
+# gpio support: dedicated expander chips, etc
+
+ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
+
+obj-$(CONFIG_HAVE_GPIO_LIB) += gpiolib.o
+
+obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
+obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
+obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
new file mode 100644
index 000000000000..d8db2f8ee411
--- /dev/null
+++ b/drivers/gpio/gpiolib.c
@@ -0,0 +1,567 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+
+#include <asm/gpio.h>
+
+
+/* Optional implementation infrastructure for GPIO interfaces.
+ *
+ * Platforms may want to use this if they tend to use very many GPIOs
+ * that aren't part of a System-On-Chip core; or across I2C/SPI/etc.
+ *
+ * When kernel footprint or instruction count is an issue, simpler
+ * implementations may be preferred. The GPIO programming interface
+ * allows for inlining speed-critical get/set operations for common
+ * cases, so that access to SOC-integrated GPIOs can sometimes cost
+ * only an instruction or two per bit.
+ */
+
+
+/* When debugging, extend minimal trust to callers and platform code.
+ * Also emit diagnostic messages that may help initial bringup, when
+ * board setup or driver bugs are most common.
+ *
+ * Otherwise, minimize overhead in what may be bitbanging codepaths.
+ */
+#ifdef DEBUG
+#define extra_checks 1
+#else
+#define extra_checks 0
+#endif
+
+/* gpio_lock prevents conflicts during gpio_desc[] table updates.
+ * While any GPIO is requested, its gpio_chip is not removable;
+ * each GPIO's "requested" flag serves as a lock and refcount.
+ */
+static DEFINE_SPINLOCK(gpio_lock);
+
+struct gpio_desc {
+ struct gpio_chip *chip;
+ unsigned long flags;
+/* flag symbols are bit numbers */
+#define FLAG_REQUESTED 0
+#define FLAG_IS_OUT 1
+
+#ifdef CONFIG_DEBUG_FS
+ const char *label;
+#endif
+};
+static struct gpio_desc gpio_desc[ARCH_NR_GPIOS];
+
+static inline void desc_set_label(struct gpio_desc *d, const char *label)
+{
+#ifdef CONFIG_DEBUG_FS
+ d->label = label;
+#endif
+}
+
+/* Warn when drivers omit gpio_request() calls -- legal but ill-advised
+ * when setting direction, and otherwise illegal. Until board setup code
+ * and drivers use explicit requests everywhere (which won't happen when
+ * those calls have no teeth) we can't avoid autorequesting. This nag
+ * message should motivate switching to explicit requests...
+ */
+static void gpio_ensure_requested(struct gpio_desc *desc)
+{
+ if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
+ pr_warning("GPIO-%d autorequested\n", (int)(desc - gpio_desc));
+ desc_set_label(desc, "[auto]");
+ }
+}
+
+/* caller holds gpio_lock *OR* gpio is marked as requested */
+static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
+{
+ return gpio_desc[gpio].chip;
+}
+
+/**
+ * gpiochip_add() - register a gpio_chip
+ * @chip: the chip to register, with chip->base initialized
+ * Context: potentially before irqs or kmalloc will work
+ *
+ * Returns a negative errno if the chip can't be registered, such as
+ * because the chip->base is invalid or already associated with a
+ * different chip. Otherwise it returns zero as a success code.
+ */
+int gpiochip_add(struct gpio_chip *chip)
+{
+ unsigned long flags;
+ int status = 0;
+ unsigned id;
+
+ /* NOTE chip->base negative is reserved to mean a request for
+ * dynamic allocation. We don't currently support that.
+ */
+
+ if (chip->base < 0 || (chip->base + chip->ngpio) >= ARCH_NR_GPIOS) {
+ status = -EINVAL;
+ goto fail;
+ }
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ /* these GPIO numbers must not be managed by another gpio_chip */
+ for (id = chip->base; id < chip->base + chip->ngpio; id++) {
+ if (gpio_desc[id].chip != NULL) {
+ status = -EBUSY;
+ break;
+ }
+ }
+ if (status == 0) {
+ for (id = chip->base; id < chip->base + chip->ngpio; id++) {
+ gpio_desc[id].chip = chip;
+ gpio_desc[id].flags = 0;
+ }
+ }
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+fail:
+ /* failures here can mean systems won't boot... */
+ if (status)
+ pr_err("gpiochip_add: gpios %d..%d (%s) not registered\n",
+ chip->base, chip->base + chip->ngpio,
+ chip->label ? : "generic");
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpiochip_add);
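+
+/* Example: a minimal sketch of how a platform or expander driver might
+ * register a bank with gpiolib (illustrative only; "my_bank" and its
+ * callbacks are hypothetical names, not part of this file):
+ *
+ *	static struct gpio_chip my_bank = {
+ *		.label			= "my_bank",
+ *		.direction_input	= my_bank_dir_in,
+ *		.get			= my_bank_get,
+ *		.direction_output	= my_bank_dir_out,
+ *		.set			= my_bank_set,
+ *		.base			= 32,
+ *		.ngpio			= 16,
+ *	};
+ *
+ *	status = gpiochip_add(&my_bank);
+ *
+ * Each callback receives the chip plus an offset in 0..ngpio-1; gpiolib
+ * translates global GPIO numbers into that offset.
+ */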
+
+/**
+ * gpiochip_remove() - unregister a gpio_chip
+ * @chip: the chip to unregister
+ *
+ * A gpio_chip with any GPIOs still requested may not be removed.
+ */
+int gpiochip_remove(struct gpio_chip *chip)
+{
+ unsigned long flags;
+ int status = 0;
+ unsigned id;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ for (id = chip->base; id < chip->base + chip->ngpio; id++) {
+ if (test_bit(FLAG_REQUESTED, &gpio_desc[id].flags)) {
+ status = -EBUSY;
+ break;
+ }
+ }
+ if (status == 0) {
+ for (id = chip->base; id < chip->base + chip->ngpio; id++)
+ gpio_desc[id].chip = NULL;
+ }
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpiochip_remove);
+
+
+/* These "optional" allocation calls help prevent drivers from stomping
+ * on each other, and help provide better diagnostics in debugfs.
+ * They're called even less than the "set direction" calls.
+ */
+int gpio_request(unsigned gpio, const char *label)
+{
+ struct gpio_desc *desc;
+ int status = -EINVAL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (gpio >= ARCH_NR_GPIOS)
+ goto done;
+ desc = &gpio_desc[gpio];
+ if (desc->chip == NULL)
+ goto done;
+
+ /* NOTE: gpio_request() can be called in early boot,
+ * before IRQs are enabled.
+ */
+
+ if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
+ desc_set_label(desc, label ? : "?");
+ status = 0;
+ } else
+ status = -EBUSY;
+
+done:
+ if (status)
+ pr_debug("gpio_request: gpio-%d (%s) status %d\n",
+ gpio, label ? : "?", status);
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_request);
+
+void gpio_free(unsigned gpio)
+{
+ unsigned long flags;
+ struct gpio_desc *desc;
+
+ if (gpio >= ARCH_NR_GPIOS) {
+ WARN_ON(extra_checks);
+ return;
+ }
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ desc = &gpio_desc[gpio];
+ if (desc->chip && test_and_clear_bit(FLAG_REQUESTED, &desc->flags))
+ desc_set_label(desc, NULL);
+ else
+ WARN_ON(extra_checks);
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+}
+EXPORT_SYMBOL_GPL(gpio_free);
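+
+/* Example: typical consumer usage of the calls above (a sketch; MY_GPIO
+ * and "my-driver" are placeholders chosen by board and driver code):
+ *
+ *	status = gpio_request(MY_GPIO, "my-driver");
+ *	if (status)
+ *		return status;
+ *	...
+ *	gpio_free(MY_GPIO);
+ */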
+
+
+/**
+ * gpiochip_is_requested - return string iff signal was requested
+ * @chip: controller managing the signal
+ * @offset: of signal within controller's 0..(ngpio - 1) range
+ *
+ * Returns NULL if the GPIO is not currently requested, else a string.
+ * If debugfs support is enabled, the string returned is the label passed
+ * to gpio_request(); otherwise it is a meaningless constant.
+ *
+ * This function is for use by GPIO controller drivers. The label can
+ * help with diagnostics, and knowing that the signal is used as a GPIO
+ * can help avoid accidentally multiplexing it to another controller.
+ */
+const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned gpio = chip->base + offset;
+
+ if (gpio >= ARCH_NR_GPIOS || gpio_desc[gpio].chip != chip)
+ return NULL;
+ if (test_bit(FLAG_REQUESTED, &gpio_desc[gpio].flags) == 0)
+ return NULL;
+#ifdef CONFIG_DEBUG_FS
+ return gpio_desc[gpio].label;
+#else
+ return "?";
+#endif
+}
+EXPORT_SYMBOL_GPL(gpiochip_is_requested);
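+
+/* Example: a pin-multiplexing or controller driver could check the line
+ * before routing the pin elsewhere (sketch, hypothetical caller):
+ *
+ *	if (gpiochip_is_requested(chip, offset))
+ *		return -EBUSY;
+ */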
+
+
+/* Drivers MUST set GPIO direction before making get/set calls. In
+ * some cases this is done in early boot, before IRQs are enabled.
+ *
+ * As a rule these aren't called more than once (except for drivers
+ * using the open-drain emulation idiom) so these are natural places
+ * to accumulate extra debugging checks. Note that we can't (yet)
+ * rely on gpio_request() having been called beforehand.
+ */
+
+int gpio_direction_input(unsigned gpio)
+{
+ unsigned long flags;
+ struct gpio_chip *chip;
+ struct gpio_desc *desc = &gpio_desc[gpio];
+ int status = -EINVAL;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (gpio >= ARCH_NR_GPIOS)
+ goto fail;
+ chip = desc->chip;
+ if (!chip || !chip->get || !chip->direction_input)
+ goto fail;
+ gpio -= chip->base;
+ if (gpio >= chip->ngpio)
+ goto fail;
+ gpio_ensure_requested(desc);
+
+ /* now we know the gpio is valid and chip won't vanish */
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ might_sleep_if(extra_checks && chip->can_sleep);
+
+ status = chip->direction_input(chip, gpio);
+ if (status == 0)
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ return status;
+fail:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n",
+ __FUNCTION__, gpio, status);
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_direction_input);
+
+int gpio_direction_output(unsigned gpio, int value)
+{
+ unsigned long flags;
+ struct gpio_chip *chip;
+ struct gpio_desc *desc = &gpio_desc[gpio];
+ int status = -EINVAL;
+
+ spin_lock_irqsave(&gpio_lock, flags);
+
+ if (gpio >= ARCH_NR_GPIOS)
+ goto fail;
+ chip = desc->chip;
+ if (!chip || !chip->set || !chip->direction_output)
+ goto fail;
+ gpio -= chip->base;
+ if (gpio >= chip->ngpio)
+ goto fail;
+ gpio_ensure_requested(desc);
+
+ /* now we know the gpio is valid and chip won't vanish */
+
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ might_sleep_if(extra_checks && chip->can_sleep);
+
+ status = chip->direction_output(chip, gpio, value);
+ if (status == 0)
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ return status;
+fail:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ if (status)
+ pr_debug("%s: gpio-%d status %d\n",
+ __FUNCTION__, gpio, status);
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpio_direction_output);
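+
+/* Example: the "open-drain emulation" idiom mentioned above (a sketch;
+ * MY_GPIO is a placeholder).  The line is asserted by driving it low as
+ * an output, and released by switching back to input so an external
+ * pull-up can raise it:
+ *
+ *	gpio_direction_output(MY_GPIO, 0);	(drive the line low)
+ *	...
+ *	gpio_direction_input(MY_GPIO);		(release; pull-up wins)
+ */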
+
+
+/* I/O calls are only valid after configuration completed; the relevant
+ * "is this a valid GPIO" error checks should already have been done.
+ *
+ * "Get" operations are often inlinable as reading a pin value register,
+ * and masking the relevant bit in that register.
+ *
+ * When "set" operations are inlinable, they involve writing that mask to
+ * one register to set a low value, or a different register to set it high.
+ * Otherwise locking is needed, so there may be little value to inlining.
+ *
+ *------------------------------------------------------------------------
+ *
+ * IMPORTANT!!! The hot paths -- get/set value -- assume that callers
+ * have requested the GPIO. That can include implicit requesting by
+ * a direction setting call. Marking a gpio as requested locks its chip
+ * in memory, guaranteeing that these table lookups need no more locking
+ * and that gpiochip_remove() will fail.
+ *
+ * REVISIT when debugging, consider adding some instrumentation to ensure
+ * that the GPIO was actually requested.
+ */
+
+/**
+ * __gpio_get_value() - return a gpio's value
+ * @gpio: gpio whose value will be returned
+ * Context: any
+ *
+ * This is used directly or indirectly to implement gpio_get_value().
+ * It returns the zero or nonzero value provided by the associated
+ * gpio_chip.get() method; or zero if no such method is provided.
+ */
+int __gpio_get_value(unsigned gpio)
+{
+ struct gpio_chip *chip;
+
+ chip = gpio_to_chip(gpio);
+ WARN_ON(extra_checks && chip->can_sleep);
+ return chip->get ? chip->get(chip, gpio - chip->base) : 0;
+}
+EXPORT_SYMBOL_GPL(__gpio_get_value);
+
+/**
+ * __gpio_set_value() - assign a gpio's value
+ * @gpio: gpio whose value will be assigned
+ * @value: value to assign
+ * Context: any
+ *
+ * This is used directly or indirectly to implement gpio_set_value().
+ * It invokes the associated gpio_chip.set() method.
+ */
+void __gpio_set_value(unsigned gpio, int value)
+{
+ struct gpio_chip *chip;
+
+ chip = gpio_to_chip(gpio);
+ WARN_ON(extra_checks && chip->can_sleep);
+ chip->set(chip, gpio - chip->base, value);
+}
+EXPORT_SYMBOL_GPL(__gpio_set_value);
+
+/**
+ * __gpio_cansleep() - report whether gpio value access will sleep
+ * @gpio: gpio in question
+ * Context: any
+ *
+ * This is used directly or indirectly to implement gpio_cansleep(). It
+ * returns nonzero if access reading or writing the GPIO value can sleep.
+ */
+int __gpio_cansleep(unsigned gpio)
+{
+ struct gpio_chip *chip;
+
+ /* only call this on GPIOs that are valid! */
+ chip = gpio_to_chip(gpio);
+
+ return chip->can_sleep;
+}
+EXPORT_SYMBOL_GPL(__gpio_cansleep);
+
+
+
+/* There's no value in making it easy to inline GPIO calls that may sleep.
+ * Common examples include ones connected to I2C or SPI chips.
+ */
+
+int gpio_get_value_cansleep(unsigned gpio)
+{
+ struct gpio_chip *chip;
+
+ might_sleep_if(extra_checks);
+ chip = gpio_to_chip(gpio);
+ return chip->get(chip, gpio - chip->base);
+}
+EXPORT_SYMBOL_GPL(gpio_get_value_cansleep);
+
+void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+ struct gpio_chip *chip;
+
+ might_sleep_if(extra_checks);
+ chip = gpio_to_chip(gpio);
+ chip->set(chip, gpio - chip->base, value);
+}
+EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
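+
+/* Example: choosing an accessor for a line that might sit behind an
+ * I2C or SPI expander (sketch; MY_GPIO is a placeholder):
+ *
+ *	if (gpio_cansleep(MY_GPIO))
+ *		gpio_set_value_cansleep(MY_GPIO, 1);	(task context only)
+ *	else
+ *		gpio_set_value(MY_GPIO, 1);
+ *
+ * In practice a driver usually knows which kind of line it was handed
+ * and calls the matching accessor directly.
+ */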
+
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+
+static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ unsigned i;
+ unsigned gpio = chip->base;
+ struct gpio_desc *gdesc = &gpio_desc[gpio];
+ int is_out;
+
+ for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) {
+ if (!test_bit(FLAG_REQUESTED, &gdesc->flags))
+ continue;
+
+ is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
+ seq_printf(s, " gpio-%-3d (%-12s) %s %s",
+ gpio, gdesc->label,
+ is_out ? "out" : "in ",
+ chip->get
+ ? (chip->get(chip, i) ? "hi" : "lo")
+ : "? ");
+
+ if (!is_out) {
+ int irq = gpio_to_irq(gpio);
+ struct irq_desc *desc = irq_desc + irq;
+
+ /* This races with request_irq(), set_irq_type(),
+ * and set_irq_wake() ... but those are "rare".
+ *
+ * More significantly, trigger type flags aren't
+ * currently maintained by genirq.
+ */
+ if (irq >= 0 && desc->action) {
+ char *trigger;
+
+ switch (desc->status & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_NONE:
+ trigger = "(default)";
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ trigger = "edge-falling";
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ trigger = "edge-rising";
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ trigger = "edge-both";
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ trigger = "level-high";
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ trigger = "level-low";
+ break;
+ default:
+ trigger = "?trigger?";
+ break;
+ }
+
+ seq_printf(s, " irq-%d %s%s",
+ irq, trigger,
+ (desc->status & IRQ_WAKEUP)
+ ? " wakeup" : "");
+ }
+ }
+
+ seq_printf(s, "\n");
+ }
+}
+
+static int gpiolib_show(struct seq_file *s, void *unused)
+{
+ struct gpio_chip *chip = NULL;
+ unsigned gpio;
+ int started = 0;
+
+ /* REVISIT this isn't locked against gpio_chip removal ... */
+
+ for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) {
+ if (chip == gpio_desc[gpio].chip)
+ continue;
+ chip = gpio_desc[gpio].chip;
+ if (!chip)
+ continue;
+
+ seq_printf(s, "%sGPIOs %d-%d, %s%s:\n",
+ started ? "\n" : "",
+ chip->base, chip->base + chip->ngpio - 1,
+ chip->label ? : "generic",
+ chip->can_sleep ? ", can sleep" : "");
+ started = 1;
+ if (chip->dbg_show)
+ chip->dbg_show(s, chip);
+ else
+ gpiolib_dbg_show(s, chip);
+ }
+ return 0;
+}
+
+static int gpiolib_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, gpiolib_show, NULL);
+}
+
+static struct file_operations gpiolib_operations = {
+ .open = gpiolib_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init gpiolib_debugfs_init(void)
+{
+ /* /sys/kernel/debug/gpio */
+ (void) debugfs_create_file("gpio", S_IFREG | S_IRUGO,
+ NULL, NULL, &gpiolib_operations);
+ return 0;
+}
+subsys_initcall(gpiolib_debugfs_init);
+
+#endif /* DEBUG_FS */
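To use the new cansleep calls from driver code — a minimal consumer sketch, assuming the usual gpio_request()/gpio_direction_input() helpers described in Documentation/gpio.txt; GPIO number 200 is an arbitrary example that would land on an expander registered with can_sleep set:

#include <asm/gpio.h>

static int example_read_expander_pin(void)
{
	int value;
	int err;

	/* 200 is an arbitrary example GPIO number on an expander */
	err = gpio_request(200, "example");
	if (err)
		return err;

	err = gpio_direction_input(200);
	if (err) {
		gpio_free(200);
		return err;
	}

	/* may block on an I2C/SPI transfer; never call from IRQ context */
	value = gpio_get_value_cansleep(200);

	gpio_free(200);
	return value;
}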
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/mcp23s08.c
new file mode 100644
index 000000000000..bb60e8c1a1f0
--- /dev/null
+++ b/drivers/gpio/mcp23s08.c
@@ -0,0 +1,357 @@
+/*
+ * mcp23s08.c - SPI gpio expander driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/mcp23s08.h>
+
+#include <asm/gpio.h>
+
+
+/* Registers are all 8 bits wide.
+ *
+ * The mcp23s17 has twice as many bits, and can be configured to work
+ * with either 16 bit registers or with two adjacent 8 bit banks.
+ *
+ * Also, there are I2C versions of both chips.
+ */
+#define MCP_IODIR 0x00 /* init/reset: all ones */
+#define MCP_IPOL 0x01
+#define MCP_GPINTEN 0x02
+#define MCP_DEFVAL 0x03
+#define MCP_INTCON 0x04
+#define MCP_IOCON 0x05
+# define IOCON_SEQOP (1 << 5)
+# define IOCON_HAEN (1 << 3)
+# define IOCON_ODR (1 << 2)
+# define IOCON_INTPOL (1 << 1)
+#define MCP_GPPU 0x06
+#define MCP_INTF 0x07
+#define MCP_INTCAP 0x08
+#define MCP_GPIO 0x09
+#define MCP_OLAT 0x0a
+
+struct mcp23s08 {
+ struct spi_device *spi;
+ u8 addr;
+
+ /* lock protects the cached values */
+ struct mutex lock;
+ u8 cache[11];
+
+ struct gpio_chip chip;
+
+ struct work_struct work;
+};
+
+static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg)
+{
+ u8 tx[2], rx[1];
+ int status;
+
+ tx[0] = mcp->addr | 0x01;
+ tx[1] = reg;
+ status = spi_write_then_read(mcp->spi, tx, sizeof tx, rx, sizeof rx);
+ return (status < 0) ? status : rx[0];
+}
+
+static int mcp23s08_write(struct mcp23s08 *mcp, unsigned reg, u8 val)
+{
+ u8 tx[3];
+
+ tx[0] = mcp->addr;
+ tx[1] = reg;
+ tx[2] = val;
+ return spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0);
+}
+
+static int
+mcp23s08_read_regs(struct mcp23s08 *mcp, unsigned reg, u8 *vals, unsigned n)
+{
+ u8 tx[2];
+
+ if ((n + reg) > sizeof mcp->cache)
+ return -EINVAL;
+ tx[0] = mcp->addr | 0x01;
+ tx[1] = reg;
+ return spi_write_then_read(mcp->spi, tx, sizeof tx, vals, n);
+}
+
+/*----------------------------------------------------------------------*/
+
+static int mcp23s08_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
+ int status;
+
+ mutex_lock(&mcp->lock);
+ mcp->cache[MCP_IODIR] |= (1 << offset);
+ status = mcp23s08_write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
+ mutex_unlock(&mcp->lock);
+ return status;
+}
+
+static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
+ int status;
+
+ mutex_lock(&mcp->lock);
+
+ /* REVISIT reading this clears any IRQ ... */
+ status = mcp23s08_read(mcp, MCP_GPIO);
+ if (status < 0)
+ status = 0;
+ else {
+ mcp->cache[MCP_GPIO] = status;
+ status = !!(status & (1 << offset));
+ }
+ mutex_unlock(&mcp->lock);
+ return status;
+}
+
+static int __mcp23s08_set(struct mcp23s08 *mcp, unsigned mask, int value)
+{
+ u8 olat = mcp->cache[MCP_OLAT];
+
+ if (value)
+ olat |= mask;
+ else
+ olat &= ~mask;
+ mcp->cache[MCP_OLAT] = olat;
+ return mcp23s08_write(mcp, MCP_OLAT, olat);
+}
+
+static void mcp23s08_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
+ u8 mask = 1 << offset;
+
+ mutex_lock(&mcp->lock);
+ __mcp23s08_set(mcp, mask, value);
+ mutex_unlock(&mcp->lock);
+}
+
+static int
+mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct mcp23s08 *mcp = container_of(chip, struct mcp23s08, chip);
+ u8 mask = 1 << offset;
+ int status;
+
+ mutex_lock(&mcp->lock);
+ status = __mcp23s08_set(mcp, mask, value);
+ if (status == 0) {
+ mcp->cache[MCP_IODIR] &= ~mask;
+ status = mcp23s08_write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]);
+ }
+ mutex_unlock(&mcp->lock);
+ return status;
+}
+
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/seq_file.h>
+
+/*
+ * This shows more info than the generic gpio dump code:
+ * pullups, deglitching, open drain drive.
+ */
+static void mcp23s08_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ struct mcp23s08 *mcp;
+ char bank;
+ unsigned t;
+ unsigned mask;
+
+ mcp = container_of(chip, struct mcp23s08, chip);
+
+ /* NOTE: we only handle one bank for now ... */
+ bank = '0' + ((mcp->addr >> 1) & 0x3);
+
+ mutex_lock(&mcp->lock);
+ t = mcp23s08_read_regs(mcp, 0, mcp->cache, sizeof mcp->cache);
+ if (t < 0) {
+ seq_printf(s, " I/O ERROR %d\n", t);
+ goto done;
+ }
+
+ for (t = 0, mask = 1; t < 8; t++, mask <<= 1) {
+ const char *label;
+
+ label = gpiochip_is_requested(chip, t);
+ if (!label)
+ continue;
+
+ seq_printf(s, " gpio-%-3d P%c.%d (%-12s) %s %s %s",
+ chip->base + t, bank, t, label,
+ (mcp->cache[MCP_IODIR] & mask) ? "in " : "out",
+ (mcp->cache[MCP_GPIO] & mask) ? "hi" : "lo",
+ (mcp->cache[MCP_GPPU] & mask) ? " " : "up");
+ /* NOTE: ignoring the irq-related registers */
+ seq_printf(s, "\n");
+ }
+done:
+ mutex_unlock(&mcp->lock);
+}
+
+#else
+#define mcp23s08_dbg_show NULL
+#endif
+
+/*----------------------------------------------------------------------*/
+
+static int mcp23s08_probe(struct spi_device *spi)
+{
+ struct mcp23s08 *mcp;
+ struct mcp23s08_platform_data *pdata;
+ int status;
+ int do_update = 0;
+
+ pdata = spi->dev.platform_data;
+ if (!pdata || pdata->slave > 3 || !pdata->base)
+ return -ENODEV;
+
+ mcp = kzalloc(sizeof *mcp, GFP_KERNEL);
+ if (!mcp)
+ return -ENOMEM;
+
+ mutex_init(&mcp->lock);
+
+ mcp->spi = spi;
+ mcp->addr = 0x40 | (pdata->slave << 1);
+
+ mcp->chip.label = "mcp23s08";
+
+ mcp->chip.direction_input = mcp23s08_direction_input;
+ mcp->chip.get = mcp23s08_get;
+ mcp->chip.direction_output = mcp23s08_direction_output;
+ mcp->chip.set = mcp23s08_set;
+ mcp->chip.dbg_show = mcp23s08_dbg_show;
+
+ mcp->chip.base = pdata->base;
+ mcp->chip.ngpio = 8;
+ mcp->chip.can_sleep = 1;
+
+ spi_set_drvdata(spi, mcp);
+
+ /* verify MCP_IOCON.SEQOP = 0, so sequential reads work */
+ status = mcp23s08_read(mcp, MCP_IOCON);
+ if (status < 0)
+ goto fail;
+ if (status & IOCON_SEQOP) {
+ status &= ~IOCON_SEQOP;
+ status = mcp23s08_write(mcp, MCP_IOCON, (u8) status);
+ if (status < 0)
+ goto fail;
+ }
+
+ /* configure ~100K pullups */
+ status = mcp23s08_write(mcp, MCP_GPPU, pdata->pullups);
+ if (status < 0)
+ goto fail;
+
+ status = mcp23s08_read_regs(mcp, 0, mcp->cache, sizeof mcp->cache);
+ if (status < 0)
+ goto fail;
+
+ /* disable inverter on input */
+ if (mcp->cache[MCP_IPOL] != 0) {
+ mcp->cache[MCP_IPOL] = 0;
+ do_update = 1;
+ }
+
+ /* disable irqs */
+ if (mcp->cache[MCP_GPINTEN] != 0) {
+ mcp->cache[MCP_GPINTEN] = 0;
+ do_update = 1;
+ }
+
+ if (do_update) {
+ u8 tx[4];
+
+ tx[0] = mcp->addr;
+ tx[1] = MCP_IPOL;
+ memcpy(&tx[2], &mcp->cache[MCP_IPOL], sizeof(tx) - 2);
+ status = spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0);
+
+ /* FIXME check status... */
+ }
+
+ status = gpiochip_add(&mcp->chip);
+
+ /* NOTE: these chips have a relatively sane IRQ framework, with
+ * per-signal masking and level/edge triggering. It's not yet
+ * handled here...
+ */
+
+ if (pdata->setup) {
+ status = pdata->setup(spi, mcp->chip.base,
+ mcp->chip.ngpio, pdata->context);
+ if (status < 0)
+ dev_dbg(&spi->dev, "setup --> %d\n", status);
+ }
+
+ return 0;
+
+fail:
+ kfree(mcp);
+ return status;
+}
+
+static int mcp23s08_remove(struct spi_device *spi)
+{
+ struct mcp23s08 *mcp = spi_get_drvdata(spi);
+ struct mcp23s08_platform_data *pdata = spi->dev.platform_data;
+ int status = 0;
+
+ if (pdata->teardown) {
+ status = pdata->teardown(spi,
+ mcp->chip.base, mcp->chip.ngpio,
+ pdata->context);
+ if (status < 0) {
+ dev_err(&spi->dev, "%s --> %d\n", "teardown", status);
+ return status;
+ }
+ }
+
+ status = gpiochip_remove(&mcp->chip);
+ if (status == 0)
+ kfree(mcp);
+ else
+ dev_err(&spi->dev, "%s --> %d\n", "remove", status);
+ return status;
+}
+
+static struct spi_driver mcp23s08_driver = {
+ .probe = mcp23s08_probe,
+ .remove = mcp23s08_remove,
+ .driver = {
+ .name = "mcp23s08",
+ .owner = THIS_MODULE,
+ },
+};
+
+/*----------------------------------------------------------------------*/
+
+static int __init mcp23s08_init(void)
+{
+ return spi_register_driver(&mcp23s08_driver);
+}
+module_init(mcp23s08_init);
+
+static void __exit mcp23s08_exit(void)
+{
+ spi_unregister_driver(&mcp23s08_driver);
+}
+module_exit(mcp23s08_exit);
+
+MODULE_LICENSE("GPL");
+
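A board-wiring sketch for this driver: the platform_data field names (slave, base, pullups, setup, context) are inferred from mcp23s08_probe() above — the authoritative layout is in include/linux/spi/mcp23s08.h, which is not part of this hunk — and the bus number, chipselect, and clock rate are made-up examples.

#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/mcp23s08.h>

/* field names inferred from mcp23s08_probe(); check the header for the
 * authoritative layout
 */
static struct mcp23s08_platform_data board_mcp23s08 = {
	.slave		= 0,		/* A1:A0 address pins strapped low */
	.base		= 200,		/* first GPIO number handed to gpiolib */
	.pullups	= 0x0f,		/* enable ~100K pullups on GP0..GP3 */
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "mcp23s08",	/* matches mcp23s08_driver */
		.platform_data	= &board_mcp23s08,
		.max_speed_hz	= 1000000,	/* example rate */
		.bus_num	= 1,		/* example SPI bus */
		.chip_select	= 2,		/* example chipselect */
	},
};

/* called once from the board init code:
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */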
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
new file mode 100644
index 000000000000..92583cd4bffd
--- /dev/null
+++ b/drivers/gpio/pca953x.c
@@ -0,0 +1,308 @@
+/*
+ * pca953x.c - 4/8/16 bit I/O ports
+ *
+ * Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
+ * Copyright (C) 2007 Marvell International Ltd.
+ *
+ * Derived from drivers/i2c/chips/pca9539.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pca953x.h>
+
+#include <asm/gpio.h>
+
+#define PCA953X_INPUT 0
+#define PCA953X_OUTPUT 1
+#define PCA953X_INVERT 2
+#define PCA953X_DIRECTION 3
+
+/* This is temporary - in 2.6.26 i2c_driver_data should replace it. */
+struct pca953x_desc {
+ char name[I2C_NAME_SIZE];
+ unsigned long driver_data;
+};
+
+static const struct pca953x_desc pca953x_descs[] = {
+ { "pca9534", 8, },
+ { "pca9535", 16, },
+ { "pca9536", 4, },
+ { "pca9537", 4, },
+ { "pca9538", 8, },
+ { "pca9539", 16, },
+ /* REVISIT several pca955x parts should work here too */
+};
+
+struct pca953x_chip {
+ unsigned gpio_start;
+ uint16_t reg_output;
+ uint16_t reg_direction;
+
+ struct i2c_client *client;
+ struct gpio_chip gpio_chip;
+};
+
+/* NOTE: we can't currently rely on fault codes to come from SMBus
+ * calls, so we map all errors to EIO here and return zero otherwise.
+ */
+static int pca953x_write_reg(struct pca953x_chip *chip, int reg, uint16_t val)
+{
+ int ret;
+
+ if (chip->gpio_chip.ngpio <= 8)
+ ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+ else
+ ret = i2c_smbus_write_word_data(chip->client, reg << 1, val);
+
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "failed writing register\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int pca953x_read_reg(struct pca953x_chip *chip, int reg, uint16_t *val)
+{
+ int ret;
+
+ if (chip->gpio_chip.ngpio <= 8)
+ ret = i2c_smbus_read_byte_data(chip->client, reg);
+ else
+ ret = i2c_smbus_read_word_data(chip->client, reg << 1);
+
+ if (ret < 0) {
+ dev_err(&chip->client->dev, "failed reading register\n");
+ return -EIO;
+ }
+
+ *val = (uint16_t)ret;
+ return 0;
+}
+
+static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
+{
+ struct pca953x_chip *chip;
+ uint16_t reg_val;
+ int ret;
+
+ chip = container_of(gc, struct pca953x_chip, gpio_chip);
+
+ reg_val = chip->reg_direction | (1u << off);
+ ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+ if (ret)
+ return ret;
+
+ chip->reg_direction = reg_val;
+ return 0;
+}
+
+static int pca953x_gpio_direction_output(struct gpio_chip *gc,
+ unsigned off, int val)
+{
+ struct pca953x_chip *chip;
+ uint16_t reg_val;
+ int ret;
+
+ chip = container_of(gc, struct pca953x_chip, gpio_chip);
+
+ /* set output level */
+ if (val)
+ reg_val = chip->reg_output | (1u << off);
+ else
+ reg_val = chip->reg_output & ~(1u << off);
+
+ ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ if (ret)
+ return ret;
+
+ chip->reg_output = reg_val;
+
+ /* then direction */
+ reg_val = chip->reg_direction & ~(1u << off);
+ ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
+ if (ret)
+ return ret;
+
+ chip->reg_direction = reg_val;
+ return 0;
+}
+
+static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
+{
+ struct pca953x_chip *chip;
+ uint16_t reg_val;
+ int ret;
+
+ chip = container_of(gc, struct pca953x_chip, gpio_chip);
+
+ ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
+ if (ret < 0) {
+ /* NOTE: diagnostic already emitted; that's all we should
+ * do unless gpio_*_value_cansleep() calls become different
+ * from their nonsleeping siblings (and report faults).
+ */
+ return 0;
+ }
+
+ return (reg_val & (1u << off)) ? 1 : 0;
+}
+
+static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val)
+{
+ struct pca953x_chip *chip;
+ uint16_t reg_val;
+ int ret;
+
+ chip = container_of(gc, struct pca953x_chip, gpio_chip);
+
+ if (val)
+ reg_val = chip->reg_output | (1u << off);
+ else
+ reg_val = chip->reg_output & ~(1u << off);
+
+ ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
+ if (ret)
+ return;
+
+ chip->reg_output = reg_val;
+}
+
+static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
+{
+ struct gpio_chip *gc;
+
+ gc = &chip->gpio_chip;
+
+ gc->direction_input = pca953x_gpio_direction_input;
+ gc->direction_output = pca953x_gpio_direction_output;
+ gc->get = pca953x_gpio_get_value;
+ gc->set = pca953x_gpio_set_value;
+
+ gc->base = chip->gpio_start;
+ gc->ngpio = gpios;
+ gc->label = chip->client->name;
+}
+
+static int __devinit pca953x_probe(struct i2c_client *client)
+{
+ struct pca953x_platform_data *pdata;
+ struct pca953x_chip *chip;
+ int ret, i;
+ const struct pca953x_desc *id = NULL;
+
+ pdata = client->dev.platform_data;
+ if (pdata == NULL)
+ return -ENODEV;
+
+ /* this loop vanishes when we get i2c_device_id */
+ for (i = 0; i < ARRAY_SIZE(pca953x_descs); i++)
+ if (!strcmp(pca953x_descs[i].name, client->name)) {
+ id = pca953x_descs + i;
+ break;
+ }
+ if (!id)
+ return -ENODEV;
+
+ chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
+ chip->client = client;
+
+ chip->gpio_start = pdata->gpio_base;
+
+ /* initialize cached registers from their original values.
+ * we can't share this chip with another i2c master.
+ */
+ pca953x_setup_gpio(chip, id->driver_data);
+
+ ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
+ if (ret)
+ goto out_failed;
+
+ ret = pca953x_read_reg(chip, PCA953X_DIRECTION, &chip->reg_direction);
+ if (ret)
+ goto out_failed;
+
+ /* set platform specific polarity inversion */
+ ret = pca953x_write_reg(chip, PCA953X_INVERT, pdata->invert);
+ if (ret)
+ goto out_failed;
+
+
+ ret = gpiochip_add(&chip->gpio_chip);
+ if (ret)
+ goto out_failed;
+
+ if (pdata->setup) {
+ ret = pdata->setup(client, chip->gpio_chip.base,
+ chip->gpio_chip.ngpio, pdata->context);
+ if (ret < 0)
+ dev_warn(&client->dev, "setup failed, %d\n", ret);
+ }
+
+ i2c_set_clientdata(client, chip);
+ return 0;
+
+out_failed:
+ kfree(chip);
+ return ret;
+}
+
+static int pca953x_remove(struct i2c_client *client)
+{
+ struct pca953x_platform_data *pdata = client->dev.platform_data;
+ struct pca953x_chip *chip = i2c_get_clientdata(client);
+ int ret = 0;
+
+ if (pdata->teardown) {
+ ret = pdata->teardown(client, chip->gpio_chip.base,
+ chip->gpio_chip.ngpio, pdata->context);
+ if (ret < 0) {
+ dev_err(&client->dev, "%s failed, %d\n",
+ "teardown", ret);
+ return ret;
+ }
+ }
+
+ ret = gpiochip_remove(&chip->gpio_chip);
+ if (ret) {
+ dev_err(&client->dev, "%s failed, %d\n",
+ "gpiochip_remove()", ret);
+ return ret;
+ }
+
+ kfree(chip);
+ return 0;
+}
+
+static struct i2c_driver pca953x_driver = {
+ .driver = {
+ .name = "pca953x",
+ },
+ .probe = pca953x_probe,
+ .remove = pca953x_remove,
+};
+
+static int __init pca953x_init(void)
+{
+ return i2c_add_driver(&pca953x_driver);
+}
+module_init(pca953x_init);
+
+static void __exit pca953x_exit(void)
+{
+ i2c_del_driver(&pca953x_driver);
+}
+module_exit(pca953x_exit);
+
+MODULE_AUTHOR("eric miao <eric.miao@marvell.com>");
+MODULE_DESCRIPTION("GPIO expander driver for PCA953x");
+MODULE_LICENSE("GPL");
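A corresponding board-wiring sketch for pca953x, assuming new-style i2c_board_info registration; the platform_data field names (gpio_base, invert, setup, teardown, context) are taken from pca953x_probe() and pca953x_remove() above, while the bus number, address, and GPIO base are made-up examples.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>

/* field names taken from pca953x_probe()/pca953x_remove() */
static struct pca953x_platform_data board_pca9539 = {
	.gpio_base	= 224,		/* first GPIO number for this expander */
	.invert		= 0,		/* no input polarity inversion */
};

static struct i2c_board_info board_i2c_expanders[] __initdata = {
	{
		.type		= "pca9539",	/* must match a name in pca953x_descs[] */
		.addr		= 0x74,		/* example 7-bit address */
		.platform_data	= &board_pca9539,
	},
};

/* registered from the board init code, before the I2C adapter appears:
 *	i2c_register_board_info(0, board_i2c_expanders,
 *				ARRAY_SIZE(board_i2c_expanders));
 */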
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/pcf857x.c
new file mode 100644
index 000000000000..c6b3b5378384
--- /dev/null
+++ b/drivers/gpio/pcf857x.c
@@ -0,0 +1,330 @@
+/*
+ * pcf857x - driver for pcf857x, pca857x, and pca967x I2C GPIO expanders
+ *
+ * Copyright (C) 2007 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/i2c/pcf857x.h>
+
+#include <asm/gpio.h>
+
+
+/*
+ * The pcf857x, pca857x, and pca967x chips only expose one read and one
+ * write register. Writing a "one" bit (to match the reset state) lets
+ * that pin be used as an input; it's not an open-drain model, but acts
+ * a bit like one. This is described as "quasi-bidirectional"; read the
+ * chip documentation for details.
+ *
+ * Many other I2C GPIO expander chips (like the pca953x models) have
+ * more complex register models and more conventional circuitry using
+ * push/pull drivers. They often use the same 0x20..0x27 addresses as
+ * pcf857x parts, making the "legacy" I2C driver model problematic.
+ */
+struct pcf857x {
+ struct gpio_chip chip;
+ struct i2c_client *client;
+ unsigned out; /* software latch */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* Talk to 8-bit I/O expander */
+
+static int pcf857x_input8(struct gpio_chip *chip, unsigned offset)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+
+ gpio->out |= (1 << offset);
+ return i2c_smbus_write_byte(gpio->client, gpio->out);
+}
+
+static int pcf857x_get8(struct gpio_chip *chip, unsigned offset)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+ s32 value;
+
+ value = i2c_smbus_read_byte(gpio->client);
+ return (value < 0) ? 0 : (value & (1 << offset));
+}
+
+static int pcf857x_output8(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+ unsigned bit = 1 << offset;
+
+ if (value)
+ gpio->out |= bit;
+ else
+ gpio->out &= ~bit;
+ return i2c_smbus_write_byte(gpio->client, gpio->out);
+}
+
+static void pcf857x_set8(struct gpio_chip *chip, unsigned offset, int value)
+{
+ pcf857x_output8(chip, offset, value);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Talk to 16-bit I/O expander */
+
+static int i2c_write_le16(struct i2c_client *client, u16 word)
+{
+ u8 buf[2] = { word & 0xff, word >> 8, };
+ int status;
+
+ status = i2c_master_send(client, buf, 2);
+ return (status < 0) ? status : 0;
+}
+
+static int i2c_read_le16(struct i2c_client *client)
+{
+ u8 buf[2];
+ int status;
+
+ status = i2c_master_recv(client, buf, 2);
+ if (status < 0)
+ return status;
+ return (buf[1] << 8) | buf[0];
+}
+
+static int pcf857x_input16(struct gpio_chip *chip, unsigned offset)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+
+ gpio->out |= (1 << offset);
+ return i2c_write_le16(gpio->client, gpio->out);
+}
+
+static int pcf857x_get16(struct gpio_chip *chip, unsigned offset)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+ int value;
+
+ value = i2c_read_le16(gpio->client);
+ return (value < 0) ? 0 : (value & (1 << offset));
+}
+
+static int pcf857x_output16(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct pcf857x *gpio = container_of(chip, struct pcf857x, chip);
+ unsigned bit = 1 << offset;
+
+ if (value)
+ gpio->out |= bit;
+ else
+ gpio->out &= ~bit;
+ return i2c_write_le16(gpio->client, gpio->out);
+}
+
+static void pcf857x_set16(struct gpio_chip *chip, unsigned offset, int value)
+{
+ pcf857x_output16(chip, offset, value);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int pcf857x_probe(struct i2c_client *client)
+{
+ struct pcf857x_platform_data *pdata;
+ struct pcf857x *gpio;
+ int status;
+
+ pdata = client->dev.platform_data;
+ if (!pdata)
+ return -ENODEV;
+
+ /* Allocate, initialize, and register this gpio_chip. */
+ gpio = kzalloc(sizeof *gpio, GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->chip.base = pdata->gpio_base;
+ gpio->chip.can_sleep = 1;
+
+ /* NOTE: the OnSemi jlc1562b is also largely compatible with
+ * these parts, notably for output. It has a low-resolution
+ * DAC instead of pin change IRQs; and its inputs can be the
+ * result of comparators.
+ */
+
+ /* 8574 addresses are 0x20..0x27; 8574a uses 0x38..0x3f;
+ * 9670, 9672, 9764, and 9764a use quite a variety.
+ *
+ * NOTE: we don't distinguish here between *4 and *4a parts.
+ */
+ if (strcmp(client->name, "pcf8574") == 0
+ || strcmp(client->name, "pca8574") == 0
+ || strcmp(client->name, "pca9670") == 0
+ || strcmp(client->name, "pca9672") == 0
+ || strcmp(client->name, "pca9674") == 0
+ ) {
+ gpio->chip.ngpio = 8;
+ gpio->chip.direction_input = pcf857x_input8;
+ gpio->chip.get = pcf857x_get8;
+ gpio->chip.direction_output = pcf857x_output8;
+ gpio->chip.set = pcf857x_set8;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE))
+ status = -EIO;
+
+ /* fail if there's no chip present */
+ else
+ status = i2c_smbus_read_byte(client);
+
+ /* '75/'75c addresses are 0x20..0x27, just like the '74;
+ * the '75c doesn't have a current source pulling high.
+ * 9671, 9673, and 9765 use quite a variety of addresses.
+ *
+ * NOTE: we don't distinguish here between '75 and '75c parts.
+ */
+ } else if (strcmp(client->name, "pcf8575") == 0
+ || strcmp(client->name, "pca8575") == 0
+ || strcmp(client->name, "pca9671") == 0
+ || strcmp(client->name, "pca9673") == 0
+ || strcmp(client->name, "pca9675") == 0
+ ) {
+ gpio->chip.ngpio = 16;
+ gpio->chip.direction_input = pcf857x_input16;
+ gpio->chip.get = pcf857x_get16;
+ gpio->chip.direction_output = pcf857x_output16;
+ gpio->chip.set = pcf857x_set16;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ status = -EIO;
+
+ /* fail if there's no chip present */
+ else
+ status = i2c_read_le16(client);
+
+ } else
+ status = -ENODEV;
+
+ if (status < 0)
+ goto fail;
+
+ gpio->chip.label = client->name;
+
+ gpio->client = client;
+ i2c_set_clientdata(client, gpio);
+
+ /* NOTE: these chips have strange "quasi-bidirectional" I/O pins.
+ * We can't actually know whether a pin is configured (a) as output
+ * and driving the signal low, or (b) as input and reporting a low
+ * value ... without knowing the last value written since the chip
+ * came out of reset (if any). We can't read the latched output.
+ *
+ * In short, the only reliable solution for setting up pin direction
+ * is to do it explicitly. The setup() method can do that, but it
+ * may cause transient glitching since it can't know the last value
+ * written (some pins may need to be driven low).
+ *
+ * Using pdata->n_latch avoids that trouble. When left initialized
+ * to zero, our software copy of the "latch" then matches the chip's
+ * all-ones reset state. Otherwise it flags pins to be driven low.
+ */
+ gpio->out = ~pdata->n_latch;
+
+ status = gpiochip_add(&gpio->chip);
+ if (status < 0)
+ goto fail;
+
+ /* NOTE: these chips can issue "some pin-changed" IRQs, which we
+ * don't yet even try to use. Among other issues, the relevant
+ * genirq state isn't available to modular drivers; and most irq
+ * methods can't be called from sleeping contexts.
+ */
+
+ dev_info(&client->dev, "gpios %d..%d on a %s%s\n",
+ gpio->chip.base,
+ gpio->chip.base + gpio->chip.ngpio - 1,
+ client->name,
+ client->irq ? " (irq ignored)" : "");
+
+ /* Let platform code set up the GPIOs and their users.
+ * Now is the first time anyone could use them.
+ */
+ if (pdata->setup) {
+ status = pdata->setup(client,
+ gpio->chip.base, gpio->chip.ngpio,
+ pdata->context);
+ if (status < 0)
+ dev_warn(&client->dev, "setup --> %d\n", status);
+ }
+
+ return 0;
+
+fail:
+ dev_dbg(&client->dev, "probe error %d for '%s'\n",
+ status, client->name);
+ kfree(gpio);
+ return status;
+}
+
+static int pcf857x_remove(struct i2c_client *client)
+{
+ struct pcf857x_platform_data *pdata = client->dev.platform_data;
+ struct pcf857x *gpio = i2c_get_clientdata(client);
+ int status = 0;
+
+ if (pdata->teardown) {
+ status = pdata->teardown(client,
+ gpio->chip.base, gpio->chip.ngpio,
+ pdata->context);
+ if (status < 0) {
+ dev_err(&client->dev, "%s --> %d\n",
+ "teardown", status);
+ return status;
+ }
+ }
+
+ status = gpiochip_remove(&gpio->chip);
+ if (status == 0)
+ kfree(gpio);
+ else
+ dev_err(&client->dev, "%s --> %d\n", "remove", status);
+ return status;
+}
+
+static struct i2c_driver pcf857x_driver = {
+ .driver = {
+ .name = "pcf857x",
+ .owner = THIS_MODULE,
+ },
+ .probe = pcf857x_probe,
+ .remove = pcf857x_remove,
+};
+
+static int __init pcf857x_init(void)
+{
+ return i2c_add_driver(&pcf857x_driver);
+}
+module_init(pcf857x_init);
+
+static void __exit pcf857x_exit(void)
+{
+ i2c_del_driver(&pcf857x_driver);
+}
+module_exit(pcf857x_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Brownell");
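And a sketch for pcf857x, mainly to show the n_latch convention explained in pcf857x_probe(): bits set in n_latch are driven low at probe time, while leaving it zero matches the chip's all-ones reset state. Field names come from the probe code above; the address, GPIO base, and chosen latch bit are made-up examples.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>

/* field names taken from pcf857x_probe() */
static struct pcf857x_platform_data board_pcf8574 = {
	.gpio_base	= 240,
	/* bits set in n_latch are driven low at probe time; leaving it
	 * zero matches the chip's all-ones reset state
	 */
	.n_latch	= (1 << 3),	/* e.g. an active-low enable to park low */
};

static struct i2c_board_info board_pcf_expanders[] __initdata = {
	{
		.type		= "pcf8574",	/* one of the names matched above */
		.addr		= 0x20,		/* example 7-bit address */
		.platform_data	= &board_pcf8574,
	},
};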
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 86c66c345f8b..0c94770b7f83 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -905,7 +905,7 @@ static ssize_t applesmc_key_at_index_store(struct device *dev,
}
static struct led_classdev applesmc_backlight = {
- .name = "smc:kbd_backlight",
+ .name = "smc::kbd_backlight",
.default_trigger = "nand-disk",
.brightness_set = applesmc_brightness_set,
};
diff --git a/drivers/i2c/chips/Kconfig b/drivers/i2c/chips/Kconfig
index bd7082c2443d..b21593f93586 100644
--- a/drivers/i2c/chips/Kconfig
+++ b/drivers/i2c/chips/Kconfig
@@ -54,8 +54,8 @@ config PCF8575
hardware. If unsure, say N.
config SENSORS_PCA9539
- tristate "Philips PCA9539 16-bit I/O port"
- depends on EXPERIMENTAL
+ tristate "Philips PCA9539 16-bit I/O port (DEPRECATED)"
+ depends on EXPERIMENTAL && GPIO_PCA9539 = "n"
help
If you say yes here you get support for the Philips PCA9539
16-bit I/O port.
@@ -63,6 +63,9 @@ config SENSORS_PCA9539
This driver can also be built as a module. If so, the module
will be called pca9539.
+ This driver is deprecated and will be dropped soon. Use
+ drivers/gpio/pca9539.c instead.
+
config SENSORS_PCF8591
tristate "Philips PCF8591"
depends on EXPERIMENTAL
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 45b26ed351cf..ab8fb257528e 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -1009,6 +1009,15 @@ config BLK_DEV_Q40IDE
normally be on; disable it only if you are running a custom hard
drive subsystem through an expansion card.
+config BLK_DEV_PALMCHIP_BK3710
+ tristate "Palmchip bk3710 IDE controller support"
+ depends on ARCH_DAVINCI
+ select BLK_DEV_IDEDMA_PCI
+ help
+ Say Y here if you want to support the onchip IDE controller on the
+ TI DaVinci SoC
+
+
config BLK_DEV_MPC8xx_IDE
tristate "MPC8xx IDE support"
depends on 8xx && (LWMON || IVMS8 || IVML24 || TQM8xxL) && IDE=y && BLK_DEV_IDE=y && !PPC_MERGE
diff --git a/drivers/ide/arm/Makefile b/drivers/ide/arm/Makefile
index 5f63ad216862..936e7b0237f5 100644
--- a/drivers/ide/arm/Makefile
+++ b/drivers/ide/arm/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
obj-$(CONFIG_BLK_DEV_IDE_RAPIDE) += rapide.o
obj-$(CONFIG_BLK_DEV_IDE_BAST) += bast-ide.o
+obj-$(CONFIG_BLK_DEV_PALMCHIP_BK3710) += palm_bk3710.o
ifeq ($(CONFIG_IDE_ARM), m)
obj-m += ide_arm.o
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index fb00f3827ecd..e816b0ffcfe6 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -365,7 +365,7 @@ static void icside_dma_timeout(ide_drive_t *drive)
if (icside_dma_test_irq(drive))
return;
- ide_dump_status(drive, "DMA timeout", HWIF(drive)->INB(IDE_STATUS_REG));
+ ide_dump_status(drive, "DMA timeout", ide_read_status(drive));
icside_dma_end(drive);
}
diff --git a/drivers/ide/arm/palm_bk3710.c b/drivers/ide/arm/palm_bk3710.c
new file mode 100644
index 000000000000..c3069970a012
--- /dev/null
+++ b/drivers/ide/arm/palm_bk3710.c
@@ -0,0 +1,395 @@
+/*
+ * Palmchip bk3710 IDE controller
+ *
+ * Copyright (C) 2006 Texas Instruments.
+ * Copyright (C) 2007 MontaVista Software, Inc., <source@mvista.com>
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ----------------------------------------------------------------------------
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/hdreg.h>
+#include <linux/ide.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+/* Offset of the primary interface registers */
+#define IDE_PALM_ATA_PRI_REG_OFFSET 0x1F0
+
+/* Primary Control Offset */
+#define IDE_PALM_ATA_PRI_CTL_OFFSET 0x3F6
+
+/*
+ * PalmChip 3710 IDE Controller UDMA timing structure Definition
+ */
+struct palm_bk3710_udmatiming {
+ unsigned int rptime; /* Ready to pause time */
+ unsigned int cycletime; /* Cycle Time */
+};
+
+#define BK3710_BMICP 0x00
+#define BK3710_BMISP 0x02
+#define BK3710_BMIDTP 0x04
+#define BK3710_BMICS 0x08
+#define BK3710_BMISS 0x0A
+#define BK3710_BMIDTS 0x0C
+#define BK3710_IDETIMP 0x40
+#define BK3710_IDETIMS 0x42
+#define BK3710_SIDETIM 0x44
+#define BK3710_SLEWCTL 0x45
+#define BK3710_IDESTATUS 0x47
+#define BK3710_UDMACTL 0x48
+#define BK3710_UDMATIM 0x4A
+#define BK3710_MISCCTL 0x50
+#define BK3710_REGSTB 0x54
+#define BK3710_REGRCVR 0x58
+#define BK3710_DATSTB 0x5C
+#define BK3710_DATRCVR 0x60
+#define BK3710_DMASTB 0x64
+#define BK3710_DMARCVR 0x68
+#define BK3710_UDMASTB 0x6C
+#define BK3710_UDMATRP 0x70
+#define BK3710_UDMAENV 0x74
+#define BK3710_IORDYTMP 0x78
+#define BK3710_IORDYTMS 0x7C
+
+#include "../ide-timing.h"
+
+static long ide_palm_clk;
+
+static const struct palm_bk3710_udmatiming palm_bk3710_udmatimings[6] = {
+ {160, 240}, /* UDMA Mode 0 */
+ {125, 160}, /* UDMA Mode 1 */
+ {100, 120}, /* UDMA Mode 2 */
+ {100, 90}, /* UDMA Mode 3 */
+ {85, 60}, /* UDMA Mode 4 */
+};
+
+static struct clk *ideclkp;
+
+static void palm_bk3710_setudmamode(void __iomem *base, unsigned int dev,
+ unsigned int mode)
+{
+ u8 tenv, trp, t0;
+ u32 val32;
+ u16 val16;
+
+ /* DMA Data Setup */
+ t0 = (palm_bk3710_udmatimings[mode].cycletime + ide_palm_clk - 1)
+ / ide_palm_clk - 1;
+ tenv = (20 + ide_palm_clk - 1) / ide_palm_clk - 1;
+ trp = (palm_bk3710_udmatimings[mode].rptime + ide_palm_clk - 1)
+ / ide_palm_clk - 1;
+
+ /* udmatim Register */
+ val16 = readw(base + BK3710_UDMATIM) & (dev ? 0xFF0F : 0xFFF0);
+ val16 |= (mode << (dev ? 4 : 0));
+ writew(val16, base + BK3710_UDMATIM);
+
+ /* udmastb Ultra DMA Access Strobe Width */
+ val32 = readl(base + BK3710_UDMASTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= (t0 << (dev ? 8 : 0));
+ writel(val32, base + BK3710_UDMASTB);
+
+ /* udmatrp Ultra DMA Ready to Pause Time */
+ val32 = readl(base + BK3710_UDMATRP) & (0xFF << (dev ? 0 : 8));
+ val32 |= (trp << (dev ? 8 : 0));
+ writel(val32, base + BK3710_UDMATRP);
+
+ /* udmaenv Ultra DMA envelope Time */
+ val32 = readl(base + BK3710_UDMAENV) & (0xFF << (dev ? 0 : 8));
+ val32 |= (tenv << (dev ? 8 : 0));
+ writel(val32, base + BK3710_UDMAENV);
+
+ /* Enable UDMA for Device */
+ val16 = readw(base + BK3710_UDMACTL) | (1 << dev);
+ writew(val16, base + BK3710_UDMACTL);
+}
+
+static void palm_bk3710_setdmamode(void __iomem *base, unsigned int dev,
+ unsigned short min_cycle,
+ unsigned int mode)
+{
+ u8 td, tkw, t0;
+ u32 val32;
+ u16 val16;
+ struct ide_timing *t;
+ int cycletime;
+
+ t = ide_timing_find_mode(mode);
+ cycletime = max_t(int, t->cycle, min_cycle);
+
+ /* DMA Data Setup */
+ t0 = (cycletime + ide_palm_clk - 1) / ide_palm_clk;
+ td = (t->active + ide_palm_clk - 1) / ide_palm_clk;
+ tkw = t0 - td - 1;
+ td -= 1;
+
+ val32 = readl(base + BK3710_DMASTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= (td << (dev ? 8 : 0));
+ writel(val32, base + BK3710_DMASTB);
+
+ val32 = readl(base + BK3710_DMARCVR) & (0xFF << (dev ? 0 : 8));
+ val32 |= (tkw << (dev ? 8 : 0));
+ writel(val32, base + BK3710_DMARCVR);
+
+ /* Disable UDMA for Device */
+ val16 = readw(base + BK3710_UDMACTL) & ~(1 << dev);
+ writew(val16, base + BK3710_UDMACTL);
+}
+
+static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
+ unsigned int dev, unsigned int cycletime,
+ unsigned int mode)
+{
+ u8 t2, t2i, t0;
+ u32 val32;
+ struct ide_timing *t;
+
+ /* PIO Data Setup */
+ t0 = (cycletime + ide_palm_clk - 1) / ide_palm_clk;
+ t2 = (ide_timing_find_mode(XFER_PIO_0 + mode)->active +
+ ide_palm_clk - 1) / ide_palm_clk;
+
+ t2i = t0 - t2 - 1;
+ t2 -= 1;
+
+ val32 = readl(base + BK3710_DATSTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= (t2 << (dev ? 8 : 0));
+ writel(val32, base + BK3710_DATSTB);
+
+ val32 = readl(base + BK3710_DATRCVR) & (0xFF << (dev ? 0 : 8));
+ val32 |= (t2i << (dev ? 8 : 0));
+ writel(val32, base + BK3710_DATRCVR);
+
+ if (mate && mate->present) {
+ u8 mode2 = ide_get_best_pio_mode(mate, 255, 4);
+
+ if (mode2 < mode)
+ mode = mode2;
+ }
+
+ /* TASKFILE Setup */
+ t = ide_timing_find_mode(XFER_PIO_0 + mode);
+ t0 = (t->cyc8b + ide_palm_clk - 1) / ide_palm_clk;
+ t2 = (t->act8b + ide_palm_clk - 1) / ide_palm_clk;
+
+ t2i = t0 - t2 - 1;
+ t2 -= 1;
+
+ val32 = readl(base + BK3710_REGSTB) & (0xFF << (dev ? 0 : 8));
+ val32 |= (t2 << (dev ? 8 : 0));
+ writel(val32, base + BK3710_REGSTB);
+
+ val32 = readl(base + BK3710_REGRCVR) & (0xFF << (dev ? 0 : 8));
+ val32 |= (t2i << (dev ? 8 : 0));
+ writel(val32, base + BK3710_REGRCVR);
+}
+
+static void palm_bk3710_set_dma_mode(ide_drive_t *drive, u8 xferspeed)
+{
+ int is_slave = drive->dn & 1;
+ void __iomem *base = (void *)drive->hwif->dma_base;
+
+ if (xferspeed >= XFER_UDMA_0) {
+ palm_bk3710_setudmamode(base, is_slave,
+ xferspeed - XFER_UDMA_0);
+ } else {
+ palm_bk3710_setdmamode(base, is_slave, drive->id->eide_dma_min,
+ xferspeed);
+ }
+}
+
+static void palm_bk3710_set_pio_mode(ide_drive_t *drive, u8 pio)
+{
+ unsigned int cycle_time;
+ int is_slave = drive->dn & 1;
+ ide_drive_t *mate;
+ void __iomem *base = (void *)drive->hwif->dma_base;
+
+ /*
+ * Obtain the drive PIO data for tuning the Palm Chip registers
+ */
+ cycle_time = ide_pio_cycle_time(drive, pio);
+ mate = ide_get_paired_drive(drive);
+ palm_bk3710_setpiomode(base, mate, is_slave, cycle_time, pio);
+}
+
+static void __devinit palm_bk3710_chipinit(void __iomem *base)
+{
+ /*
+ * enable the reset_en of the ATA controller so that when the ATA
+ * signals are brought out by writing into the device config, the
+ * por_n signal is not 'Z' and has a stable value.
+ */
+ writel(0x0300, base + BK3710_MISCCTL);
+
+ /* wait for some time and deassert the reset of ATA Device. */
+ mdelay(100);
+
+ /* Deassert the Reset */
+ writel(0x0200, base + BK3710_MISCCTL);
+
+ /*
+ * Program the IDETIMP Register Value based on the following assumptions
+ *
+ * (ATA_IDETIMP_IDEEN , ENABLE ) |
+ * (ATA_IDETIMP_SLVTIMEN , DISABLE) |
+ * (ATA_IDETIMP_RDYSMPL , 70NS) |
+ * (ATA_IDETIMP_RDYRCVRY , 50NS) |
+ * (ATA_IDETIMP_DMAFTIM1 , PIOCOMP) |
+ * (ATA_IDETIMP_PREPOST1 , DISABLE) |
+ * (ATA_IDETIMP_RDYSEN1 , DISABLE) |
+ * (ATA_IDETIMP_PIOFTIM1 , DISABLE) |
+ * (ATA_IDETIMP_DMAFTIM0 , PIOCOMP) |
+ * (ATA_IDETIMP_PREPOST0 , DISABLE) |
+ * (ATA_IDETIMP_RDYSEN0 , DISABLE) |
+ * (ATA_IDETIMP_PIOFTIM0 , DISABLE)
+ */
+ writew(0xB388, base + BK3710_IDETIMP);
+
+ /*
+ * Configure SIDETIM Register
+ * (ATA_SIDETIM_RDYSMPS1 ,120NS ) |
+ * (ATA_SIDETIM_RDYRCYS1 ,120NS )
+ */
+ writeb(0, base + BK3710_SIDETIM);
+
+ /*
+ * UDMACTL Ultra-ATA DMA Control
+ * (ATA_UDMACTL_UDMAP1 , 0 ) |
+ * (ATA_UDMACTL_UDMAP0 , 0 )
+ *
+ */
+ writew(0, base + BK3710_UDMACTL);
+
+ /*
+ * MISCCTL Miscellaneous Control Register
+ * (ATA_MISCCTL_RSTMODEP , 1) |
+ * (ATA_MISCCTL_RESETP , 0) |
+ * (ATA_MISCCTL_TIMORIDE , 1)
+ */
+ writel(0x201, base + BK3710_MISCCTL);
+
+ /*
+ * IORDYTMP IORDY Timer for Primary Register
+ * (ATA_IORDYTMP_IORDYTMP , 0xffff )
+ */
+ writel(0xFFFF, base + BK3710_IORDYTMP);
+
+ /*
+ * Configure BMISP Register
+ * (ATA_BMISP_DMAEN1 , DISABLE ) |
+ * (ATA_BMISP_DMAEN0 , DISABLE ) |
+ * (ATA_BMISP_IORDYINT , CLEAR) |
+ * (ATA_BMISP_INTRSTAT , CLEAR) |
+ * (ATA_BMISP_DMAERROR , CLEAR)
+ */
+ writew(0, base + BK3710_BMISP);
+
+ palm_bk3710_setpiomode(base, NULL, 0, 600, 0);
+ palm_bk3710_setpiomode(base, NULL, 1, 600, 0);
+}
+static int __devinit palm_bk3710_probe(struct platform_device *pdev)
+{
+ hw_regs_t ide_ctlr_info;
+ int index = 0;
+ int pribase;
+ struct clk *clkp;
+ struct resource *mem, *irq;
+ ide_hwif_t *hwif;
+ void __iomem *base;
+
+ clkp = clk_get(NULL, "IDECLK");
+ if (IS_ERR(clkp))
+ return -ENODEV;
+
+ ideclkp = clkp;
+ clk_enable(ideclkp);
+ ide_palm_clk = clk_get_rate(ideclkp)/100000;
+ ide_palm_clk = (10000/ide_palm_clk) + 1;
+ /* Register the IDE interface with Linux ATA Interface */
+ memset(&ide_ctlr_info, 0, sizeof(ide_ctlr_info));
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL) {
+ printk(KERN_ERR "failed to get memory region resource\n");
+ return -ENODEV;
+ }
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq == NULL) {
+ printk(KERN_ERR "failed to get IRQ resource\n");
+ return -ENODEV;
+ }
+
+ base = (void *)mem->start;
+
+ /* Configure the Palm Chip controller */
+ palm_bk3710_chipinit(base);
+
+ pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET;
+ for (index = 0; index < IDE_NR_PORTS - 2; index++)
+ ide_ctlr_info.io_ports[index] = pribase + index;
+ ide_ctlr_info.io_ports[IDE_CONTROL_OFFSET] = mem->start +
+ IDE_PALM_ATA_PRI_CTL_OFFSET;
+ ide_ctlr_info.irq = irq->start;
+ ide_ctlr_info.chipset = ide_palm3710;
+
+ if (ide_register_hw(&ide_ctlr_info, NULL, &hwif) < 0) {
+ printk(KERN_WARNING "Palm Chip BK3710 IDE Register Fail\n");
+ return -ENODEV;
+ }
+
+ hwif->set_pio_mode = &palm_bk3710_set_pio_mode;
+ hwif->set_dma_mode = &palm_bk3710_set_dma_mode;
+ hwif->mmio = 1;
+ default_hwif_mmiops(hwif);
+ hwif->cbl = ATA_CBL_PATA80;
+ hwif->ultra_mask = 0x1f; /* Ultra DMA Mode 4 Max
+ (input clk 99MHz) */
+ hwif->mwdma_mask = 0x7;
+ hwif->drives[0].autotune = 1;
+ hwif->drives[1].autotune = 1;
+
+ ide_setup_dma(hwif, mem->start);
+
+ return 0;
+}
+
+static struct platform_driver platform_bk_driver = {
+ .driver = {
+ .name = "palm_bk3710",
+ },
+ .probe = palm_bk3710_probe,
+ .remove = NULL,
+};
+
+static int __init palm_bk3710_init(void)
+{
+ return platform_driver_register(&platform_bk_driver);
+}
+
+module_init(palm_bk3710_init);
+MODULE_LICENSE("GPL");
+
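For completeness, a sketch of the platform device a DaVinci board file would register for this controller; the driver above matches on the name "palm_bk3710" and pulls one IORESOURCE_MEM and one IORESOURCE_IRQ resource plus a clock named "IDECLK". The base address and IRQ number below are placeholders, not the SoC's real values.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* base address and IRQ are placeholders; use the SoC's real values */
static struct resource bk3710_resources[] = {
	{
		.start	= 0x01c66000,
		.end	= 0x01c66000 + 0x7ff,
		.flags	= IORESOURCE_MEM,	/* found via platform_get_resource() */
	},
	{
		.start	= 22,
		.end	= 22,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device bk3710_device = {
	.name		= "palm_bk3710",	/* matches platform_bk_driver.name */
	.id		= -1,
	.resource	= bk3710_resources,
	.num_resources	= ARRAY_SIZE(bk3710_resources),
};

/* registered from the board init code:
 *	platform_device_register(&bk3710_device);
 * The board clock framework must also expose a clock named "IDECLK",
 * since the probe routine does clk_get(NULL, "IDECLK").
 */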
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 00587a8c2ba1..e79bf8f9b7db 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -753,6 +753,25 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
}
+static void __init cris_setup_ports(hw_regs_t *hw, unsigned long base)
+{
+ int i;
+
+ memset(hw, 0, sizeof(*hw));
+
+ for (i = 0; i <= 7; i++)
+ hw->io_ports[i] = base + cris_ide_reg_addr(i, 0, 1);
+
+ /*
+ * the IDE control register is at ATA address 6,
+ * with CS1 active instead of CS0
+ */
+ hw->io_ports[IDE_CONTROL_OFFSET] = base + cris_ide_reg_addr(6, 1, 0);
+
+ hw->irq = ide_default_irq(0);
+ hw->ack_intr = cris_ide_ack_intr;
+}
+
static const struct ide_port_info cris_port_info __initdata = {
.chipset = ide_etrax100,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
@@ -765,24 +784,16 @@ static const struct ide_port_info cris_port_info __initdata = {
static int __init init_e100_ide(void)
{
hw_regs_t hw;
- int ide_offsets[IDE_NR_PORTS], h, i;
+ int h;
u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
printk("ide: ETRAX FS built-in ATA DMA controller\n");
- for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
- ide_offsets[i] = cris_ide_reg_addr(i, 0, 1);
-
- /* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */
- ide_offsets[IDE_CONTROL_OFFSET] = cris_ide_reg_addr(6, 1, 0);
-
for (h = 0; h < 4; h++) {
ide_hwif_t *hwif = NULL;
- ide_setup_ports(&hw, cris_ide_base_address(h),
- ide_offsets,
- 0, 0, cris_ide_ack_intr,
- ide_default_irq(0));
+ cris_setup_ports(&hw, cris_ide_base_address(h));
+
hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
if (hwif == NULL)
continue;
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index 25aaeae1e830..e07b189f3ec8 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -171,7 +171,7 @@ err:
static acpi_handle ide_acpi_hwif_get_handle(ide_hwif_t *hwif)
{
struct device *dev = hwif->gendev.parent;
- acpi_handle dev_handle;
+ acpi_handle uninitialized_var(dev_handle);
acpi_integer pcidevfn;
acpi_handle chan_handle;
int err;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index ee4d458e2bbf..5e42c19a03e3 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -295,7 +295,8 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
int stat, err, sense_key;
/* Check for errors. */
- stat = HWIF(drive)->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
+
if (stat_ret)
*stat_ret = stat;
@@ -303,7 +304,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
return 0;
/* Get the IDE error register. */
- err = HWIF(drive)->INB(IDE_ERROR_REG);
+ err = ide_read_error(drive);
sense_key = err >> 4;
if (rq == NULL) {
@@ -692,7 +693,7 @@ int ide_cd_check_ireason(ide_drive_t *drive, int len, int ireason, int rw)
/* Some drives (ASUS) seem to tell us that status
* info is available. just get it and ignore.
*/
- (void) HWIF(drive)->INB(IDE_STATUS_REG);
+ (void)ide_read_status(drive);
return 0;
} else {
/* Drive wants a command packet, or invalid ireason... */
@@ -1326,7 +1327,7 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
if (blk_fs_request(rq)) {
if (info->cd_flags & IDE_CD_FLAG_SEEKING) {
unsigned long elapsed = jiffies - info->start_seek;
- int stat = HWIF(drive)->INB(IDE_STATUS_REG);
+ int stat = ide_read_status(drive);
if ((stat & SEEK_STAT) != SEEK_STAT) {
if (elapsed < IDECD_SEEK_TIMEOUT) {
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 3cf59f2c3928..a4bb32883c6b 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -147,7 +147,8 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
u8 stat = 0, dma_stat = 0;
dma_stat = HWIF(drive)->ide_dma_end(drive);
- stat = HWIF(drive)->INB(IDE_STATUS_REG); /* get drive status */
+ stat = ide_read_status(drive);
+
if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
if (!dma_stat) {
struct request *rq = HWGROUP(drive)->rq;
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index f8fe6ee128f3..faf22d716f80 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -465,7 +465,7 @@ static void idefloppy_retry_pc(ide_drive_t *drive)
idefloppy_pc_t *pc;
struct request *rq;
- (void)drive->hwif->INB(IDE_ERROR_REG);
+ (void)ide_read_error(drive);
pc = idefloppy_next_pc_storage(drive);
rq = idefloppy_next_rq_storage(drive);
idefloppy_create_request_sense_cmd(pc);
@@ -501,7 +501,7 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive)
}
/* Clear the interrupt */
- stat = drive->hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
/* No more interrupts */
if ((stat & DRQ_STAT) == 0) {
@@ -1246,7 +1246,7 @@ static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg)
u8 stat;
local_irq_save(flags);
- stat = drive->hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
local_irq_restore(flags);
progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000;
diff --git a/drivers/ide/ide-generic.c b/drivers/ide/ide-generic.c
index be469dbbe8fb..709b9e4d2871 100644
--- a/drivers/ide/ide-generic.c
+++ b/drivers/ide/ide-generic.c
@@ -20,8 +20,14 @@ static int __init ide_generic_init(void)
if (ide_hwifs[0].io_ports[IDE_DATA_OFFSET])
ide_get_lock(NULL, NULL); /* for atari only */
- for (i = 0; i < MAX_HWIFS; i++)
- idx[i] = ide_hwifs[i].present ? 0xff : i;
+ for (i = 0; i < MAX_HWIFS; i++) {
+ ide_hwif_t *hwif = &ide_hwifs[i];
+
+ if (hwif->io_ports[IDE_DATA_OFFSET] && !hwif->present)
+ idx[i] = i;
+ else
+ idx[i] = 0xff;
+ }
ide_device_add_all(idx, NULL);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 4bddef0c0b96..3addbe478d26 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -466,7 +466,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
return ide_stopped;
}
- if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
+ if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
rq->errors |= ERROR_RESET;
if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
@@ -493,7 +493,7 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
/* add decoding error stuff */
}
- if (hwif->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
+ if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
hwif->OUTB(WIN_IDLEIMMEDIATE, IDE_COMMAND_REG);
@@ -821,9 +821,8 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
#ifdef DEBUG
printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
- ide_end_drive_cmd(drive,
- hwif->INB(IDE_STATUS_REG),
- hwif->INB(IDE_ERROR_REG));
+ ide_end_drive_cmd(drive, ide_read_status(drive), ide_read_error(drive));
+
return ide_stopped;
}
@@ -1231,7 +1230,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
(void)HWIF(drive)->ide_dma_end(drive);
ret = ide_error(drive, "dma timeout error",
- hwif->INB(IDE_STATUS_REG));
+ ide_read_status(drive));
} else {
printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
hwif->dma_timeout(drive);
@@ -1355,7 +1354,8 @@ void ide_timer_expiry (unsigned long data)
startstop = ide_dma_timeout_retry(drive, wait);
} else
startstop =
- ide_error(drive, "irq timeout", hwif->INB(IDE_STATUS_REG));
+ ide_error(drive, "irq timeout",
+ ide_read_status(drive));
}
drive->service_time = jiffies - drive->service_start;
spin_lock_irq(&ide_lock);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index a95178f5e1bb..c32e759df208 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -430,10 +430,10 @@ int drive_is_ready (ide_drive_t *drive)
* about possible isa-pnp and pci-pnp issues yet.
*/
if (IDE_CONTROL_REG)
- stat = hwif->INB(IDE_ALTSTATUS_REG);
+ stat = ide_read_altstatus(drive);
else
/* Note: this may clear a pending IRQ!! */
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (stat & BUSY_STAT)
/* drive busy: definitely not interrupting */
@@ -458,23 +458,24 @@ EXPORT_SYMBOL(drive_is_ready);
*/
static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
{
- ide_hwif_t *hwif = drive->hwif;
unsigned long flags;
int i;
u8 stat;
udelay(1); /* spec allows drive 400ns to assert "BUSY" */
- if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
+ stat = ide_read_status(drive);
+
+ if (stat & BUSY_STAT) {
local_irq_set(flags);
timeout += jiffies;
- while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
+ while ((stat = ide_read_status(drive)) & BUSY_STAT) {
if (time_after(jiffies, timeout)) {
/*
* One last read after the timeout in case
* heavy interrupt load made us not make any
* progress during the timeout..
*/
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (!(stat & BUSY_STAT))
break;
@@ -494,7 +495,9 @@ static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long ti
*/
for (i = 0; i < 10; i++) {
udelay(1);
- if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), good, bad)) {
+ stat = ide_read_status(drive);
+
+ if (OK_STAT(stat, good, bad)) {
*rstat = stat;
return 0;
}
@@ -617,6 +620,7 @@ int ide_driveid_update(ide_drive_t *drive)
ide_hwif_t *hwif = drive->hwif;
struct hd_driveid *id;
unsigned long timeout, flags;
+ u8 stat;
/*
* Re-read drive->id for possible DMA mode
@@ -633,10 +637,15 @@ int ide_driveid_update(ide_drive_t *drive)
SELECT_MASK(drive, 0);
return 0; /* drive timed-out */
}
+
msleep(50); /* give drive a breather */
- } while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT);
+ stat = ide_read_altstatus(drive);
+ } while (stat & BUSY_STAT);
+
msleep(50); /* wait for IRQ and DRQ_STAT */
- if (!OK_STAT(hwif->INB(IDE_STATUS_REG),DRQ_STAT,BAD_R_STAT)) {
+ stat = ide_read_status(drive);
+
+ if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
SELECT_MASK(drive, 0);
printk("%s: CHECK for good STATUS\n", drive->name);
return 0;
@@ -649,7 +658,7 @@ int ide_driveid_update(ide_drive_t *drive)
return 0;
}
ata_input_data(drive, id, SECTOR_WORDS);
- (void) hwif->INB(IDE_STATUS_REG); /* clear drive IRQ */
+ (void)ide_read_status(drive); /* clear drive IRQ */
local_irq_enable();
local_irq_restore(flags);
ide_fix_driveid(id);
@@ -850,17 +859,16 @@ static ide_startstop_t do_reset1 (ide_drive_t *, int);
static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
{
ide_hwgroup_t *hwgroup = HWGROUP(drive);
- ide_hwif_t *hwif = HWIF(drive);
u8 stat;
SELECT_DRIVE(drive);
udelay (10);
+ stat = ide_read_status(drive);
- if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
+ if (OK_STAT(stat, 0, BUSY_STAT))
printk("%s: ATAPI reset complete\n", drive->name);
- } else {
+ else {
if (time_before(jiffies, hwgroup->poll_timeout)) {
- BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
/* continue polling */
return ide_started;
@@ -898,9 +906,10 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
}
}
- if (!OK_STAT(tmp = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
+ tmp = ide_read_status(drive);
+
+ if (!OK_STAT(tmp, 0, BUSY_STAT)) {
if (time_before(jiffies, hwgroup->poll_timeout)) {
- BUG_ON(HWGROUP(drive)->handler != NULL);
ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
/* continue polling */
return ide_started;
@@ -909,7 +918,9 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
drive->failures++;
} else {
printk("%s: reset: ", hwif->name);
- if ((tmp = hwif->INB(IDE_ERROR_REG)) == 1) {
+ tmp = ide_read_error(drive);
+
+ if (tmp == 1) {
printk("success\n");
drive->failures = 0;
} else {
diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c
index b42940d8bf70..1ff676cc6473 100644
--- a/drivers/ide/ide-lib.c
+++ b/drivers/ide/ide-lib.c
@@ -578,7 +578,7 @@ u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
}
printk("}\n");
if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) {
- err = drive->hwif->INB(IDE_ERROR_REG);
+ err = ide_read_error(drive);
printk("%s: %s: error=0x%02x ", drive->name, msg, err);
if (drive->media == ide_disk)
ide_dump_ata_error(drive, err);
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 9c07bdb68d1a..6daea896c5db 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -264,8 +264,7 @@ err_misc:
static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
{
ide_hwif_t *hwif = HWIF(drive);
- int rc;
- unsigned long hd_status;
+ int use_altstatus = 0, rc;
unsigned long timeout;
u8 s = 0, a = 0;
@@ -273,19 +272,17 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
msleep(50);
if (IDE_CONTROL_REG) {
- a = hwif->INB(IDE_ALTSTATUS_REG);
- s = hwif->INB(IDE_STATUS_REG);
- if ((a ^ s) & ~INDEX_STAT) {
- printk(KERN_INFO "%s: probing with STATUS(0x%02x) instead of "
- "ALTSTATUS(0x%02x)\n", drive->name, s, a);
+ a = ide_read_altstatus(drive);
+ s = ide_read_status(drive);
+ if ((a ^ s) & ~INDEX_STAT)
/* ancient Seagate drives, broken interfaces */
- hd_status = IDE_STATUS_REG;
- } else {
+ printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
+ "instead of ALTSTATUS(0x%02x)\n",
+ drive->name, s, a);
+ else
/* use non-intrusive polling */
- hd_status = IDE_ALTSTATUS_REG;
- }
- } else
- hd_status = IDE_STATUS_REG;
+ use_altstatus = 1;
+ }
/* set features register for atapi
* identify command to be sure of reply
@@ -306,11 +303,15 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
}
/* give drive a breather */
msleep(50);
- } while ((hwif->INB(hd_status)) & BUSY_STAT);
+ s = use_altstatus ? ide_read_altstatus(drive)
+ : ide_read_status(drive);
+ } while (s & BUSY_STAT);
/* wait for IRQ and DRQ_STAT */
msleep(50);
- if (OK_STAT((hwif->INB(IDE_STATUS_REG)), DRQ_STAT, BAD_R_STAT)) {
+ s = ide_read_status(drive);
+
+ if (OK_STAT(s, DRQ_STAT, BAD_R_STAT)) {
unsigned long flags;
/* local CPU only; some systems need this */
@@ -320,7 +321,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
/* drive responded with ID */
rc = 0;
/* clear drive IRQ */
- (void) hwif->INB(IDE_STATUS_REG);
+ (void)ide_read_status(drive);
local_irq_restore(flags);
} else {
/* drive refused ID */
@@ -367,7 +368,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
ide_set_irq(drive, 0);
/* clear drive IRQ */
- (void) hwif->INB(IDE_STATUS_REG);
+ (void)ide_read_status(drive);
udelay(5);
irq = probe_irq_off(cookie);
if (!hwif->irq) {
@@ -455,7 +456,9 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
return 3;
}
- if (OK_STAT((hwif->INB(IDE_STATUS_REG)), READY_STAT, BUSY_STAT) ||
+ stat = ide_read_status(drive);
+
+ if (OK_STAT(stat, READY_STAT, BUSY_STAT) ||
drive->present || cmd == WIN_PIDENTIFY) {
/* send cmd and wait */
if ((rc = try_to_identify(drive, cmd))) {
@@ -463,7 +466,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
rc = try_to_identify(drive,cmd);
}
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (stat == (BUSY_STAT | READY_STAT))
return 4;
@@ -482,7 +485,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
}
/* ensure drive IRQ is clear */
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (rc == 1)
printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
@@ -496,7 +499,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
SELECT_DRIVE(&hwif->drives[0]);
msleep(50);
/* ensure drive irq is clear */
- (void) hwif->INB(IDE_STATUS_REG);
+ (void)ide_read_status(drive);
}
return rc;
}
@@ -521,7 +524,7 @@ static void enable_nest (ide_drive_t *drive)
msleep(50);
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (!OK_STAT(stat, 0, BAD_STAT))
printk(KERN_CONT "failed (status = 0x%02x)\n", stat);
@@ -1046,7 +1049,7 @@ static int init_irq (ide_hwif_t *hwif)
*/
if (!match || match->irq != hwif->irq) {
int sa = 0;
-#if defined(__mc68000__) || defined(CONFIG_APUS)
+#if defined(__mc68000__)
sa = IRQF_SHARED;
#endif /* __mc68000__ || CONFIG_APUS */
@@ -1069,7 +1072,7 @@ static int init_irq (ide_hwif_t *hwif)
hwif->rqsize = 65536;
}
-#if !defined(__mc68000__) && !defined(CONFIG_APUS)
+#if !defined(__mc68000__)
printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
hwif->io_ports[IDE_DATA_OFFSET],
hwif->io_ports[IDE_DATA_OFFSET]+7,
@@ -1077,7 +1080,7 @@ static int init_irq (ide_hwif_t *hwif)
#else
printk("%s at 0x%08lx on irq %d", hwif->name,
hwif->io_ports[IDE_DATA_OFFSET], hwif->irq);
-#endif /* __mc68000__ && CONFIG_APUS */
+#endif /* __mc68000__ */
if (match)
printk(" (%sed with %s)",
hwif->sharing_irq ? "shar" : "serializ", match->name);
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 975c0ff0f438..bab88ca7f7ec 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -65,6 +65,7 @@ static int proc_ide_read_imodel
case ide_4drives: name = "4drives"; break;
case ide_pmac: name = "mac-io"; break;
case ide_au1xxx: name = "au1xxx"; break;
+ case ide_palm3710: name = "palm3710"; break;
case ide_etrax100: name = "etrax100"; break;
case ide_acorn: name = "acorn"; break;
default: name = "(unknown)"; break;
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index bf40d8c824ad..49dd2e7bae7a 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -15,7 +15,7 @@
* Documentation/ide/ChangeLog.ide-tape.1995-2002
*/
-#define IDETAPE_VERSION "1.19"
+#define IDETAPE_VERSION "1.20"
#include <linux/module.h>
#include <linux/types.h>
@@ -39,63 +39,70 @@
#include <scsi/scsi.h>
#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
+#include <linux/irq.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
#include <linux/mtio.h>
+enum {
+ /* output errors only */
+ DBG_ERR = (1 << 0),
+ /* output all sense key/asc */
+ DBG_SENSE = (1 << 1),
+ /* info regarding all chrdev-related procedures */
+ DBG_CHRDEV = (1 << 2),
+ /* all remaining procedures */
+ DBG_PROCS = (1 << 3),
+ /* buffer alloc info (pc_stack & rq_stack) */
+ DBG_PCRQ_STACK = (1 << 4),
+};
+
+/* define to see debug info */
+#define IDETAPE_DEBUG_LOG 0
+
+#if IDETAPE_DEBUG_LOG
+#define debug_log(lvl, fmt, args...) \
+{ \
+ if (tape->debug_mask & lvl) \
+ printk(KERN_INFO "ide-tape: " fmt, ## args); \
+}
+#else
+#define debug_log(lvl, fmt, args...) do {} while (0)
+#endif
+
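
For context, a minimal standalone sketch of the masked-logging pattern the new
debug_log() macro implements. Unlike the driver macro, which picks up the local
"tape" variable from the enclosing scope and compiles away entirely when
IDETAPE_DEBUG_LOG is 0, this sketch passes the tape pointer explicitly;
struct tape_stub and main() are invented for the example:

#include <stdio.h>

enum { DBG_ERR = 1 << 0, DBG_SENSE = 1 << 1, DBG_CHRDEV = 1 << 2 };

struct tape_stub { unsigned int debug_mask; };  /* stand-in for idetape_tape_t */

#define debug_log(tape, lvl, fmt, ...) \
do { \
	if ((tape)->debug_mask & (lvl)) \
		printf("ide-tape: " fmt, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	struct tape_stub t = { .debug_mask = DBG_ERR | DBG_SENSE };

	debug_log(&t, DBG_SENSE, "sense key = %x\n", 0x3);        /* printed */
	debug_log(&t, DBG_CHRDEV, "chrdev open, minor %d\n", 0);  /* filtered out */
	return 0;
}
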
/**************************** Tunable parameters *****************************/
/*
- * Pipelined mode parameters.
+ * Pipelined mode parameters.
*
- * We try to use the minimum number of stages which is enough to
- * keep the tape constantly streaming. To accomplish that, we implement
- * a feedback loop around the maximum number of stages:
+ * We try to use the minimum number of stages which is enough to keep the tape
+ * constantly streaming. To accomplish that, we implement a feedback loop around
+ * the maximum number of stages:
*
- * We start from MIN maximum stages (we will not even use MIN stages
- * if we don't need them), increment it by RATE*(MAX-MIN)
- * whenever we sense that the pipeline is empty, until we reach
- * the optimum value or until we reach MAX.
+ * We start from MIN maximum stages (we will not even use MIN stages if we don't
+ * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
+ * pipeline is empty, until we reach the optimum value or until we reach MAX.
*
- * Setting the following parameter to 0 is illegal: the pipelined mode
- * cannot be disabled (calculate_speeds() divides by tape->max_stages.)
+ * Setting the following parameter to 0 is illegal: the pipelined mode cannot be
+ * disabled (idetape_calculate_speeds() divides by tape->max_stages.)
*/
#define IDETAPE_MIN_PIPELINE_STAGES 1
#define IDETAPE_MAX_PIPELINE_STAGES 400
#define IDETAPE_INCREASE_STAGES_RATE 20
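
As an aside, the feedback loop sketched in the comment above can be modelled in
ordinary userspace C. The increment used here ((MAX - MIN) / 10) follows the
inline code that replaces idetape_increase_max_pipeline_stages() later in this
patch; everything else is invented for the illustration and is not driver code:

#include <stdio.h>

#define MIN_STAGES 1      /* stand-in for IDETAPE_MIN_PIPELINE_STAGES */
#define MAX_STAGES 400    /* stand-in for IDETAPE_MAX_PIPELINE_STAGES */

/*
 * Grow the pipeline ceiling by roughly a tenth of the (max - min) span each
 * time the pipeline is sensed to be empty, clamped to [MIN_STAGES, MAX_STAGES].
 */
static int grow_max_stages(int max_stages)
{
	int inc = (MAX_STAGES - MIN_STAGES) / 10;

	max_stages += (inc > 1) ? inc : 1;
	if (max_stages > MAX_STAGES)
		max_stages = MAX_STAGES;
	if (max_stages < MIN_STAGES)
		max_stages = MIN_STAGES;
	return max_stages;
}

int main(void)
{
	int max_stages = MIN_STAGES;

	for (int i = 0; i < 12; i++)
		printf("after %2d empty-pipeline events: max_stages = %d\n",
		       i + 1, max_stages = grow_max_stages(max_stages));
	return 0;
}
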
/*
- * The following are used to debug the driver:
- *
- * Setting IDETAPE_DEBUG_LOG to 1 will log driver flow control.
+ * After each failed packet command we issue a request sense command and retry
+ * the packet command IDETAPE_MAX_PC_RETRIES times.
*
- * Setting them to 0 will restore normal operation mode:
- *
- * 1. Disable logging normal successful operations.
- * 2. Disable self-sanity checks.
- * 3. Errors will still be logged, of course.
- *
- * All the #if DEBUG code will be removed some day, when the driver
- * is verified to be stable enough. This will make it much more
- * esthetic.
- */
-#define IDETAPE_DEBUG_LOG 0
-
-/*
- * After each failed packet command we issue a request sense command
- * and retry the packet command IDETAPE_MAX_PC_RETRIES times.
- *
- * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
+ * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
*/
#define IDETAPE_MAX_PC_RETRIES 3
/*
- * With each packet command, we allocate a buffer of
- * IDETAPE_PC_BUFFER_SIZE bytes. This is used for several packet
- * commands (Not for READ/WRITE commands).
+ * With each packet command, we allocate a buffer of IDETAPE_PC_BUFFER_SIZE
+ * bytes. This is used for several packet commands (Not for READ/WRITE commands)
*/
#define IDETAPE_PC_BUFFER_SIZE 256
@@ -114,48 +121,39 @@
#define IDETAPE_WAIT_CMD (900*HZ)
/*
- * The following parameter is used to select the point in the internal
- * tape fifo in which we will start to refill the buffer. Decreasing
- * the following parameter will improve the system's latency and
- * interactive response, while using a high value might improve system
- * throughput.
+ * The following parameter is used to select the point in the internal tape fifo
+ * in which we will start to refill the buffer. Decreasing the following
+ * parameter will improve the system's latency and interactive response, while
+ * using a high value might improve system throughput.
*/
-#define IDETAPE_FIFO_THRESHOLD 2
+#define IDETAPE_FIFO_THRESHOLD 2
/*
- * DSC polling parameters.
- *
- * Polling for DSC (a single bit in the status register) is a very
- * important function in ide-tape. There are two cases in which we
- * poll for DSC:
+ * DSC polling parameters.
*
- * 1. Before a read/write packet command, to ensure that we
- * can transfer data from/to the tape's data buffers, without
- * causing an actual media access. In case the tape is not
- * ready yet, we take out our request from the device
- * request queue, so that ide.c will service requests from
- * the other device on the same interface meanwhile.
+ * Polling for DSC (a single bit in the status register) is a very important
+ * function in ide-tape. There are two cases in which we poll for DSC:
*
- * 2. After the successful initialization of a "media access
- * packet command", which is a command which can take a long
- * time to complete (it can be several seconds or even an hour).
+ * 1. Before a read/write packet command, to ensure that we can transfer data
+ * from/to the tape's data buffers, without causing an actual media access.
+ * In case the tape is not ready yet, we take out our request from the device
+ * request queue, so that ide.c could service requests from the other device
+ * on the same interface in the meantime.
*
- * Again, we postpone our request in the middle to free the bus
- * for the other device. The polling frequency here should be
- * lower than the read/write frequency since those media access
- * commands are slow. We start from a "fast" frequency -
- * IDETAPE_DSC_MA_FAST (one second), and if we don't receive DSC
- * after IDETAPE_DSC_MA_THRESHOLD (5 minutes), we switch it to a
- * lower frequency - IDETAPE_DSC_MA_SLOW (1 minute).
+ * 2. After the successful initialization of a "media access packet command",
+ * which is a command that can take a long time to complete (the interval can
+ * range from several seconds to even an hour). Again, we postpone our request
+ * in the middle to free the bus for the other device. The polling frequency
+ * here should be lower than the read/write frequency since those media access
+ * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
+ * (1 second), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
+ * (5 min), we switch it to a lower frequency - IDETAPE_DSC_MA_SLOW (1 min).
*
- * We also set a timeout for the timer, in case something goes wrong.
- * The timeout should be longer then the maximum execution time of a
- * tape operation.
- */
-
-/*
- * DSC timings.
+ * We also set a timeout for the timer, in case something goes wrong. The
+ * timeout should be longer than the maximum execution time of a tape operation.
*/
+
+/* DSC timings. */
#define IDETAPE_DSC_RW_MIN 5*HZ/100 /* 50 msec */
#define IDETAPE_DSC_RW_MAX 40*HZ/100 /* 400 msec */
#define IDETAPE_DSC_RW_TIMEOUT 2*60*HZ /* 2 minutes */
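
To make the fast/slow switch described above concrete, here is a standalone
sketch of the interval selection. HZ and the overall timeout value are
placeholders; only the 1 s / 1 min / 5 min figures come from the comment, and
the real driver keeps this state in tape->dsc_poll_freq and tape->dsc_timeout
rather than recomputing it:

#include <stdio.h>

#define HZ               250                  /* assumed tick rate, sketch only */
#define DSC_MA_FAST      (1 * HZ)             /* 1 second */
#define DSC_MA_SLOW      (60 * HZ)            /* 1 minute */
#define DSC_MA_THRESHOLD (5 * 60 * HZ)        /* 5 minutes */
#define DSC_MA_TIMEOUT   (2UL * 60 * 60 * HZ) /* placeholder overall timeout */

/*
 * Pick the next media-access polling interval: fast for the first
 * DSC_MA_THRESHOLD ticks, slow afterwards, and 0 (give up) once the
 * overall timeout expires.
 */
static unsigned long next_dsc_poll(unsigned long start, unsigned long now)
{
	if (now - start > DSC_MA_TIMEOUT)
		return 0;
	if (now - start > DSC_MA_THRESHOLD)
		return DSC_MA_SLOW;
	return DSC_MA_FAST;
}

int main(void)
{
	printf("interval after 10 s: %lu ticks\n", next_dsc_poll(0, 10UL * HZ));
	printf("interval after 10 m: %lu ticks\n", next_dsc_poll(0, 10UL * 60 * HZ));
	return 0;
}
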
@@ -166,19 +164,15 @@
/*************************** End of tunable parameters ***********************/
-/*
- * Read/Write error simulation
- */
+/* Read/Write error simulation */
#define SIMULATE_ERRORS 0
-/*
- * For general magnetic tape device compatibility.
- */
-typedef enum {
- idetape_direction_none,
- idetape_direction_read,
- idetape_direction_write
-} idetape_chrdev_direction_t;
+/* tape directions */
+enum {
+ IDETAPE_DIR_NONE = (1 << 0),
+ IDETAPE_DIR_READ = (1 << 1),
+ IDETAPE_DIR_WRITE = (1 << 2),
+};
struct idetape_bh {
u32 b_size;
@@ -187,24 +181,32 @@ struct idetape_bh {
char *b_data;
};
-/*
- * Our view of a packet command.
- */
typedef struct idetape_packet_command_s {
- u8 c[12]; /* Actual packet bytes */
- int retries; /* On each retry, we increment retries */
- int error; /* Error code */
- int request_transfer; /* Bytes to transfer */
- int actually_transferred; /* Bytes actually transferred */
- int buffer_size; /* Size of our data buffer */
+ /* Actual packet bytes */
+ u8 c[12];
+ /* On each retry, we increment retries */
+ int retries;
+ /* Error code */
+ int error;
+ /* Bytes to transfer */
+ int request_transfer;
+ /* Bytes actually transferred */
+ int actually_transferred;
+ /* Size of our data buffer */
+ int buffer_size;
struct idetape_bh *bh;
char *b_data;
int b_count;
- u8 *buffer; /* Data buffer */
- u8 *current_position; /* Pointer into the above buffer */
- ide_startstop_t (*callback) (ide_drive_t *); /* Called when this packet command is completed */
- u8 pc_buffer[IDETAPE_PC_BUFFER_SIZE]; /* Temporary buffer */
- unsigned long flags; /* Status/Action bit flags: long for set_bit */
+ /* Data buffer */
+ u8 *buffer;
+ /* Pointer into the above buffer */
+ u8 *current_position;
+ /* Called when this packet command is completed */
+ ide_startstop_t (*callback) (ide_drive_t *);
+ /* Temporary buffer */
+ u8 pc_buffer[IDETAPE_PC_BUFFER_SIZE];
+ /* Status/Action bit flags: long for set_bit */
+ unsigned long flags;
} idetape_pc_t;
/*
@@ -223,9 +225,7 @@ typedef struct idetape_packet_command_s {
/* Data direction */
#define PC_WRITING 5
-/*
- * A pipeline stage.
- */
+/* A pipeline stage. */
typedef struct idetape_stage_s {
struct request rq; /* The corresponding request */
struct idetape_bh *bh; /* The data buffers */
@@ -233,9 +233,8 @@ typedef struct idetape_stage_s {
} idetape_stage_t;
/*
- * Most of our global data which we need to save even as we leave the
- * driver due to an interrupt or a timer event is stored in a variable
- * of type idetape_tape_t, defined below.
+ * Most of our global data which we need to save even as we leave the driver due
+ * to an interrupt or a timer event is stored in the struct defined below.
*/
typedef struct ide_tape_obj {
ide_drive_t *drive;
@@ -271,15 +270,14 @@ typedef struct ide_tape_obj {
int rq_stack_index;
/*
- * DSC polling variables.
+ * DSC polling variables.
*
- * While polling for DSC we use postponed_rq to postpone the
- * current request so that ide.c will be able to service
- * pending requests on the other device. Note that at most
- * we will have only one DSC (usually data transfer) request
- * in the device request queue. Additional requests can be
- * queued in our internal pipeline, but they will be visible
- * to ide.c only one at a time.
+ * While polling for DSC we use postponed_rq to postpone the current
+ * request so that ide.c will be able to service pending requests on the
+ * other device. Note that at most we will have only one DSC (usually
+ * data transfer) request in the device request queue. Additional
+ * requests can be queued in our internal pipeline, but they will be
+ * visible to ide.c only one at a time.
*/
struct request *postponed_rq;
/* The time in which we started polling for DSC */
@@ -287,73 +285,57 @@ typedef struct ide_tape_obj {
/* Timer used to poll for dsc */
struct timer_list dsc_timer;
/* Read/Write dsc polling frequency */
- unsigned long best_dsc_rw_frequency;
- /* The current polling frequency */
- unsigned long dsc_polling_frequency;
- /* Maximum waiting time */
+ unsigned long best_dsc_rw_freq;
+ unsigned long dsc_poll_freq;
unsigned long dsc_timeout;
- /*
- * Read position information
- */
+ /* Read position information */
u8 partition;
/* Current block */
- unsigned int first_frame_position;
- unsigned int last_frame_position;
- unsigned int blocks_in_buffer;
+ unsigned int first_frame;
- /*
- * Last error information
- */
+ /* Last error information */
u8 sense_key, asc, ascq;
- /*
- * Character device operation
- */
+ /* Character device operation */
unsigned int minor;
/* device name */
char name[4];
/* Current character device data transfer direction */
- idetape_chrdev_direction_t chrdev_direction;
+ u8 chrdev_dir;
- /*
- * Device information
- */
- /* Usually 512 or 1024 bytes */
- unsigned short tape_block_size;
+ /* tape block size, usually 512 or 1024 bytes */
+ unsigned short blk_size;
int user_bs_factor;
/* Copy of the tape's Capabilities and Mechanical Page */
u8 caps[20];
/*
- * Active data transfer request parameters.
- *
- * At most, there is only one ide-tape originated data transfer
- * request in the device request queue. This allows ide.c to
- * easily service requests from the other device when we
- * postpone our active request. In the pipelined operation
- * mode, we use our internal pipeline structure to hold
- * more data requests.
+ * Active data transfer request parameters.
*
- * The data buffer size is chosen based on the tape's
- * recommendation.
+ * At most, there is only one ide-tape originated data transfer request
+ * in the device request queue. This allows ide.c to easily service
+ * requests from the other device when we postpone our active request.
+ * In the pipelined operation mode, we use our internal pipeline
+ * structure to hold more data requests. The data buffer size is chosen
+ * based on the tape's recommendation.
*/
- /* Pointer to the request which is waiting in the device request queue */
- struct request *active_data_request;
- /* Data buffer size (chosen based on the tape's recommendation */
+ /* ptr to the request which is waiting in the device request queue */
+ struct request *active_data_rq;
+ /* Data buffer size chosen based on the tape's recommendation */
int stage_size;
idetape_stage_t *merge_stage;
int merge_stage_size;
struct idetape_bh *bh;
char *b_data;
int b_count;
-
+
/*
- * Pipeline parameters.
+ * Pipeline parameters.
*
- * To accomplish non-pipelined mode, we simply set the following
- * variables to zero (or NULL, where appropriate).
+ * To accomplish non-pipelined mode, we simply set the following
+ * variables to zero (or NULL, where appropriate).
*/
/* Number of currently used stages */
int nr_stages;
@@ -378,20 +360,13 @@ typedef struct ide_tape_obj {
/* Status/Action flags: long for set_bit */
unsigned long flags;
/* protects the ide-tape queue */
- spinlock_t spinlock;
+ spinlock_t lock;
- /*
- * Measures average tape speed
- */
+ /* Measures average tape speed */
unsigned long avg_time;
int avg_size;
int avg_speed;
- char vendor_id[10];
- char product_id[18];
- char firmware_revision[6];
- int firmware_revision_num;
-
/* the door is currently locked */
int door_locked;
/* the tape hardware is write protected */
@@ -400,11 +375,9 @@ typedef struct ide_tape_obj {
char write_prot;
/*
- * Limit the number of times a request can
- * be postponed, to avoid an infinite postpone
- * deadlock.
+ * Limit the number of times a request can be postponed, to avoid an
+ * infinite postpone deadlock.
*/
- /* request postpone count limit */
int postpone_cnt;
/*
@@ -419,30 +392,19 @@ typedef struct ide_tape_obj {
int tape_head;
int last_tape_head;
- /*
- * Speed control at the tape buffers input/output
- */
+ /* Speed control at the tape buffers input/output */
unsigned long insert_time;
int insert_size;
int insert_speed;
int max_insert_speed;
int measure_insert_time;
- /*
- * Measure tape still time, in milliseconds
- */
- unsigned long tape_still_time_begin;
- int tape_still_time;
-
- /*
- * Speed regulation negative feedback loop
- */
+ /* Speed regulation negative feedback loop */
int speed_control;
int pipeline_head_speed;
int controlled_pipeline_head_speed;
int uncontrolled_pipeline_head_speed;
int controlled_last_pipeline_head;
- int uncontrolled_last_pipeline_head;
unsigned long uncontrolled_pipeline_head_time;
unsigned long controlled_pipeline_head_time;
int controlled_previous_pipeline_head;
@@ -451,18 +413,7 @@ typedef struct ide_tape_obj {
unsigned long uncontrolled_previous_head_time;
int restart_speed_control_req;
- /*
- * Debug_level determines amount of debugging output;
- * can be changed using /proc/ide/hdx/settings
- * 0 : almost no debugging output
- * 1 : 0+output errors only
- * 2 : 1+output all sensekey/asc
- * 3 : 2+follow all chrdev related procedures
- * 4 : 3+follow all procedures
- * 5 : 4+include pc_stack rq_stack info
- * 6 : 5+USE_COUNT updates
- */
- int debug_level;
+ u32 debug_mask;
} idetape_tape_t;
static DEFINE_MUTEX(idetape_ref_mutex);
@@ -495,9 +446,7 @@ static void ide_tape_put(struct ide_tape_obj *tape)
mutex_unlock(&idetape_ref_mutex);
}
-/*
- * Tape door status
- */
+/* Tape door status */
#define DOOR_UNLOCKED 0
#define DOOR_LOCKED 1
#define DOOR_EXPLICITLY_LOCKED 2
@@ -517,30 +466,23 @@ static void ide_tape_put(struct ide_tape_obj *tape)
/* 0 = no tape is loaded, so we don't rewind after ejecting */
#define IDETAPE_MEDIUM_PRESENT 9
-/*
- * Some defines for the READ BUFFER command
- */
+/* A define for the READ BUFFER command */
#define IDETAPE_RETRIEVE_FAULTY_BLOCK 6
-/*
- * Some defines for the SPACE command
- */
+/* Some defines for the SPACE command */
#define IDETAPE_SPACE_OVER_FILEMARK 1
#define IDETAPE_SPACE_TO_EOD 3
-/*
- * Some defines for the LOAD UNLOAD command
- */
+/* Some defines for the LOAD UNLOAD command */
#define IDETAPE_LU_LOAD_MASK 1
#define IDETAPE_LU_RETENSION_MASK 2
#define IDETAPE_LU_EOT_MASK 4
/*
- * Special requests for our block device strategy routine.
+ * Special requests for our block device strategy routine.
*
- * In order to service a character device command, we add special
- * requests to the tail of our block device request queue and wait
- * for their completion.
+ * In order to service a character device command, we add special requests to
+ * the tail of our block device request queue and wait for their completion.
*/
enum {
@@ -551,55 +493,20 @@ enum {
REQ_IDETAPE_READ_BUFFER = (1 << 4),
};
-/*
- * Error codes which are returned in rq->errors to the higher part
- * of the driver.
- */
+/* Error codes returned in rq->errors to the higher part of the driver. */
#define IDETAPE_ERROR_GENERAL 101
#define IDETAPE_ERROR_FILEMARK 102
#define IDETAPE_ERROR_EOD 103
-/*
- * The following is used to format the general configuration word of
- * the ATAPI IDENTIFY DEVICE command.
- */
-struct idetape_id_gcw {
- unsigned packet_size :2; /* Packet Size */
- unsigned reserved234 :3; /* Reserved */
- unsigned drq_type :2; /* Command packet DRQ type */
- unsigned removable :1; /* Removable media */
- unsigned device_type :5; /* Device type */
- unsigned reserved13 :1; /* Reserved */
- unsigned protocol :2; /* Protocol type */
-};
-
-/*
- * READ POSITION packet command - Data Format (From Table 6-57)
- */
-typedef struct {
- unsigned reserved0_10 :2; /* Reserved */
- unsigned bpu :1; /* Block Position Unknown */
- unsigned reserved0_543 :3; /* Reserved */
- unsigned eop :1; /* End Of Partition */
- unsigned bop :1; /* Beginning Of Partition */
- u8 partition; /* Partition Number */
- u8 reserved2, reserved3; /* Reserved */
- u32 first_block; /* First Block Location */
- u32 last_block; /* Last Block Location (Optional) */
- u8 reserved12; /* Reserved */
- u8 blocks_in_buffer[3]; /* Blocks In Buffer - (Optional) */
- u32 bytes_in_buffer; /* Bytes In Buffer (Optional) */
-} idetape_read_position_result_t;
-
/* Structures related to the SELECT SENSE / MODE SENSE packet commands. */
#define IDETAPE_BLOCK_DESCRIPTOR 0
#define IDETAPE_CAPABILITIES_PAGE 0x2a
/*
- * The variables below are used for the character device interface.
- * Additional state variables are defined in our ide_drive_t structure.
+ * The variables below are used for the character device interface. Additional
+ * state variables are defined in our ide_drive_t structure.
*/
-static struct ide_tape_obj * idetape_devs[MAX_HWIFS * MAX_DRIVES];
+static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
#define ide_tape_f(file) ((file)->private_data)
@@ -616,23 +523,17 @@ static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
}
/*
- * Function declarations
- *
- */
-static int idetape_chrdev_release (struct inode *inode, struct file *filp);
-static void idetape_write_release (ide_drive_t *drive, unsigned int minor);
-
-/*
* Too bad. The drive wants to send us data which we are not ready to accept.
* Just throw it away.
*/
-static void idetape_discard_data (ide_drive_t *drive, unsigned int bcount)
+static void idetape_discard_data(ide_drive_t *drive, unsigned int bcount)
{
while (bcount--)
(void) HWIF(drive)->INB(IDE_DATA_REG);
}
-static void idetape_input_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsigned int bcount)
+static void idetape_input_buffers(ide_drive_t *drive, idetape_pc_t *pc,
+ unsigned int bcount)
{
struct idetape_bh *bh = pc->bh;
int count;
@@ -644,8 +545,11 @@ static void idetape_input_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsigne
idetape_discard_data(drive, bcount);
return;
}
- count = min((unsigned int)(bh->b_size - atomic_read(&bh->b_count)), bcount);
- HWIF(drive)->atapi_input_bytes(drive, bh->b_data + atomic_read(&bh->b_count), count);
+ count = min(
+ (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
+ bcount);
+ HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
+ atomic_read(&bh->b_count), count);
bcount -= count;
atomic_add(count, &bh->b_count);
if (atomic_read(&bh->b_count) == bh->b_size) {
@@ -657,15 +561,16 @@ static void idetape_input_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsigne
pc->bh = bh;
}
-static void idetape_output_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsigned int bcount)
+static void idetape_output_buffers(ide_drive_t *drive, idetape_pc_t *pc,
+ unsigned int bcount)
{
struct idetape_bh *bh = pc->bh;
int count;
while (bcount) {
if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in "
- "idetape_output_buffers\n");
+ printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
+ __func__);
return;
}
count = min((unsigned int)pc->b_count, (unsigned int)bcount);
@@ -674,7 +579,8 @@ static void idetape_output_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsign
pc->b_data += count;
pc->b_count -= count;
if (!pc->b_count) {
- pc->bh = bh = bh->b_reqnext;
+ bh = bh->b_reqnext;
+ pc->bh = bh;
if (bh) {
pc->b_data = bh->b_data;
pc->b_count = atomic_read(&bh->b_count);
@@ -683,7 +589,7 @@ static void idetape_output_buffers (ide_drive_t *drive, idetape_pc_t *pc, unsign
}
}
-static void idetape_update_buffers (idetape_pc_t *pc)
+static void idetape_update_buffers(idetape_pc_t *pc)
{
struct idetape_bh *bh = pc->bh;
int count;
@@ -693,8 +599,8 @@ static void idetape_update_buffers (idetape_pc_t *pc)
return;
while (bcount) {
if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in "
- "idetape_update_buffers\n");
+ printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
+ __func__);
return;
}
count = min((unsigned int)bh->b_size, (unsigned int)bcount);
@@ -712,17 +618,14 @@ static void idetape_update_buffers (idetape_pc_t *pc)
* driver. A storage space for a maximum of IDETAPE_PC_STACK packet
* commands is allocated at initialization time.
*/
-static idetape_pc_t *idetape_next_pc_storage (ide_drive_t *drive)
+static idetape_pc_t *idetape_next_pc_storage(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 5)
- printk(KERN_INFO "ide-tape: pc_stack_index=%d\n",
- tape->pc_stack_index);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);
+
if (tape->pc_stack_index == IDETAPE_PC_STACK)
- tape->pc_stack_index=0;
+ tape->pc_stack_index = 0;
return (&tape->pc_stack[tape->pc_stack_index++]);
}
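
The round-robin storage scheme used by idetape_next_pc_storage() (and its rq
counterpart below) boils down to the following standalone sketch; PC_STACK,
struct pc_slot and main() are stand-ins for the illustration only:

#include <stdio.h>

#define PC_STACK 10          /* stand-in for IDETAPE_PC_STACK */

struct pc_slot { int dummy; };

static struct pc_slot pc_stack[PC_STACK];
static int pc_stack_index;

/*
 * Same wrap-around scheme as idetape_next_pc_storage(): hand out the
 * pre-allocated slots in order and start recycling the oldest one once
 * the fixed pool is exhausted.
 */
static struct pc_slot *next_pc_storage(void)
{
	if (pc_stack_index == PC_STACK)
		pc_stack_index = 0;
	return &pc_stack[pc_stack_index++];
}

int main(void)
{
	for (int i = 0; i < 12; i++)
		printf("request %2d -> slot %ld\n", i,
		       (long)(next_pc_storage() - pc_stack));
	return 0;
}
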
@@ -731,32 +634,26 @@ static idetape_pc_t *idetape_next_pc_storage (ide_drive_t *drive)
* Since we queue packet commands in the request queue, we need to
* allocate a request, along with the allocation of a packet command.
*/
-
+
/**************************************************************
* *
* This should get fixed to use kmalloc(.., GFP_ATOMIC) *
* followed later on by kfree(). -ml *
* *
**************************************************************/
-
-static struct request *idetape_next_rq_storage (ide_drive_t *drive)
+
+static struct request *idetape_next_rq_storage(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 5)
- printk(KERN_INFO "ide-tape: rq_stack_index=%d\n",
- tape->rq_stack_index);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);
+
if (tape->rq_stack_index == IDETAPE_PC_STACK)
- tape->rq_stack_index=0;
+ tape->rq_stack_index = 0;
return (&tape->rq_stack[tape->rq_stack_index++]);
}
-/*
- * idetape_init_pc initializes a packet command.
- */
-static void idetape_init_pc (idetape_pc_t *pc)
+static void idetape_init_pc(idetape_pc_t *pc)
{
memset(pc->c, 0, 12);
pc->retries = 0;
@@ -780,22 +677,14 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
tape->sense_key = sense[2] & 0xF;
tape->asc = sense[12];
tape->ascq = sense[13];
-#if IDETAPE_DEBUG_LOG
- /*
- * Without debugging, we only log an error if we decided to give up
- * retrying.
- */
- if (tape->debug_level >= 1)
- printk(KERN_INFO "ide-tape: pc = %x, sense key = %x, "
- "asc = %x, ascq = %x\n",
- pc->c[0], tape->sense_key,
- tape->asc, tape->ascq);
-#endif /* IDETAPE_DEBUG_LOG */
+
+ debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
+ pc->c[0], tape->sense_key, tape->asc, tape->ascq);
/* Correct pc->actually_transferred by asking the tape. */
if (test_bit(PC_DMA_ERROR, &pc->flags)) {
pc->actually_transferred = pc->request_transfer -
- tape->tape_block_size *
+ tape->blk_size *
be32_to_cpu(get_unaligned((u32 *)&sense[3]));
idetape_update_buffers(pc);
}
@@ -843,50 +732,24 @@ static void idetape_activate_next_stage(ide_drive_t *drive)
idetape_stage_t *stage = tape->next_stage;
struct request *rq = &stage->rq;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_active_next_stage\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
if (stage == NULL) {
- printk(KERN_ERR "ide-tape: bug: Trying to activate a non existing stage\n");
+ printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
+ " existing stage\n");
return;
}
rq->rq_disk = tape->disk;
rq->buffer = NULL;
rq->special = (void *)stage->bh;
- tape->active_data_request = rq;
+ tape->active_data_rq = rq;
tape->active_stage = stage;
tape->next_stage = stage->next;
}
-/*
- * idetape_increase_max_pipeline_stages is a part of the feedback
- * loop which tries to find the optimum number of stages. In the
- * feedback loop, we are starting from a minimum maximum number of
- * stages, and if we sense that the pipeline is empty, we try to
- * increase it, until we reach the user compile time memory limit.
- */
-static void idetape_increase_max_pipeline_stages (ide_drive_t *drive)
-{
- idetape_tape_t *tape = drive->driver_data;
- int increase = (tape->max_pipeline - tape->min_pipeline) / 10;
-
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk (KERN_INFO "ide-tape: Reached idetape_increase_max_pipeline_stages\n");
-#endif /* IDETAPE_DEBUG_LOG */
-
- tape->max_stages += max(increase, 1);
- tape->max_stages = max(tape->max_stages, tape->min_pipeline);
- tape->max_stages = min(tape->max_stages, tape->max_pipeline);
-}
-
-/*
- * idetape_kfree_stage calls kfree to completely free a stage, along with
- * its related buffers.
- */
-static void __idetape_kfree_stage (idetape_stage_t *stage)
+/* Free a stage along with its related buffers completely. */
+static void __idetape_kfree_stage(idetape_stage_t *stage)
{
struct idetape_bh *prev_bh, *bh = stage->bh;
int size;
@@ -907,30 +770,29 @@ static void __idetape_kfree_stage (idetape_stage_t *stage)
kfree(stage);
}
-static void idetape_kfree_stage (idetape_tape_t *tape, idetape_stage_t *stage)
+static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
{
__idetape_kfree_stage(stage);
}
/*
- * idetape_remove_stage_head removes tape->first_stage from the pipeline.
- * The caller should avoid race conditions.
+ * Remove tape->first_stage from the pipeline. The caller should avoid race
+ * conditions.
*/
-static void idetape_remove_stage_head (ide_drive_t *drive)
+static void idetape_remove_stage_head(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
idetape_stage_t *stage;
-
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_remove_stage_head\n");
-#endif /* IDETAPE_DEBUG_LOG */
+
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
if (tape->first_stage == NULL) {
printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
return;
}
if (tape->active_stage == tape->first_stage) {
- printk(KERN_ERR "ide-tape: bug: Trying to free our active pipeline stage\n");
+ printk(KERN_ERR "ide-tape: bug: Trying to free our active "
+ "pipeline stage\n");
return;
}
stage = tape->first_stage;
@@ -940,9 +802,11 @@ static void idetape_remove_stage_head (ide_drive_t *drive)
if (tape->first_stage == NULL) {
tape->last_stage = NULL;
if (tape->next_stage != NULL)
- printk(KERN_ERR "ide-tape: bug: tape->next_stage != NULL\n");
+ printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
+ " NULL\n");
if (tape->nr_stages)
- printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 now\n");
+ printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
+ "now\n");
}
}
@@ -957,10 +821,8 @@ static void idetape_abort_pipeline(ide_drive_t *drive,
idetape_stage_t *stage = new_last_stage->next;
idetape_stage_t *nstage;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: %s: idetape_abort_pipeline called\n", tape->name);
-#endif
+ debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
+
while (stage) {
nstage = stage->next;
idetape_kfree_stage(tape, stage);
@@ -975,8 +837,8 @@ static void idetape_abort_pipeline(ide_drive_t *drive,
}
/*
- * idetape_end_request is used to finish servicing a request, and to
- * insert a pending pipeline request into the main device queue.
+ * Finish servicing a request and insert a pending pipeline request into the
+ * main device queue.
*/
static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
{
@@ -987,15 +849,12 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
int remove_stage = 0;
idetape_stage_t *active_stage;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_end_request\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
switch (uptodate) {
- case 0: error = IDETAPE_ERROR_GENERAL; break;
- case 1: error = 0; break;
- default: error = uptodate;
+ case 0: error = IDETAPE_ERROR_GENERAL; break;
+ case 1: error = 0; break;
+ default: error = uptodate;
}
rq->errors = error;
if (error)
@@ -1006,20 +865,21 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
return 0;
}
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
/* The request was a pipelined data transfer request */
- if (tape->active_data_request == rq) {
+ if (tape->active_data_rq == rq) {
active_stage = tape->active_stage;
tape->active_stage = NULL;
- tape->active_data_request = NULL;
+ tape->active_data_rq = NULL;
tape->nr_pending_stages--;
if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
remove_stage = 1;
if (error) {
set_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
if (error == IDETAPE_ERROR_EOD)
- idetape_abort_pipeline(drive, active_stage);
+ idetape_abort_pipeline(drive,
+ active_stage);
}
} else if (rq->cmd[0] & REQ_IDETAPE_READ) {
if (error == IDETAPE_ERROR_EOD) {
@@ -1030,48 +890,57 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
if (tape->next_stage != NULL) {
idetape_activate_next_stage(drive);
+ /* Insert the next request into the request queue. */
+ (void)ide_do_drive_cmd(drive, tape->active_data_rq,
+ ide_end);
+ } else if (!error) {
/*
- * Insert the next request into the request queue.
+ * This is a part of the feedback loop which tries to
+ * find the optimum number of stages. We are starting
+ * from a minimum maximum number of stages, and if we
+ * sense that the pipeline is empty, we try to increase
+ * it, until we reach the user compile time memory
+ * limit.
*/
- (void) ide_do_drive_cmd(drive, tape->active_data_request, ide_end);
- } else if (!error) {
- idetape_increase_max_pipeline_stages(drive);
+ int i = (tape->max_pipeline - tape->min_pipeline) / 10;
+
+ tape->max_stages += max(i, 1);
+ tape->max_stages = max(tape->max_stages,
+ tape->min_pipeline);
+ tape->max_stages = min(tape->max_stages,
+ tape->max_pipeline);
}
}
ide_end_drive_cmd(drive, 0, 0);
-// blkdev_dequeue_request(rq);
-// drive->rq = NULL;
-// end_that_request_last(rq);
if (remove_stage)
idetape_remove_stage_head(drive);
- if (tape->active_data_request == NULL)
+ if (tape->active_data_rq == NULL)
clear_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ spin_unlock_irqrestore(&tape->lock, flags);
return 0;
}
-static ide_startstop_t idetape_request_sense_callback (ide_drive_t *drive)
+static ide_startstop_t idetape_request_sense_callback(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_request_sense_callback\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
if (!tape->pc->error) {
idetape_analyze_error(drive, tape->pc->buffer);
idetape_end_request(drive, 1, 0);
} else {
- printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - Aborting request!\n");
+ printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
+ "Aborting request!\n");
idetape_end_request(drive, 0, 0);
}
return ide_stopped;
}
-static void idetape_create_request_sense_cmd (idetape_pc_t *pc)
+static void idetape_create_request_sense_cmd(idetape_pc_t *pc)
{
- idetape_init_pc(pc);
+ idetape_init_pc(pc);
pc->c[0] = REQUEST_SENSE;
pc->c[4] = 20;
pc->request_transfer = 20;
@@ -1086,25 +955,22 @@ static void idetape_init_rq(struct request *rq, u8 cmd)
}
/*
- * idetape_queue_pc_head generates a new packet command request in front
- * of the request queue, before the current request, so that it will be
- * processed immediately, on the next pass through the driver.
- *
- * idetape_queue_pc_head is called from the request handling part of
- * the driver (the "bottom" part). Safe storage for the request should
- * be allocated with idetape_next_pc_storage and idetape_next_rq_storage
- * before calling idetape_queue_pc_head.
+ * Generate a new packet command request in front of the request queue, before
+ * the current request, so that it will be processed immediately, on the next
+ * pass through the driver. The function below is called from the request
+ * handling part of the driver (the "bottom" part). Safe storage for the request
+ * should be allocated with idetape_next_{pc,rq}_storage() prior to that.
*
- * Memory for those requests is pre-allocated at initialization time, and
- * is limited to IDETAPE_PC_STACK requests. We assume that we have enough
- * space for the maximum possible number of inter-dependent packet commands.
+ * Memory for those requests is pre-allocated at initialization time, and is
+ * limited to IDETAPE_PC_STACK requests. We assume that we have enough space for
+ * the maximum possible number of inter-dependent packet commands.
*
- * The higher level of the driver - The ioctl handler and the character
- * device handling functions should queue request to the lower level part
- * and wait for their completion using idetape_queue_pc_tail or
- * idetape_queue_rw_tail.
+ * The higher level of the driver - The ioctl handler and the character device
+ * handling functions should queue requests to the lower level part and wait for
+ * their completion using idetape_queue_pc_tail or idetape_queue_rw_tail.
*/
-static void idetape_queue_pc_head (ide_drive_t *drive, idetape_pc_t *pc,struct request *rq)
+static void idetape_queue_pc_head(ide_drive_t *drive, idetape_pc_t *pc,
+ struct request *rq)
{
struct ide_tape_obj *tape = drive->driver_data;
@@ -1125,7 +991,7 @@ static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
idetape_pc_t *pc;
struct request *rq;
- (void)drive->hwif->INB(IDE_ERROR_REG);
+ (void)ide_read_error(drive);
pc = idetape_next_pc_storage(drive);
rq = idetape_next_rq_storage(drive);
idetape_create_request_sense_cmd(pc);
@@ -1135,50 +1001,46 @@ static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
}
/*
- * idetape_postpone_request postpones the current request so that
- * ide.c will be able to service requests from another device on
- * the same hwgroup while we are polling for DSC.
+ * Postpone the current request so that ide.c will be able to service requests
+ * from another device on the same hwgroup while we are polling for DSC.
*/
-static void idetape_postpone_request (ide_drive_t *drive)
+static void idetape_postpone_request(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: idetape_postpone_request\n");
-#endif
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
tape->postponed_rq = HWGROUP(drive)->rq;
- ide_stall_queue(drive, tape->dsc_polling_frequency);
+ ide_stall_queue(drive, tape->dsc_poll_freq);
}
+typedef void idetape_io_buf(ide_drive_t *, idetape_pc_t *, unsigned int);
+
/*
- * idetape_pc_intr is the usual interrupt handler which will be called
- * during a packet command. We will transfer some of the data (as
- * requested by the drive) and will re-point interrupt handler to us.
- * When data transfer is finished, we will act according to the
- * algorithm described before idetape_issue_packet_command.
- *
+ * This is the usual interrupt handler which will be called during a packet
+ * command. We will transfer some of the data (as requested by the drive) and
+ * will re-point interrupt handler to us. When data transfer is finished, we
+ * will act according to the algorithm described before
+ * idetape_issue_pc.
*/
-static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
+static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t *pc = tape->pc;
+ xfer_func_t *xferfunc;
+ idetape_io_buf *iobuf;
unsigned int temp;
#if SIMULATE_ERRORS
- static int error_sim_count = 0;
+ static int error_sim_count;
#endif
u16 bcount;
u8 stat, ireason;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_pc_intr "
- "interrupt handler\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);
/* Clear the interrupt */
- stat = hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (test_bit(PC_DMA_IN_PROGRESS, &pc->flags)) {
if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) {
@@ -1208,20 +1070,16 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
pc->actually_transferred = pc->request_transfer;
idetape_update_buffers(pc);
}
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: DMA finished\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "DMA finished\n");
+
}
/* No more interrupts */
if ((stat & DRQ_STAT) == 0) {
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: Packet command completed, %d bytes transferred\n", pc->actually_transferred);
-#endif /* IDETAPE_DEBUG_LOG */
- clear_bit(PC_DMA_IN_PROGRESS, &pc->flags);
+ debug_log(DBG_SENSE, "Packet command completed, %d bytes"
+ " transferred\n", pc->actually_transferred);
+ clear_bit(PC_DMA_IN_PROGRESS, &pc->flags);
local_irq_enable();
#if SIMULATE_ERRORS
@@ -1236,19 +1094,16 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
stat &= ~ERR_STAT;
if ((stat & ERR_STAT) || test_bit(PC_DMA_ERROR, &pc->flags)) {
/* Error detected */
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 1)
- printk(KERN_INFO "ide-tape: %s: I/O error\n",
- tape->name);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_ERR, "%s: I/O error\n", tape->name);
+
if (pc->c[0] == REQUEST_SENSE) {
- printk(KERN_ERR "ide-tape: I/O error in request sense command\n");
+ printk(KERN_ERR "ide-tape: I/O error in request"
+ " sense command\n");
return ide_do_reset(drive);
}
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 1)
- printk(KERN_INFO "ide-tape: [cmd %x]: check condition\n", pc->c[0]);
-#endif
+ debug_log(DBG_ERR, "[cmd %x]: check condition\n",
+ pc->c[0]);
+
/* Retry operation */
return idetape_retry_pc(drive);
}
@@ -1257,7 +1112,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
(stat & SEEK_STAT) == 0) {
/* Media access command */
tape->dsc_polling_start = jiffies;
- tape->dsc_polling_frequency = IDETAPE_DSC_MA_FAST;
+ tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
/* Allow ide.c to handle other requests */
idetape_postpone_request(drive);
@@ -1282,7 +1137,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
ireason = hwif->INB(IDE_IREASON_REG);
if (ireason & CD) {
- printk(KERN_ERR "ide-tape: CoD != 0 in idetape_pc_intr\n");
+ printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
return ide_do_reset(drive);
}
if (((ireason & IO) == IO) == test_bit(PC_WRITING, &pc->flags)) {
@@ -1298,86 +1153,76 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive)
temp = pc->actually_transferred + bcount;
if (temp > pc->request_transfer) {
if (temp > pc->buffer_size) {
- printk(KERN_ERR "ide-tape: The tape wants to send us more data than expected - discarding data\n");
+ printk(KERN_ERR "ide-tape: The tape wants to "
+ "send us more data than expected "
+ "- discarding data\n");
idetape_discard_data(drive, bcount);
- ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
+ ide_set_handler(drive, &idetape_pc_intr,
+ IDETAPE_WAIT_CMD, NULL);
return ide_started;
}
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_NOTICE "ide-tape: The tape wants to send us more data than expected - allowing transfer\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_SENSE, "The tape wants to send us more "
+ "data than expected - allowing transfer\n");
}
- }
- if (test_bit(PC_WRITING, &pc->flags)) {
- if (pc->bh != NULL)
- idetape_output_buffers(drive, pc, bcount);
- else
- /* Write the current buffer */
- hwif->atapi_output_bytes(drive, pc->current_position,
- bcount);
+ iobuf = &idetape_input_buffers;
+ xferfunc = hwif->atapi_input_bytes;
} else {
- if (pc->bh != NULL)
- idetape_input_buffers(drive, pc, bcount);
- else
- /* Read the current buffer */
- hwif->atapi_input_bytes(drive, pc->current_position,
- bcount);
+ iobuf = &idetape_output_buffers;
+ xferfunc = hwif->atapi_output_bytes;
}
+
+ if (pc->bh)
+ iobuf(drive, pc, bcount);
+ else
+ xferfunc(drive, pc->current_position, bcount);
+
/* Update the current position */
pc->actually_transferred += bcount;
pc->current_position += bcount;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: [cmd %x] transferred %d bytes "
- "on that interrupt\n", pc->c[0], bcount);
-#endif
+
+ debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
+ pc->c[0], bcount);
+
/* And set the interrupt handler again */
ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
return ide_started;
}
/*
- * Packet Command Interface
- *
- * The current Packet Command is available in tape->pc, and will not
- * change until we finish handling it. Each packet command is associated
- * with a callback function that will be called when the command is
- * finished.
+ * Packet Command Interface
*
- * The handling will be done in three stages:
+ * The current Packet Command is available in tape->pc, and will not change
+ * until we finish handling it. Each packet command is associated with a
+ * callback function that will be called when the command is finished.
*
- * 1. idetape_issue_packet_command will send the packet command to the
- * drive, and will set the interrupt handler to idetape_pc_intr.
+ * The handling will be done in three stages:
*
- * 2. On each interrupt, idetape_pc_intr will be called. This step
- * will be repeated until the device signals us that no more
- * interrupts will be issued.
+ * 1. idetape_issue_pc will send the packet command to the drive, and will set
+ * the interrupt handler to idetape_pc_intr.
*
- * 3. ATAPI Tape media access commands have immediate status with a
- * delayed process. In case of a successful initiation of a
- * media access packet command, the DSC bit will be set when the
- * actual execution of the command is finished.
- * Since the tape drive will not issue an interrupt, we have to
- * poll for this event. In this case, we define the request as
- * "low priority request" by setting rq_status to
- * IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and exit
- * the driver.
+ * 2. On each interrupt, idetape_pc_intr will be called. This step will be
+ * repeated until the device signals us that no more interrupts will be issued.
*
- * ide.c will then give higher priority to requests which
- * originate from the other device, until will change rq_status
- * to RQ_ACTIVE.
+ * 3. ATAPI Tape media access commands have immediate status with a delayed
+ * process. In case of a successful initiation of a media access packet command,
+ * the DSC bit will be set when the actual execution of the command is finished.
+ * Since the tape drive will not issue an interrupt, we have to poll for this
+ * event. In this case, we define the request as "low priority request" by
+ * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
+ * exit the driver.
*
- * 4. When the packet command is finished, it will be checked for errors.
+ * ide.c will then give higher priority to requests which originate from the
+ * other device, until we change rq_status to RQ_ACTIVE.
*
- * 5. In case an error was found, we queue a request sense packet
- * command in front of the request queue and retry the operation
- * up to IDETAPE_MAX_PC_RETRIES times.
+ * 4. When the packet command is finished, it will be checked for errors.
*
- * 6. In case no error was found, or we decided to give up and not
- * to retry again, the callback function will be called and then
- * we will handle the next request.
+ * 5. In case an error was found, we queue a request sense packet command in
+ * front of the request queue and retry the operation up to
+ * IDETAPE_MAX_PC_RETRIES times.
*
+ * 6. In case no error was found, or we decided to give up and not to retry
+ * again, the callback function will be called and then we will handle the next
+ * request.
*/
static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
{
@@ -1388,8 +1233,9 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
ide_startstop_t startstop;
u8 ireason;
- if (ide_wait_stat(&startstop,drive,DRQ_STAT,BUSY_STAT,WAIT_READY)) {
- printk(KERN_ERR "ide-tape: Strange, packet command initiated yet DRQ isn't asserted\n");
+ if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
+ printk(KERN_ERR "ide-tape: Strange, packet command initiated "
+ "yet DRQ isn't asserted\n");
return startstop;
}
ireason = hwif->INB(IDE_IREASON_REG);
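
For orientation, the six-step flow documented above (and driven by
idetape_issue_pc(), idetape_pc_intr() and idetape_media_access_finished()) can
be condensed into a toy state machine. Everything below is an invented
userspace model, not driver code:

#include <stdio.h>
#include <stdbool.h>

/*
 * Issue the command, keep transferring while DRQ is asserted, then either
 * poll for DSC (media-access commands), retry on error, or complete.
 */
enum pc_state { PC_ISSUED, PC_XFER, PC_WAIT_DSC, PC_DONE, PC_FAILED };

struct fake_pc {
	int retries, max_retries;
	bool media_access, error;
};

static enum pc_state pc_step(struct fake_pc *pc, enum pc_state s,
			     bool drq, bool dsc)
{
	switch (s) {
	case PC_ISSUED:                          /* stage 1: command sent */
		return PC_XFER;
	case PC_XFER:
		if (drq)                         /* stage 2: more data to move */
			return PC_XFER;
		if (pc->error)                   /* stages 4/5: sense + retry */
			return (++pc->retries <= pc->max_retries) ?
				PC_ISSUED : PC_FAILED;
		return pc->media_access ? PC_WAIT_DSC : PC_DONE;
	case PC_WAIT_DSC:                        /* stage 3: poll for DSC */
		return dsc ? PC_DONE : PC_WAIT_DSC;
	default:                                 /* stage 6: callback ran */
		return s;
	}
}

int main(void)
{
	struct fake_pc pc = { .max_retries = 3, .media_access = true };
	enum pc_state s = PC_ISSUED;

	s = pc_step(&pc, s, true, false);    /* first interrupt, DRQ set  */
	s = pc_step(&pc, s, true, false);    /* still transferring        */
	s = pc_step(&pc, s, false, false);   /* DRQ dropped, media access */
	s = pc_step(&pc, s, false, true);    /* DSC asserted -> finished  */
	printf("final state: %s\n", s == PC_DONE ? "PC_DONE" : "not done");
	return 0;
}
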
@@ -1422,7 +1268,7 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
return ide_started;
}
-static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape_pc_t *pc)
+static ide_startstop_t idetape_issue_pc(ide_drive_t *drive, idetape_pc_t *pc)
{
ide_hwif_t *hwif = drive->hwif;
idetape_tape_t *tape = drive->driver_data;
@@ -1443,9 +1289,9 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
test_bit(PC_ABORT, &pc->flags)) {
/*
- * We will "abort" retrying a packet command in case
- * a legitimate error code was received (crossing a
- * filemark, or end of the media, for example).
+ * We will "abort" retrying a packet command in case legitimate
+ * error code was received (crossing a filemark, or end of the
+ * media, for example).
*/
if (!test_bit(PC_ABORT, &pc->flags)) {
if (!(pc->c[0] == TEST_UNIT_READY &&
@@ -1464,10 +1310,7 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
tape->failed_pc = NULL;
return pc->callback(drive);
}
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: Retry number - %d, cmd = %02X\n", pc->retries, pc->c[0]);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
pc->retries++;
/* We haven't transferred any data yet */
@@ -1499,31 +1342,24 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape
}
}
-/*
- * General packet command callback function.
- */
-static ide_startstop_t idetape_pc_callback (ide_drive_t *drive)
+static ide_startstop_t idetape_pc_callback(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_pc_callback\n");
-#endif /* IDETAPE_DEBUG_LOG */
+
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
return ide_stopped;
}
-/*
- * A mode sense command is used to "sense" tape parameters.
- */
-static void idetape_create_mode_sense_cmd (idetape_pc_t *pc, u8 page_code)
+/* A mode sense command is used to "sense" tape parameters. */
+static void idetape_create_mode_sense_cmd(idetape_pc_t *pc, u8 page_code)
{
idetape_init_pc(pc);
pc->c[0] = MODE_SENSE;
if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
- pc->c[1] = 8; /* DBD = 1 - Don't return block descriptors */
+ /* DBD = 1 - Don't return block descriptors */
+ pc->c[1] = 8;
pc->c[2] = page_code;
/*
* Changed pc->c[3] to 0 (255 will at best return unused info).
@@ -1533,7 +1369,8 @@ static void idetape_create_mode_sense_cmd (idetape_pc_t *pc, u8 page_code)
* and return an error when 255 is used.
*/
pc->c[3] = 0;
- pc->c[4] = 255; /* (We will just discard data in that case) */
+ /* We will just discard data in that case */
+ pc->c[4] = 255;
if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
pc->request_transfer = 12;
else if (page_code == IDETAPE_CAPABILITIES_PAGE)
@@ -1543,62 +1380,77 @@ static void idetape_create_mode_sense_cmd (idetape_pc_t *pc, u8 page_code)
pc->callback = &idetape_pc_callback;
}
-static void calculate_speeds(ide_drive_t *drive)
+static void idetape_calculate_speeds(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
- int full = 125, empty = 75;
- if (time_after(jiffies, tape->controlled_pipeline_head_time + 120 * HZ)) {
- tape->controlled_previous_pipeline_head = tape->controlled_last_pipeline_head;
- tape->controlled_previous_head_time = tape->controlled_pipeline_head_time;
+ if (time_after(jiffies,
+ tape->controlled_pipeline_head_time + 120 * HZ)) {
+ tape->controlled_previous_pipeline_head =
+ tape->controlled_last_pipeline_head;
+ tape->controlled_previous_head_time =
+ tape->controlled_pipeline_head_time;
tape->controlled_last_pipeline_head = tape->pipeline_head;
tape->controlled_pipeline_head_time = jiffies;
}
if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ))
- tape->controlled_pipeline_head_speed = (tape->pipeline_head - tape->controlled_last_pipeline_head) * 32 * HZ / (jiffies - tape->controlled_pipeline_head_time);
+ tape->controlled_pipeline_head_speed = (tape->pipeline_head -
+ tape->controlled_last_pipeline_head) * 32 * HZ /
+ (jiffies - tape->controlled_pipeline_head_time);
else if (time_after(jiffies, tape->controlled_previous_head_time))
- tape->controlled_pipeline_head_speed = (tape->pipeline_head - tape->controlled_previous_pipeline_head) * 32 * HZ / (jiffies - tape->controlled_previous_head_time);
+ tape->controlled_pipeline_head_speed = (tape->pipeline_head -
+ tape->controlled_previous_pipeline_head) * 32 *
+ HZ / (jiffies - tape->controlled_previous_head_time);
- if (tape->nr_pending_stages < tape->max_stages /*- 1 */) {
+ if (tape->nr_pending_stages < tape->max_stages/*- 1 */) {
/* -1 for read mode error recovery */
- if (time_after(jiffies, tape->uncontrolled_previous_head_time + 10 * HZ)) {
+ if (time_after(jiffies, tape->uncontrolled_previous_head_time +
+ 10 * HZ)) {
tape->uncontrolled_pipeline_head_time = jiffies;
- tape->uncontrolled_pipeline_head_speed = (tape->pipeline_head - tape->uncontrolled_previous_pipeline_head) * 32 * HZ / (jiffies - tape->uncontrolled_previous_head_time);
+ tape->uncontrolled_pipeline_head_speed =
+ (tape->pipeline_head -
+ tape->uncontrolled_previous_pipeline_head) *
+ 32 * HZ / (jiffies -
+ tape->uncontrolled_previous_head_time);
}
} else {
tape->uncontrolled_previous_head_time = jiffies;
tape->uncontrolled_previous_pipeline_head = tape->pipeline_head;
- if (time_after(jiffies, tape->uncontrolled_pipeline_head_time + 30 * HZ)) {
+ if (time_after(jiffies, tape->uncontrolled_pipeline_head_time +
+ 30 * HZ))
tape->uncontrolled_pipeline_head_time = jiffies;
- }
+
}
- tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed, tape->controlled_pipeline_head_speed);
- if (tape->speed_control == 0) {
- tape->max_insert_speed = 5000;
- } else if (tape->speed_control == 1) {
+ tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed,
+ tape->controlled_pipeline_head_speed);
+
+ if (tape->speed_control == 1) {
if (tape->nr_pending_stages >= tape->max_stages / 2)
tape->max_insert_speed = tape->pipeline_head_speed +
- (1100 - tape->pipeline_head_speed) * 2 * (tape->nr_pending_stages - tape->max_stages / 2) / tape->max_stages;
+ (1100 - tape->pipeline_head_speed) * 2 *
+ (tape->nr_pending_stages - tape->max_stages / 2)
+ / tape->max_stages;
else
tape->max_insert_speed = 500 +
- (tape->pipeline_head_speed - 500) * 2 * tape->nr_pending_stages / tape->max_stages;
+ (tape->pipeline_head_speed - 500) * 2 *
+ tape->nr_pending_stages / tape->max_stages;
+
if (tape->nr_pending_stages >= tape->max_stages * 99 / 100)
tape->max_insert_speed = 5000;
- } else if (tape->speed_control == 2) {
- tape->max_insert_speed = tape->pipeline_head_speed * empty / 100 +
- (tape->pipeline_head_speed * full / 100 - tape->pipeline_head_speed * empty / 100) * tape->nr_pending_stages / tape->max_stages;
} else
tape->max_insert_speed = tape->speed_control;
+
tape->max_insert_speed = max(tape->max_insert_speed, 500);
}
-static ide_startstop_t idetape_media_access_finished (ide_drive_t *drive)
+static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t *pc = tape->pc;
u8 stat;
- stat = drive->hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
+
if (stat & SEEK_STAT) {
if (stat & ERR_STAT) {
/* Error detected */
@@ -1618,14 +1470,14 @@ static ide_startstop_t idetape_media_access_finished (ide_drive_t *drive)
return pc->callback(drive);
}
-static ide_startstop_t idetape_rw_callback (ide_drive_t *drive)
+static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
struct request *rq = HWGROUP(drive)->rq;
- int blocks = tape->pc->actually_transferred / tape->tape_block_size;
+ int blocks = tape->pc->actually_transferred / tape->blk_size;
- tape->avg_size += blocks * tape->tape_block_size;
- tape->insert_size += blocks * tape->tape_block_size;
+ tape->avg_size += blocks * tape->blk_size;
+ tape->insert_size += blocks * tape->blk_size;
if (tape->insert_size > 1024 * 1024)
tape->measure_insert_time = 1;
if (tape->measure_insert_time) {
@@ -1634,19 +1486,17 @@ static ide_startstop_t idetape_rw_callback (ide_drive_t *drive)
tape->insert_size = 0;
}
if (time_after(jiffies, tape->insert_time))
- tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
+ tape->insert_speed = tape->insert_size / 1024 * HZ /
+ (jiffies - tape->insert_time);
if (time_after_eq(jiffies, tape->avg_time + HZ)) {
- tape->avg_speed = tape->avg_size * HZ / (jiffies - tape->avg_time) / 1024;
+ tape->avg_speed = tape->avg_size * HZ /
+ (jiffies - tape->avg_time) / 1024;
tape->avg_size = 0;
tape->avg_time = jiffies;
}
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_rw_callback\n");
-#endif /* IDETAPE_DEBUG_LOG */
-
- tape->first_frame_position += blocks;
+ tape->first_frame += blocks;
rq->current_nr_sectors -= blocks;
if (!tape->pc->error)
@@ -1656,7 +1506,8 @@ static ide_startstop_t idetape_rw_callback (ide_drive_t *drive)
return ide_stopped;
}
-static void idetape_create_read_cmd(idetape_tape_t *tape, idetape_pc_t *pc, unsigned int length, struct idetape_bh *bh)
+static void idetape_create_read_cmd(idetape_tape_t *tape, idetape_pc_t *pc,
+ unsigned int length, struct idetape_bh *bh)
{
idetape_init_pc(pc);
pc->c[0] = READ_6;
@@ -1666,12 +1517,14 @@ static void idetape_create_read_cmd(idetape_tape_t *tape, idetape_pc_t *pc, unsi
pc->bh = bh;
atomic_set(&bh->b_count, 0);
pc->buffer = NULL;
- pc->request_transfer = pc->buffer_size = length * tape->tape_block_size;
+ pc->buffer_size = length * tape->blk_size;
+ pc->request_transfer = pc->buffer_size;
if (pc->request_transfer == tape->stage_size)
set_bit(PC_DMA_RECOMMENDED, &pc->flags);
}
-static void idetape_create_read_buffer_cmd(idetape_tape_t *tape, idetape_pc_t *pc, unsigned int length, struct idetape_bh *bh)
+static void idetape_create_read_buffer_cmd(idetape_tape_t *tape,
+ idetape_pc_t *pc, struct idetape_bh *bh)
{
int size = 32768;
struct idetape_bh *p = bh;
@@ -1689,10 +1542,12 @@ static void idetape_create_read_buffer_cmd(idetape_tape_t *tape, idetape_pc_t *p
atomic_set(&p->b_count, 0);
p = p->b_reqnext;
}
- pc->request_transfer = pc->buffer_size = size;
+ pc->request_transfer = size;
+ pc->buffer_size = size;
}
-static void idetape_create_write_cmd(idetape_tape_t *tape, idetape_pc_t *pc, unsigned int length, struct idetape_bh *bh)
+static void idetape_create_write_cmd(idetape_tape_t *tape, idetape_pc_t *pc,
+ unsigned int length, struct idetape_bh *bh)
{
idetape_init_pc(pc);
pc->c[0] = WRITE_6;
@@ -1704,14 +1559,12 @@ static void idetape_create_write_cmd(idetape_tape_t *tape, idetape_pc_t *pc, uns
pc->b_data = bh->b_data;
pc->b_count = atomic_read(&bh->b_count);
pc->buffer = NULL;
- pc->request_transfer = pc->buffer_size = length * tape->tape_block_size;
+ pc->buffer_size = length * tape->blk_size;
+ pc->request_transfer = pc->buffer_size;
if (pc->request_transfer == tape->stage_size)
set_bit(PC_DMA_RECOMMENDED, &pc->flags);
}
-/*
- * idetape_do_request is our request handling function.
- */
static ide_startstop_t idetape_do_request(ide_drive_t *drive,
struct request *rq, sector_t block)
{
@@ -1720,30 +1573,22 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
struct request *postponed_rq = tape->postponed_rq;
u8 stat;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: sector: %ld, "
- "nr_sectors: %ld, current_nr_sectors: %d\n",
+ debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
+ " current_nr_sectors: %d\n",
rq->sector, rq->nr_sectors, rq->current_nr_sectors);
-#endif /* IDETAPE_DEBUG_LOG */
if (!blk_special_request(rq)) {
- /*
- * We do not support buffer cache originated requests.
- */
+ /* We do not support buffer cache originated requests. */
printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
"request queue (%d)\n", drive->name, rq->cmd_type);
ide_end_request(drive, 0, 0);
return ide_stopped;
}
- /*
- * Retry a failed packet command
- */
- if (tape->failed_pc != NULL &&
- tape->pc->c[0] == REQUEST_SENSE) {
- return idetape_issue_packet_command(drive, tape->failed_pc);
- }
+ /* Retry a failed packet command */
+ if (tape->failed_pc && tape->pc->c[0] == REQUEST_SENSE)
+ return idetape_issue_pc(drive, tape->failed_pc);
+
if (postponed_rq != NULL)
if (rq != postponed_rq) {
printk(KERN_ERR "ide-tape: ide-tape.c bug - "
@@ -1758,7 +1603,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
* If the tape is still busy, postpone our request and service
* the other device meanwhile.
*/
- stat = drive->hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
set_bit(IDETAPE_IGNORE_DSC, &tape->flags);
@@ -1768,16 +1613,15 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
drive->post_reset = 0;
}
- if (tape->tape_still_time > 100 && tape->tape_still_time < 200)
- tape->measure_insert_time = 1;
if (time_after(jiffies, tape->insert_time))
- tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
- calculate_speeds(drive);
+ tape->insert_speed = tape->insert_size / 1024 * HZ /
+ (jiffies - tape->insert_time);
+ idetape_calculate_speeds(drive);
if (!test_and_clear_bit(IDETAPE_IGNORE_DSC, &tape->flags) &&
(stat & SEEK_STAT) == 0) {
if (postponed_rq == NULL) {
tape->dsc_polling_start = jiffies;
- tape->dsc_polling_frequency = tape->best_dsc_rw_frequency;
+ tape->dsc_poll_freq = tape->best_dsc_rw_freq;
tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
} else if (time_after(jiffies, tape->dsc_timeout)) {
printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
@@ -1788,8 +1632,10 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
} else {
return ide_do_reset(drive);
}
- } else if (time_after(jiffies, tape->dsc_polling_start + IDETAPE_DSC_MA_THRESHOLD))
- tape->dsc_polling_frequency = IDETAPE_DSC_MA_SLOW;
+ } else if (time_after(jiffies,
+ tape->dsc_polling_start +
+ IDETAPE_DSC_MA_THRESHOLD))
+ tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
idetape_postpone_request(drive);
return ide_stopped;
}
@@ -1797,20 +1643,23 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
tape->buffer_head++;
tape->postpone_cnt = 0;
pc = idetape_next_pc_storage(drive);
- idetape_create_read_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
+ idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
+ (struct idetape_bh *)rq->special);
goto out;
}
if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
tape->buffer_head++;
tape->postpone_cnt = 0;
pc = idetape_next_pc_storage(drive);
- idetape_create_write_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
+ idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
+ (struct idetape_bh *)rq->special);
goto out;
}
if (rq->cmd[0] & REQ_IDETAPE_READ_BUFFER) {
tape->postpone_cnt = 0;
pc = idetape_next_pc_storage(drive);
- idetape_create_read_buffer_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special);
+ idetape_create_read_buffer_cmd(tape, pc,
+ (struct idetape_bh *)rq->special);
goto out;
}
if (rq->cmd[0] & REQ_IDETAPE_PC1) {
@@ -1825,49 +1674,51 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
}
BUG();
out:
- return idetape_issue_packet_command(drive, pc);
+ return idetape_issue_pc(drive, pc);
}
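
The DSC section of idetape_do_request() is, in effect, a small polling state machine: poll at the per-drive best_dsc_rw_freq first, fall back to IDETAPE_DSC_MA_SLOW once IDETAPE_DSC_MA_THRESHOLD has elapsed, and reset the drive after IDETAPE_DSC_RW_TIMEOUT. The userspace sketch below models only that decision; the millisecond constants and names are invented stand-ins for the driver's jiffies-based values.

/*
 * Userspace model of the DSC polling decision above. All SKETCH_* constants
 * and names are hypothetical; they are not the real IDETAPE_DSC_* values.
 */
#include <stdio.h>

#define SKETCH_DSC_FAST_MS	50	/* stands in for best_dsc_rw_freq */
#define SKETCH_DSC_SLOW_MS	1000	/* stands in for IDETAPE_DSC_MA_SLOW */
#define SKETCH_DSC_THRESH_MS	5000	/* IDETAPE_DSC_MA_THRESHOLD stand-in */
#define SKETCH_DSC_TIMEOUT_MS	60000	/* IDETAPE_DSC_RW_TIMEOUT stand-in */

/* Returns the next poll interval in ms, or -1 when the driver would give up. */
static long dsc_next_poll_ms(unsigned long now_ms, unsigned long poll_start_ms)
{
	if (now_ms - poll_start_ms >= SKETCH_DSC_TIMEOUT_MS)
		return -1;			/* timeout: reset the drive */
	if (now_ms - poll_start_ms >= SKETCH_DSC_THRESH_MS)
		return SKETCH_DSC_SLOW_MS;	/* media access: poll slowly */
	return SKETCH_DSC_FAST_MS;		/* normal r/w: per-drive rate */
}

int main(void)
{
	unsigned long t;

	for (t = 0; t <= 70000; t += 10000)
		printf("t=%5lums -> next poll %ldms\n",
		       t, dsc_next_poll_ms(t, 0));
	return 0;
}
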
-/*
- * Pipeline related functions
- */
-static inline int idetape_pipeline_active (idetape_tape_t *tape)
+/* Pipeline related functions */
+static inline int idetape_pipeline_active(idetape_tape_t *tape)
{
int rc1, rc2;
rc1 = test_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
- rc2 = (tape->active_data_request != NULL);
+ rc2 = (tape->active_data_rq != NULL);
return rc1;
}
/*
- * idetape_kmalloc_stage uses __get_free_page to allocate a pipeline
- * stage, along with all the necessary small buffers which together make
- * a buffer of size tape->stage_size (or a bit more). We attempt to
- * combine sequential pages as much as possible.
+ * The function below uses __get_free_page to allocate a pipeline stage, along
+ * with all the necessary small buffers which together make a buffer of size
+ * tape->stage_size (or a bit more). We attempt to combine sequential pages as
+ * much as possible.
*
- * Returns a pointer to the new allocated stage, or NULL if we
- * can't (or don't want to) allocate a stage.
+ * It returns a pointer to the newly allocated stage, or NULL if we can't (or
+ * don't want to) allocate a stage.
*
- * Pipeline stages are optional and are used to increase performance.
- * If we can't allocate them, we'll manage without them.
+ * Pipeline stages are optional and are used to increase performance. If we
+ * can't allocate them, we'll manage without them.
*/
-static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full, int clear)
+static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
+ int clear)
{
idetape_stage_t *stage;
struct idetape_bh *prev_bh, *bh;
int pages = tape->pages_per_stage;
char *b_data = NULL;
- if ((stage = kmalloc(sizeof (idetape_stage_t),GFP_KERNEL)) == NULL)
+ stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
+ if (!stage)
return NULL;
stage->next = NULL;
- bh = stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
+ stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
+ bh = stage->bh;
if (bh == NULL)
goto abort;
bh->b_reqnext = NULL;
- if ((bh->b_data = (char *) __get_free_page (GFP_KERNEL)) == NULL)
+ bh->b_data = (char *) __get_free_page(GFP_KERNEL);
+ if (!bh->b_data)
goto abort;
if (clear)
memset(bh->b_data, 0, PAGE_SIZE);
@@ -1875,7 +1726,8 @@ static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full,
atomic_set(&bh->b_count, full ? bh->b_size : 0);
while (--pages) {
- if ((b_data = (char *) __get_free_page (GFP_KERNEL)) == NULL)
+ b_data = (char *) __get_free_page(GFP_KERNEL);
+ if (!b_data)
goto abort;
if (clear)
memset(b_data, 0, PAGE_SIZE);
@@ -1893,7 +1745,8 @@ static idetape_stage_t *__idetape_kmalloc_stage (idetape_tape_t *tape, int full,
continue;
}
prev_bh = bh;
- if ((bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL)) == NULL) {
+ bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
+ if (!bh) {
free_page((unsigned long) b_data);
goto abort;
}
@@ -1912,14 +1765,11 @@ abort:
return NULL;
}
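
As the comment before __idetape_kmalloc_stage() says, a stage is a chain of page-sized buffers that is unwound completely if any allocation fails. The userspace sketch below models that allocate-or-unwind pattern with malloc() and a hypothetical struct; it deliberately omits the driver's merging of physically contiguous pages and is not a substitute for the real struct idetape_bh / __get_free_page() code.

/*
 * Userspace sketch only: chain page-sized buffers until the stage is big
 * enough, and free everything already allocated if any step fails.
 */
#include <stdlib.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096

struct sketch_bh {
	struct sketch_bh *next;
	char *data;
	size_t size;
};

static void sketch_free_chain(struct sketch_bh *bh)
{
	while (bh) {
		struct sketch_bh *next = bh->next;

		free(bh->data);
		free(bh);
		bh = next;
	}
}

/* Allocate 'pages' page-sized buffers, or NULL if any allocation fails. */
static struct sketch_bh *sketch_alloc_stage(int pages, int clear)
{
	struct sketch_bh *head = NULL, *tail = NULL;

	while (pages--) {
		struct sketch_bh *bh = malloc(sizeof(*bh));

		if (!bh)
			goto abort;
		bh->data = malloc(SKETCH_PAGE_SIZE);
		if (!bh->data) {
			free(bh);
			goto abort;
		}
		if (clear)
			memset(bh->data, 0, SKETCH_PAGE_SIZE);
		bh->size = SKETCH_PAGE_SIZE;
		bh->next = NULL;
		if (tail)
			tail->next = bh;
		else
			head = bh;
		tail = bh;
	}
	return head;
abort:
	sketch_free_chain(head);
	return NULL;
}

int main(void)
{
	struct sketch_bh *stage = sketch_alloc_stage(8, 1);
	int ok = stage != NULL;

	sketch_free_chain(stage);
	return ok ? 0 : 1;
}
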
-static idetape_stage_t *idetape_kmalloc_stage (idetape_tape_t *tape)
+static idetape_stage_t *idetape_kmalloc_stage(idetape_tape_t *tape)
{
idetape_stage_t *cache_stage = tape->cache_stage;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_kmalloc_stage\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
if (tape->nr_stages >= tape->max_stages)
return NULL;
@@ -1930,7 +1780,8 @@ static idetape_stage_t *idetape_kmalloc_stage (idetape_tape_t *tape)
return __idetape_kmalloc_stage(tape, 0, 0);
}
-static int idetape_copy_stage_from_user (idetape_tape_t *tape, idetape_stage_t *stage, const char __user *buf, int n)
+static int idetape_copy_stage_from_user(idetape_tape_t *tape,
+ idetape_stage_t *stage, const char __user *buf, int n)
{
struct idetape_bh *bh = tape->bh;
int count;
@@ -1938,12 +1789,15 @@ static int idetape_copy_stage_from_user (idetape_tape_t *tape, idetape_stage_t *
while (n) {
if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in "
- "idetape_copy_stage_from_user\n");
+ printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
+ __func__);
return 1;
}
- count = min((unsigned int)(bh->b_size - atomic_read(&bh->b_count)), (unsigned int)n);
- if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf, count))
+ count = min((unsigned int)
+ (bh->b_size - atomic_read(&bh->b_count)),
+ (unsigned int)n);
+ if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
+ count))
ret = 1;
n -= count;
atomic_add(count, &bh->b_count);
@@ -1958,7 +1812,8 @@ static int idetape_copy_stage_from_user (idetape_tape_t *tape, idetape_stage_t *
return ret;
}
-static int idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf, idetape_stage_t *stage, int n)
+static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
+ idetape_stage_t *stage, int n)
{
struct idetape_bh *bh = tape->bh;
int count;
@@ -1966,8 +1821,8 @@ static int idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf, i
while (n) {
if (bh == NULL) {
- printk(KERN_ERR "ide-tape: bh == NULL in "
- "idetape_copy_stage_to_user\n");
+ printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
+ __func__);
return 1;
}
count = min(tape->b_count, n);
@@ -1978,7 +1833,8 @@ static int idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf, i
tape->b_count -= count;
buf += count;
if (!tape->b_count) {
- tape->bh = bh = bh->b_reqnext;
+ bh = bh->b_reqnext;
+ tape->bh = bh;
if (bh) {
tape->b_data = bh->b_data;
tape->b_count = atomic_read(&bh->b_count);
@@ -1988,12 +1844,12 @@ static int idetape_copy_stage_to_user (idetape_tape_t *tape, char __user *buf, i
return ret;
}
-static void idetape_init_merge_stage (idetape_tape_t *tape)
+static void idetape_init_merge_stage(idetape_tape_t *tape)
{
struct idetape_bh *bh = tape->merge_stage->bh;
-
+
tape->bh = bh;
- if (tape->chrdev_direction == idetape_direction_write)
+ if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
atomic_set(&bh->b_count, 0);
else {
tape->b_data = bh->b_data;
@@ -2001,7 +1857,7 @@ static void idetape_init_merge_stage (idetape_tape_t *tape)
}
}
-static void idetape_switch_buffers (idetape_tape_t *tape, idetape_stage_t *stage)
+static void idetape_switch_buffers(idetape_tape_t *tape, idetape_stage_t *stage)
{
struct idetape_bh *tmp;
@@ -2011,87 +1867,76 @@ static void idetape_switch_buffers (idetape_tape_t *tape, idetape_stage_t *stage
idetape_init_merge_stage(tape);
}
-/*
- * idetape_add_stage_tail adds a new stage at the end of the pipeline.
- */
-static void idetape_add_stage_tail (ide_drive_t *drive,idetape_stage_t *stage)
+/* Add a new stage at the end of the pipeline. */
+static void idetape_add_stage_tail(ide_drive_t *drive, idetape_stage_t *stage)
{
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
-
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk (KERN_INFO "ide-tape: Reached idetape_add_stage_tail\n");
-#endif /* IDETAPE_DEBUG_LOG */
- spin_lock_irqsave(&tape->spinlock, flags);
+
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
+ spin_lock_irqsave(&tape->lock, flags);
stage->next = NULL;
if (tape->last_stage != NULL)
- tape->last_stage->next=stage;
+ tape->last_stage->next = stage;
else
- tape->first_stage = tape->next_stage=stage;
+ tape->first_stage = stage;
+ tape->next_stage = stage;
tape->last_stage = stage;
if (tape->next_stage == NULL)
tape->next_stage = tape->last_stage;
tape->nr_stages++;
tape->nr_pending_stages++;
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ spin_unlock_irqrestore(&tape->lock, flags);
}
-/*
- * idetape_wait_for_request installs a completion in a pending request
- * and sleeps until it is serviced.
- *
- * The caller should ensure that the request will not be serviced
- * before we install the completion (usually by disabling interrupts).
+/* Install a completion in a pending request and sleep until it is serviced. The
+ * caller should ensure that the request will not be serviced before we install
+ * the completion (usually by disabling interrupts).
*/
-static void idetape_wait_for_request (ide_drive_t *drive, struct request *rq)
+static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
{
DECLARE_COMPLETION_ONSTACK(wait);
idetape_tape_t *tape = drive->driver_data;
if (rq == NULL || !blk_special_request(rq)) {
- printk (KERN_ERR "ide-tape: bug: Trying to sleep on non-valid request\n");
+ printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
+ " request\n");
return;
}
rq->end_io_data = &wait;
rq->end_io = blk_end_sync_rq;
- spin_unlock_irq(&tape->spinlock);
+ spin_unlock_irq(&tape->lock);
wait_for_completion(&wait);
/* The stage and its struct request have been deallocated */
- spin_lock_irq(&tape->spinlock);
+ spin_lock_irq(&tape->lock);
}
-static ide_startstop_t idetape_read_position_callback (ide_drive_t *drive)
+static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
- idetape_read_position_result_t *result;
-
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_read_position_callback\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ u8 *readpos = tape->pc->buffer;
+
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
if (!tape->pc->error) {
- result = (idetape_read_position_result_t *) tape->pc->buffer;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: BOP - %s\n",result->bop ? "Yes":"No");
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: EOP - %s\n",result->eop ? "Yes":"No");
-#endif /* IDETAPE_DEBUG_LOG */
- if (result->bpu) {
- printk(KERN_INFO "ide-tape: Block location is unknown to the tape\n");
+ debug_log(DBG_SENSE, "BOP - %s\n",
+ (readpos[0] & 0x80) ? "Yes" : "No");
+ debug_log(DBG_SENSE, "EOP - %s\n",
+ (readpos[0] & 0x40) ? "Yes" : "No");
+
+ if (readpos[0] & 0x4) {
+ printk(KERN_INFO "ide-tape: Block location is unknown"
+ "to the tape\n");
clear_bit(IDETAPE_ADDRESS_VALID, &tape->flags);
idetape_end_request(drive, 0, 0);
} else {
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: Block Location - %u\n", ntohl(result->first_block));
-#endif /* IDETAPE_DEBUG_LOG */
- tape->partition = result->partition;
- tape->first_frame_position = ntohl(result->first_block);
- tape->last_frame_position = ntohl(result->last_block);
- tape->blocks_in_buffer = result->blocks_in_buffer[2];
+ debug_log(DBG_SENSE, "Block Location - %u\n",
+ be32_to_cpu(*(u32 *)&readpos[4]));
+
+ tape->partition = readpos[1];
+ tape->first_frame =
+ be32_to_cpu(*(u32 *)&readpos[4]);
set_bit(IDETAPE_ADDRESS_VALID, &tape->flags);
idetape_end_request(drive, 1, 0);
}
@@ -2102,14 +1947,11 @@ static ide_startstop_t idetape_read_position_callback (ide_drive_t *drive)
}
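
The callback above reads the READ POSITION data directly out of the raw buffer: bit 7 of byte 0 is BOP, bit 6 is EOP, bit 2 is BPU ("block position unknown"), byte 1 is the partition, and bytes 4-7 carry the first frame as a big-endian 32-bit value. A self-contained decoding sketch, with made-up buffer contents, for reference:

/*
 * Decoding sketch (userspace, not driver code) for the READ POSITION data
 * consumed by idetape_read_position_callback(). The example buffer is
 * invented.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

int main(void)
{
	/* Example: BOP set, block position known, first frame 0x1234. */
	uint8_t readpos[20] = { 0x80, 0x00, 0, 0, 0x00, 0x00, 0x12, 0x34 };

	printf("BOP - %s\n", (readpos[0] & 0x80) ? "Yes" : "No");
	printf("EOP - %s\n", (readpos[0] & 0x40) ? "Yes" : "No");
	if (readpos[0] & 0x4)
		printf("Block location is unknown to the tape\n");
	else
		printf("partition %u, first frame %u\n",
		       readpos[1], get_be32(&readpos[4]));
	return 0;
}
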
/*
- * idetape_create_write_filemark_cmd will:
- *
- * 1. Write a filemark if write_filemark=1.
- * 2. Flush the device buffers without writing a filemark
- * if write_filemark=0.
- *
+ * Write a filemark if write_filemark=1. Flush the device buffers without
+ * writing a filemark otherwise.
*/
-static void idetape_create_write_filemark_cmd (ide_drive_t *drive, idetape_pc_t *pc,int write_filemark)
+static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
+ idetape_pc_t *pc, int write_filemark)
{
idetape_init_pc(pc);
pc->c[0] = WRITE_FILEMARKS;
@@ -2126,26 +1968,19 @@ static void idetape_create_test_unit_ready_cmd(idetape_pc_t *pc)
}
/*
- * idetape_queue_pc_tail is based on the following functions:
- *
- * ide_do_drive_cmd from ide.c
- * cdrom_queue_request and cdrom_queue_packet_command from ide-cd.c
+ * We add a special packet command request to the tail of the request queue, and
+ * wait for it to be serviced. This is not to be called from within the request
+ * handling part of the driver! Here we allocate data on the stack, and it is
+ * valid until the request is finished. This is not the case for the bottom part
+ * of the driver, where we are always leaving the functions to wait for an
+ * interrupt or a timer event.
*
- * We add a special packet command request to the tail of the request
- * queue, and wait for it to be serviced.
- *
- * This is not to be called from within the request handling part
- * of the driver ! We allocate here data in the stack, and it is valid
- * until the request is finished. This is not the case for the bottom
- * part of the driver, where we are always leaving the functions to wait
- * for an interrupt or a timer event.
- *
- * From the bottom part of the driver, we should allocate safe memory
- * using idetape_next_pc_storage and idetape_next_rq_storage, and add
- * the request to the request list without waiting for it to be serviced !
- * In that case, we usually use idetape_queue_pc_head.
+ * From the bottom part of the driver, we should allocate safe memory using
+ * idetape_next_pc_storage() and ide_tape_next_rq_storage(), and add the request
+ * to the request list without waiting for it to be serviced! In that case, we
+ * usually use idetape_queue_pc_head().
*/
-static int __idetape_queue_pc_tail (ide_drive_t *drive, idetape_pc_t *pc)
+static int __idetape_queue_pc_tail(ide_drive_t *drive, idetape_pc_t *pc)
{
struct ide_tape_obj *tape = drive->driver_data;
struct request rq;
@@ -2156,7 +1991,8 @@ static int __idetape_queue_pc_tail (ide_drive_t *drive, idetape_pc_t *pc)
return ide_do_drive_cmd(drive, &rq, ide_wait);
}
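
The comment above boils down to: build the request on the caller's stack, queue it, and sleep until a worker completes it, which is why it must not be used from the interrupt-driven bottom half of the driver. A rough userspace analogue of that queue-and-wait pattern, using pthreads instead of the block layer and ide_wait, is sketched below; every name in it is invented for illustration.

/*
 * Userspace analogue only: the "request" lives on the caller's stack and the
 * caller blocks until a worker marks it done.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct sketch_cmd {
	pthread_mutex_t lock;
	pthread_cond_t done_cond;
	int done;
};

static void *sketch_worker(void *arg)
{
	struct sketch_cmd *cmd = arg;

	sleep(1);			/* pretend to service the command */
	pthread_mutex_lock(&cmd->lock);
	cmd->done = 1;			/* analogous to completing the rq */
	pthread_cond_signal(&cmd->done_cond);
	pthread_mutex_unlock(&cmd->lock);
	return NULL;
}

int main(void)
{
	struct sketch_cmd cmd = { .done = 0 };	/* on the caller's stack */
	pthread_t tid;

	pthread_mutex_init(&cmd.lock, NULL);
	pthread_cond_init(&cmd.done_cond, NULL);
	pthread_create(&tid, NULL, sketch_worker, &cmd);

	pthread_mutex_lock(&cmd.lock);
	while (!cmd.done)		/* sleep until it is serviced */
		pthread_cond_wait(&cmd.done_cond, &cmd.lock);
	pthread_mutex_unlock(&cmd.lock);

	pthread_join(tid, NULL);
	printf("command serviced\n");
	return 0;
}
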
-static void idetape_create_load_unload_cmd (ide_drive_t *drive, idetape_pc_t *pc,int cmd)
+static void idetape_create_load_unload_cmd(ide_drive_t *drive, idetape_pc_t *pc,
+ int cmd)
{
idetape_init_pc(pc);
pc->c[0] = START_STOP;
@@ -2171,9 +2007,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
idetape_pc_t pc;
int load_attempted = 0;
- /*
- * Wait for the tape to become ready
- */
+ /* Wait for the tape to become ready */
set_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags);
timeout += jiffies;
while (time_before(jiffies, timeout)) {
@@ -2181,10 +2015,12 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
if (!__idetape_queue_pc_tail(drive, &pc))
return 0;
if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
- || (tape->asc == 0x3A)) { /* no media */
+ || (tape->asc == 0x3A)) {
+ /* no media */
if (load_attempted)
return -ENOMEDIUM;
- idetape_create_load_unload_cmd(drive, &pc, IDETAPE_LU_LOAD_MASK);
+ idetape_create_load_unload_cmd(drive, &pc,
+ IDETAPE_LU_LOAD_MASK);
__idetape_queue_pc_tail(drive, &pc);
load_attempted = 1;
/* not about to be ready */
@@ -2196,24 +2032,25 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
return -EIO;
}
-static int idetape_queue_pc_tail (ide_drive_t *drive,idetape_pc_t *pc)
+static int idetape_queue_pc_tail(ide_drive_t *drive, idetape_pc_t *pc)
{
return __idetape_queue_pc_tail(drive, pc);
}
-static int idetape_flush_tape_buffers (ide_drive_t *drive)
+static int idetape_flush_tape_buffers(ide_drive_t *drive)
{
idetape_pc_t pc;
int rc;
idetape_create_write_filemark_cmd(drive, &pc, 0);
- if ((rc = idetape_queue_pc_tail(drive, &pc)))
+ rc = idetape_queue_pc_tail(drive, &pc);
+ if (rc)
return rc;
idetape_wait_ready(drive, 60 * 5 * HZ);
return 0;
}
-static void idetape_create_read_position_cmd (idetape_pc_t *pc)
+static void idetape_create_read_position_cmd(idetape_pc_t *pc)
{
idetape_init_pc(pc);
pc->c[0] = READ_POSITION;
@@ -2221,25 +2058,23 @@ static void idetape_create_read_position_cmd (idetape_pc_t *pc)
pc->callback = &idetape_read_position_callback;
}
-static int idetape_read_position (ide_drive_t *drive)
+static int idetape_read_position(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t pc;
int position;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_read_position\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
idetape_create_read_position_cmd(&pc);
if (idetape_queue_pc_tail(drive, &pc))
return -1;
- position = tape->first_frame_position;
+ position = tape->first_frame;
return position;
}
-static void idetape_create_locate_cmd (ide_drive_t *drive, idetape_pc_t *pc, unsigned int block, u8 partition, int skip)
+static void idetape_create_locate_cmd(ide_drive_t *drive, idetape_pc_t *pc,
+ unsigned int block, u8 partition, int skip)
{
idetape_init_pc(pc);
pc->c[0] = POSITION_TO_ELEMENT;
@@ -2250,7 +2085,8 @@ static void idetape_create_locate_cmd (ide_drive_t *drive, idetape_pc_t *pc, uns
pc->callback = &idetape_pc_callback;
}
-static int idetape_create_prevent_cmd (ide_drive_t *drive, idetape_pc_t *pc, int prevent)
+static int idetape_create_prevent_cmd(ide_drive_t *drive, idetape_pc_t *pc,
+ int prevent)
{
idetape_tape_t *tape = drive->driver_data;
@@ -2265,17 +2101,17 @@ static int idetape_create_prevent_cmd (ide_drive_t *drive, idetape_pc_t *pc, int
return 1;
}
-static int __idetape_discard_read_pipeline (ide_drive_t *drive)
+static int __idetape_discard_read_pipeline(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
int cnt;
- if (tape->chrdev_direction != idetape_direction_read)
+ if (tape->chrdev_dir != IDETAPE_DIR_READ)
return 0;
/* Remove merge stage. */
- cnt = tape->merge_stage_size / tape->tape_block_size;
+ cnt = tape->merge_stage_size / tape->blk_size;
if (test_and_clear_bit(IDETAPE_FILEMARK, &tape->flags))
++cnt; /* Filemarks count as 1 sector */
tape->merge_stage_size = 0;
@@ -2286,22 +2122,22 @@ static int __idetape_discard_read_pipeline (ide_drive_t *drive)
/* Clear pipeline flags. */
clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
- tape->chrdev_direction = idetape_direction_none;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
/* Remove pipeline stages. */
if (tape->first_stage == NULL)
return 0;
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
tape->next_stage = NULL;
if (idetape_pipeline_active(tape))
- idetape_wait_for_request(drive, tape->active_data_request);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ idetape_wait_for_request(drive, tape->active_data_rq);
+ spin_unlock_irqrestore(&tape->lock, flags);
while (tape->first_stage != NULL) {
struct request *rq_ptr = &tape->first_stage->rq;
- cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
+ cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
++cnt;
idetape_remove_stage_head(drive);
@@ -2312,21 +2148,19 @@ static int __idetape_discard_read_pipeline (ide_drive_t *drive)
}
/*
- * idetape_position_tape positions the tape to the requested block
- * using the LOCATE packet command. A READ POSITION command is then
- * issued to check where we are positioned.
- *
- * Like all higher level operations, we queue the commands at the tail
- * of the request queue and wait for their completion.
- *
+ * Position the tape to the requested block using the LOCATE packet command.
+ * A READ POSITION command is then issued to check where we are positioned. Like
+ * all higher level operations, we queue the commands at the tail of the request
+ * queue and wait for their completion.
*/
-static int idetape_position_tape (ide_drive_t *drive, unsigned int block, u8 partition, int skip)
+static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
+ u8 partition, int skip)
{
idetape_tape_t *tape = drive->driver_data;
int retval;
idetape_pc_t pc;
- if (tape->chrdev_direction == idetape_direction_read)
+ if (tape->chrdev_dir == IDETAPE_DIR_READ)
__idetape_discard_read_pipeline(drive);
idetape_wait_ready(drive, 60 * 5 * HZ);
idetape_create_locate_cmd(drive, &pc, block, partition, skip);
@@ -2338,7 +2172,8 @@ static int idetape_position_tape (ide_drive_t *drive, unsigned int block, u8 par
return (idetape_queue_pc_tail(drive, &pc));
}
-static void idetape_discard_read_pipeline (ide_drive_t *drive, int restore_position)
+static void idetape_discard_read_pipeline(ide_drive_t *drive,
+ int restore_position)
{
idetape_tape_t *tape = drive->driver_data;
int cnt;
@@ -2349,35 +2184,37 @@ static void idetape_discard_read_pipeline (ide_drive_t *drive, int restore_posit
position = idetape_read_position(drive);
seek = position > cnt ? position - cnt : 0;
if (idetape_position_tape(drive, seek, 0, 0)) {
- printk(KERN_INFO "ide-tape: %s: position_tape failed in discard_pipeline()\n", tape->name);
+ printk(KERN_INFO "ide-tape: %s: position_tape failed in"
+ " discard_pipeline()\n", tape->name);
return;
}
}
}
/*
- * idetape_queue_rw_tail generates a read/write request for the block
- * device interface and wait for it to be serviced.
+ * Generate a read/write request for the block device interface and wait for it
+ * to be serviced.
*/
-static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks, struct idetape_bh *bh)
+static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
+ struct idetape_bh *bh)
{
idetape_tape_t *tape = drive->driver_data;
struct request rq;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: idetape_queue_rw_tail: cmd=%d\n",cmd);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
+
if (idetape_pipeline_active(tape)) {
- printk(KERN_ERR "ide-tape: bug: the pipeline is active in idetape_queue_rw_tail\n");
+ printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
+ __func__);
return (0);
}
idetape_init_rq(&rq, cmd);
rq.rq_disk = tape->disk;
rq.special = (void *)bh;
- rq.sector = tape->first_frame_position;
- rq.nr_sectors = rq.current_nr_sectors = blocks;
+ rq.sector = tape->first_frame;
+ rq.nr_sectors = blocks;
+ rq.current_nr_sectors = blocks;
(void) ide_do_drive_cmd(drive, &rq, ide_wait);
if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
@@ -2387,14 +2224,11 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks, struct
idetape_init_merge_stage(tape);
if (rq.errors == IDETAPE_ERROR_GENERAL)
return -EIO;
- return (tape->tape_block_size * (blocks-rq.current_nr_sectors));
+ return (tape->blk_size * (blocks-rq.current_nr_sectors));
}
-/*
- * idetape_insert_pipeline_into_queue is used to start servicing the
- * pipeline stages, starting from tape->next_stage.
- */
-static void idetape_insert_pipeline_into_queue (ide_drive_t *drive)
+/* Start servicing the pipeline stages, beginning with tape->next_stage. */
+static void idetape_plug_pipeline(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
@@ -2403,19 +2237,20 @@ static void idetape_insert_pipeline_into_queue (ide_drive_t *drive)
if (!idetape_pipeline_active(tape)) {
set_bit(IDETAPE_PIPELINE_ACTIVE, &tape->flags);
idetape_activate_next_stage(drive);
- (void) ide_do_drive_cmd(drive, tape->active_data_request, ide_end);
+ (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
}
}
-static void idetape_create_inquiry_cmd (idetape_pc_t *pc)
+static void idetape_create_inquiry_cmd(idetape_pc_t *pc)
{
idetape_init_pc(pc);
pc->c[0] = INQUIRY;
- pc->c[4] = pc->request_transfer = 254;
+ pc->c[4] = 254;
+ pc->request_transfer = 254;
pc->callback = &idetape_pc_callback;
}
-static void idetape_create_rewind_cmd (ide_drive_t *drive, idetape_pc_t *pc)
+static void idetape_create_rewind_cmd(ide_drive_t *drive, idetape_pc_t *pc)
{
idetape_init_pc(pc);
pc->c[0] = REZERO_UNIT;
@@ -2423,7 +2258,7 @@ static void idetape_create_rewind_cmd (ide_drive_t *drive, idetape_pc_t *pc)
pc->callback = &idetape_pc_callback;
}
-static void idetape_create_erase_cmd (idetape_pc_t *pc)
+static void idetape_create_erase_cmd(idetape_pc_t *pc)
{
idetape_init_pc(pc);
pc->c[0] = ERASE;
@@ -2432,7 +2267,7 @@ static void idetape_create_erase_cmd (idetape_pc_t *pc)
pc->callback = &idetape_pc_callback;
}
-static void idetape_create_space_cmd (idetape_pc_t *pc,int count, u8 cmd)
+static void idetape_create_space_cmd(idetape_pc_t *pc, int count, u8 cmd)
{
idetape_init_pc(pc);
pc->c[0] = SPACE;
@@ -2442,89 +2277,87 @@ static void idetape_create_space_cmd (idetape_pc_t *pc,int count, u8 cmd)
pc->callback = &idetape_pc_callback;
}
-static void idetape_wait_first_stage (ide_drive_t *drive)
+static void idetape_wait_first_stage(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
if (tape->first_stage == NULL)
return;
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
if (tape->active_stage == tape->first_stage)
- idetape_wait_for_request(drive, tape->active_data_request);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ idetape_wait_for_request(drive, tape->active_data_rq);
+ spin_unlock_irqrestore(&tape->lock, flags);
}
/*
- * idetape_add_chrdev_write_request tries to add a character device
- * originated write request to our pipeline. In case we don't succeed,
- * we revert to non-pipelined operation mode for this request.
+ * Try to add a character device originated write request to our pipeline. In
+ * case we don't succeed, we revert to non-pipelined operation mode for this
+ * request. In order to accomplish that, we
*
- * 1. Try to allocate a new pipeline stage.
- * 2. If we can't, wait for more and more requests to be serviced
- * and try again each time.
- * 3. If we still can't allocate a stage, fallback to
- * non-pipelined operation mode for this request.
+ * 1. Try to allocate a new pipeline stage.
+ * 2. If we can't, wait for more and more requests to be serviced and try again
+ * each time.
+ * 3. If we still can't allocate a stage, fall back to non-pipelined operation
+ * mode for this request.
*/
-static int idetape_add_chrdev_write_request (ide_drive_t *drive, int blocks)
+static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
{
idetape_tape_t *tape = drive->driver_data;
idetape_stage_t *new_stage;
unsigned long flags;
struct request *rq;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 3)
- printk(KERN_INFO "ide-tape: Reached idetape_add_chrdev_write_request\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
- /*
- * Attempt to allocate a new stage.
- * Pay special attention to possible race conditions.
- */
+ /* Attempt to allocate a new stage. Beware possible race conditions. */
while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
if (idetape_pipeline_active(tape)) {
- idetape_wait_for_request(drive, tape->active_data_request);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ idetape_wait_for_request(drive, tape->active_data_rq);
+ spin_unlock_irqrestore(&tape->lock, flags);
} else {
- spin_unlock_irqrestore(&tape->spinlock, flags);
- idetape_insert_pipeline_into_queue(drive);
+ spin_unlock_irqrestore(&tape->lock, flags);
+ idetape_plug_pipeline(drive);
if (idetape_pipeline_active(tape))
continue;
/*
- * Linux is short on memory. Fallback to
- * non-pipelined operation mode for this request.
+ * The machine is short on memory. Fall back to non-
+ * pipelined operation mode for this request.
*/
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks, tape->merge_stage->bh);
+ return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
+ blocks, tape->merge_stage->bh);
}
}
rq = &new_stage->rq;
idetape_init_rq(rq, REQ_IDETAPE_WRITE);
/* Doesn't actually matter - We always assume sequential access */
- rq->sector = tape->first_frame_position;
- rq->nr_sectors = rq->current_nr_sectors = blocks;
+ rq->sector = tape->first_frame;
+ rq->current_nr_sectors = blocks;
+ rq->nr_sectors = blocks;
idetape_switch_buffers(tape, new_stage);
idetape_add_stage_tail(drive, new_stage);
tape->pipeline_head++;
- calculate_speeds(drive);
+ idetape_calculate_speeds(drive);
/*
- * Estimate whether the tape has stopped writing by checking
- * if our write pipeline is currently empty. If we are not
- * writing anymore, wait for the pipeline to be full enough
- * (90%) before starting to service requests, so that we will
- * be able to keep up with the higher speeds of the tape.
+ * Estimate whether the tape has stopped writing by checking if our
+ * write pipeline is currently empty. If we are not writing anymore,
+ * wait for the pipeline to be almost completely full (90%) before
+ * starting to service requests, so that we will be able to keep up with
+ * the higher speeds of the tape.
*/
if (!idetape_pipeline_active(tape)) {
if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
- tape->nr_stages >= tape->max_stages - tape->uncontrolled_pipeline_head_speed * 3 * 1024 / tape->tape_block_size) {
+ tape->nr_stages >= tape->max_stages -
+ tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
+ tape->blk_size) {
tape->measure_insert_time = 1;
tape->insert_time = jiffies;
tape->insert_size = 0;
tape->insert_speed = 0;
- idetape_insert_pipeline_into_queue(drive);
+ idetape_plug_pipeline(drive);
}
}
if (test_and_clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags))
@@ -2534,31 +2367,32 @@ static int idetape_add_chrdev_write_request (ide_drive_t *drive, int blocks)
}
/*
- * idetape_wait_for_pipeline will wait until all pending pipeline
- * requests are serviced. Typically called on device close.
+ * Wait until all pending pipeline requests are serviced. Typically called on
+ * device close.
*/
-static void idetape_wait_for_pipeline (ide_drive_t *drive)
+static void idetape_wait_for_pipeline(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
while (tape->next_stage || idetape_pipeline_active(tape)) {
- idetape_insert_pipeline_into_queue(drive);
- spin_lock_irqsave(&tape->spinlock, flags);
+ idetape_plug_pipeline(drive);
+ spin_lock_irqsave(&tape->lock, flags);
if (idetape_pipeline_active(tape))
- idetape_wait_for_request(drive, tape->active_data_request);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ idetape_wait_for_request(drive, tape->active_data_rq);
+ spin_unlock_irqrestore(&tape->lock, flags);
}
}
-static void idetape_empty_write_pipeline (ide_drive_t *drive)
+static void idetape_empty_write_pipeline(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
int blocks, min;
struct idetape_bh *bh;
- if (tape->chrdev_direction != idetape_direction_write) {
- printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline, but we are not writing.\n");
+ if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
+ printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
+ " but we are not writing.\n");
return;
}
if (tape->merge_stage_size > tape->stage_size) {
@@ -2566,12 +2400,13 @@ static void idetape_empty_write_pipeline (ide_drive_t *drive)
tape->merge_stage_size = tape->stage_size;
}
if (tape->merge_stage_size) {
- blocks = tape->merge_stage_size / tape->tape_block_size;
- if (tape->merge_stage_size % tape->tape_block_size) {
+ blocks = tape->merge_stage_size / tape->blk_size;
+ if (tape->merge_stage_size % tape->blk_size) {
unsigned int i;
blocks++;
- i = tape->tape_block_size - tape->merge_stage_size % tape->tape_block_size;
+ i = tape->blk_size - tape->merge_stage_size %
+ tape->blk_size;
bh = tape->bh->b_reqnext;
while (bh) {
atomic_set(&bh->b_count, 0);
@@ -2580,12 +2415,14 @@ static void idetape_empty_write_pipeline (ide_drive_t *drive)
bh = tape->bh;
while (i) {
if (bh == NULL) {
-
- printk(KERN_INFO "ide-tape: bug, bh NULL\n");
+ printk(KERN_INFO "ide-tape: bug,"
+ " bh NULL\n");
break;
}
- min = min(i, (unsigned int)(bh->b_size - atomic_read(&bh->b_count)));
- memset(bh->b_data + atomic_read(&bh->b_count), 0, min);
+ min = min(i, (unsigned int)(bh->b_size -
+ atomic_read(&bh->b_count)));
+ memset(bh->b_data + atomic_read(&bh->b_count),
+ 0, min);
atomic_add(min, &bh->b_count);
i -= min;
bh = bh->b_reqnext;
@@ -2600,13 +2437,13 @@ static void idetape_empty_write_pipeline (ide_drive_t *drive)
tape->merge_stage = NULL;
}
clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
- tape->chrdev_direction = idetape_direction_none;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
/*
- * On the next backup, perform the feedback loop again.
- * (I don't want to keep sense information between backups,
- * as some systems are constantly on, and the system load
- * can be totally different on the next backup).
+ * On the next backup, perform the feedback loop again. (I don't want to
+ * keep sense information between backups, as some systems are
+ * constantly on, and the system load can be totally different on the
+ * next backup).
*/
tape->max_stages = tape->min_pipeline;
if (tape->first_stage != NULL ||
@@ -2621,21 +2458,25 @@ static void idetape_empty_write_pipeline (ide_drive_t *drive)
}
}
-static void idetape_restart_speed_control (ide_drive_t *drive)
+static void idetape_restart_speed_control(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
tape->restart_speed_control_req = 0;
tape->pipeline_head = 0;
- tape->controlled_last_pipeline_head = tape->uncontrolled_last_pipeline_head = 0;
- tape->controlled_previous_pipeline_head = tape->uncontrolled_previous_pipeline_head = 0;
- tape->pipeline_head_speed = tape->controlled_pipeline_head_speed = 5000;
+ tape->controlled_last_pipeline_head = 0;
+ tape->controlled_previous_pipeline_head = 0;
+ tape->uncontrolled_previous_pipeline_head = 0;
+ tape->controlled_pipeline_head_speed = 5000;
+ tape->pipeline_head_speed = 5000;
tape->uncontrolled_pipeline_head_speed = 0;
- tape->controlled_pipeline_head_time = tape->uncontrolled_pipeline_head_time = jiffies;
- tape->controlled_previous_head_time = tape->uncontrolled_previous_head_time = jiffies;
+ tape->controlled_pipeline_head_time =
+ tape->uncontrolled_pipeline_head_time = jiffies;
+ tape->controlled_previous_head_time =
+ tape->uncontrolled_previous_head_time = jiffies;
}
-static int idetape_initiate_read (ide_drive_t *drive, int max_stages)
+static int idetape_init_read(ide_drive_t *drive, int max_stages)
{
idetape_tape_t *tape = drive->driver_data;
idetape_stage_t *new_stage;
@@ -2644,32 +2485,35 @@ static int idetape_initiate_read (ide_drive_t *drive, int max_stages)
u16 blocks = *(u16 *)&tape->caps[12];
/* Initialize read operation */
- if (tape->chrdev_direction != idetape_direction_read) {
- if (tape->chrdev_direction == idetape_direction_write) {
+ if (tape->chrdev_dir != IDETAPE_DIR_READ) {
+ if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
idetape_empty_write_pipeline(drive);
idetape_flush_tape_buffers(drive);
}
if (tape->merge_stage || tape->merge_stage_size) {
- printk (KERN_ERR "ide-tape: merge_stage_size should be 0 now\n");
+ printk(KERN_ERR "ide-tape: merge_stage_size should be"
+ " 0 now\n");
tape->merge_stage_size = 0;
}
- if ((tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0)) == NULL)
+ tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
+ if (!tape->merge_stage)
return -ENOMEM;
- tape->chrdev_direction = idetape_direction_read;
+ tape->chrdev_dir = IDETAPE_DIR_READ;
/*
- * Issue a read 0 command to ensure that DSC handshake
- * is switched from completion mode to buffer available
- * mode.
- * No point in issuing this if DSC overlap isn't supported,
- * some drives (Seagate STT3401A) will return an error.
+ * Issue a read 0 command to ensure that DSC handshake is
+ * switched from completion mode to buffer available mode.
+ * No point in issuing this if DSC overlap isn't supported, some
+ * drives (Seagate STT3401A) will return an error.
*/
if (drive->dsc_overlap) {
- bytes_read = idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, 0, tape->merge_stage->bh);
+ bytes_read = idetape_queue_rw_tail(drive,
+ REQ_IDETAPE_READ, 0,
+ tape->merge_stage->bh);
if (bytes_read < 0) {
__idetape_kfree_stage(tape->merge_stage);
tape->merge_stage = NULL;
- tape->chrdev_direction = idetape_direction_none;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
return bytes_read;
}
}
@@ -2677,8 +2521,9 @@ static int idetape_initiate_read (ide_drive_t *drive, int max_stages)
if (tape->restart_speed_control_req)
idetape_restart_speed_control(drive);
idetape_init_rq(&rq, REQ_IDETAPE_READ);
- rq.sector = tape->first_frame_position;
- rq.nr_sectors = rq.current_nr_sectors = blocks;
+ rq.sector = tape->first_frame;
+ rq.nr_sectors = blocks;
+ rq.current_nr_sectors = blocks;
if (!test_bit(IDETAPE_PIPELINE_ERROR, &tape->flags) &&
tape->nr_stages < max_stages) {
new_stage = idetape_kmalloc_stage(tape);
@@ -2696,50 +2541,43 @@ static int idetape_initiate_read (ide_drive_t *drive, int max_stages)
tape->insert_time = jiffies;
tape->insert_size = 0;
tape->insert_speed = 0;
- idetape_insert_pipeline_into_queue(drive);
+ idetape_plug_pipeline(drive);
}
}
return 0;
}
/*
- * idetape_add_chrdev_read_request is called from idetape_chrdev_read
- * to service a character device read request and add read-ahead
- * requests to our pipeline.
+ * Called from idetape_chrdev_read() to service a character device read request
+ * and add read-ahead requests to our pipeline.
*/
-static int idetape_add_chrdev_read_request (ide_drive_t *drive,int blocks)
+static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
{
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
struct request *rq_ptr;
int bytes_read;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_add_chrdev_read_request, %d blocks\n", blocks);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
- /*
- * If we are at a filemark, return a read length of 0
- */
+ /* If we are at a filemark, return a read length of 0 */
if (test_bit(IDETAPE_FILEMARK, &tape->flags))
return 0;
- /*
- * Wait for the next block to be available at the head
- * of the pipeline
- */
- idetape_initiate_read(drive, tape->max_stages);
+ /* Wait for the next block to reach the head of the pipeline. */
+ idetape_init_read(drive, tape->max_stages);
if (tape->first_stage == NULL) {
if (test_bit(IDETAPE_PIPELINE_ERROR, &tape->flags))
return 0;
- return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks, tape->merge_stage->bh);
+ return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
+ tape->merge_stage->bh);
}
idetape_wait_first_stage(drive);
rq_ptr = &tape->first_stage->rq;
- bytes_read = tape->tape_block_size * (rq_ptr->nr_sectors - rq_ptr->current_nr_sectors);
- rq_ptr->nr_sectors = rq_ptr->current_nr_sectors = 0;
-
+ bytes_read = tape->blk_size * (rq_ptr->nr_sectors -
+ rq_ptr->current_nr_sectors);
+ rq_ptr->nr_sectors = 0;
+ rq_ptr->current_nr_sectors = 0;
if (rq_ptr->errors == IDETAPE_ERROR_EOD)
return 0;
@@ -2747,43 +2585,46 @@ static int idetape_add_chrdev_read_request (ide_drive_t *drive,int blocks)
idetape_switch_buffers(tape, tape->first_stage);
if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
set_bit(IDETAPE_FILEMARK, &tape->flags);
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
idetape_remove_stage_head(drive);
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ spin_unlock_irqrestore(&tape->lock, flags);
tape->pipeline_head++;
- calculate_speeds(drive);
+ idetape_calculate_speeds(drive);
}
- if (bytes_read > blocks * tape->tape_block_size) {
- printk(KERN_ERR "ide-tape: bug: trying to return more bytes than requested\n");
- bytes_read = blocks * tape->tape_block_size;
+ if (bytes_read > blocks * tape->blk_size) {
+ printk(KERN_ERR "ide-tape: bug: trying to return more bytes"
+ " than requested\n");
+ bytes_read = blocks * tape->blk_size;
}
return (bytes_read);
}
-static void idetape_pad_zeros (ide_drive_t *drive, int bcount)
+static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
{
idetape_tape_t *tape = drive->driver_data;
struct idetape_bh *bh;
int blocks;
-
+
while (bcount) {
unsigned int count;
bh = tape->merge_stage->bh;
count = min(tape->stage_size, bcount);
bcount -= count;
- blocks = count / tape->tape_block_size;
+ blocks = count / tape->blk_size;
while (count) {
- atomic_set(&bh->b_count, min(count, (unsigned int)bh->b_size));
+ atomic_set(&bh->b_count,
+ min(count, (unsigned int)bh->b_size));
memset(bh->b_data, 0, atomic_read(&bh->b_count));
count -= atomic_read(&bh->b_count);
bh = bh->b_reqnext;
}
- idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks, tape->merge_stage->bh);
+ idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
+ tape->merge_stage->bh);
}
}
-static int idetape_pipeline_size (ide_drive_t *drive)
+static int idetape_pipeline_size(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
idetape_stage_t *stage;
@@ -2794,9 +2635,10 @@ static int idetape_pipeline_size (ide_drive_t *drive)
stage = tape->first_stage;
while (stage != NULL) {
rq = &stage->rq;
- size += tape->tape_block_size * (rq->nr_sectors-rq->current_nr_sectors);
+ size += tape->blk_size * (rq->nr_sectors -
+ rq->current_nr_sectors);
if (rq->errors == IDETAPE_ERROR_FILEMARK)
- size += tape->tape_block_size;
+ size += tape->blk_size;
stage = stage->next;
}
size += tape->merge_stage_size;
@@ -2804,20 +2646,18 @@ static int idetape_pipeline_size (ide_drive_t *drive)
}
/*
- * Rewinds the tape to the Beginning Of the current Partition (BOP).
- *
- * We currently support only one partition.
- */
-static int idetape_rewind_tape (ide_drive_t *drive)
+ * Rewinds the tape to the Beginning Of the current Partition (BOP). We
+ * currently support only one partition.
+ */
+static int idetape_rewind_tape(ide_drive_t *drive)
{
int retval;
idetape_pc_t pc;
-#if IDETAPE_DEBUG_LOG
- idetape_tape_t *tape = drive->driver_data;
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: Reached idetape_rewind_tape\n");
-#endif /* IDETAPE_DEBUG_LOG */
-
+ idetape_tape_t *tape;
+ tape = drive->driver_data;
+
+ debug_log(DBG_SENSE, "Enter %s\n", __func__);
+
idetape_create_rewind_cmd(drive, &pc);
retval = idetape_queue_pc_tail(drive, &pc);
if (retval)
@@ -2830,14 +2670,9 @@ static int idetape_rewind_tape (ide_drive_t *drive)
return 0;
}
-/*
- * Our special ide-tape ioctl's.
- *
- * Currently there aren't any ioctl's.
- * mtio.h compatible commands should be issued to the character device
- * interface.
- */
-static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+/* mtio.h compatible commands should be issued to the chrdev interface. */
+static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
+ unsigned long arg)
{
idetape_tape_t *tape = drive->driver_data;
void __user *argp = (void __user *)arg;
@@ -2848,44 +2683,41 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd, unsigned l
int nr_stages;
} config;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 4)
- printk(KERN_INFO "ide-tape: Reached idetape_blkdev_ioctl\n");
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_PROCS, "Enter %s\n", __func__);
+
switch (cmd) {
- case 0x0340:
- if (copy_from_user(&config, argp, sizeof(config)))
- return -EFAULT;
- tape->best_dsc_rw_frequency = config.dsc_rw_frequency;
- tape->max_stages = config.nr_stages;
- break;
- case 0x0350:
- config.dsc_rw_frequency = (int) tape->best_dsc_rw_frequency;
- config.nr_stages = tape->max_stages;
- if (copy_to_user(argp, &config, sizeof(config)))
- return -EFAULT;
- break;
- default:
- return -EIO;
+ case 0x0340:
+ if (copy_from_user(&config, argp, sizeof(config)))
+ return -EFAULT;
+ tape->best_dsc_rw_freq = config.dsc_rw_frequency;
+ tape->max_stages = config.nr_stages;
+ break;
+ case 0x0350:
+ config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
+ config.nr_stages = tape->max_stages;
+ if (copy_to_user(argp, &config, sizeof(config)))
+ return -EFAULT;
+ break;
+ default:
+ return -EIO;
}
return 0;
}
/*
- * idetape_space_over_filemarks is now a bit more complicated than just
- * passing the command to the tape since we may have crossed some
- * filemarks during our pipelined read-ahead mode.
- *
- * As a minor side effect, the pipeline enables us to support MTFSFM when
- * the filemark is in our internal pipeline even if the tape doesn't
- * support spacing over filemarks in the reverse direction.
+ * The function below is now a bit more complicated than just passing the
+ * command to the tape since we may have crossed some filemarks during our
+ * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
+ * support MTFSFM when the filemark is in our internal pipeline even if the tape
+ * doesn't support spacing over filemarks in the reverse direction.
*/
-static int idetape_space_over_filemarks (ide_drive_t *drive,short mt_op,int mt_count)
+static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
+ int mt_count)
{
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t pc;
unsigned long flags;
- int retval,count=0;
+ int retval, count = 0;
int sprev = !!(tape->caps[4] & 0x20);
if (mt_count == 0)
@@ -2893,14 +2725,11 @@ static int idetape_space_over_filemarks (ide_drive_t *drive,short mt_op,int mt_c
if (MTBSF == mt_op || MTBSFM == mt_op) {
if (!sprev)
return -EIO;
- mt_count = - mt_count;
+ mt_count = -mt_count;
}
- if (tape->chrdev_direction == idetape_direction_read) {
- /*
- * We have a read-ahead buffer. Scan it for crossed
- * filemarks.
- */
+ if (tape->chrdev_dir == IDETAPE_DIR_READ) {
+ /* It's a read-ahead buffer; scan it for crossed filemarks. */
tape->merge_stage_size = 0;
if (test_and_clear_bit(IDETAPE_FILEMARK, &tape->flags))
++count;
@@ -2910,24 +2739,27 @@ static int idetape_space_over_filemarks (ide_drive_t *drive,short mt_op,int mt_c
set_bit(IDETAPE_FILEMARK, &tape->flags);
return 0;
}
- spin_lock_irqsave(&tape->spinlock, flags);
+ spin_lock_irqsave(&tape->lock, flags);
if (tape->first_stage == tape->active_stage) {
/*
- * We have reached the active stage in the read pipeline.
- * There is no point in allowing the drive to continue
- * reading any farther, so we stop the pipeline.
+ * We have reached the active stage in the read
+ * pipeline. There is no point in allowing the
+ * drive to continue reading any farther, so we
+ * stop the pipeline.
*
- * This section should be moved to a separate subroutine,
- * because a similar function is performed in
- * __idetape_discard_read_pipeline(), for example.
+ * This section should be moved to a separate
+ * subroutine because similar operations are
+ * done in __idetape_discard_read_pipeline(),
+ * for example.
*/
tape->next_stage = NULL;
- spin_unlock_irqrestore(&tape->spinlock, flags);
+ spin_unlock_irqrestore(&tape->lock, flags);
idetape_wait_first_stage(drive);
tape->next_stage = tape->first_stage->next;
} else
- spin_unlock_irqrestore(&tape->spinlock, flags);
- if (tape->first_stage->rq.errors == IDETAPE_ERROR_FILEMARK)
+ spin_unlock_irqrestore(&tape->lock, flags);
+ if (tape->first_stage->rq.errors ==
+ IDETAPE_ERROR_FILEMARK)
++count;
idetape_remove_stage_head(drive);
}
@@ -2935,73 +2767,74 @@ static int idetape_space_over_filemarks (ide_drive_t *drive,short mt_op,int mt_c
}
/*
- * The filemark was not found in our internal pipeline.
- * Now we can issue the space command.
+ * The filemark was not found in our internal pipeline; now we can issue
+ * the space command.
*/
switch (mt_op) {
- case MTFSF:
- case MTBSF:
- idetape_create_space_cmd(&pc,mt_count-count,IDETAPE_SPACE_OVER_FILEMARK);
- return (idetape_queue_pc_tail(drive, &pc));
- case MTFSFM:
- case MTBSFM:
- if (!sprev)
- return (-EIO);
- retval = idetape_space_over_filemarks(drive, MTFSF, mt_count-count);
- if (retval) return (retval);
- count = (MTBSFM == mt_op ? 1 : -1);
- return (idetape_space_over_filemarks(drive, MTFSF, count));
- default:
- printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",mt_op);
- return (-EIO);
+ case MTFSF:
+ case MTBSF:
+ idetape_create_space_cmd(&pc, mt_count - count,
+ IDETAPE_SPACE_OVER_FILEMARK);
+ return idetape_queue_pc_tail(drive, &pc);
+ case MTFSFM:
+ case MTBSFM:
+ if (!sprev)
+ return -EIO;
+ retval = idetape_space_over_filemarks(drive, MTFSF,
+ mt_count - count);
+ if (retval)
+ return retval;
+ count = (MTBSFM == mt_op ? 1 : -1);
+ return idetape_space_over_filemarks(drive, MTFSF, count);
+ default:
+ printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
+ mt_op);
+ return -EIO;
}
}
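
The function above first deducts the filemarks already crossed inside the read-ahead pipeline, then issues a SPACE command for the remainder; MTFSFM/MTBSFM are expressed as MTFSF plus a one-filemark correction. The arithmetic-only sketch below illustrates that composition; the SK_* opcodes are invented, the real values come from <linux/mtio.h>, and no actual command is issued.

/*
 * Arithmetic sketch only, not driver code: residual SPACE count after
 * 'crossed_in_pipeline' filemarks were already consumed, plus the MTFSFM
 * decomposition into MTFSF and a one-filemark correction.
 */
#include <stdio.h>

enum { SK_MTFSF, SK_MTBSF, SK_MTFSFM, SK_MTBSFM };

static void sketch_space(int mt_op, int mt_count, int crossed_in_pipeline)
{
	int count = crossed_in_pipeline;

	if (mt_op == SK_MTBSF || mt_op == SK_MTBSFM)
		mt_count = -mt_count;	/* backward spacing counts negative */

	switch (mt_op) {
	case SK_MTFSF:
	case SK_MTBSF:
		printf("SPACE over %d filemark(s)\n", mt_count - count);
		break;
	case SK_MTFSFM:
	case SK_MTBSFM:
		/* space as MTFSF, then step one filemark back (or forward) */
		sketch_space(SK_MTFSF, mt_count - count, 0);
		sketch_space(SK_MTFSF, mt_op == SK_MTBSFM ? 1 : -1, 0);
		break;
	}
}

int main(void)
{
	/* e.g. MTFSF 3 when one filemark was already found in the pipeline */
	sketch_space(SK_MTFSF, 3, 1);
	return 0;
}
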
-
/*
- * Our character device read / write functions.
+ * Our character device read / write functions.
*
- * The tape is optimized to maximize throughput when it is transferring
- * an integral number of the "continuous transfer limit", which is
- * a parameter of the specific tape (26 KB on my particular tape).
- * (32 kB for Onstream)
+ * The tape is optimized to maximize throughput when it is transferring an
+ * integral number of the "continuous transfer limit", which is a parameter of
+ * the specific tape (26kB on my particular tape, 32kB for Onstream).
*
- * As of version 1.3 of the driver, the character device provides an
- * abstract continuous view of the media - any mix of block sizes (even 1
- * byte) on the same backup/restore procedure is supported. The driver
- * will internally convert the requests to the recommended transfer unit,
- * so that an unmatch between the user's block size to the recommended
- * size will only result in a (slightly) increased driver overhead, but
- * will no longer hit performance.
- * This is not applicable to Onstream.
+ * As of version 1.3 of the driver, the character device provides an abstract
+ * continuous view of the media - any mix of block sizes (even 1 byte) on the
+ * same backup/restore procedure is supported. The driver will internally
+ * convert the requests to the recommended transfer unit, so that a mismatch
+ * between the user's block size and the recommended size will only result in a
+ * (slightly) increased driver overhead, but will no longer hurt performance.
+ * This is not applicable to Onstream.
*/
-static ssize_t idetape_chrdev_read (struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
struct ide_tape_obj *tape = ide_tape_f(file);
ide_drive_t *drive = tape->drive;
- ssize_t bytes_read,temp, actually_read = 0, rc;
+ ssize_t bytes_read, temp, actually_read = 0, rc;
ssize_t ret = 0;
u16 ctl = *(u16 *)&tape->caps[12];
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 3)
- printk(KERN_INFO "ide-tape: Reached idetape_chrdev_read, count %Zd\n", count);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
- if (tape->chrdev_direction != idetape_direction_read) {
+ if (tape->chrdev_dir != IDETAPE_DIR_READ) {
if (test_bit(IDETAPE_DETECT_BS, &tape->flags))
- if (count > tape->tape_block_size &&
- (count % tape->tape_block_size) == 0)
- tape->user_bs_factor = count / tape->tape_block_size;
+ if (count > tape->blk_size &&
+ (count % tape->blk_size) == 0)
+ tape->user_bs_factor = count / tape->blk_size;
}
- if ((rc = idetape_initiate_read(drive, tape->max_stages)) < 0)
+ rc = idetape_init_read(drive, tape->max_stages);
+ if (rc < 0)
return rc;
if (count == 0)
return (0);
if (tape->merge_stage_size) {
- actually_read = min((unsigned int)(tape->merge_stage_size), (unsigned int)count);
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, actually_read))
+ actually_read = min((unsigned int)(tape->merge_stage_size),
+ (unsigned int)count);
+ if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
+ actually_read))
ret = -EFAULT;
buf += actually_read;
tape->merge_stage_size -= actually_read;
@@ -3011,7 +2844,8 @@ static ssize_t idetape_chrdev_read (struct file *file, char __user *buf,
bytes_read = idetape_add_chrdev_read_request(drive, ctl);
if (bytes_read <= 0)
goto finish;
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, bytes_read))
+ if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
+ bytes_read))
ret = -EFAULT;
buf += bytes_read;
count -= bytes_read;
@@ -3022,25 +2856,24 @@ static ssize_t idetape_chrdev_read (struct file *file, char __user *buf,
if (bytes_read <= 0)
goto finish;
temp = min((unsigned long)count, (unsigned long)bytes_read);
- if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage, temp))
+ if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
+ temp))
ret = -EFAULT;
actually_read += temp;
tape->merge_stage_size = bytes_read-temp;
}
finish:
if (!actually_read && test_bit(IDETAPE_FILEMARK, &tape->flags)) {
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 2)
- printk(KERN_INFO "ide-tape: %s: spacing over filemark\n", tape->name);
-#endif
+ debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
+
idetape_space_over_filemarks(drive, MTFSF, 1);
return 0;
}
- return (ret) ? ret : actually_read;
+ return ret ? ret : actually_read;
}
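
The IDETAPE_DETECT_BS path above is what provides the "abstract continuous
view" promised in the comment before idetape_chrdev_read(): the first suitably
aligned read() (or an explicit MTSETBLK, handled further down in
idetape_mtioctop()) fixes user_bs_factor and everything else is converted
internally. A worked example with made-up but typical numbers:

/*
 * Illustration only (values are assumptions, not taken from a real drive):
 * with blk_size = 512 reported by the block descriptor and an application
 * issuing 32768-byte read()s,
 *
 *	user_bs_factor = 32768 / 512 = 64
 *
 * so MTIOCGET/MTIOCPOS report positions in 32kB user blocks and MTSEEK
 * multiplies by 64, while the driver keeps addressing the medium in
 * 512-byte tape blocks.
 */
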
-static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
+static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct ide_tape_obj *tape = ide_tape_f(file);
@@ -3053,39 +2886,37 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
if (tape->write_prot)
return -EACCES;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 3)
- printk(KERN_INFO "ide-tape: Reached idetape_chrdev_write, "
- "count %Zd\n", count);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
/* Initialize write operation */
- if (tape->chrdev_direction != idetape_direction_write) {
- if (tape->chrdev_direction == idetape_direction_read)
+ if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
+ if (tape->chrdev_dir == IDETAPE_DIR_READ)
idetape_discard_read_pipeline(drive, 1);
if (tape->merge_stage || tape->merge_stage_size) {
printk(KERN_ERR "ide-tape: merge_stage_size "
"should be 0 now\n");
tape->merge_stage_size = 0;
}
- if ((tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0)) == NULL)
+ tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
+ if (!tape->merge_stage)
return -ENOMEM;
- tape->chrdev_direction = idetape_direction_write;
+ tape->chrdev_dir = IDETAPE_DIR_WRITE;
idetape_init_merge_stage(tape);
/*
- * Issue a write 0 command to ensure that DSC handshake
- * is switched from completion mode to buffer available
- * mode.
- * No point in issuing this if DSC overlap isn't supported,
- * some drives (Seagate STT3401A) will return an error.
+ * Issue a write 0 command to ensure that DSC handshake is
+ * switched from completion mode to buffer available mode. No
+ * point in issuing this if DSC overlap isn't supported, some
+ * drives (Seagate STT3401A) will return an error.
*/
if (drive->dsc_overlap) {
- ssize_t retval = idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, 0, tape->merge_stage->bh);
+ ssize_t retval = idetape_queue_rw_tail(drive,
+ REQ_IDETAPE_WRITE, 0,
+ tape->merge_stage->bh);
if (retval < 0) {
__idetape_kfree_stage(tape->merge_stage);
tape->merge_stage = NULL;
- tape->chrdev_direction = idetape_direction_none;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
return retval;
}
}
@@ -3096,11 +2927,14 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
idetape_restart_speed_control(drive);
if (tape->merge_stage_size) {
if (tape->merge_stage_size >= tape->stage_size) {
- printk(KERN_ERR "ide-tape: bug: merge buffer too big\n");
+ printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
tape->merge_stage_size = 0;
}
- actually_written = min((unsigned int)(tape->stage_size - tape->merge_stage_size), (unsigned int)count);
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, actually_written))
+ actually_written = min((unsigned int)
+ (tape->stage_size - tape->merge_stage_size),
+ (unsigned int)count);
+ if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
+ actually_written))
ret = -EFAULT;
buf += actually_written;
tape->merge_stage_size += actually_written;
@@ -3116,7 +2950,8 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
}
while (count >= tape->stage_size) {
ssize_t retval;
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, tape->stage_size))
+ if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
+ tape->stage_size))
ret = -EFAULT;
buf += tape->stage_size;
count -= tape->stage_size;
@@ -3127,14 +2962,15 @@ static ssize_t idetape_chrdev_write (struct file *file, const char __user *buf,
}
if (count) {
actually_written += count;
- if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf, count))
+ if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
+ count))
ret = -EFAULT;
tape->merge_stage_size += count;
}
- return (ret) ? ret : actually_written;
+ return ret ? ret : actually_written;
}
-static int idetape_write_filemark (ide_drive_t *drive)
+static int idetape_write_filemark(ide_drive_t *drive)
{
idetape_pc_t pc;
@@ -3165,113 +3001,117 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
{
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t pc;
- int i,retval;
+ int i, retval;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 1)
- printk(KERN_INFO "ide-tape: Handling MTIOCTOP ioctl: "
- "mt_op=%d, mt_count=%d\n", mt_op, mt_count);
-#endif /* IDETAPE_DEBUG_LOG */
- /*
- * Commands which need our pipelined read-ahead stages.
- */
+ debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
+ mt_op, mt_count);
+
+ /* Commands which need our pipelined read-ahead stages. */
switch (mt_op) {
- case MTFSF:
- case MTFSFM:
- case MTBSF:
- case MTBSFM:
- if (!mt_count)
- return (0);
- return (idetape_space_over_filemarks(drive,mt_op,mt_count));
- default:
- break;
+ case MTFSF:
+ case MTFSFM:
+ case MTBSF:
+ case MTBSFM:
+ if (!mt_count)
+ return 0;
+ return idetape_space_over_filemarks(drive, mt_op, mt_count);
+ default:
+ break;
}
+
switch (mt_op) {
- case MTWEOF:
- if (tape->write_prot)
- return -EACCES;
- idetape_discard_read_pipeline(drive, 1);
- for (i = 0; i < mt_count; i++) {
- retval = idetape_write_filemark(drive);
- if (retval)
- return retval;
- }
- return (0);
- case MTREW:
- idetape_discard_read_pipeline(drive, 0);
- if (idetape_rewind_tape(drive))
+ case MTWEOF:
+ if (tape->write_prot)
+ return -EACCES;
+ idetape_discard_read_pipeline(drive, 1);
+ for (i = 0; i < mt_count; i++) {
+ retval = idetape_write_filemark(drive);
+ if (retval)
+ return retval;
+ }
+ return 0;
+ case MTREW:
+ idetape_discard_read_pipeline(drive, 0);
+ if (idetape_rewind_tape(drive))
+ return -EIO;
+ return 0;
+ case MTLOAD:
+ idetape_discard_read_pipeline(drive, 0);
+ idetape_create_load_unload_cmd(drive, &pc,
+ IDETAPE_LU_LOAD_MASK);
+ return idetape_queue_pc_tail(drive, &pc);
+ case MTUNLOAD:
+ case MTOFFL:
+ /*
+ * If door is locked, attempt to unlock before
+ * attempting to eject.
+ */
+ if (tape->door_locked) {
+ if (idetape_create_prevent_cmd(drive, &pc, 0))
+ if (!idetape_queue_pc_tail(drive, &pc))
+ tape->door_locked = DOOR_UNLOCKED;
+ }
+ idetape_discard_read_pipeline(drive, 0);
+ idetape_create_load_unload_cmd(drive, &pc,
+ !IDETAPE_LU_LOAD_MASK);
+ retval = idetape_queue_pc_tail(drive, &pc);
+ if (!retval)
+ clear_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags);
+ return retval;
+ case MTNOP:
+ idetape_discard_read_pipeline(drive, 0);
+ return idetape_flush_tape_buffers(drive);
+ case MTRETEN:
+ idetape_discard_read_pipeline(drive, 0);
+ idetape_create_load_unload_cmd(drive, &pc,
+ IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
+ return idetape_queue_pc_tail(drive, &pc);
+ case MTEOM:
+ idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
+ return idetape_queue_pc_tail(drive, &pc);
+ case MTERASE:
+ (void)idetape_rewind_tape(drive);
+ idetape_create_erase_cmd(&pc);
+ return idetape_queue_pc_tail(drive, &pc);
+ case MTSETBLK:
+ if (mt_count) {
+ if (mt_count < tape->blk_size ||
+ mt_count % tape->blk_size)
return -EIO;
+ tape->user_bs_factor = mt_count / tape->blk_size;
+ clear_bit(IDETAPE_DETECT_BS, &tape->flags);
+ } else
+ set_bit(IDETAPE_DETECT_BS, &tape->flags);
+ return 0;
+ case MTSEEK:
+ idetape_discard_read_pipeline(drive, 0);
+ return idetape_position_tape(drive,
+ mt_count * tape->user_bs_factor, tape->partition, 0);
+ case MTSETPART:
+ idetape_discard_read_pipeline(drive, 0);
+ return idetape_position_tape(drive, 0, mt_count, 0);
+ case MTFSR:
+ case MTBSR:
+ case MTLOCK:
+ if (!idetape_create_prevent_cmd(drive, &pc, 1))
return 0;
- case MTLOAD:
- idetape_discard_read_pipeline(drive, 0);
- idetape_create_load_unload_cmd(drive, &pc, IDETAPE_LU_LOAD_MASK);
- return (idetape_queue_pc_tail(drive, &pc));
- case MTUNLOAD:
- case MTOFFL:
- /*
- * If door is locked, attempt to unlock before
- * attempting to eject.
- */
- if (tape->door_locked) {
- if (idetape_create_prevent_cmd(drive, &pc, 0))
- if (!idetape_queue_pc_tail(drive, &pc))
- tape->door_locked = DOOR_UNLOCKED;
- }
- idetape_discard_read_pipeline(drive, 0);
- idetape_create_load_unload_cmd(drive, &pc,!IDETAPE_LU_LOAD_MASK);
- retval = idetape_queue_pc_tail(drive, &pc);
- if (!retval)
- clear_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags);
+ retval = idetape_queue_pc_tail(drive, &pc);
+ if (retval)
return retval;
- case MTNOP:
- idetape_discard_read_pipeline(drive, 0);
- return (idetape_flush_tape_buffers(drive));
- case MTRETEN:
- idetape_discard_read_pipeline(drive, 0);
- idetape_create_load_unload_cmd(drive, &pc,IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
- return (idetape_queue_pc_tail(drive, &pc));
- case MTEOM:
- idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
- return (idetape_queue_pc_tail(drive, &pc));
- case MTERASE:
- (void) idetape_rewind_tape(drive);
- idetape_create_erase_cmd(&pc);
- return (idetape_queue_pc_tail(drive, &pc));
- case MTSETBLK:
- if (mt_count) {
- if (mt_count < tape->tape_block_size || mt_count % tape->tape_block_size)
- return -EIO;
- tape->user_bs_factor = mt_count / tape->tape_block_size;
- clear_bit(IDETAPE_DETECT_BS, &tape->flags);
- } else
- set_bit(IDETAPE_DETECT_BS, &tape->flags);
- return 0;
- case MTSEEK:
- idetape_discard_read_pipeline(drive, 0);
- return idetape_position_tape(drive, mt_count * tape->user_bs_factor, tape->partition, 0);
- case MTSETPART:
- idetape_discard_read_pipeline(drive, 0);
- return (idetape_position_tape(drive, 0, mt_count, 0));
- case MTFSR:
- case MTBSR:
- case MTLOCK:
- if (!idetape_create_prevent_cmd(drive, &pc, 1))
- return 0;
- retval = idetape_queue_pc_tail(drive, &pc);
- if (retval) return retval;
- tape->door_locked = DOOR_EXPLICITLY_LOCKED;
- return 0;
- case MTUNLOCK:
- if (!idetape_create_prevent_cmd(drive, &pc, 0))
- return 0;
- retval = idetape_queue_pc_tail(drive, &pc);
- if (retval) return retval;
- tape->door_locked = DOOR_UNLOCKED;
+ tape->door_locked = DOOR_EXPLICITLY_LOCKED;
+ return 0;
+ case MTUNLOCK:
+ if (!idetape_create_prevent_cmd(drive, &pc, 0))
return 0;
- default:
- printk(KERN_ERR "ide-tape: MTIO operation %d not "
- "supported\n", mt_op);
- return (-EIO);
+ retval = idetape_queue_pc_tail(drive, &pc);
+ if (retval)
+ return retval;
+ tape->door_locked = DOOR_UNLOCKED;
+ return 0;
+ default:
+ printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
+ mt_op);
+ return -EIO;
}
}
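
For context, the switch above is reached from user space through the standard
magnetic-tape ioctl interface. A minimal sketch of a caller (not part of the
patch; the /dev/ht0 node name assumes the first ide-tape unit registered by
this driver):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <unistd.h>

int main(void)
{
	struct mtop op;
	int fd = open("/dev/ht0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	op.mt_op = MTFSF;	/* handled by idetape_space_over_filemarks() */
	op.mt_count = 1;
	if (ioctl(fd, MTIOCTOP, &op) < 0)
		perror("MTFSF");

	op.mt_op = MTREW;	/* rewind; mt_count is ignored for this op */
	op.mt_count = 0;
	if (ioctl(fd, MTIOCTOP, &op) < 0)
		perror("MTREW");

	close(fd);
	return 0;
}
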
@@ -3288,50 +3128,51 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
struct mtop mtop;
struct mtget mtget;
struct mtpos mtpos;
- int block_offset = 0, position = tape->first_frame_position;
+ int block_offset = 0, position = tape->first_frame;
void __user *argp = (void __user *)arg;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 3)
- printk(KERN_INFO "ide-tape: Reached idetape_chrdev_ioctl, "
- "cmd=%u\n", cmd);
-#endif /* IDETAPE_DEBUG_LOG */
+ debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
tape->restart_speed_control_req = 1;
- if (tape->chrdev_direction == idetape_direction_write) {
+ if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
idetape_empty_write_pipeline(drive);
idetape_flush_tape_buffers(drive);
}
if (cmd == MTIOCGET || cmd == MTIOCPOS) {
- block_offset = idetape_pipeline_size(drive) / (tape->tape_block_size * tape->user_bs_factor);
- if ((position = idetape_read_position(drive)) < 0)
+ block_offset = idetape_pipeline_size(drive) /
+ (tape->blk_size * tape->user_bs_factor);
+ position = idetape_read_position(drive);
+ if (position < 0)
return -EIO;
}
switch (cmd) {
- case MTIOCTOP:
- if (copy_from_user(&mtop, argp, sizeof (struct mtop)))
- return -EFAULT;
- return (idetape_mtioctop(drive,mtop.mt_op,mtop.mt_count));
- case MTIOCGET:
- memset(&mtget, 0, sizeof (struct mtget));
- mtget.mt_type = MT_ISSCSI2;
- mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
- mtget.mt_dsreg = ((tape->tape_block_size * tape->user_bs_factor) << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
- if (tape->drv_write_prot) {
- mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
- }
- if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
- return -EFAULT;
- return 0;
- case MTIOCPOS:
- mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
- if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
- return -EFAULT;
- return 0;
- default:
- if (tape->chrdev_direction == idetape_direction_read)
- idetape_discard_read_pipeline(drive, 1);
- return idetape_blkdev_ioctl(drive, cmd, arg);
+ case MTIOCTOP:
+ if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
+ return -EFAULT;
+ return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
+ case MTIOCGET:
+ memset(&mtget, 0, sizeof(struct mtget));
+ mtget.mt_type = MT_ISSCSI2;
+ mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
+ mtget.mt_dsreg =
+ ((tape->blk_size * tape->user_bs_factor)
+ << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
+
+ if (tape->drv_write_prot)
+ mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
+
+ if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
+ return -EFAULT;
+ return 0;
+ case MTIOCPOS:
+ mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
+ if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
+ return -EFAULT;
+ return 0;
+ default:
+ if (tape->chrdev_dir == IDETAPE_DIR_READ)
+ idetape_discard_read_pipeline(drive, 1);
+ return idetape_blkdev_ioctl(drive, cmd, arg);
}
}
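
The mt_dsreg packing and the GMT_WR_PROT() bit set above are decoded on the
user side with the same macros. A rough sketch (again user space, not part of
the patch):

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>

static void show_tape_status(int fd)
{
	struct mtget g;

	if (ioctl(fd, MTIOCGET, &g) < 0) {
		perror("MTIOCGET");
		return;
	}

	printf("block %ld, block size %ld, %s\n", (long)g.mt_blkno,
	       (long)((g.mt_dsreg & MT_ST_BLKSIZE_MASK) >> MT_ST_BLKSIZE_SHIFT),
	       GMT_WR_PROT(g.mt_gstat) ? "write-protected" : "writable");
}
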
@@ -3347,23 +3188,20 @@ static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
if (idetape_queue_pc_tail(drive, &pc)) {
printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
- if (tape->tape_block_size == 0) {
+ if (tape->blk_size == 0) {
printk(KERN_WARNING "ide-tape: Cannot deal with zero "
"block size, assuming 32k\n");
- tape->tape_block_size = 32768;
+ tape->blk_size = 32768;
}
return;
}
- tape->tape_block_size = (pc.buffer[4 + 5] << 16) +
+ tape->blk_size = (pc.buffer[4 + 5] << 16) +
(pc.buffer[4 + 6] << 8) +
pc.buffer[4 + 7];
tape->drv_write_prot = (pc.buffer[2] & 0x80) >> 7;
}
-/*
- * Our character device open function.
- */
-static int idetape_chrdev_open (struct inode *inode, struct file *filp)
+static int idetape_chrdev_open(struct inode *inode, struct file *filp)
{
unsigned int minor = iminor(inode), i = minor & ~0xc0;
ide_drive_t *drive;
@@ -3371,6 +3209,15 @@ static int idetape_chrdev_open (struct inode *inode, struct file *filp)
idetape_pc_t pc;
int retval;
+ if (i >= MAX_HWIFS * MAX_DRIVES)
+ return -ENXIO;
+
+ tape = ide_tape_chrdev_get(i);
+ if (!tape)
+ return -ENXIO;
+
+ debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
+
/*
* We really want to do nonseekable_open(inode, filp); here, but some
* versions of tar incorrectly call lseek on tapes and bail out if that
@@ -3378,16 +3225,6 @@ static int idetape_chrdev_open (struct inode *inode, struct file *filp)
*/
filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
-#if IDETAPE_DEBUG_LOG
- printk(KERN_INFO "ide-tape: Reached idetape_chrdev_open\n");
-#endif /* IDETAPE_DEBUG_LOG */
-
- if (i >= MAX_HWIFS * MAX_DRIVES)
- return -ENXIO;
-
- if (!(tape = ide_tape_chrdev_get(i)))
- return -ENXIO;
-
drive = tape->drive;
filp->private_data = tape;
@@ -3408,7 +3245,7 @@ static int idetape_chrdev_open (struct inode *inode, struct file *filp)
if (!test_bit(IDETAPE_ADDRESS_VALID, &tape->flags))
(void)idetape_rewind_tape(drive);
- if (tape->chrdev_direction != idetape_direction_read)
+ if (tape->chrdev_dir != IDETAPE_DIR_READ)
clear_bit(IDETAPE_PIPELINE_ERROR, &tape->flags);
/* Read block size and write protect status from drive. */
@@ -3430,10 +3267,8 @@ static int idetape_chrdev_open (struct inode *inode, struct file *filp)
}
}
- /*
- * Lock the tape drive door so user can't eject.
- */
- if (tape->chrdev_direction == idetape_direction_none) {
+ /* Lock the tape drive door so user can't eject. */
+ if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
if (idetape_create_prevent_cmd(drive, &pc, 1)) {
if (!idetape_queue_pc_tail(drive, &pc)) {
if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
@@ -3450,14 +3285,15 @@ out_put_tape:
return retval;
}
-static void idetape_write_release (ide_drive_t *drive, unsigned int minor)
+static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
{
idetape_tape_t *tape = drive->driver_data;
idetape_empty_write_pipeline(drive);
tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
if (tape->merge_stage != NULL) {
- idetape_pad_zeros(drive, tape->tape_block_size * (tape->user_bs_factor - 1));
+ idetape_pad_zeros(drive, tape->blk_size *
+ (tape->user_bs_factor - 1));
__idetape_kfree_stage(tape->merge_stage);
tape->merge_stage = NULL;
}
@@ -3466,10 +3302,7 @@ static void idetape_write_release (ide_drive_t *drive, unsigned int minor)
idetape_flush_tape_buffers(drive);
}
-/*
- * Our character device release function.
- */
-static int idetape_chrdev_release (struct inode *inode, struct file *filp)
+static int idetape_chrdev_release(struct inode *inode, struct file *filp)
{
struct ide_tape_obj *tape = ide_tape_f(filp);
ide_drive_t *drive = tape->drive;
@@ -3478,14 +3311,12 @@ static int idetape_chrdev_release (struct inode *inode, struct file *filp)
lock_kernel();
tape = drive->driver_data;
-#if IDETAPE_DEBUG_LOG
- if (tape->debug_level >= 3)
- printk(KERN_INFO "ide-tape: Reached idetape_chrdev_release\n");
-#endif /* IDETAPE_DEBUG_LOG */
- if (tape->chrdev_direction == idetape_direction_write)
+ debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
+
+ if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
idetape_write_release(drive, minor);
- if (tape->chrdev_direction == idetape_direction_read) {
+ if (tape->chrdev_dir == IDETAPE_DIR_READ) {
if (minor < 128)
idetape_discard_read_pipeline(drive, 1);
else
@@ -3497,7 +3328,7 @@ static int idetape_chrdev_release (struct inode *inode, struct file *filp)
}
if (minor < 128 && test_bit(IDETAPE_MEDIUM_PRESENT, &tape->flags))
(void) idetape_rewind_tape(drive);
- if (tape->chrdev_direction == idetape_direction_none) {
+ if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
if (tape->door_locked == DOOR_LOCKED) {
if (idetape_create_prevent_cmd(drive, &pc, 0)) {
if (!idetape_queue_pc_tail(drive, &pc))
@@ -3512,37 +3343,39 @@ static int idetape_chrdev_release (struct inode *inode, struct file *filp)
}
/*
- * idetape_identify_device is called to check the contents of the
- * ATAPI IDENTIFY command results. We return:
+ * Check the contents of the ATAPI IDENTIFY command results. We return:
*
- * 1 If the tape can be supported by us, based on the information
- * we have so far.
+ * 1 - If the tape can be supported by us, based on the information we have so
+ * far.
*
- * 0 If this tape driver is not currently supported by us.
+ * 0 - If this tape drive is not currently supported by us.
*/
-static int idetape_identify_device (ide_drive_t *drive)
+static int idetape_identify_device(ide_drive_t *drive)
{
- struct idetape_id_gcw gcw;
- struct hd_driveid *id = drive->id;
+ u8 gcw[2], protocol, device_type, removable, packet_size;
if (drive->id_read == 0)
return 1;
- *((unsigned short *) &gcw) = id->config;
+ *((unsigned short *) &gcw) = drive->id->config;
+
+ protocol = (gcw[1] & 0xC0) >> 6;
+ device_type = gcw[1] & 0x1F;
+ removable = !!(gcw[0] & 0x80);
+ packet_size = gcw[0] & 0x3;
/* Check that we can support this device */
-
- if (gcw.protocol != 2)
+ if (protocol != 2)
printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
- gcw.protocol);
- else if (gcw.device_type != 1)
+ protocol);
+ else if (device_type != 1)
printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
- "to tape\n", gcw.device_type);
- else if (!gcw.removable)
+ "to tape\n", device_type);
+ else if (!removable)
printk(KERN_ERR "ide-tape: The removable flag is not set\n");
- else if (gcw.packet_size != 0) {
- printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12 "
- "bytes long\n", gcw.packet_size);
+ else if (packet_size != 0) {
+ printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12"
+ " bytes\n", packet_size);
} else
return 1;
return 0;
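
The open-coded masks above replace the old struct idetape_id_gcw bitfields;
they split the IDENTIFY general-configuration word into its low byte (gcw[0])
and high byte (gcw[1]). A worked decode of one plausible value, purely for
illustration:

/*
 * config = 0x8180  =>  gcw[1] = 0x81, gcw[0] = 0x80
 *
 *	protocol    = (0x81 & 0xC0) >> 6 = 2	ATAPI
 *	device_type =  0x81 & 0x1F       = 1	sequential-access (tape)
 *	removable   = !!(0x80 & 0x80)    = 1	removable medium
 *	packet_size =   0x80 & 0x03      = 0	12-byte packets
 *
 * which is exactly the combination the checks above accept.
 */
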
@@ -3550,9 +3383,9 @@ static int idetape_identify_device (ide_drive_t *drive)
static void idetape_get_inquiry_results(ide_drive_t *drive)
{
- char *r;
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t pc;
+ char fw_rev[6], vendor_id[10], product_id[18];
idetape_create_inquiry_cmd(&pc);
if (idetape_queue_pc_tail(drive, &pc)) {
@@ -3560,27 +3393,23 @@ static void idetape_get_inquiry_results(ide_drive_t *drive)
tape->name);
return;
}
- memcpy(tape->vendor_id, &pc.buffer[8], 8);
- memcpy(tape->product_id, &pc.buffer[16], 16);
- memcpy(tape->firmware_revision, &pc.buffer[32], 4);
-
- ide_fixstring(tape->vendor_id, 10, 0);
- ide_fixstring(tape->product_id, 18, 0);
- ide_fixstring(tape->firmware_revision, 6, 0);
- r = tape->firmware_revision;
- if (*(r + 1) == '.')
- tape->firmware_revision_num = (*r - '0') * 100 +
- (*(r + 2) - '0') * 10 + *(r + 3) - '0';
+ memcpy(vendor_id, &pc.buffer[8], 8);
+ memcpy(product_id, &pc.buffer[16], 16);
+ memcpy(fw_rev, &pc.buffer[32], 4);
+
+ ide_fixstring(vendor_id, 10, 0);
+ ide_fixstring(product_id, 18, 0);
+ ide_fixstring(fw_rev, 6, 0);
+
printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
- drive->name, tape->name, tape->vendor_id,
- tape->product_id, tape->firmware_revision);
+ drive->name, tape->name, vendor_id, product_id, fw_rev);
}
/*
* Ask the tape about its various parameters. In particular, we will adjust our
* data transfer buffer size to the recommended value as returned by the tape.
*/
-static void idetape_get_mode_sense_results (ide_drive_t *drive)
+static void idetape_get_mode_sense_results(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
idetape_pc_t pc;
@@ -3591,7 +3420,7 @@ static void idetape_get_mode_sense_results (ide_drive_t *drive)
if (idetape_queue_pc_tail(drive, &pc)) {
printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
" some default values\n");
- tape->tape_block_size = 512;
+ tape->blk_size = 512;
put_unaligned(52, (u16 *)&tape->caps[12]);
put_unaligned(540, (u16 *)&tape->caps[14]);
put_unaligned(6*52, (u16 *)&tape->caps[16]);
@@ -3621,62 +3450,75 @@ static void idetape_get_mode_sense_results (ide_drive_t *drive)
memcpy(&tape->caps, caps, 20);
if (caps[7] & 0x02)
- tape->tape_block_size = 512;
+ tape->blk_size = 512;
else if (caps[7] & 0x04)
- tape->tape_block_size = 1024;
+ tape->blk_size = 1024;
}
#ifdef CONFIG_IDE_PROC_FS
-static void idetape_add_settings (ide_drive_t *drive)
+static void idetape_add_settings(ide_drive_t *drive)
{
idetape_tape_t *tape = drive->driver_data;
-/*
- * drive setting name read/write data type min max mul_factor div_factor data pointer set function
- */
ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
1, 2, (u16 *)&tape->caps[16], NULL);
- ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
- ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_stages, NULL);
- ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff, tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
- ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages, NULL);
- ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0, 0xffff, tape->stage_size / 1024, 1, &tape->nr_pending_stages, NULL);
+ ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
+ tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
+ ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
+ tape->stage_size / 1024, 1, &tape->max_stages, NULL);
+ ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
+ tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
+ ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
+ 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
+ NULL);
+ ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
+ 0xffff, tape->stage_size / 1024, 1,
+ &tape->nr_pending_stages, NULL);
ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
1, 1, (u16 *)&tape->caps[14], NULL);
- ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1024, &tape->stage_size, NULL);
- ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_frequency, NULL);
- ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
- ide_add_setting(drive, "pipeline_head_speed_c",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed, NULL);
- ide_add_setting(drive, "pipeline_head_speed_u",SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->uncontrolled_pipeline_head_speed,NULL);
- ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff, 1, 1, &tape->avg_speed, NULL);
- ide_add_setting(drive, "debug_level", SETTING_RW, TYPE_INT, 0, 0xffff, 1, 1, &tape->debug_level, NULL);
+ ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
+ 1024, &tape->stage_size, NULL);
+ ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
+ IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
+ NULL);
+ ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
+ 1, &drive->dsc_overlap, NULL);
+ ide_add_setting(drive, "pipeline_head_speed_c", SETTING_READ, TYPE_INT,
+ 0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed,
+ NULL);
+ ide_add_setting(drive, "pipeline_head_speed_u", SETTING_READ, TYPE_INT,
+ 0, 0xffff, 1, 1,
+ &tape->uncontrolled_pipeline_head_speed, NULL);
+ ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
+ 1, 1, &tape->avg_speed, NULL);
+ ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
+ 1, &tape->debug_mask, NULL);
}
#else
static inline void idetape_add_settings(ide_drive_t *drive) { ; }
#endif
/*
- * ide_setup is called to:
+ * The function below is called to:
*
- * 1. Initialize our various state variables.
- * 2. Ask the tape for its capabilities.
- * 3. Allocate a buffer which will be used for data
- * transfer. The buffer size is chosen based on
- * the recommendation which we received in step (2).
+ * 1. Initialize our various state variables.
+ * 2. Ask the tape for its capabilities.
+ * 3. Allocate a buffer which will be used for data transfer. The buffer size
+ * is chosen based on the recommendation which we received in step 2.
*
- * Note that at this point ide.c already assigned us an irq, so that
- * we can queue requests here and wait for their completion.
+ * Note that at this point ide.c already assigned us an irq, so that we can
+ * queue requests here and wait for their completion.
*/
-static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
+static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
{
unsigned long t1, tmid, tn, t;
int speed;
- struct idetape_id_gcw gcw;
int stage_size;
+ u8 gcw[2];
struct sysinfo si;
u16 *ctl = (u16 *)&tape->caps[12];
- spin_lock_init(&tape->spinlock);
+ spin_lock_init(&tape->lock);
drive->dsc_overlap = 1;
if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
@@ -3690,25 +3532,29 @@ static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
tape->name[0] = 'h';
tape->name[1] = 't';
tape->name[2] = '0' + minor;
- tape->chrdev_direction = idetape_direction_none;
+ tape->chrdev_dir = IDETAPE_DIR_NONE;
tape->pc = tape->pc_stack;
tape->max_insert_speed = 10000;
tape->speed_control = 1;
*((unsigned short *) &gcw) = drive->id->config;
- if (gcw.drq_type == 1)
+
+ /* Command packet DRQ type */
+ if (((gcw[0] & 0x60) >> 5) == 1)
set_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags);
- tape->min_pipeline = tape->max_pipeline = tape->max_stages = 10;
-
+ tape->min_pipeline = 10;
+ tape->max_pipeline = 10;
+ tape->max_stages = 10;
+
idetape_get_inquiry_results(drive);
idetape_get_mode_sense_results(drive);
ide_tape_get_bsize_from_bdesc(drive);
tape->user_bs_factor = 1;
- tape->stage_size = *ctl * tape->tape_block_size;
+ tape->stage_size = *ctl * tape->blk_size;
while (tape->stage_size > 0xffff) {
printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
*ctl /= 2;
- tape->stage_size = *ctl * tape->tape_block_size;
+ tape->stage_size = *ctl * tape->blk_size;
}
stage_size = tape->stage_size;
tape->pages_per_stage = stage_size / PAGE_SIZE;
@@ -3722,17 +3568,22 @@ static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
tape->max_stages = speed * 1000 * 10 / tape->stage_size;
- /*
- * Limit memory use for pipeline to 10% of physical memory
- */
+ /* Limit memory use for pipeline to 10% of physical memory */
si_meminfo(&si);
- if (tape->max_stages * tape->stage_size > si.totalram * si.mem_unit / 10)
- tape->max_stages = si.totalram * si.mem_unit / (10 * tape->stage_size);
+ if (tape->max_stages * tape->stage_size >
+ si.totalram * si.mem_unit / 10)
+ tape->max_stages =
+ si.totalram * si.mem_unit / (10 * tape->stage_size);
+
tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
- tape->max_pipeline = min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
- if (tape->max_stages == 0)
- tape->max_stages = tape->min_pipeline = tape->max_pipeline = 1;
+ tape->max_pipeline =
+ min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
+ if (tape->max_stages == 0) {
+ tape->max_stages = 1;
+ tape->min_pipeline = 1;
+ tape->max_pipeline = 1;
+ }
t1 = (tape->stage_size * HZ) / (speed * 1000);
tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
@@ -3744,17 +3595,19 @@ static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
t = t1;
/*
- * Ensure that the number we got makes sense; limit
- * it within IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
+ * Ensure that the number we got makes sense; limit it within
+ * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
*/
- tape->best_dsc_rw_frequency = max_t(unsigned long, min_t(unsigned long, t, IDETAPE_DSC_RW_MAX), IDETAPE_DSC_RW_MIN);
+ tape->best_dsc_rw_freq = max_t(unsigned long,
+ min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
+ IDETAPE_DSC_RW_MIN);
printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
"%dkB pipeline, %lums tDSC%s\n",
drive->name, tape->name, *(u16 *)&tape->caps[14],
(*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
tape->stage_size / 1024,
tape->max_stages * tape->stage_size / 1024,
- tape->best_dsc_rw_frequency * 1000 / HZ,
+ tape->best_dsc_rw_freq * 1000 / HZ,
drive->using_dma ? ", DMA":"");
idetape_add_settings(drive);
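
To put rough numbers on the 10% memory cap above (illustrative only, not
measured): with *ctl = 64 and blk_size = 512 the stage size is 32kB, and on a
64MB machine the pipeline limit works out as follows.

/*
 *	max_stages <= si.totalram * si.mem_unit / (10 * stage_size)
 *	            = 67108864 / (10 * 32768) = 204 stages (~6.4MB),
 *
 * before the further clamping against IDETAPE_MAX_PIPELINE_STAGES and the
 * derived min_pipeline/max_pipeline values.
 */
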
@@ -3782,7 +3635,8 @@ static void ide_tape_release(struct kref *kref)
drive->dsc_overlap = 0;
drive->driver_data = NULL;
device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
- device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor + 128));
+ device_destroy(idetape_sysfs_class,
+ MKDEV(IDETAPE_MAJOR, tape->minor + 128));
idetape_devs[tape->minor] = NULL;
g->private_data = NULL;
put_disk(g);
@@ -3831,9 +3685,7 @@ static ide_driver_t idetape_driver = {
#endif
};
-/*
- * Our character device supporting functions, passed to register_chrdev.
- */
+/* Our character device supporting functions, passed to register_chrdev. */
static const struct file_operations idetape_fops = {
.owner = THIS_MODULE,
.read = idetape_chrdev_read,
@@ -3848,7 +3700,8 @@ static int idetape_open(struct inode *inode, struct file *filp)
struct gendisk *disk = inode->i_bdev->bd_disk;
struct ide_tape_obj *tape;
- if (!(tape = ide_tape_get(disk)))
+ tape = ide_tape_get(disk);
+ if (!tape)
return -ENXIO;
return 0;
@@ -3895,21 +3748,20 @@ static int ide_tape_probe(ide_drive_t *drive)
goto failed;
if (drive->media != ide_tape)
goto failed;
- if (!idetape_identify_device (drive)) {
- printk(KERN_ERR "ide-tape: %s: not supported by this version of ide-tape\n", drive->name);
+ if (!idetape_identify_device(drive)) {
+ printk(KERN_ERR "ide-tape: %s: not supported by this version of"
+ " the driver\n", drive->name);
goto failed;
}
if (drive->scsi) {
- printk("ide-tape: passing drive %s to ide-scsi emulation.\n", drive->name);
+ printk(KERN_INFO "ide-tape: passing drive %s to ide-scsi"
+ " emulation.\n", drive->name);
goto failed;
}
- if (strstr(drive->id->model, "OnStream DI-")) {
- printk(KERN_WARNING "ide-tape: Use drive %s with ide-scsi emulation and osst.\n", drive->name);
- printk(KERN_WARNING "ide-tape: OnStream support will be removed soon from ide-tape!\n");
- }
- tape = kzalloc(sizeof (idetape_tape_t), GFP_KERNEL);
+ tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
if (tape == NULL) {
- printk(KERN_ERR "ide-tape: %s: Can't allocate a tape structure\n", drive->name);
+ printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
+ drive->name);
goto failed;
}
@@ -3955,10 +3807,7 @@ failed:
return -ENODEV;
}
-MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
-MODULE_LICENSE("GPL");
-
-static void __exit idetape_exit (void)
+static void __exit idetape_exit(void)
{
driver_unregister(&idetape_driver.gen_driver);
class_destroy(idetape_sysfs_class);
@@ -3977,7 +3826,8 @@ static int __init idetape_init(void)
}
if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
- printk(KERN_ERR "ide-tape: Failed to register character device interface\n");
+ printk(KERN_ERR "ide-tape: Failed to register chrdev"
+ " interface\n");
error = -EBUSY;
goto out_free_class;
}
@@ -4000,3 +3850,5 @@ MODULE_ALIAS("ide:*m-tape*");
module_init(idetape_init);
module_exit(idetape_exit);
MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
+MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 4e1da1c78cb5..0518a2e948cf 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -189,12 +189,11 @@ EXPORT_SYMBOL_GPL(do_rw_taskfile);
*/
static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
- u8 stat;
+ u8 stat = ide_read_status(drive);
- if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
+ if (OK_STAT(stat, READY_STAT, BAD_STAT))
drive->mult_count = drive->mult_req;
- } else {
+ else {
drive->mult_req = drive->mult_count = 0;
drive->special.b.recalibrate = 1;
(void) ide_dump_status(drive, "set_multmode", stat);
@@ -207,11 +206,10 @@ static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
*/
static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
int retries = 5;
u8 stat;
- while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
+ while (((stat = ide_read_status(drive)) & BUSY_STAT) && retries--)
udelay(10);
if (OK_STAT(stat, READY_STAT, BAD_STAT))
@@ -230,10 +228,9 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
*/
static ide_startstop_t recal_intr(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
- u8 stat;
+ u8 stat = ide_read_status(drive);
- if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT))
+ if (!OK_STAT(stat, READY_STAT, BAD_STAT))
return ide_error(drive, "recal_intr", stat);
return ide_stopped;
}
@@ -244,23 +241,23 @@ static ide_startstop_t recal_intr(ide_drive_t *drive)
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
ide_task_t *args = HWGROUP(drive)->rq->special;
- ide_hwif_t *hwif = HWIF(drive);
u8 stat;
local_irq_enable_in_hardirq();
- if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
+ stat = ide_read_status(drive);
+
+ if (!OK_STAT(stat, READY_STAT, BAD_STAT))
return ide_error(drive, "task_no_data_intr", stat);
/* calls ide_end_drive_cmd */
- }
+
if (args)
- ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
+ ide_end_drive_cmd(drive, stat, ide_read_error(drive));
return ide_stopped;
}
static u8 wait_drive_not_busy(ide_drive_t *drive)
{
- ide_hwif_t *hwif = HWIF(drive);
int retries;
u8 stat;
@@ -269,7 +266,9 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
* This can take up to 10 usec, but we will wait max 1 ms.
*/
for (retries = 0; retries < 100; retries++) {
- if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT)
+ stat = ide_read_status(drive);
+
+ if (stat & BUSY_STAT)
udelay(10);
else
break;
@@ -408,7 +407,7 @@ static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
- u8 err = drive->hwif->INB(IDE_ERROR_REG);
+ u8 err = ide_read_error(drive);
ide_end_drive_cmd(drive, stat, err);
return;
@@ -430,7 +429,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct request *rq = HWGROUP(drive)->rq;
- u8 stat = hwif->INB(IDE_STATUS_REG);
+ u8 stat = ide_read_status(drive);
/* new way for dealing with premature shared PCI interrupts */
if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
@@ -465,7 +464,7 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
struct request *rq = HWGROUP(drive)->rq;
- u8 stat = hwif->INB(IDE_STATUS_REG);
+ u8 stat = ide_read_status(drive);
if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
return task_error(drive, rq, __FUNCTION__, stat);
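
The repeated hwif->INB(IDE_STATUS_REG) reads in this file are folded into the
new ide_read_status() helper (with ide_read_error() doing the same for the
error register). The helper itself is not in this hunk; presumably it is a
trivial inline along these lines:

/* Assumed shape of the helper, for reference only; it is not in this patch. */
static inline u8 ide_read_status(ide_drive_t *drive)
{
	return drive->hwif->INB(IDE_STATUS_REG);
}
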
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index ac6136001615..ad0e9955f73c 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -618,60 +618,6 @@ abort:
EXPORT_SYMBOL(ide_unregister);
-
-/**
- * ide_setup_ports - set up IDE interface ports
- * @hw: register descriptions
- * @base: base register
- * @offsets: table of register offsets
- * @ctrl: control register
- * @ack_irq: IRQ ack
- * @irq: interrupt lie
- *
- * Setup hw_regs_t structure described by parameters. You
- * may set up the hw structure yourself OR use this routine to
- * do it for you. This is basically a helper
- *
- */
-
-void ide_setup_ports ( hw_regs_t *hw,
- unsigned long base, int *offsets,
- unsigned long ctrl, unsigned long intr,
- ide_ack_intr_t *ack_intr,
-/*
- * ide_io_ops_t *iops,
- */
- int irq)
-{
- int i;
-
- memset(hw, 0, sizeof(hw_regs_t));
- for (i = 0; i < IDE_NR_PORTS; i++) {
- if (offsets[i] == -1) {
- switch(i) {
- case IDE_CONTROL_OFFSET:
- hw->io_ports[i] = ctrl;
- break;
-#if defined(CONFIG_AMIGA) || defined(CONFIG_MAC)
- case IDE_IRQ_OFFSET:
- hw->io_ports[i] = intr;
- break;
-#endif /* (CONFIG_AMIGA) || (CONFIG_MAC) */
- default:
- hw->io_ports[i] = 0;
- break;
- }
- } else {
- hw->io_ports[i] = base + offsets[i];
- }
- }
- hw->irq = irq;
- hw->ack_intr = ack_intr;
-/*
- * hw->iops = iops;
- */
-}
-
void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
{
memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
diff --git a/drivers/ide/legacy/buddha.c b/drivers/ide/legacy/buddha.c
index 8bdb79da17e8..50ffa871d5e9 100644
--- a/drivers/ide/legacy/buddha.c
+++ b/drivers/ide/legacy/buddha.c
@@ -56,31 +56,11 @@ static u_int xsurf_bases[XSURF_NUM_HWIFS] __initdata = {
XSURF_BASE1, XSURF_BASE2
};
-
/*
* Offsets from one of the above bases
*/
-#define BUDDHA_DATA 0x00
-#define BUDDHA_ERROR 0x06 /* see err-bits */
-#define BUDDHA_NSECTOR 0x0a /* nr of sectors to read/write */
-#define BUDDHA_SECTOR 0x0e /* starting sector */
-#define BUDDHA_LCYL 0x12 /* starting cylinder */
-#define BUDDHA_HCYL 0x16 /* high byte of starting cyl */
-#define BUDDHA_SELECT 0x1a /* 101dhhhh , d=drive, hhhh=head */
-#define BUDDHA_STATUS 0x1e /* see status-bits */
#define BUDDHA_CONTROL 0x11a
-#define XSURF_CONTROL -1 /* X-Surf has no CS1* (Control/AltStat) */
-
-static int buddha_offsets[IDE_NR_PORTS] __initdata = {
- BUDDHA_DATA, BUDDHA_ERROR, BUDDHA_NSECTOR, BUDDHA_SECTOR, BUDDHA_LCYL,
- BUDDHA_HCYL, BUDDHA_SELECT, BUDDHA_STATUS, BUDDHA_CONTROL, -1
-};
-
-static int xsurf_offsets[IDE_NR_PORTS] __initdata = {
- BUDDHA_DATA, BUDDHA_ERROR, BUDDHA_NSECTOR, BUDDHA_SECTOR, BUDDHA_LCYL,
- BUDDHA_HCYL, BUDDHA_SELECT, BUDDHA_STATUS, XSURF_CONTROL, -1
-};
/*
* Other registers
@@ -140,6 +120,26 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
return 1;
}
+static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
+ unsigned long ctl, unsigned long irq_port,
+ ide_ack_intr_t *ack_intr)
+{
+ int i;
+
+ memset(hw, 0, sizeof(*hw));
+
+ hw->io_ports[IDE_DATA_OFFSET] = base;
+
+ for (i = 1; i < 8; i++)
+ hw->io_ports[i] = base + 2 + i * 4;
+
+ hw->io_ports[IDE_CONTROL_OFFSET] = ctl;
+ hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+
+ hw->irq = IRQ_AMIGA_PORTS;
+ hw->ack_intr = ack_intr;
+}
+
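
The stride used by buddha_setup_ports() can be checked directly against the
BUDDHA_* table it replaces:

/*
 *	base + 2 + i * 4:  i = 1 -> 0x06 (ERROR)    i = 2 -> 0x0a (NSECTOR)
 *	                   i = 3 -> 0x0e (SECTOR)   i = 4 -> 0x12 (LCYL)
 *	                   i = 5 -> 0x16 (HCYL)     i = 6 -> 0x1a (SELECT)
 *	                   i = 7 -> 0x1e (STATUS)
 *
 * with DATA (offset 0x00) set explicitly before the loop, and the X-Surf
 * case passing ctl = 0 since it has no Control/AltStat register.
 */
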
/*
* Probe for a Buddha or Catweasel IDE interface
*/
@@ -202,22 +202,24 @@ fail_base2:
printk(KERN_INFO "ide: %s IDE controller\n",
buddha_board_name[type]);
- for(i=0;i<buddha_num_hwifs;i++) {
- if(type != BOARD_XSURF) {
- ide_setup_ports(&hw, (buddha_board+buddha_bases[i]),
- buddha_offsets, 0,
- (buddha_board+buddha_irqports[i]),
- buddha_ack_intr,
-// budda_iops,
- IRQ_AMIGA_PORTS);
+ for (i = 0; i < buddha_num_hwifs; i++) {
+ unsigned long base, ctl, irq_port;
+ ide_ack_intr_t *ack_intr;
+
+ if (type != BOARD_XSURF) {
+ base = buddha_board + buddha_bases[i];
+ ctl = base + BUDDHA_CONTROL;
+ irq_port = buddha_board + buddha_irqports[i];
+ ack_intr = buddha_ack_intr;
} else {
- ide_setup_ports(&hw, (buddha_board+xsurf_bases[i]),
- xsurf_offsets, 0,
- (buddha_board+xsurf_irqports[i]),
- xsurf_ack_intr,
-// xsurf_iops,
- IRQ_AMIGA_PORTS);
- }
+ base = buddha_board + xsurf_bases[i];
+ /* X-Surf has no CS1* (Control/AltStat) */
+ ctl = 0;
+ irq_port = buddha_board + xsurf_irqports[i];
+ ack_intr = xsurf_ack_intr;
+ }
+
+ buddha_setup_ports(&hw, base, ctl, irq_port, ack_intr);
hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
if (hwif) {
diff --git a/drivers/ide/legacy/falconide.c b/drivers/ide/legacy/falconide.c
index 85b69a82825f..f044048903b3 100644
--- a/drivers/ide/legacy/falconide.c
+++ b/drivers/ide/legacy/falconide.c
@@ -33,22 +33,8 @@
* Offsets from the above base
*/
-#define ATA_HD_DATA 0x00
-#define ATA_HD_ERROR 0x05 /* see err-bits */
-#define ATA_HD_NSECTOR 0x09 /* nr of sectors to read/write */
-#define ATA_HD_SECTOR 0x0d /* starting sector */
-#define ATA_HD_LCYL 0x11 /* starting cylinder */
-#define ATA_HD_HCYL 0x15 /* high byte of starting cyl */
-#define ATA_HD_SELECT 0x19 /* 101dhhhh , d=drive, hhhh=head */
-#define ATA_HD_STATUS 0x1d /* see status-bits */
#define ATA_HD_CONTROL 0x39
-static int falconide_offsets[IDE_NR_PORTS] __initdata = {
- ATA_HD_DATA, ATA_HD_ERROR, ATA_HD_NSECTOR, ATA_HD_SECTOR, ATA_HD_LCYL,
- ATA_HD_HCYL, ATA_HD_SELECT, ATA_HD_STATUS, ATA_HD_CONTROL, -1
-};
-
-
/*
* falconide_intr_lock is used to obtain access to the IDE interrupt,
* which is shared between several drivers.
@@ -57,6 +43,22 @@ static int falconide_offsets[IDE_NR_PORTS] __initdata = {
int falconide_intr_lock;
EXPORT_SYMBOL(falconide_intr_lock);
+static void __init falconide_setup_ports(hw_regs_t *hw)
+{
+ int i;
+
+ memset(hw, 0, sizeof(*hw));
+
+ hw->io_ports[IDE_DATA_OFFSET] = ATA_HD_BASE;
+
+ for (i = 1; i < 8; i++)
+ hw->io_ports[i] = ATA_HD_BASE + 1 + i * 4;
+
+ hw->io_ports[IDE_CONTROL_OFFSET] = ATA_HD_CONTROL;
+
+ hw->irq = IRQ_MFP_IDE;
+ hw->ack_intr = NULL;
+}
/*
* Probe for a Falcon IDE interface
@@ -64,16 +66,15 @@ EXPORT_SYMBOL(falconide_intr_lock);
static int __init falconide_init(void)
{
- if (MACH_IS_ATARI && ATARIHW_PRESENT(IDE)) {
hw_regs_t hw;
ide_hwif_t *hwif;
+ if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE))
+ return 0;
+
printk(KERN_INFO "ide: Falcon IDE controller\n");
- ide_setup_ports(&hw, ATA_HD_BASE, falconide_offsets,
- 0, 0, NULL,
-// falconide_iops,
- IRQ_MFP_IDE);
+ falconide_setup_ports(&hw);
hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
if (hwif) {
@@ -85,9 +86,8 @@ static int __init falconide_init(void)
ide_device_add(idx, NULL);
}
- }
- return 0;
+ return 0;
}
module_init(falconide_init);
diff --git a/drivers/ide/legacy/gayle.c b/drivers/ide/legacy/gayle.c
index fc29ce75aff1..9d3851d27677 100644
--- a/drivers/ide/legacy/gayle.c
+++ b/drivers/ide/legacy/gayle.c
@@ -34,22 +34,8 @@
* Offsets from one of the above bases
*/
-#define GAYLE_DATA 0x00
-#define GAYLE_ERROR 0x06 /* see err-bits */
-#define GAYLE_NSECTOR 0x0a /* nr of sectors to read/write */
-#define GAYLE_SECTOR 0x0e /* starting sector */
-#define GAYLE_LCYL 0x12 /* starting cylinder */
-#define GAYLE_HCYL 0x16 /* high byte of starting cyl */
-#define GAYLE_SELECT 0x1a /* 101dhhhh , d=drive, hhhh=head */
-#define GAYLE_STATUS 0x1e /* see status-bits */
#define GAYLE_CONTROL 0x101a
-static int gayle_offsets[IDE_NR_PORTS] __initdata = {
- GAYLE_DATA, GAYLE_ERROR, GAYLE_NSECTOR, GAYLE_SECTOR, GAYLE_LCYL,
- GAYLE_HCYL, GAYLE_SELECT, GAYLE_STATUS, -1, -1
-};
-
-
/*
* These are at different offsets from the base
*/
@@ -106,6 +92,26 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
return 1;
}
+static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
+ unsigned long ctl, unsigned long irq_port,
+				      ide_ack_intr_t *ack_intr)
+{
+ int i;
+
+ memset(hw, 0, sizeof(*hw));
+
+ hw->io_ports[IDE_DATA_OFFSET] = base;
+
+ for (i = 1; i < 8; i++)
+ hw->io_ports[i] = base + 2 + i * 4;
+
+ hw->io_ports[IDE_CONTROL_OFFSET] = ctl;
+ hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+
+ hw->irq = IRQ_AMIGA_PORTS;
+ hw->ack_intr = ack_intr;
+}
+
/*
* Probe for a Gayle IDE interface (and optionally for an IDE doubler)
*/
@@ -167,10 +173,7 @@ found:
base = (unsigned long)ZTWO_VADDR(phys_base);
ctrlport = GAYLE_HAS_CONTROL_REG ? (base + GAYLE_CONTROL) : 0;
- ide_setup_ports(&hw, base, gayle_offsets,
- ctrlport, irqport, ack_intr,
-// &gayle_iops,
- IRQ_AMIGA_PORTS);
+ gayle_setup_ports(&hw, base, ctrlport, irqport, ack_intr);
hwif = ide_find_port(base);
if (hwif) {
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 8e05d88e81ba..0b0d86731927 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -421,11 +421,14 @@ static void bad_rw_intr(void)
static inline int wait_DRQ(void)
{
- int retries = 100000, stat;
+ int retries;
+ int stat;
- while (--retries > 0)
- if ((stat = inb_p(HD_STATUS)) & DRQ_STAT)
+ for (retries = 0; retries < 100000; retries++) {
+ stat = inb_p(HD_STATUS);
+ if (stat & DRQ_STAT)
return 0;
+ }
dump_status("wait_DRQ", stat);
return -1;
}
diff --git a/drivers/ide/legacy/ide_platform.c b/drivers/ide/legacy/ide_platform.c
index 26c82ce602de..688fcae17488 100644
--- a/drivers/ide/legacy/ide_platform.c
+++ b/drivers/ide/legacy/ide_platform.c
@@ -17,7 +17,7 @@
#include <linux/ide.h>
#include <linux/ioport.h>
#include <linux/module.h>
-#include <linux/pata_platform.h>
+#include <linux/ata_platform.h>
#include <linux/platform_device.h>
#include <linux/io.h>
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index 06df8df857a3..a61e60737dc7 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -31,14 +31,6 @@
* These match MkLinux so they should be correct.
*/
-#define IDE_DATA 0x00
-#define IDE_ERROR 0x04 /* see err-bits */
-#define IDE_NSECTOR 0x08 /* nr of sectors to read/write */
-#define IDE_SECTOR 0x0c /* starting sector */
-#define IDE_LCYL 0x10 /* starting cylinder */
-#define IDE_HCYL 0x14 /* high byte of starting cyl */
-#define IDE_SELECT 0x18 /* 101dhhhh , d=drive, hhhh=head */
-#define IDE_STATUS 0x1c /* see status-bits */
#define IDE_CONTROL 0x38 /* control/altstatus */
/*
@@ -63,11 +55,6 @@
volatile unsigned char *ide_ifr = (unsigned char *) (IDE_BASE + IDE_IFR);
-static int macide_offsets[IDE_NR_PORTS] = {
- IDE_DATA, IDE_ERROR, IDE_NSECTOR, IDE_SECTOR, IDE_LCYL,
- IDE_HCYL, IDE_SELECT, IDE_STATUS, IDE_CONTROL
-};
-
int macide_ack_intr(ide_hwif_t* hwif)
{
if (*ide_ifr & 0x20) {
@@ -77,6 +64,22 @@ int macide_ack_intr(ide_hwif_t* hwif)
return 0;
}
+static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
+ int irq, ide_ack_intr_t *ack_intr)
+{
+ int i;
+
+ memset(hw, 0, sizeof(*hw));
+
+ for (i = 0; i < 8; i++)
+ hw->io_ports[i] = base + i * 4;
+
+ hw->io_ports[IDE_CONTROL_OFFSET] = IDE_CONTROL;
+
+ hw->irq = irq;
+ hw->ack_intr = ack_intr;
+}
+
static const char *mac_ide_name[] =
{ "Quadra", "Powerbook", "Powerbook Baboon" };
@@ -86,27 +89,27 @@ static const char *mac_ide_name[] =
static int __init macide_init(void)
{
- hw_regs_t hw;
ide_hwif_t *hwif;
+ ide_ack_intr_t *ack_intr;
+ unsigned long base;
+ int irq;
+ hw_regs_t hw;
switch (macintosh_config->ide_type) {
case MAC_IDE_QUADRA:
- ide_setup_ports(&hw, IDE_BASE, macide_offsets,
- 0, 0, macide_ack_intr,
-// quadra_ide_iops,
- IRQ_NUBUS_F);
+ base = IDE_BASE;
+ ack_intr = macide_ack_intr;
+ irq = IRQ_NUBUS_F;
break;
case MAC_IDE_PB:
- ide_setup_ports(&hw, IDE_BASE, macide_offsets,
- 0, 0, macide_ack_intr,
-// macide_pb_iops,
- IRQ_NUBUS_C);
+ base = IDE_BASE;
+ ack_intr = macide_ack_intr;
+ irq = IRQ_NUBUS_C;
break;
case MAC_IDE_BABOON:
- ide_setup_ports(&hw, BABOON_BASE, macide_offsets,
- 0, 0, NULL,
-// macide_baboon_iops,
- IRQ_BABOON_1);
+ base = BABOON_BASE;
+ ack_intr = NULL;
+ irq = IRQ_BABOON_1;
break;
default:
return -ENODEV;
@@ -115,6 +118,8 @@ static int __init macide_init(void)
printk(KERN_INFO "ide: Macintosh %s IDE controller\n",
mac_ide_name[macintosh_config->ide_type - 1]);
+ macide_setup_ports(&hw, base, irq, ack_intr);
+
hwif = ide_find_port(hw.io_ports[IDE_DATA_OFFSET]);
if (hwif) {
u8 index = hwif->index;
diff --git a/drivers/ide/legacy/q40ide.c b/drivers/ide/legacy/q40ide.c
index 2f0b34d892a1..1381b91bc316 100644
--- a/drivers/ide/legacy/q40ide.c
+++ b/drivers/ide/legacy/q40ide.c
@@ -66,16 +66,12 @@ static int q40ide_default_irq(unsigned long base)
/*
- * This is very similar to ide_setup_ports except that addresses
- * are pretranslated for q40 ISA access
+ * Addresses are pretranslated for Q40 ISA access.
*/
void q40_ide_setup_ports ( hw_regs_t *hw,
unsigned long base, int *offsets,
unsigned long ctrl, unsigned long intr,
ide_ack_intr_t *ack_intr,
-/*
- * ide_io_ops_t *iops,
- */
int irq)
{
int i;
@@ -92,9 +88,6 @@ void q40_ide_setup_ports ( hw_regs_t *hw,
hw->irq = irq;
hw->ack_intr = ack_intr;
-/*
- * hw->iops = iops;
- */
}
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
index 94803253e8af..02e6ee7d751d 100644
--- a/drivers/ide/pci/Makefile
+++ b/drivers/ide/pci/Makefile
@@ -34,7 +34,8 @@ obj-$(CONFIG_BLK_DEV_TRM290) += trm290.o
obj-$(CONFIG_BLK_DEV_VIA82CXXX) += via82cxxx.o
# Must appear at the end of the block
-obj-$(CONFIG_BLK_DEV_GENERIC) += generic.o
+obj-$(CONFIG_BLK_DEV_GENERIC) += ide-pci-generic.o
+ide-pci-generic-y += generic.o
ifeq ($(CONFIG_BLK_DEV_CMD640), m)
obj-m += cmd640.o
diff --git a/drivers/ide/pci/generic.c b/drivers/ide/pci/generic.c
index 9262a9174b4e..7fd83a9d4dee 100644
--- a/drivers/ide/pci/generic.c
+++ b/drivers/ide/pci/generic.c
@@ -29,19 +29,6 @@
static int ide_generic_all; /* Set to claim all devices */
-/*
- * the module_param_named() was added for the modular case
- * the __setup() is left as compatibility for existing setups
- */
-#ifndef MODULE
-static int __init ide_generic_all_on(char *unused)
-{
- ide_generic_all = 1;
- printk(KERN_INFO "IDE generic will claim all unknown PCI IDE storage controllers.\n");
- return 1;
-}
-const __setup("all-generic-ide", ide_generic_all_on);
-#endif
module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index ef5b39fa042b..cc4be9621bc0 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -704,9 +704,6 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
hwif->sata_scr[SATA_STATUS_OFFSET] = base + 0x104;
hwif->sata_scr[SATA_ERROR_OFFSET] = base + 0x108;
hwif->sata_scr[SATA_CONTROL_OFFSET] = base + 0x100;
- hwif->sata_misc[SATA_MISC_OFFSET] = base + 0x140;
- hwif->sata_misc[SATA_PHY_OFFSET] = base + 0x144;
- hwif->sata_misc[SATA_IEN_OFFSET] = base + 0x148;
}
memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index a193dfbf99d2..a5dc78ae62d4 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -44,8 +44,8 @@ source "drivers/infiniband/hw/ipath/Kconfig"
source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
-
source "drivers/infiniband/hw/mlx4/Kconfig"
+source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 75f325e40b54..ed35e4496241 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_CXGB3) += hw/cxgb3/
obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
+obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c0150147d347..638b727d42e0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -974,6 +974,9 @@ static void cm_format_req(struct cm_req_msg *req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_req_param *param)
{
+ struct ib_sa_path_rec *pri_path = param->primary_path;
+ struct ib_sa_path_rec *alt_path = param->alternate_path;
+
cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
@@ -997,35 +1000,46 @@ static void cm_format_req(struct cm_req_msg *req_msg,
cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
cm_req_set_srq(req_msg, param->srq);
- req_msg->primary_local_lid = param->primary_path->slid;
- req_msg->primary_remote_lid = param->primary_path->dlid;
- req_msg->primary_local_gid = param->primary_path->sgid;
- req_msg->primary_remote_gid = param->primary_path->dgid;
- cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
- cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
- req_msg->primary_traffic_class = param->primary_path->traffic_class;
- req_msg->primary_hop_limit = param->primary_path->hop_limit;
- cm_req_set_primary_sl(req_msg, param->primary_path->sl);
- cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
+ if (pri_path->hop_limit <= 1) {
+ req_msg->primary_local_lid = pri_path->slid;
+ req_msg->primary_remote_lid = pri_path->dlid;
+ } else {
+ /* Work-around until there's a way to obtain remote LID info */
+ req_msg->primary_local_lid = IB_LID_PERMISSIVE;
+ req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
+ }
+ req_msg->primary_local_gid = pri_path->sgid;
+ req_msg->primary_remote_gid = pri_path->dgid;
+ cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
+ cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
+ req_msg->primary_traffic_class = pri_path->traffic_class;
+ req_msg->primary_hop_limit = pri_path->hop_limit;
+ cm_req_set_primary_sl(req_msg, pri_path->sl);
+ cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
cm_req_set_primary_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
- param->primary_path->packet_life_time));
+ pri_path->packet_life_time));
- if (param->alternate_path) {
- req_msg->alt_local_lid = param->alternate_path->slid;
- req_msg->alt_remote_lid = param->alternate_path->dlid;
- req_msg->alt_local_gid = param->alternate_path->sgid;
- req_msg->alt_remote_gid = param->alternate_path->dgid;
+ if (alt_path) {
+ if (alt_path->hop_limit <= 1) {
+ req_msg->alt_local_lid = alt_path->slid;
+ req_msg->alt_remote_lid = alt_path->dlid;
+ } else {
+ req_msg->alt_local_lid = IB_LID_PERMISSIVE;
+ req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
+ }
+ req_msg->alt_local_gid = alt_path->sgid;
+ req_msg->alt_remote_gid = alt_path->dgid;
cm_req_set_alt_flow_label(req_msg,
- param->alternate_path->flow_label);
- cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
- req_msg->alt_traffic_class = param->alternate_path->traffic_class;
- req_msg->alt_hop_limit = param->alternate_path->hop_limit;
- cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
- cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
+ alt_path->flow_label);
+ cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
+ req_msg->alt_traffic_class = alt_path->traffic_class;
+ req_msg->alt_hop_limit = alt_path->hop_limit;
+ cm_req_set_alt_sl(req_msg, alt_path->sl);
+ cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
cm_req_set_alt_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
- param->alternate_path->packet_life_time));
+ alt_path->packet_life_time));
}
if (param->private_data && param->private_data_len)
@@ -1441,6 +1455,34 @@ out:
return listen_cm_id_priv;
}
+/*
+ * Work-around for inter-subnet connections. If the LIDs are permissive,
+ * we need to override the LID/SL data in the REQ with the LID information
+ * in the work completion.
+ */
+static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
+{
+ if (!cm_req_get_primary_subnet_local(req_msg)) {
+ if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
+ req_msg->primary_local_lid = cpu_to_be16(wc->slid);
+ cm_req_set_primary_sl(req_msg, wc->sl);
+ }
+
+ if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
+ req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
+ }
+
+ if (!cm_req_get_alt_subnet_local(req_msg)) {
+ if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
+ req_msg->alt_local_lid = cpu_to_be16(wc->slid);
+ cm_req_set_alt_sl(req_msg, wc->sl);
+ }
+
+ if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
+ req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
+ }
+}
+
static int cm_req_handler(struct cm_work *work)
{
struct ib_cm_id *cm_id;
@@ -1481,6 +1523,7 @@ static int cm_req_handler(struct cm_work *work)
cm_id_priv->id.service_id = req_msg->service_id;
cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);
+ cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
if (ret) {
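
The cm.c hunks above implement the inter-subnet (router) work-around: when a path record's hop_limit is greater than 1 the REQ goes out with permissive LIDs, and cm_process_routed_req() on the receive side patches the real LID and SL back in from the work completion. Below is a minimal userspace sketch of that substitution rule; the struct and helper names are hypothetical stand-ins for the CM types, not the kernel API.

#include <stdint.h>
#include <stdio.h>

#define LID_PERMISSIVE 0xFFFF  /* plays the role of IB_LID_PERMISSIVE */

/* Hypothetical, trimmed-down view of the REQ fields touched above. */
struct req_lids {
	uint16_t local_lid;
	uint16_t remote_lid;
	uint8_t  sl;
	int      subnet_local;   /* set only when hop_limit <= 1 */
};

/* Receive-side fix-up: if the REQ was marked non-local and carries
 * permissive LIDs, take the real LID/SL from the work completion. */
static void fixup_routed_req(struct req_lids *req, uint16_t wc_slid,
			     uint16_t wc_dlid_path_bits, uint8_t wc_sl)
{
	if (req->subnet_local)
		return;
	if (req->local_lid == LID_PERMISSIVE) {
		req->local_lid = wc_slid;
		req->sl = wc_sl;
	}
	if (req->remote_lid == LID_PERMISSIVE)
		req->remote_lid = wc_dlid_path_bits;
}

int main(void)
{
	struct req_lids req = { LID_PERMISSIVE, LID_PERMISSIVE, 0, 0 };

	fixup_routed_req(&req, 0x0012, 0x0034, 3);
	printf("local=0x%04x remote=0x%04x sl=%u\n",
	       (unsigned)req.local_lid, (unsigned)req.remote_lid,
	       (unsigned)req.sl);
	return 0;
}
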
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 6c7aa59794d4..7f00347364f7 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -320,10 +320,13 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
.max_maps = pool->max_remaps,
.page_shift = params->page_shift
};
+ int bytes_per_fmr = sizeof *fmr;
+
+ if (pool->cache_bucket)
+ bytes_per_fmr += params->max_pages_per_fmr * sizeof (u64);
for (i = 0; i < params->pool_size; ++i) {
- fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
- GFP_KERNEL);
+ fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
if (!fmr) {
printk(KERN_WARNING PFX "failed to allocate fmr "
"struct for FMR %d\n", i);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index f281d16040f5..92cce8aacbb7 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -101,6 +101,7 @@ struct ehca_sport {
spinlock_t mod_sqp_lock;
enum ib_port_state port_state;
struct ehca_sma_attr saved_attr;
+ u32 pma_qp_nr;
};
#define HCA_CAP_MR_PGSIZE_4K 0x80000000
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 863b34fa9ff9..b5ca94c6b8d9 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -403,6 +403,8 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
sport->port_state = IB_PORT_ACTIVE;
dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
"is active");
+ ehca_query_sma_attr(shca, port,
+ &sport->saved_attr);
} else
notify_port_conf_change(shca, port);
break;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index c469bfde2708..a8a2ea585d2f 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -187,6 +187,11 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context);
int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad,
+ struct ib_mad *out_mad);
+
void ehca_poll_eqs(unsigned long data);
int ehca_calc_ipd(struct ehca_shca *shca, int port,
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 84c9b7b8669b..a86ebcc79a95 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -472,7 +472,7 @@ int ehca_init_device(struct ehca_shca *shca)
shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
shca->ib_device.attach_mcast = ehca_attach_mcast;
shca->ib_device.detach_mcast = ehca_detach_mcast;
- /* shca->ib_device.process_mad = ehca_process_mad; */
+ shca->ib_device.process_mad = ehca_process_mad;
shca->ib_device.mmap = ehca_mmap;
if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 3aacc8cf1e44..2ce8cffb8664 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -209,6 +209,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
return -EINVAL;
}
+ if (unlikely(send_wr->wr.ud.remote_qpn == 0)) {
+ ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
+ return -EINVAL;
+ }
my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
wqe_p->u.ud_av.ud_av = my_av->av;
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 79e72b25b252..706d97ad5555 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -39,12 +39,18 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rdma/ib_mad.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
+#define IB_MAD_STATUS_REDIRECT __constant_htons(0x0002)
+#define IB_MAD_STATUS_UNSUP_VERSION __constant_htons(0x0004)
+#define IB_MAD_STATUS_UNSUP_METHOD __constant_htons(0x0008)
+
+#define IB_PMA_CLASS_PORT_INFO __constant_htons(0x0001)
/**
* ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
@@ -83,6 +89,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
port, ret);
return ret;
}
+ shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
+ ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
+ port, pma_qp_nr);
break;
default:
ehca_err(&shca->ib_device, "invalid qp_type=%x",
@@ -109,3 +118,85 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
return H_SUCCESS;
}
+
+struct ib_perf {
+ struct ib_mad_hdr mad_hdr;
+ u8 reserved[40];
+ u8 data[192];
+} __attribute__ ((packed));
+
+
+static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ struct ib_perf *in_perf = (struct ib_perf *)in_mad;
+ struct ib_perf *out_perf = (struct ib_perf *)out_mad;
+ struct ib_class_port_info *poi =
+ (struct ib_class_port_info *)out_perf->data;
+ struct ehca_shca *shca =
+ container_of(ibdev, struct ehca_shca, ib_device);
+ struct ehca_sport *sport = &shca->sport[port_num - 1];
+
+ ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
+
+ *out_mad = *in_mad;
+
+ if (in_perf->mad_hdr.class_version != 1) {
+ ehca_warn(ibdev, "Unsupported class_version=%x",
+ in_perf->mad_hdr.class_version);
+ out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
+ goto perf_reply;
+ }
+
+ switch (in_perf->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ case IB_MGMT_METHOD_SET:
+ /* set class port info for redirection */
+ out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
+ out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
+ memset(poi, 0, sizeof(*poi));
+ poi->base_version = 1;
+ poi->class_version = 1;
+ poi->resp_time_value = 18;
+ poi->redirect_lid = sport->saved_attr.lid;
+ poi->redirect_qp = sport->pma_qp_nr;
+ poi->redirect_qkey = IB_QP1_QKEY;
+ poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
+
+ ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
+ sport->saved_attr.lid, sport->pma_qp_nr);
+ break;
+
+ case IB_MGMT_METHOD_GET_RESP:
+ return IB_MAD_RESULT_FAILURE;
+
+ default:
+ out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
+ break;
+ }
+
+perf_reply:
+ out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ int ret;
+
+ if (!port_num || port_num > ibdev->phys_port_cnt)
+ return IB_MAD_RESULT_FAILURE;
+
+ /* accept only pma request */
+ if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+ return IB_MAD_RESULT_SUCCESS;
+
+ ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
+ ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
+
+ return ret;
+}
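
The new ehca_process_mad()/ehca_process_perf() pair does not answer performance-management queries itself: for Get/Set it replies with a redirected ClassPortInfo pointing the requester at the firmware-owned PMA QP saved by ehca_define_sqp(), and it rejects unsupported class versions and methods. A condensed, standalone sketch of that decision flow follows; the enum and struct are simplified stand-ins, not the ib_mad definitions.

#include <stdint.h>
#include <stdio.h>

enum method { M_GET, M_SET, M_GET_RESP, M_TRAP };

enum status {
	ST_OK            = 0x0000,
	ST_REDIRECT      = 0x0002,
	ST_UNSUP_VERSION = 0x0004,
	ST_UNSUP_METHOD  = 0x0008,
};

/* Simplified reply: either a redirect target or an error status. */
struct pma_reply {
	enum status status;
	uint16_t    redirect_lid;
	uint32_t    redirect_qp;
};

/* Mirror of the ehca decision flow: redirect Get/Set to the PMA QP,
 * fail on Get-Resp, flag everything else as unsupported. */
static int process_perf(int class_version, enum method m,
			uint16_t pma_lid, uint32_t pma_qp,
			struct pma_reply *out)
{
	if (class_version != 1) {
		out->status = ST_UNSUP_VERSION;
		return 0;		/* reply carries the error status */
	}
	switch (m) {
	case M_GET:
	case M_SET:
		out->status = ST_REDIRECT;
		out->redirect_lid = pma_lid;
		out->redirect_qp = pma_qp;
		return 0;		/* reply carries the redirect */
	case M_GET_RESP:
		return -1;		/* nothing to reply to */
	default:
		out->status = ST_UNSUP_METHOD;
		return 0;
	}
}

int main(void)
{
	struct pma_reply r;

	if (!process_perf(1, M_GET, 0x0007, 42, &r))
		printf("status=0x%04x lid=0x%04x qp=%u\n",
		       (unsigned)r.status, (unsigned)r.redirect_lid,
		       (unsigned)r.redirect_qp);
	return 0;
}
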
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index d8287d9db41e..96a39b5c9254 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -52,7 +52,7 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
-static const char mlx4_ib_version[] __devinitdata =
+static const char mlx4_ib_version[] =
DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -468,6 +468,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (err)
goto out;
+ dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
@@ -516,9 +517,16 @@ static struct class_device_attribute *mlx4_class_attributes[] = {
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
+ static int mlx4_ib_version_printed;
struct mlx4_ib_dev *ibdev;
int i;
+
+ if (!mlx4_ib_version_printed) {
+ printk(KERN_INFO "%s", mlx4_ib_version);
+ ++mlx4_ib_version_printed;
+ }
+
ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
if (!ibdev) {
dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 6966f943f440..09a30dd12b14 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1255,9 +1255,14 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
if (err)
goto out;
- MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
- MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
- MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
+ if (!mthca_is_memfree(dev)) {
+ MTHCA_GET(adapter->vendor_id, outbox,
+ QUERY_ADAPTER_VENDOR_ID_OFFSET);
+ MTHCA_GET(adapter->device_id, outbox,
+ QUERY_ADAPTER_DEVICE_ID_OFFSET);
+ MTHCA_GET(adapter->revision_id, outbox,
+ QUERY_ADAPTER_REVISION_ID_OFFSET);
+ }
MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 5cf8250d4e16..cd3d8adbef9f 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -126,7 +126,7 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
"number of memory translation table segments reserved for FMR");
-static const char mthca_version[] __devinitdata =
+static char mthca_version[] __devinitdata =
DRV_NAME ": Mellanox InfiniBand HCA driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -735,7 +735,8 @@ static int mthca_init_hca(struct mthca_dev *mdev)
}
mdev->eq_table.inta_pin = adapter.inta_pin;
- mdev->rev_id = adapter.revision_id;
+ if (!mthca_is_memfree(mdev))
+ mdev->rev_id = adapter.revision_id;
memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
return 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index aa6c70a6a36f..3b6985557cb2 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -613,8 +613,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
sizeof *(mr->mem.tavor.mpt) * idx;
mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
- if (IS_ERR(mr->mtt))
+ if (IS_ERR(mr->mtt)) {
+ err = PTR_ERR(mr->mtt);
goto err_out_table;
+ }
mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
@@ -627,8 +629,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
- if (IS_ERR(mailbox))
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
goto err_out_free_mtt;
+ }
mpt_entry = mailbox->buf;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 6bcde1cb9688..9e491df6419c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -923,17 +923,13 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
struct mthca_mr *mr;
u64 *page_list;
u64 total_size;
- u64 mask;
+ unsigned long mask;
int shift;
int npages;
int err;
int i, j, n;
- /* First check that we have enough alignment */
- if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
- return ERR_PTR(-EINVAL);
-
- mask = 0;
+ mask = buffer_list[0].addr ^ *iova_start;
total_size = 0;
for (i = 0; i < num_phys_buf; ++i) {
if (i != 0)
@@ -947,17 +943,7 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
if (mask & ~PAGE_MASK)
return ERR_PTR(-EINVAL);
- /* Find largest page shift we can use to cover buffers */
- for (shift = PAGE_SHIFT; shift < 31; ++shift)
- if (num_phys_buf > 1) {
- if ((1ULL << shift) & mask)
- break;
- } else {
- if (1ULL << shift >=
- buffer_list[0].size +
- (buffer_list[0].addr & ((1ULL << shift) - 1)))
- break;
- }
+ shift = __ffs(mask | 1 << 31);
buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
buffer_list[0].addr &= ~0ull << shift;
@@ -1270,6 +1256,8 @@ static int mthca_init_node_data(struct mthca_dev *dev)
goto out;
}
+ if (mthca_is_memfree(dev))
+ dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
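
The mthca_reg_phys_mr() rewrite folds the old "find the largest usable page shift" loop into one expression: the iova and every buffer boundary that must be page aligned are OR-ed into a single mask, and __ffs() of that mask (capped at bit 31) gives the largest power-of-two page size common to all of them. A userspace sketch of the same computation, with made-up buffer values and __builtin_ctzll() standing in for __ffs():

#include <stdint.h>
#include <stdio.h>

/* Largest page shift usable for a set of physical buffers mapped at iova:
 * OR together everything that has to be page aligned, then take the lowest
 * set bit of the result (capped at 2^31, as the driver does). */
static int best_page_shift(const uint64_t *addr, const uint64_t *size,
			   int n, uint64_t iova)
{
	uint64_t mask = addr[0] ^ iova;
	int i;

	for (i = 0; i < n; ++i) {
		if (i != 0)
			mask |= addr[i];		/* interior buffer starts */
		if (i != n - 1)
			mask |= addr[i] + size[i];	/* interior buffer ends */
	}
	return __builtin_ctzll(mask | (1ULL << 31));	/* stand-in for __ffs() */
}

int main(void)
{
	/* two 2 MB buffers whose placement only guarantees 1 MB alignment */
	uint64_t addr[] = { 0x100000, 0x300000 };
	uint64_t size[] = { 0x200000, 0x200000 };

	printf("page shift = %d\n", best_page_shift(addr, size, 2, 0x100000));
	return 0;
}
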
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 0e5461c65731..db5595bbf7f0 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1175,6 +1175,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
{
int ret;
int i;
+ struct mthca_next_seg *next;
qp->refcount = 1;
init_waitqueue_head(&qp->wait);
@@ -1217,7 +1218,6 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
}
if (mthca_is_memfree(dev)) {
- struct mthca_next_seg *next;
struct mthca_data_seg *scatter;
int size = (sizeof (struct mthca_next_seg) +
qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
@@ -1240,6 +1240,13 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
qp->sq.wqe_shift) +
qp->send_wqe_offset);
}
+ } else {
+ for (i = 0; i < qp->rq.max; ++i) {
+ next = get_recv_wqe(qp, i);
+ next->nda_op = htonl((((i + 1) % qp->rq.max) <<
+ qp->rq.wqe_shift) | 1);
+ }
+
}
qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
@@ -1863,7 +1870,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
prev_wqe = qp->rq.last;
qp->rq.last = wqe;
- ((struct mthca_next_seg *) wqe)->nda_op = 0;
((struct mthca_next_seg *) wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD);
((struct mthca_next_seg *) wqe)->flags = 0;
@@ -1885,9 +1891,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
qp->wrid[ind] = wr->wr_id;
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
- wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD | size);
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 553d681f6813..a5ffff6e1026 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -175,9 +175,17 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
* scatter list L_Keys to the sentry value of 0x100.
*/
for (i = 0; i < srq->max; ++i) {
- wqe = get_wqe(srq, i);
+ struct mthca_next_seg *next;
- *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
+ next = wqe = get_wqe(srq, i);
+
+ if (i < srq->max - 1) {
+ *wqe_to_link(wqe) = i + 1;
+ next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
+ } else {
+ *wqe_to_link(wqe) = -1;
+ next->nda_op = 0;
+ }
for (scatter = wqe + sizeof (struct mthca_next_seg);
(void *) scatter < wqe + (1 << srq->wqe_shift);
@@ -470,16 +478,15 @@ out:
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
int ind;
+ struct mthca_next_seg *last_free;
ind = wqe_addr >> srq->wqe_shift;
spin_lock(&srq->lock);
- if (likely(srq->first_free >= 0))
- *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
- else
- srq->first_free = ind;
-
+ last_free = get_wqe(srq, srq->last_free);
+ *wqe_to_link(last_free) = ind;
+ last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
*wqe_to_link(get_wqe(srq, ind)) = -1;
srq->last_free = ind;
@@ -506,15 +513,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
first_ind = srq->first_free;
for (nreq = 0; wr; wr = wr->next) {
- ind = srq->first_free;
-
- if (unlikely(ind < 0)) {
- mthca_err(dev, "SRQ %06x full\n", srq->srqn);
- err = -ENOMEM;
- *bad_wr = wr;
- break;
- }
-
+ ind = srq->first_free;
wqe = get_wqe(srq, ind);
next_ind = *wqe_to_link(wqe);
@@ -528,7 +527,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
prev_wqe = srq->last;
srq->last = wqe;
- ((struct mthca_next_seg *) wqe)->nda_op = 0;
((struct mthca_next_seg *) wqe)->ee_nds = 0;
/* flags field will always remain 0 */
@@ -549,9 +547,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
if (i < srq->max_gs)
mthca_set_data_seg_inval(wqe);
- ((struct mthca_next_seg *) prev_wqe)->nda_op =
- cpu_to_be32((ind << srq->wqe_shift) | 1);
- wmb();
((struct mthca_next_seg *) prev_wqe)->ee_nds =
cpu_to_be32(MTHCA_NEXT_DBD);
@@ -614,15 +609,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
spin_lock_irqsave(&srq->lock, flags);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
- ind = srq->first_free;
-
- if (unlikely(ind < 0)) {
- mthca_err(dev, "SRQ %06x full\n", srq->srqn);
- err = -ENOMEM;
- *bad_wr = wr;
- break;
- }
-
+ ind = srq->first_free;
wqe = get_wqe(srq, ind);
next_ind = *wqe_to_link(wqe);
@@ -633,8 +620,6 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
break;
}
- ((struct mthca_next_seg *) wqe)->nda_op =
- cpu_to_be32((next_ind << srq->wqe_shift) | 1);
((struct mthca_next_seg *) wqe)->ee_nds = 0;
/* flags field will always remain 0 */
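
The mthca_qp.c and mthca_srq.c hunks above move the Tavor receive-descriptor chaining out of the posting hot path: nda_op (next-descriptor address plus a valid bit) is now written once when the queue or SRQ is created and whenever a WQE is returned to the free list, so posting a receive no longer stores the link or needs the extra wmb(). A small standalone sketch of pre-linking a chain of WQEs, with hypothetical sizes:

#include <stdint.h>
#include <stdio.h>

#define NWQE      8
#define WQE_SHIFT 6	/* 64-byte WQEs, just for the example */

struct wqe {
	uint32_t nda_op;	/* (next WQE offset << shift) | valid bit */
	int      next_free;	/* software free-list link, -1 = end */
};

static struct wqe q[NWQE];

/* One-time setup: chain every WQE to its successor, as the srq.c hunk does,
 * so posting a receive only has to fill in the data segments. */
static void prelink(void)
{
	int i;

	for (i = 0; i < NWQE; ++i) {
		if (i < NWQE - 1) {
			q[i].next_free = i + 1;
			q[i].nda_op = ((i + 1) << WQE_SHIFT) | 1;
		} else {
			q[i].next_free = -1;
			q[i].nda_op = 0;
		}
	}
}

int main(void)
{
	prelink();
	printf("wqe0 -> nda_op 0x%x, next %d\n",
	       (unsigned)q[0].nda_op, q[0].next_free);
	return 0;
}
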
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig
new file mode 100644
index 000000000000..2aeb7ac972a9
--- /dev/null
+++ b/drivers/infiniband/hw/nes/Kconfig
@@ -0,0 +1,16 @@
+config INFINIBAND_NES
+ tristate "NetEffect RNIC Driver"
+ depends on PCI && INET && INFINIBAND
+ select LIBCRC32C
+ ---help---
+ This is a low-level driver for NetEffect RDMA enabled
+ Network Interface Cards (RNIC).
+
+config INFINIBAND_NES_DEBUG
+ bool "Verbose debugging output"
+ depends on INFINIBAND_NES
+ default n
+ ---help---
+ This option causes the NetEffect RNIC driver to produce debug
+ messages. Select this if you are developing the driver
+ or trying to diagnose a problem.
diff --git a/drivers/infiniband/hw/nes/Makefile b/drivers/infiniband/hw/nes/Makefile
new file mode 100644
index 000000000000..35148513c47e
--- /dev/null
+++ b/drivers/infiniband/hw/nes/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_INFINIBAND_NES) += iw_nes.o
+
+iw_nes-objs := nes.o nes_hw.o nes_nic.o nes_utils.o nes_verbs.o nes_cm.o
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
new file mode 100644
index 000000000000..7f8853b44ee1
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -0,0 +1,1152 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/highmem.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/iw_cm.h>
+
+#include "nes.h"
+
+#include <net/netevent.h>
+#include <net/neighbour.h>
+#include <linux/route.h>
+#include <net/ip_fib.h>
+
+MODULE_AUTHOR("NetEffect");
+MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+int max_mtu = 9000;
+int nics_per_function = 1;
+int interrupt_mod_interval = 0;
+
+
+/* Interoperability */
+int mpa_version = 1;
+module_param(mpa_version, int, 0);
+MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp (0 or 1)");
+
+/* Interoperability */
+int disable_mpa_crc = 0;
+module_param(disable_mpa_crc, int, 0);
+MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");
+
+unsigned int send_first = 0;
+module_param(send_first, int, 0);
+MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
+
+
+unsigned int nes_drv_opt = 0;
+module_param(nes_drv_opt, int, 0);
+MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
+
+unsigned int nes_debug_level = 0;
+module_param_named(debug_level, nes_debug_level, uint, 0644);
+MODULE_PARM_DESC(debug_level, "Debug output level (bitmask of NES_DBG_* flags)");
+
+LIST_HEAD(nes_adapter_list);
+LIST_HEAD(nes_dev_list);
+
+atomic_t qps_destroyed;
+atomic_t cqp_reqs_allocated;
+atomic_t cqp_reqs_freed;
+atomic_t cqp_reqs_dynallocated;
+atomic_t cqp_reqs_dynfreed;
+atomic_t cqp_reqs_queued;
+atomic_t cqp_reqs_redriven;
+
+static void nes_print_macaddr(struct net_device *netdev);
+static irqreturn_t nes_interrupt(int, void *);
+static int __devinit nes_probe(struct pci_dev *, const struct pci_device_id *);
+static void __devexit nes_remove(struct pci_dev *);
+static int __init nes_init_module(void);
+static void __exit nes_exit_module(void);
+static unsigned int ee_flsh_adapter;
+static unsigned int sysfs_nonidx_addr;
+static unsigned int sysfs_idx_addr;
+
+static struct pci_device_id nes_pci_table[] = {
+ {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, nes_pci_table);
+
+static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *);
+static int nes_net_event(struct notifier_block *, unsigned long, void *);
+static int nes_notifiers_registered;
+
+
+static struct notifier_block nes_inetaddr_notifier = {
+ .notifier_call = nes_inetaddr_event
+};
+
+static struct notifier_block nes_net_notifier = {
+ .notifier_call = nes_net_event
+};
+
+
+
+
+/**
+ * nes_inetaddr_event
+ */
+static int nes_inetaddr_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *event_netdev = ifa->ifa_dev->dev;
+ struct nes_device *nesdev;
+ struct net_device *netdev;
+ struct nes_vnic *nesvnic;
+ unsigned int addr;
+ unsigned int mask;
+
+ addr = ntohl(ifa->ifa_address);
+ mask = ntohl(ifa->ifa_mask);
+ nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n",
+ addr, mask);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
+ nesdev, nesdev->netdev[0]->name);
+ netdev = nesdev->netdev[0];
+ nesvnic = netdev_priv(netdev);
+ if (netdev == event_netdev) {
+ if (nesvnic->rdma_enabled == 0) {
+ nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
+ " RDMA is not enabled.\n",
+ netdev->name);
+ return NOTIFY_OK;
+ }
+ /* we have ifa->ifa_address/mask here if we need it */
+ switch (event) {
+ case NETDEV_DOWN:
+ nes_debug(NES_DBG_NETDEV, "event:DOWN\n");
+ nes_write_indexed(nesdev,
+ NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), 0);
+
+ nes_manage_arp_cache(netdev, netdev->dev_addr,
+ ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE);
+ nesvnic->local_ipaddr = 0;
+ return NOTIFY_OK;
+ break;
+ case NETDEV_UP:
+ nes_debug(NES_DBG_NETDEV, "event:UP\n");
+
+ if (nesvnic->local_ipaddr != 0) {
+ nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n");
+ return NOTIFY_OK;
+ }
+ /* Add the address to the IP table */
+ nesvnic->local_ipaddr = ifa->ifa_address;
+
+ nes_write_indexed(nesdev,
+ NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
+ ntohl(ifa->ifa_address));
+ nes_manage_arp_cache(netdev, netdev->dev_addr,
+ ntohl(nesvnic->local_ipaddr), NES_ARP_ADD);
+ return NOTIFY_OK;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+
+/**
+ * nes_net_event
+ */
+static int nes_net_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct neighbour *neigh = ptr;
+ struct nes_device *nesdev;
+ struct net_device *netdev;
+ struct nes_vnic *nesvnic;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ /* nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p.\n", nesdev); */
+ netdev = nesdev->netdev[0];
+ nesvnic = netdev_priv(netdev);
+ if (netdev == neigh->dev) {
+ if (nesvnic->rdma_enabled == 0) {
+ nes_debug(NES_DBG_NETDEV, "Skipping device %s since no RDMA\n",
+ netdev->name);
+ } else {
+ if (neigh->nud_state & NUD_VALID) {
+ nes_manage_arp_cache(neigh->dev, neigh->ha,
+ ntohl(*(__be32 *)neigh->primary_key), NES_ARP_ADD);
+ } else {
+ nes_manage_arp_cache(neigh->dev, neigh->ha,
+ ntohl(*(__be32 *)neigh->primary_key), NES_ARP_DELETE);
+ }
+ }
+ return NOTIFY_OK;
+ }
+ }
+ break;
+ default:
+ nes_debug(NES_DBG_NETDEV, "NETEVENT_ %lu undefined\n", event);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+
+/**
+ * nes_add_ref
+ */
+void nes_add_ref(struct ib_qp *ibqp)
+{
+ struct nes_qp *nesqp;
+
+ nesqp = to_nesqp(ibqp);
+ nes_debug(NES_DBG_QP, "Bumping refcount for QP%u. Pre-inc value = %u\n",
+ ibqp->qp_num, atomic_read(&nesqp->refcount));
+ atomic_inc(&nesqp->refcount);
+}
+
+static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
+{
+ unsigned long flags;
+ struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 qp_id;
+
+ atomic_inc(&qps_destroyed);
+
+ /* Free the control structures */
+
+ qp_id = nesqp->hwqp.qp_id;
+ if (nesqp->pbl_vbase) {
+ pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ nesadapter->free_256pbl++;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
+ nesqp->pbl_vbase = NULL;
+
+ } else {
+ pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
+ }
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, nesqp->hwqp.qp_id);
+
+ kfree(nesqp->allocated_buffer);
+
+}
+
+/**
+ * nes_rem_ref
+ */
+void nes_rem_ref(struct ib_qp *ibqp)
+{
+ u64 u64temp;
+ struct nes_qp *nesqp;
+ struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ u32 opcode;
+
+ nesqp = to_nesqp(ibqp);
+
+ if (atomic_read(&nesqp->refcount) == 0) {
+ printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n",
+ __FUNCTION__, ibqp->qp_num, nesqp->last_aeq);
+ BUG();
+ }
+
+ if (atomic_dec_and_test(&nesqp->refcount)) {
+ nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL;
+
+ /* Destroy the QP */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
+ return;
+ }
+ cqp_request->waiting = 0;
+ cqp_request->callback = 1;
+ cqp_request->cqp_callback = nes_cqp_rem_ref_callback;
+ cqp_request->cqp_callback_pointer = nesqp;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ opcode = NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_IWARP;
+
+ if (nesqp->hte_added) {
+ opcode |= NES_CQP_QP_DEL_HTE;
+ nesqp->hte_added = 0;
+ }
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
+ u64temp = (u64)nesqp->nesqp_context_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+ }
+}
+
+
+/**
+ * nes_get_qp
+ */
+struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
+{
+ struct nes_vnic *nesvnic = to_nesvnic(device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+ if ((qpn < NES_FIRST_QPN) || (qpn >= (NES_FIRST_QPN + nesadapter->max_qp)))
+ return NULL;
+
+ return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp;
+}
+
+
+/**
+ * nes_print_macaddr
+ */
+static void nes_print_macaddr(struct net_device *netdev)
+{
+ nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n",
+ netdev->name,
+ netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
+ netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
+ netdev->irq);
+}
+
+
+/**
+ * nes_interrupt - handle interrupts
+ */
+static irqreturn_t nes_interrupt(int irq, void *dev_id)
+{
+ struct nes_device *nesdev = (struct nes_device *)dev_id;
+ int handled = 0;
+ u32 int_mask;
+ u32 int_req;
+ u32 int_stat;
+ u32 intf_int_stat;
+ u32 timer_stat;
+
+ if (nesdev->msi_enabled) {
+ /* No need to read the interrupt pending register if msi is enabled */
+ handled = 1;
+ } else {
+ if (unlikely(nesdev->nesadapter->hw_rev == NE020_REV)) {
+ /* Master interrupt enable provides synchronization for kicking off bottom half
+ when interrupt sharing is going on */
+ int_mask = nes_read32(nesdev->regs + NES_INT_MASK);
+ if (int_mask & 0x80000000) {
+ /* Check interrupt status to see if this might be ours */
+ int_stat = nes_read32(nesdev->regs + NES_INT_STAT);
+ int_req = nesdev->int_req;
+ if (int_stat&int_req) {
+ /* if interesting CEQ or AEQ is pending, claim the interrupt */
+ if ((int_stat&int_req) & (~(NES_INT_TIMER|NES_INT_INTF))) {
+ handled = 1;
+ } else {
+ if (((int_stat & int_req) & NES_INT_TIMER) == NES_INT_TIMER) {
+ /* Timer might be running but might be for another function */
+ timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT);
+ if ((timer_stat & nesdev->timer_int_req) != 0) {
+ handled = 1;
+ }
+ }
+ if ((((int_stat & int_req) & NES_INT_INTF) == NES_INT_INTF) &&
+ (handled == 0)) {
+ intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
+ if ((intf_int_stat & nesdev->intf_int_req) != 0) {
+ handled = 1;
+ }
+ }
+ }
+ if (handled) {
+ nes_write32(nesdev->regs+NES_INT_MASK, int_mask & (~0x80000000));
+ int_mask = nes_read32(nesdev->regs+NES_INT_MASK);
+ /* Save off the status to save an additional read */
+ nesdev->int_stat = int_stat;
+ nesdev->napi_isr_ran = 1;
+ }
+ }
+ }
+ } else {
+ handled = nes_read32(nesdev->regs+NES_INT_PENDING);
+ }
+ }
+
+ if (handled) {
+
+ if (nes_napi_isr(nesdev) == 0) {
+ tasklet_schedule(&nesdev->dpc_tasklet);
+
+ }
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
+}
+
+
+/**
+ * nes_probe - Device initialization
+ */
+static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct nes_device *nesdev = NULL;
+ int ret = 0;
+ struct nes_vnic *nesvnic = NULL;
+ void __iomem *mmio_regs = NULL;
+ u8 hw_rev;
+
+ assert(pcidev != NULL);
+ assert(ent != NULL);
+
+ printk(KERN_INFO PFX "NetEffect RNIC driver v%s loading. (%s)\n",
+ DRV_VERSION, pci_name(pcidev));
+
+ ret = pci_enable_device(pcidev);
+ if (ret) {
+ printk(KERN_ERR PFX "Unable to enable PCI device. (%s)\n", pci_name(pcidev));
+ goto bail0;
+ }
+
+ nes_debug(NES_DBG_INIT, "BAR0 (@0x%08lX) size = 0x%lX bytes\n",
+ (long unsigned int)pci_resource_start(pcidev, BAR_0),
+ (long unsigned int)pci_resource_len(pcidev, BAR_0));
+ nes_debug(NES_DBG_INIT, "BAR1 (@0x%08lX) size = 0x%lX bytes\n",
+ (long unsigned int)pci_resource_start(pcidev, BAR_1),
+ (long unsigned int)pci_resource_len(pcidev, BAR_1));
+
+	/* Make sure the PCI base addresses are MMIO resources */
+ if (!(pci_resource_flags(pcidev, BAR_0) & IORESOURCE_MEM) ||
+ !(pci_resource_flags(pcidev, BAR_1) & IORESOURCE_MEM)) {
+ printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
+ ret = -ENODEV;
+ goto bail1;
+ }
+
+ /* Reserve PCI I/O and memory resources */
+ ret = pci_request_regions(pcidev, DRV_NAME);
+ if (ret) {
+ printk(KERN_ERR PFX "Unable to request regions. (%s)\n", pci_name(pcidev));
+ goto bail1;
+ }
+
+ if ((sizeof(dma_addr_t) > 4)) {
+ ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
+ if (ret < 0) {
+ printk(KERN_ERR PFX "64b DMA mask configuration failed\n");
+ goto bail2;
+ }
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_64BIT_MASK);
+ if (ret) {
+ printk(KERN_ERR PFX "64b DMA consistent mask configuration failed\n");
+ goto bail2;
+ }
+ } else {
+ ret = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
+ if (ret < 0) {
+ printk(KERN_ERR PFX "32b DMA mask configuration failed\n");
+ goto bail2;
+ }
+ ret = pci_set_consistent_dma_mask(pcidev, DMA_32BIT_MASK);
+ if (ret) {
+ printk(KERN_ERR PFX "32b DMA consistent mask configuration failed\n");
+ goto bail2;
+ }
+ }
+
+ pci_set_master(pcidev);
+
+ /* Allocate hardware structure */
+ nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL);
+ if (!nesdev) {
+ printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev));
+ ret = -ENOMEM;
+ goto bail2;
+ }
+
+ nes_debug(NES_DBG_INIT, "Allocated nes device at %p\n", nesdev);
+ nesdev->pcidev = pcidev;
+ pci_set_drvdata(pcidev, nesdev);
+
+ pci_read_config_byte(pcidev, 0x0008, &hw_rev);
+ nes_debug(NES_DBG_INIT, "hw_rev=%u\n", hw_rev);
+
+ spin_lock_init(&nesdev->indexed_regs_lock);
+
+ /* Remap the PCI registers in adapter BAR0 to kernel VA space */
+	mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0),
+				    pci_resource_len(pcidev, BAR_0));
+ if (mmio_regs == NULL) {
+ printk(KERN_ERR PFX "Unable to remap BAR0\n");
+ ret = -EIO;
+ goto bail3;
+ }
+ nesdev->regs = mmio_regs;
+ nesdev->index_reg = 0x50 + (PCI_FUNC(pcidev->devfn)*8) + mmio_regs;
+
+ /* Ensure interrupts are disabled */
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff);
+
+ if (nes_drv_opt & NES_DRV_OPT_ENABLE_MSI) {
+ if (!pci_enable_msi(nesdev->pcidev)) {
+ nesdev->msi_enabled = 1;
+ nes_debug(NES_DBG_INIT, "MSI is enabled for device %s\n",
+ pci_name(pcidev));
+ } else {
+ nes_debug(NES_DBG_INIT, "MSI is disabled by linux for device %s\n",
+ pci_name(pcidev));
+ }
+ } else {
+ nes_debug(NES_DBG_INIT, "MSI not requested due to driver options for device %s\n",
+ pci_name(pcidev));
+ }
+
+ nesdev->csr_start = pci_resource_start(nesdev->pcidev, BAR_0);
+ nesdev->doorbell_region = pci_resource_start(nesdev->pcidev, BAR_1);
+
+ /* Init the adapter */
+ nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev);
+ nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
+	if (!nesdev->nesadapter) {
+		printk(KERN_ERR PFX "Unable to initialize adapter.\n");
+		ret = -ENOMEM;
+		goto bail5;
+	}
+	nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
+ /* nesdev->base_doorbell_index =
+ nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */
+ nesdev->base_doorbell_index = 1;
+ nesdev->doorbell_start = nesdev->nesadapter->doorbell_start;
+ nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count;
+
+ tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
+
+ /* bring up the Control QP */
+ if (nes_init_cqp(nesdev)) {
+ ret = -ENODEV;
+ goto bail6;
+ }
+
+ /* Arm the CCQ */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
+ PCI_FUNC(nesdev->pcidev->devfn));
+ nes_read32(nesdev->regs+NES_CQE_ALLOC);
+
+ /* Enable the interrupts */
+ nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) |
+ (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
+ if (PCI_FUNC(nesdev->pcidev->devfn) < 4) {
+ nesdev->int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+24));
+ }
+
+ /* TODO: This really should be the first driver to load, not function 0 */
+ if (PCI_FUNC(nesdev->pcidev->devfn) == 0) {
+ /* pick up PCI and critical errors if the first driver to load */
+ nesdev->intf_int_req = NES_INTF_INT_PCIERR | NES_INTF_INT_CRITERR;
+ nesdev->int_req |= NES_INT_INTF;
+ } else {
+ nesdev->intf_int_req = 0;
+ }
+ nesdev->intf_int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0, 0);
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 0);
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS2, 0x00001265);
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS4, 0x18021804);
+
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS3, 0x17801790);
+
+ /* deal with both periodic and one_shot */
+ nesdev->timer_int_req = 0x101 << PCI_FUNC(nesdev->pcidev->devfn);
+ nesdev->nesadapter->timer_int_req |= nesdev->timer_int_req;
+ nes_debug(NES_DBG_INIT, "setting int_req for function %u, nesdev = 0x%04X, adapter = 0x%04X\n",
+ PCI_FUNC(nesdev->pcidev->devfn),
+ nesdev->timer_int_req, nesdev->nesadapter->timer_int_req);
+
+ nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
+
+ list_add_tail(&nesdev->list, &nes_dev_list);
+
+ /* Request an interrupt line for the driver */
+ ret = request_irq(pcidev->irq, nes_interrupt, IRQF_SHARED, DRV_NAME, nesdev);
+ if (ret) {
+ printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
+ pci_name(pcidev), pcidev->irq);
+ goto bail65;
+ }
+
+ nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
+
+ if (nes_notifiers_registered == 0) {
+ register_inetaddr_notifier(&nes_inetaddr_notifier);
+ register_netevent_notifier(&nes_net_notifier);
+ }
+ nes_notifiers_registered++;
+
+ /* Initialize network devices */
+	netdev = nes_netdev_init(nesdev, mmio_regs);
+	if (netdev == NULL) {
+		ret = -ENOMEM;
+		goto bail7;
+	}
+
+ /* Register network device */
+ ret = register_netdev(netdev);
+ if (ret) {
+ printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret);
+ nes_netdev_destroy(netdev);
+ goto bail7;
+ }
+
+ nes_print_macaddr(netdev);
+ /* create a CM core for this netdev */
+ nesvnic = netdev_priv(netdev);
+
+ nesdev->netdev_count++;
+ nesdev->nesadapter->netdev_count++;
+
+
+	printk(KERN_INFO PFX "%s: NetEffect RNIC driver successfully loaded.\n",
+			pci_name(pcidev));
+ return 0;
+
+ bail7:
+ printk(KERN_ERR PFX "bail7\n");
+ while (nesdev->netdev_count > 0) {
+ nesdev->netdev_count--;
+ nesdev->nesadapter->netdev_count--;
+
+ unregister_netdev(nesdev->netdev[nesdev->netdev_count]);
+ nes_netdev_destroy(nesdev->netdev[nesdev->netdev_count]);
+ }
+
+ nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n",
+ nesdev->netdev_count, nesdev->nesadapter->netdev_count);
+
+ nes_notifiers_registered--;
+ if (nes_notifiers_registered == 0) {
+ unregister_netevent_notifier(&nes_net_notifier);
+ unregister_inetaddr_notifier(&nes_inetaddr_notifier);
+ }
+
+ list_del(&nesdev->list);
+ nes_destroy_cqp(nesdev);
+
+ bail65:
+ printk(KERN_ERR PFX "bail65\n");
+ free_irq(pcidev->irq, nesdev);
+ if (nesdev->msi_enabled) {
+ pci_disable_msi(pcidev);
+ }
+ bail6:
+ printk(KERN_ERR PFX "bail6\n");
+ tasklet_kill(&nesdev->dpc_tasklet);
+ /* Deallocate the Adapter Structure */
+ nes_destroy_adapter(nesdev->nesadapter);
+
+ bail5:
+ printk(KERN_ERR PFX "bail5\n");
+ iounmap(nesdev->regs);
+
+ bail3:
+ printk(KERN_ERR PFX "bail3\n");
+ kfree(nesdev);
+
+ bail2:
+ pci_release_regions(pcidev);
+
+ bail1:
+ pci_disable_device(pcidev);
+
+ bail0:
+ return ret;
+}
+
+
+/**
+ * nes_remove - unload from kernel
+ */
+static void __devexit nes_remove(struct pci_dev *pcidev)
+{
+ struct nes_device *nesdev = pci_get_drvdata(pcidev);
+ struct net_device *netdev;
+ int netdev_index = 0;
+
+ if (nesdev->netdev_count) {
+ netdev = nesdev->netdev[netdev_index];
+ if (netdev) {
+ netif_stop_queue(netdev);
+ unregister_netdev(netdev);
+ nes_netdev_destroy(netdev);
+
+ nesdev->netdev[netdev_index] = NULL;
+ nesdev->netdev_count--;
+ nesdev->nesadapter->netdev_count--;
+ }
+ }
+
+ nes_notifiers_registered--;
+ if (nes_notifiers_registered == 0) {
+ unregister_netevent_notifier(&nes_net_notifier);
+ unregister_inetaddr_notifier(&nes_inetaddr_notifier);
+ }
+
+ list_del(&nesdev->list);
+ nes_destroy_cqp(nesdev);
+ tasklet_kill(&nesdev->dpc_tasklet);
+
+ /* Deallocate the Adapter Structure */
+ nes_destroy_adapter(nesdev->nesadapter);
+
+ free_irq(pcidev->irq, nesdev);
+
+ if (nesdev->msi_enabled) {
+ pci_disable_msi(pcidev);
+ }
+
+ iounmap(nesdev->regs);
+ kfree(nesdev);
+
+ /* nes_debug(NES_DBG_SHUTDOWN, "calling pci_release_regions.\n"); */
+ pci_release_regions(pcidev);
+ pci_disable_device(pcidev);
+ pci_set_drvdata(pcidev, NULL);
+}
+
+
+static struct pci_driver nes_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = nes_pci_table,
+ .probe = nes_probe,
+ .remove = __devexit_p(nes_remove),
+};
+
+static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
+{
+ unsigned int devfn = 0xffffffff;
+ unsigned char bus_number = 0xff;
+ unsigned int i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ devfn = nesdev->nesadapter->devfn;
+ bus_number = nesdev->nesadapter->bus_number;
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%x:%x", bus_number, devfn);
+}
+
+static ssize_t nes_store_adapter(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+
+ ee_flsh_adapter = simple_strtoul(p, &p, 10);
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf)
+{
+ u32 eeprom_cmd = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ eeprom_cmd = nes_read32(nesdev->regs + NES_EEPROM_COMMAND);
+ break;
+ }
+ i++;
+ }
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_cmd);
+}
+
+static ssize_t nes_store_ee_cmd(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write32(nesdev->regs + NES_EEPROM_COMMAND, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf)
+{
+ u32 eeprom_data = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ eeprom_data = nes_read32(nesdev->regs + NES_EEPROM_DATA);
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_data);
+}
+
+static ssize_t nes_store_ee_data(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write32(nesdev->regs + NES_EEPROM_DATA, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf)
+{
+ u32 flash_cmd = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ flash_cmd = nes_read32(nesdev->regs + NES_FLASH_COMMAND);
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_cmd);
+}
+
+static ssize_t nes_store_flash_cmd(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write32(nesdev->regs + NES_FLASH_COMMAND, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf)
+{
+ u32 flash_data = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ flash_data = nes_read32(nesdev->regs + NES_FLASH_DATA);
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_data);
+}
+
+static ssize_t nes_store_flash_data(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write32(nesdev->regs + NES_FLASH_DATA, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_nonidx_addr(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_nonidx_addr);
+}
+
+static ssize_t nes_store_nonidx_addr(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X')
+ sysfs_nonidx_addr = simple_strtoul(p, &p, 16);
+
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf)
+{
+ u32 nonidx_data = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nonidx_data = nes_read32(nesdev->regs + sysfs_nonidx_addr);
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", nonidx_data);
+}
+
+static ssize_t nes_store_nonidx_data(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write32(nesdev->regs + sysfs_nonidx_addr, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_idx_addr(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_idx_addr);
+}
+
+static ssize_t nes_store_idx_addr(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X')
+ sysfs_idx_addr = simple_strtoul(p, &p, 16);
+
+ return strnlen(buf, count);
+}
+
+static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf)
+{
+ u32 idx_data = 0xdead;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ idx_data = nes_read_indexed(nesdev, sysfs_idx_addr);
+ break;
+ }
+ i++;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", idx_data);
+}
+
+static ssize_t nes_store_idx_data(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ char *p = (char *)buf;
+ u32 val;
+ u32 i = 0;
+ struct nes_device *nesdev;
+
+ if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+ val = simple_strtoul(p, &p, 16);
+ list_for_each_entry(nesdev, &nes_dev_list, list) {
+ if (i == ee_flsh_adapter) {
+ nes_write_indexed(nesdev, sysfs_idx_addr, val);
+ break;
+ }
+ i++;
+ }
+ }
+ return strnlen(buf, count);
+}
+
+static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR,
+ nes_show_adapter, nes_store_adapter);
+static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR,
+ nes_show_ee_cmd, nes_store_ee_cmd);
+static DRIVER_ATTR(eeprom_data, S_IRUSR | S_IWUSR,
+ nes_show_ee_data, nes_store_ee_data);
+static DRIVER_ATTR(flash_cmd, S_IRUSR | S_IWUSR,
+ nes_show_flash_cmd, nes_store_flash_cmd);
+static DRIVER_ATTR(flash_data, S_IRUSR | S_IWUSR,
+ nes_show_flash_data, nes_store_flash_data);
+static DRIVER_ATTR(nonidx_addr, S_IRUSR | S_IWUSR,
+ nes_show_nonidx_addr, nes_store_nonidx_addr);
+static DRIVER_ATTR(nonidx_data, S_IRUSR | S_IWUSR,
+ nes_show_nonidx_data, nes_store_nonidx_data);
+static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR,
+ nes_show_idx_addr, nes_store_idx_addr);
+static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR,
+ nes_show_idx_data, nes_store_idx_data);
+
+static int nes_create_driver_sysfs(struct pci_driver *drv)
+{
+ int error;
+ error = driver_create_file(&drv->driver, &driver_attr_adapter);
+ error |= driver_create_file(&drv->driver, &driver_attr_eeprom_cmd);
+ error |= driver_create_file(&drv->driver, &driver_attr_eeprom_data);
+ error |= driver_create_file(&drv->driver, &driver_attr_flash_cmd);
+ error |= driver_create_file(&drv->driver, &driver_attr_flash_data);
+ error |= driver_create_file(&drv->driver, &driver_attr_nonidx_addr);
+ error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data);
+ error |= driver_create_file(&drv->driver, &driver_attr_idx_addr);
+ error |= driver_create_file(&drv->driver, &driver_attr_idx_data);
+ return error;
+}
+
+static void nes_remove_driver_sysfs(struct pci_driver *drv)
+{
+ driver_remove_file(&drv->driver, &driver_attr_adapter);
+ driver_remove_file(&drv->driver, &driver_attr_eeprom_cmd);
+ driver_remove_file(&drv->driver, &driver_attr_eeprom_data);
+ driver_remove_file(&drv->driver, &driver_attr_flash_cmd);
+ driver_remove_file(&drv->driver, &driver_attr_flash_data);
+ driver_remove_file(&drv->driver, &driver_attr_nonidx_addr);
+ driver_remove_file(&drv->driver, &driver_attr_nonidx_data);
+ driver_remove_file(&drv->driver, &driver_attr_idx_addr);
+ driver_remove_file(&drv->driver, &driver_attr_idx_data);
+}
+
+/**
+ * nes_init_module - module initialization entry point
+ */
+static int __init nes_init_module(void)
+{
+ int retval;
+ int retval1;
+
+ retval = nes_cm_start();
+ if (retval) {
+ printk(KERN_ERR PFX "Unable to start NetEffect iWARP CM.\n");
+ return retval;
+ }
+ retval = pci_register_driver(&nes_pci_driver);
+ if (retval >= 0) {
+ retval1 = nes_create_driver_sysfs(&nes_pci_driver);
+ if (retval1 < 0)
+ printk(KERN_ERR PFX "Unable to create NetEffect sys files.\n");
+ }
+ return retval;
+}
+
+
+/**
+ * nes_exit_module - module unload entry point
+ */
+static void __exit nes_exit_module(void)
+{
+ nes_cm_stop();
+ nes_remove_driver_sysfs(&nes_pci_driver);
+
+ pci_unregister_driver(&nes_pci_driver);
+}
+
+
+module_init(nes_init_module);
+module_exit(nes_exit_module);
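
nes.c exposes a runtime debug_level module parameter, and nes.h (below) defines the NES_DBG_* bits it is matched against, so individual message classes can be enabled without rebuilding. A tiny userspace sketch of that bitmask gating, with made-up level names:

#include <stdarg.h>
#include <stdio.h>

/* A couple of level bits, mirroring the NES_DBG_* flags in nes.h. */
#define DBG_INIT   0x00000002
#define DBG_NETDEV 0x00000010

/* Runtime mask; in the driver this is the debug_level module parameter. */
static unsigned int debug_mask = DBG_INIT;

/* Same gating idea as the nes_debug() macro: emit a message only when its
 * level bit is enabled in the mask. */
static void dbg(unsigned int level, const char *fmt, ...)
{
	va_list ap;

	if (!(level & debug_mask))
		return;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	dbg(DBG_INIT, "printed: INIT bit is set\n");
	dbg(DBG_NETDEV, "suppressed: NETDEV bit is clear\n");
	return 0;
}
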
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
new file mode 100644
index 000000000000..fd57e8a1582f
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __NES_H
+#define __NES_H
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <asm/semaphore.h>
+#include <linux/version.h>
+#include <asm/io.h>
+#include <linux/crc32c.h>
+
+#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/iw_cm.h>
+
+#define NES_SEND_FIRST_WRITE
+
+#define QUEUE_DISCONNECTS
+
+#define DRV_BUILD "1"
+
+#define DRV_NAME "iw_nes"
+#define DRV_VERSION "1.0 KO Build " DRV_BUILD
+#define PFX DRV_NAME ": "
+
+/*
+ * NetEffect PCI vendor id and NE010 PCI device id.
+ */
+#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */
+#define PCI_VENDOR_ID_NETEFFECT 0x1678
+#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
+#endif
+
+#define NE020_REV 4
+#define NE020_REV1 5
+
+#define BAR_0 0
+#define BAR_1 2
+
+#define RX_BUF_SIZE (1536 + 8)
+#define NES_REG0_SIZE (4 * 1024)
+#define NES_TX_TIMEOUT (6*HZ)
+#define NES_FIRST_QPN 64
+#define NES_SW_CONTEXT_ALIGN 1024
+
+#define NES_NIC_MAX_NICS 16
+#define NES_MAX_ARP_TABLE_SIZE 4096
+
+#define NES_NIC_CEQ_SIZE 8
+/* NICs will be on a separate CQ */
+#define NES_CCEQ_SIZE ((nesadapter->max_cq / nesadapter->port_count) - 32)
+
+#define NES_MAX_PORT_COUNT 4
+
+#define MAX_DPC_ITERATIONS 128
+
+#define NES_CQP_REQUEST_NO_DOORBELL_RING 0
+#define NES_CQP_REQUEST_RING_DOORBELL 1
+
+#define NES_DRV_OPT_ENABLE_MPA_VER_0 0x00000001
+#define NES_DRV_OPT_DISABLE_MPA_CRC 0x00000002
+#define NES_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
+#define NES_DRV_OPT_DISABLE_INTF 0x00000008
+#define NES_DRV_OPT_ENABLE_MSI 0x00000010
+#define NES_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
+#define NES_DRV_OPT_SUPRESS_OPTION_BC 0x00000040
+#define NES_DRV_OPT_NO_INLINE_DATA 0x00000080
+#define NES_DRV_OPT_DISABLE_INT_MOD 0x00000100
+#define NES_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
+
+#define NES_AEQ_EVENT_TIMEOUT 2500
+#define NES_DISCONNECT_EVENT_TIMEOUT 2000
+
+/* debug levels */
+/* must match userspace */
+#define NES_DBG_HW 0x00000001
+#define NES_DBG_INIT 0x00000002
+#define NES_DBG_ISR 0x00000004
+#define NES_DBG_PHY 0x00000008
+#define NES_DBG_NETDEV 0x00000010
+#define NES_DBG_CM 0x00000020
+#define NES_DBG_CM1 0x00000040
+#define NES_DBG_NIC_RX 0x00000080
+#define NES_DBG_NIC_TX 0x00000100
+#define NES_DBG_CQP 0x00000200
+#define NES_DBG_MMAP 0x00000400
+#define NES_DBG_MR 0x00000800
+#define NES_DBG_PD 0x00001000
+#define NES_DBG_CQ 0x00002000
+#define NES_DBG_QP 0x00004000
+#define NES_DBG_MOD_QP 0x00008000
+#define NES_DBG_AEQ 0x00010000
+#define NES_DBG_IW_RX 0x00020000
+#define NES_DBG_IW_TX 0x00040000
+#define NES_DBG_SHUTDOWN 0x00080000
+#define NES_DBG_RSVD1 0x10000000
+#define NES_DBG_RSVD2 0x20000000
+#define NES_DBG_RSVD3 0x40000000
+#define NES_DBG_RSVD4 0x80000000
+#define NES_DBG_ALL 0xffffffff
+
+#ifdef CONFIG_INFINIBAND_NES_DEBUG
+#define nes_debug(level, fmt, args...) \
+do { \
+ if (level & nes_debug_level) \
+ printk(KERN_ERR PFX "%s[%u]: " fmt, __FUNCTION__, __LINE__, ##args); \
+} while (0)
+
+#define assert(expr) \
+do { \
+ if (!(expr)) { \
+ printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \
+ #expr, __FILE__, __FUNCTION__, __LINE__); \
+ } \
+} while (0)
+
+#define NES_EVENT_TIMEOUT 1200000
+#else
+#define nes_debug(level, fmt, args...)
+#define assert(expr) do {} while (0)
+
+#define NES_EVENT_TIMEOUT 100000
+#endif
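+
+/*
+ * Usage sketch (illustrative): with CONFIG_INFINIBAND_NES_DEBUG enabled,
+ * a trace such as
+ *
+ *	nes_debug(NES_DBG_CM, "cm_node = %p, state = %u\n",
+ *		  cm_node, cm_node->state);
+ *
+ * prints only when the NES_DBG_CM bit is set in the nes_debug_level mask;
+ * with the option disabled the call compiles away entirely.
+ */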
+
+#include "nes_hw.h"
+#include "nes_verbs.h"
+#include "nes_context.h"
+#include "nes_user.h"
+#include "nes_cm.h"
+
+extern int max_mtu;
+extern int nics_per_function;
+#define max_frame_len (max_mtu+ETH_HLEN)
+extern int interrupt_mod_interval;
+extern int nes_if_count;
+extern int mpa_version;
+extern int disable_mpa_crc;
+extern unsigned int send_first;
+extern unsigned int nes_drv_opt;
+extern unsigned int nes_debug_level;
+
+extern struct list_head nes_adapter_list;
+extern struct list_head nes_dev_list;
+
+extern struct nes_cm_core *g_cm_core;
+
+extern atomic_t cm_connects;
+extern atomic_t cm_accepts;
+extern atomic_t cm_disconnects;
+extern atomic_t cm_closes;
+extern atomic_t cm_connecteds;
+extern atomic_t cm_connect_reqs;
+extern atomic_t cm_rejects;
+extern atomic_t mod_qp_timouts;
+extern atomic_t qps_created;
+extern atomic_t qps_destroyed;
+extern atomic_t sw_qps_destroyed;
+extern u32 mh_detected;
+extern u32 mh_pauses_sent;
+extern u32 cm_packets_sent;
+extern u32 cm_packets_bounced;
+extern u32 cm_packets_created;
+extern u32 cm_packets_received;
+extern u32 cm_packets_dropped;
+extern u32 cm_packets_retrans;
+extern u32 cm_listens_created;
+extern u32 cm_listens_destroyed;
+extern u32 cm_backlog_drops;
+extern atomic_t cm_loopbacks;
+extern atomic_t cm_nodes_created;
+extern atomic_t cm_nodes_destroyed;
+extern atomic_t cm_accel_dropped_pkts;
+extern atomic_t cm_resets_recvd;
+
+extern u32 crit_err_count;
+extern u32 int_mod_timer_init;
+extern u32 int_mod_cq_depth_256;
+extern u32 int_mod_cq_depth_128;
+extern u32 int_mod_cq_depth_32;
+extern u32 int_mod_cq_depth_24;
+extern u32 int_mod_cq_depth_16;
+extern u32 int_mod_cq_depth_4;
+extern u32 int_mod_cq_depth_1;
+
+extern atomic_t cqp_reqs_allocated;
+extern atomic_t cqp_reqs_freed;
+extern atomic_t cqp_reqs_dynallocated;
+extern atomic_t cqp_reqs_dynfreed;
+extern atomic_t cqp_reqs_queued;
+extern atomic_t cqp_reqs_redriven;
+
+
+struct nes_device {
+ struct nes_adapter *nesadapter;
+ void __iomem *regs;
+ void __iomem *index_reg;
+ struct pci_dev *pcidev;
+ struct net_device *netdev[NES_NIC_MAX_NICS];
+ u64 link_status_interrupts;
+ struct tasklet_struct dpc_tasklet;
+ spinlock_t indexed_regs_lock;
+ unsigned long csr_start;
+ unsigned long doorbell_region;
+ unsigned long doorbell_start;
+ unsigned long mac_tx_errors;
+ unsigned long mac_pause_frames_sent;
+ unsigned long mac_pause_frames_received;
+ unsigned long mac_rx_errors;
+ unsigned long mac_rx_crc_errors;
+ unsigned long mac_rx_symbol_err_frames;
+ unsigned long mac_rx_jabber_frames;
+ unsigned long mac_rx_oversized_frames;
+ unsigned long mac_rx_short_frames;
+ unsigned long port_rx_discards;
+ unsigned long port_tx_discards;
+ unsigned int mac_index;
+ unsigned int nes_stack_start;
+
+ /* Control Structures */
+ void *cqp_vbase;
+ dma_addr_t cqp_pbase;
+ u32 cqp_mem_size;
+ u8 ceq_index;
+ u8 nic_ceq_index;
+ struct nes_hw_cqp cqp;
+ struct nes_hw_cq ccq;
+ struct list_head cqp_avail_reqs;
+ struct list_head cqp_pending_reqs;
+ struct nes_cqp_request *nes_cqp_requests;
+
+ u32 int_req;
+ u32 int_stat;
+ u32 timer_int_req;
+ u32 timer_only_int_count;
+ u32 intf_int_req;
+ u32 last_mac_tx_pauses;
+ u32 last_used_chunks_tx;
+ struct list_head list;
+
+ u16 base_doorbell_index;
+ u16 currcq_count;
+ u16 deepcq_count;
+ u8 msi_enabled;
+ u8 netdev_count;
+ u8 napi_isr_ran;
+ u8 disable_rx_flow_control;
+ u8 disable_tx_flow_control;
+};
+
+
+static inline void
+set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value)
+{
+ wqe_words[index] = cpu_to_le32((u32) ((unsigned long)value));
+ wqe_words[index + 1] = cpu_to_le32((u32)(upper_32_bits((unsigned long)value)));
+}
+
+static inline void
+set_wqe_32bit_value(__le32 *wqe_words, u32 index, u32 value)
+{
+ wqe_words[index] = cpu_to_le32(value);
+}
+
+static inline void
+nes_fill_init_cqp_wqe(struct nes_hw_cqp_wqe *cqp_wqe, struct nes_device *nesdev)
+{
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_CTX_LOW_IDX,
+ (u64)((unsigned long) &nesdev->cqp));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_LEN_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_LOW_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_LOW_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_HIGH_IDX] = 0;
+}
+
+static inline void
+nes_fill_init_qp_wqe(struct nes_hw_qp_wqe *wqe, struct nes_qp *nesqp, u32 head)
+{
+ u32 value;
+ value = ((u32)((unsigned long) nesqp)) | head;
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX,
+ (u32)(upper_32_bits((unsigned long)(nesqp))));
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, value);
+}
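+
+/*
+ * Usage sketch (illustrative; cqp_request, NES_CQP_WQE_OPCODE_IDX and
+ * NES_CQP_CREATE_QP are assumed names provided elsewhere in the driver,
+ * e.g. nes_hw.h and the CQP request pool): a CQP command is typically
+ * built by clearing the common control words and then filling in the
+ * command-specific fields,
+ *
+ *	struct nes_hw_cqp_wqe *cqp_wqe = &cqp_request->cqp_wqe;
+ *
+ *	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ *	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ *			NES_CQP_CREATE_QP);
+ *
+ * before handing the request to nes_post_cqp_request().
+ */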
+
+/* Read from memory-mapped device */
+static inline u32 nes_read_indexed(struct nes_device *nesdev, u32 reg_index)
+{
+ unsigned long flags;
+ void __iomem *addr = nesdev->index_reg;
+ u32 value;
+
+ spin_lock_irqsave(&nesdev->indexed_regs_lock, flags);
+
+ writel(reg_index, addr);
+ value = readl((void __iomem *)addr + 4);
+
+ spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags);
+ return value;
+}
+
+static inline u32 nes_read32(const void __iomem *addr)
+{
+ return readl(addr);
+}
+
+static inline u16 nes_read16(const void __iomem *addr)
+{
+ return readw(addr);
+}
+
+static inline u8 nes_read8(const void __iomem *addr)
+{
+ return readb(addr);
+}
+
+/* Write to memory-mapped device */
+static inline void nes_write_indexed(struct nes_device *nesdev, u32 reg_index, u32 val)
+{
+ unsigned long flags;
+ void __iomem *addr = nesdev->index_reg;
+
+ spin_lock_irqsave(&nesdev->indexed_regs_lock, flags);
+
+ writel(reg_index, addr);
+ writel(val, (void __iomem *)addr + 4);
+
+ spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags);
+}
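+
+/*
+ * Usage sketch (illustrative; reg_index and mask stand in for real register
+ * offsets and bit masks from nes_hw.h): indexed register access is a
+ * two-step sequence, writing the index to index_reg and then accessing the
+ * data window at index_reg + 4. Both helpers serialize on
+ * indexed_regs_lock, so a read-modify-write looks like
+ *
+ *	u32 val = nes_read_indexed(nesdev, reg_index);
+ *
+ *	nes_write_indexed(nesdev, reg_index, val | mask);
+ */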
+
+static inline void nes_write32(void __iomem *addr, u32 val)
+{
+ writel(val, addr);
+}
+
+static inline void nes_write16(void __iomem *addr, u16 val)
+{
+ writew(val, addr);
+}
+
+static inline void nes_write8(void __iomem *addr, u8 val)
+{
+ writeb(val, addr);
+}
+
+
+
+static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
+ unsigned long *resource_array, u32 max_resources,
+ u32 *req_resource_num, u32 *next)
+{
+ unsigned long flags;
+ u32 resource_num;
+
+ spin_lock_irqsave(&nesadapter->resource_lock, flags);
+
+ resource_num = find_next_zero_bit(resource_array, max_resources, *next);
+ if (resource_num >= max_resources) {
+ resource_num = find_first_zero_bit(resource_array, max_resources);
+ if (resource_num >= max_resources) {
+ printk(KERN_ERR PFX "%s: No available resourcess.\n", __FUNCTION__);
+ spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
+ return -EMFILE;
+ }
+ }
+ set_bit(resource_num, resource_array);
+ *next = resource_num+1;
+ if (*next == max_resources) {
+ *next = 0;
+ }
+ spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
+ *req_resource_num = resource_num;
+
+ return 0;
+}
+
+static inline int nes_is_resource_allocated(struct nes_adapter *nesadapter,
+ unsigned long *resource_array, u32 resource_num)
+{
+ unsigned long flags;
+ int bit_is_set;
+
+ spin_lock_irqsave(&nesadapter->resource_lock, flags);
+
+ bit_is_set = test_bit(resource_num, resource_array);
+ nes_debug(NES_DBG_HW, "resource_num %u is%s allocated.\n",
+ resource_num, (bit_is_set ? "": " not"));
+ spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
+
+ return bit_is_set;
+}
+
+static inline void nes_free_resource(struct nes_adapter *nesadapter,
+ unsigned long *resource_array, u32 resource_num)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nesadapter->resource_lock, flags);
+ clear_bit(resource_num, resource_array);
+ spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
+}
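+
+/*
+ * Usage sketch (illustrative; resource_array, max_resources and next_index
+ * stand in for the adapter-owned bitmap and bookkeeping fields of a given
+ * resource type): numbers are handed out from the bitmap and later
+ * returned to it,
+ *
+ *	u32 num;
+ *	int ret;
+ *
+ *	ret = nes_alloc_resource(nesadapter, resource_array, max_resources,
+ *			&num, &next_index);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	nes_free_resource(nesadapter, resource_array, num);
+ */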
+
+static inline struct nes_vnic *to_nesvnic(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct nes_ib_device, ibdev)->nesvnic;
+}
+
+static inline struct nes_pd *to_nespd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct nes_pd, ibpd);
+}
+
+static inline struct nes_ucontext *to_nesucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct nes_ucontext, ibucontext);
+}
+
+static inline struct nes_mr *to_nesmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct nes_mr, ibmr);
+}
+
+static inline struct nes_mr *to_nesmr_from_ibfmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct nes_mr, ibfmr);
+}
+
+static inline struct nes_mr *to_nesmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct nes_mr, ibmw);
+}
+
+static inline struct nes_fmr *to_nesfmr(struct nes_mr *nesmr)
+{
+ return container_of(nesmr, struct nes_fmr, nesmr);
+}
+
+static inline struct nes_cq *to_nescq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct nes_cq, ibcq);
+}
+
+static inline struct nes_qp *to_nesqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct nes_qp, ibqp);
+}
+
+
+
+/* nes.c */
+void nes_add_ref(struct ib_qp *);
+void nes_rem_ref(struct ib_qp *);
+struct ib_qp *nes_get_qp(struct ib_device *, int);
+
+
+/* nes_hw.c */
+struct nes_adapter *nes_init_adapter(struct nes_device *, u8);
+void nes_nic_init_timer_defaults(struct nes_device *, u8);
+unsigned int nes_reset_adapter_ne020(struct nes_device *, u8 *);
+int nes_init_serdes(struct nes_device *, u8, u8, u8);
+void nes_init_csr_ne020(struct nes_device *, u8, u8);
+void nes_destroy_adapter(struct nes_adapter *);
+int nes_init_cqp(struct nes_device *);
+int nes_init_phy(struct nes_device *);
+int nes_init_nic_qp(struct nes_device *, struct net_device *);
+void nes_destroy_nic_qp(struct nes_vnic *);
+int nes_napi_isr(struct nes_device *);
+void nes_dpc(unsigned long);
+void nes_process_ceq(struct nes_device *, struct nes_hw_ceq *);
+void nes_process_aeq(struct nes_device *, struct nes_hw_aeq *);
+void nes_process_mac_intr(struct nes_device *, u32);
+void nes_nic_napi_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
+void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
+void nes_cqp_ce_handler(struct nes_device *, struct nes_hw_cq *);
+void nes_process_iwarp_aeqe(struct nes_device *, struct nes_hw_aeqe *);
+void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
+int nes_destroy_cqp(struct nes_device *);
+int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+
+/* nes_nic.c */
+void nes_netdev_set_multicast_list(struct net_device *);
+void nes_netdev_exit(struct nes_vnic *);
+struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
+void nes_netdev_destroy(struct net_device *);
+int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
+
+/* nes_cm.c */
+void *nes_cm_create(struct net_device *);
+int nes_cm_recv(struct sk_buff *, struct net_device *);
+void nes_update_arp(unsigned char *, u32, u32, u16, u16);
+void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32);
+void nes_sock_release(struct nes_qp *, unsigned long *);
+struct nes_cm_core *nes_cm_alloc_core(void);
+void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32);
+int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32);
+int nes_cm_disconn(struct nes_qp *);
+void nes_cm_disconn_worker(void *);
+
+/* nes_verbs.c */
+int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32);
+int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
+struct nes_ib_device *nes_init_ofa_device(struct net_device *);
+void nes_destroy_ofa_device(struct nes_ib_device *);
+int nes_register_ofa_device(struct nes_ib_device *);
+void nes_unregister_ofa_device(struct nes_ib_device *);
+
+/* nes_util.c */
+int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *);
+void nes_write_1G_phy_reg(struct nes_device *, u8, u8, u16);
+void nes_read_1G_phy_reg(struct nes_device *, u8, u8, u16 *);
+void nes_write_10G_phy_reg(struct nes_device *, u16, u8, u16);
+void nes_read_10G_phy_reg(struct nes_device *, u16, u8);
+struct nes_cqp_request *nes_get_cqp_request(struct nes_device *);
+void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *, int);
+int nes_arp_table(struct nes_device *, u32, u8 *, u32);
+void nes_mh_fix(unsigned long);
+void nes_clc(unsigned long);
+void nes_dump_mem(unsigned int, void *, int);
+u32 nes_crc32(u32, u32, u32, u32, u8 *, u32, u32, u32);
+
+#endif /* __NES_H */
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
new file mode 100644
index 000000000000..bd5cfeaac203
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -0,0 +1,3088 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+
+#define TCPOPT_TIMESTAMP 8
+
+#include <asm/atomic.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/notifier.h>
+#include <linux/net.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+
+#include <net/neighbour.h>
+#include <net/route.h>
+#include <net/ip_fib.h>
+
+#include "nes.h"
+
+u32 cm_packets_sent;
+u32 cm_packets_bounced;
+u32 cm_packets_dropped;
+u32 cm_packets_retrans;
+u32 cm_packets_created;
+u32 cm_packets_received;
+u32 cm_listens_created;
+u32 cm_listens_destroyed;
+u32 cm_backlog_drops;
+atomic_t cm_loopbacks;
+atomic_t cm_nodes_created;
+atomic_t cm_nodes_destroyed;
+atomic_t cm_accel_dropped_pkts;
+atomic_t cm_resets_recvd;
+
+static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
+static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *,
+ struct nes_vnic *, struct nes_cm_info *);
+static int add_ref_cm_node(struct nes_cm_node *);
+static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
+static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
+
+
+/*
+ * External CM API interface: an instance of function pointers for the
+ * client API. The address of this instance is stored in cm_core->cm_ops
+ * when the cm_core is allocated.
+ */
+static struct nes_cm_ops nes_cm_api = {
+ mini_cm_accelerated,
+ mini_cm_listen,
+ mini_cm_del_listen,
+ mini_cm_connect,
+ mini_cm_close,
+ mini_cm_accept,
+ mini_cm_reject,
+ mini_cm_recv_pkt,
+ mini_cm_dealloc_core,
+ mini_cm_get,
+ mini_cm_set
+};
+
+struct nes_cm_core *g_cm_core;
+
+atomic_t cm_connects;
+atomic_t cm_accepts;
+atomic_t cm_disconnects;
+atomic_t cm_closes;
+atomic_t cm_connecteds;
+atomic_t cm_connect_reqs;
+atomic_t cm_rejects;
+
+
+/**
+ * create_event
+ */
+static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
+ enum nes_cm_event_type type)
+{
+ struct nes_cm_event *event;
+
+ if (!cm_node->cm_id)
+ return NULL;
+
+ /* allocate an empty event */
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+
+ if (!event)
+ return NULL;
+
+ event->type = type;
+ event->cm_node = cm_node;
+ event->cm_info.rem_addr = cm_node->rem_addr;
+ event->cm_info.loc_addr = cm_node->loc_addr;
+ event->cm_info.rem_port = cm_node->rem_port;
+ event->cm_info.loc_port = cm_node->loc_port;
+ event->cm_info.cm_id = cm_node->cm_id;
+
+ nes_debug(NES_DBG_CM, "Created event=%p, type=%u, dst_addr=%08x[%x],"
+ " src_addr=%08x[%x]\n",
+ event, type,
+ event->cm_info.loc_addr, event->cm_info.loc_port,
+ event->cm_info.rem_addr, event->cm_info.rem_port);
+
+ nes_cm_post_event(event);
+ return event;
+}
+
+
+/**
+ * send_mpa_request
+ */
+int send_mpa_request(struct nes_cm_node *cm_node)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ skb = get_free_pkt(cm_node);
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ /* send an MPA Request frame */
+ form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
+ cm_node->mpa_frame_size, SET_ACK);
+
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+ if (ret < 0) {
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/**
+ * parse_mpa - process the data payload of a received TCP packet,
+ * which is expected to carry an IETF MPA frame
+ */
+static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len)
+{
+ struct ietf_mpa_frame *mpa_frame;
+
+ /* assume req frame is in tcp data payload */
+ if (len < sizeof(struct ietf_mpa_frame)) {
+ nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len);
+ return -1;
+ }
+
+ mpa_frame = (struct ietf_mpa_frame *)buffer;
+ cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len);
+
+ if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
+ nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
+ " complete (%x + %x != %x)\n",
+ cm_node->mpa_frame_size, (u32)sizeof(struct ietf_mpa_frame), len);
+ return -1;
+ }
+
+ /* copy entire MPA frame to our cm_node's frame */
+ memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame),
+ cm_node->mpa_frame_size);
+
+ return 0;
+}
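+
+/*
+ * Note (illustrative summary of the function above): on success parse_mpa()
+ * leaves the private-data length from the MPA header in
+ * cm_node->mpa_frame_size and copies only that private data into
+ * cm_node->mpa_frame_buf; the fixed IETF MPA header itself is not retained.
+ */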
+
+
+/**
+ * handle_exception_pkt - process an exception packet.
+ * We have been in a TSA state, and we have now received SW
+ * TCP/IP traffic should be a FIN request or IP pkt with options
+ */
+static int handle_exception_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb)
+{
+ int ret = 0;
+ struct tcphdr *tcph = tcp_hdr(skb);
+
+ /* first check to see if this a FIN pkt */
+ if (tcph->fin) {
+ /* we need to ACK the FIN request */
+ send_ack(cm_node);
+
+ /* check which side we are (client/server) and set next state accordingly */
+ if (cm_node->tcp_cntxt.client)
+ cm_node->state = NES_CM_STATE_CLOSING;
+ else {
+ /* we are the server side */
+ cm_node->state = NES_CM_STATE_CLOSE_WAIT;
+ /* since this is a self-contained CM we don't wait for an */
+ /* application to close us; just send the final FIN immediately */
+ ret = send_fin(cm_node, NULL);
+ cm_node->state = NES_CM_STATE_LAST_ACK;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+
+/**
+ * form_cm_frame - build a raw TCP frame in the supplied skb, using the
+ * cm_node's connection info.
+ */
+struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
+ void *options, u32 optionsize, void *data, u32 datasize, u8 flags)
+{
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ struct ethhdr *ethh;
+ u8 *buf;
+ u16 packetsize = sizeof(*iph);
+
+ packetsize += sizeof(*tcph);
+ packetsize += optionsize + datasize;
+
+ memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph));
+
+ skb->len = 0;
+ buf = skb_put(skb, packetsize + ETH_HLEN);
+
+ ethh = (struct ethhdr *) buf;
+ buf += ETH_HLEN;
+
+ iph = (struct iphdr *)buf;
+ buf += sizeof(*iph);
+ tcph = (struct tcphdr *)buf;
+ skb_reset_mac_header(skb);
+ skb_set_network_header(skb, ETH_HLEN);
+ skb_set_transport_header(skb, ETH_HLEN+sizeof(*iph));
+ buf += sizeof(*tcph);
+
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->protocol = htons(0x800);
+ skb->data_len = 0;
+ skb->mac_len = ETH_HLEN;
+
+ memcpy(ethh->h_dest, cm_node->rem_mac, ETH_ALEN);
+ memcpy(ethh->h_source, cm_node->loc_mac, ETH_ALEN);
+ ethh->h_proto = htons(0x0800);
+
+ iph->version = IPVERSION;
+ iph->ihl = 5; /* 5 * 4-byte words, IP header len */
+ iph->tos = 0;
+ iph->tot_len = htons(packetsize);
+ iph->id = htons(++cm_node->tcp_cntxt.loc_id);
+
+ iph->frag_off = htons(0x4000);
+ iph->ttl = 0x40;
+ iph->protocol = 0x06; /* IPPROTO_TCP */
+
+ iph->saddr = htonl(cm_node->loc_addr);
+ iph->daddr = htonl(cm_node->rem_addr);
+
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
+ tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->ack = 1;
+ } else
+ tcph->ack_seq = 0;
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->syn = 1;
+ } else
+ cm_node->tcp_cntxt.loc_seq_num += datasize; /* data (no headers) */
+
+ if (flags & SET_FIN)
+ tcph->fin = 1;
+
+ if (flags & SET_RST)
+ tcph->rst = 1;
+
+ tcph->doff = (u16)((sizeof(*tcph) + optionsize + 3) >> 2);
+ tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->urg_ptr = 0;
+ if (optionsize)
+ memcpy(buf, options, optionsize);
+ buf += optionsize;
+ if (datasize)
+ memcpy(buf, data, datasize);
+
+ skb_shinfo(skb)->nr_frags = 0;
+ cm_packets_created++;
+
+ return skb;
+}
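+
+/*
+ * Usage sketch (illustrative): callers build the raw TCP frame with
+ * form_cm_frame() and then hand the skb to the CM retransmit timer, as
+ * send_ack() does further below:
+ *
+ *	form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK);
+ *	ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 0);
+ */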
+
+
+/**
+ * print_core - dump a cm core
+ */
+static void print_core(struct nes_cm_core *core)
+{
+ nes_debug(NES_DBG_CM, "---------------------------------------------\n");
+ nes_debug(NES_DBG_CM, "CM Core -- (core = %p )\n", core);
+ if (!core)
+ return;
+ nes_debug(NES_DBG_CM, "---------------------------------------------\n");
+ nes_debug(NES_DBG_CM, "Session ID : %u \n", atomic_read(&core->session_id));
+
+ nes_debug(NES_DBG_CM, "State : %u \n", core->state);
+
+ nes_debug(NES_DBG_CM, "Tx Free cnt : %u \n", skb_queue_len(&core->tx_free_list));
+ nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt));
+ nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt));
+
+ nes_debug(NES_DBG_CM, "core : %p \n", core);
+
+ nes_debug(NES_DBG_CM, "-------------- end core ---------------\n");
+}
+
+
+/**
+ * schedule_nes_timer
+ * note - the cm_node must be protected before calling this: take a
+ * reference with add_ref_cm_node(cm_node) first and release it with
+ * rem_ref_cm_node(cm_core, cm_node) afterwards.
+ */
+int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ enum nes_timer_type type, int send_retrans,
+ int close_when_complete)
+{
+ unsigned long flags;
+ struct nes_cm_core *cm_core;
+ struct nes_timer_entry *new_send;
+ int ret = 0;
+ u32 was_timer_set;
+
+ if (!cm_node)
+ return -EINVAL;
+
+ new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
+ if (!new_send)
+ return -1;
+
+ /* new_send->timetosend = currenttime */
+ new_send->retrycount = NES_DEFAULT_RETRYS;
+ new_send->retranscount = NES_DEFAULT_RETRANS;
+ new_send->skb = skb;
+ new_send->timetosend = jiffies;
+ new_send->type = type;
+ new_send->netdev = cm_node->netdev;
+ new_send->send_retrans = send_retrans;
+ new_send->close_when_complete = close_when_complete;
+
+ if (type == NES_TIMER_TYPE_CLOSE) {
+ new_send->timetosend += (HZ/2); /* TODO: decide on the correct value here */
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ list_add_tail(&new_send->list, &cm_node->recv_list);
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+ }
+
+ if (type == NES_TIMER_TYPE_SEND) {
+ new_send->seq_num = htonl(tcp_hdr(skb)->seq);
+ atomic_inc(&new_send->skb->users);
+
+ ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
+ if (ret != NETDEV_TX_OK) {
+ nes_debug(NES_DBG_CM, "Error sending packet %p (jiffies = %lu)\n",
+ new_send, jiffies);
+ atomic_dec(&new_send->skb->users);
+ new_send->timetosend = jiffies;
+ } else {
+ cm_packets_sent++;
+ if (!send_retrans) {
+ if (close_when_complete)
+ rem_ref_cm_node(cm_node->cm_core, cm_node);
+ dev_kfree_skb_any(new_send->skb);
+ kfree(new_send);
+ return ret;
+ }
+ new_send->timetosend = jiffies + NES_RETRY_TIMEOUT;
+ }
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ list_add_tail(&new_send->list, &cm_node->retrans_list);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ }
+ if (type == NES_TIMER_TYPE_RECV) {
+ new_send->seq_num = htonl(tcp_hdr(skb)->seq);
+ new_send->timetosend = jiffies;
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ list_add_tail(&new_send->list, &cm_node->recv_list);
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+ }
+ cm_core = cm_node->cm_core;
+
+ was_timer_set = timer_pending(&cm_core->tcp_timer);
+
+ if (!was_timer_set) {
+ cm_core->tcp_timer.expires = new_send->timetosend;
+ add_timer(&cm_core->tcp_timer);
+ }
+
+ return ret;
+}
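+
+/*
+ * Note (illustrative): the last two arguments select retransmission and
+ * teardown behavior. send_mpa_request() passes send_retrans = 1 and
+ * close_when_complete = 0 (retransmit until acked, keep the node), while
+ * send_reset() passes 0 and 1 so the reference it takes with
+ * add_ref_cm_node() is dropped once the RST has gone out.
+ */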
+
+
+/**
+ * nes_cm_timer_tick
+ */
+void nes_cm_timer_tick(unsigned long pass)
+{
+ unsigned long flags, qplockflags;
+ unsigned long nexttimeout = jiffies + NES_LONG_TIME;
+ struct iw_cm_id *cm_id;
+ struct nes_cm_node *cm_node;
+ struct nes_timer_entry *send_entry, *recv_entry;
+ struct list_head *list_core, *list_core_temp;
+ struct list_head *list_node, *list_node_temp;
+ struct nes_cm_core *cm_core = g_cm_core;
+ struct nes_qp *nesqp;
+ struct sk_buff *skb;
+ u32 settimer = 0;
+ int ret = NETDEV_TX_OK;
+ int node_done;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
+ cm_node = container_of(list_node, struct nes_cm_node, list);
+ add_ref_cm_node(cm_node);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
+ recv_entry = container_of(list_core, struct nes_timer_entry, list);
+ if ((time_after(recv_entry->timetosend, jiffies)) &&
+ (recv_entry->type == NES_TIMER_TYPE_CLOSE)) {
+ if (nexttimeout > recv_entry->timetosend || !settimer) {
+ nexttimeout = recv_entry->timetosend;
+ settimer = 1;
+ }
+ continue;
+ }
+ list_del(&recv_entry->list);
+ cm_id = cm_node->cm_id;
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+ if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
+ nesqp = (struct nes_qp *)recv_entry->skb;
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+ if (nesqp->cm_id) {
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d: "
+ "****** HIT A NES_TIMER_TYPE_CLOSE"
+ " with something to do!!! ******\n",
+ nesqp->hwqp.qp_id, cm_id,
+ atomic_read(&nesqp->refcount));
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+ nesqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_cm_disconn(nesqp);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d:"
+ " ****** HIT A NES_TIMER_TYPE_CLOSE"
+ " with nothing to do!!! ******\n",
+ nesqp->hwqp.qp_id, cm_id,
+ atomic_read(&nesqp->refcount));
+ nes_rem_ref(&nesqp->ibqp);
+ }
+ if (cm_id)
+ cm_id->rem_ref(cm_id);
+ }
+ kfree(recv_entry);
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ node_done = 0;
+ list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
+ if (node_done) {
+ break;
+ }
+ send_entry = container_of(list_core, struct nes_timer_entry, list);
+ if (time_after(send_entry->timetosend, jiffies)) {
+ if (cm_node->state != NES_CM_STATE_TSA) {
+ if ((nexttimeout > send_entry->timetosend) || !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ node_done = 1;
+ continue;
+ } else {
+ list_del(&send_entry->list);
+ skb = send_entry->skb;
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ dev_kfree_skb_any(skb);
+ kfree(send_entry);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+ }
+ if (send_entry->type == NES_TIMER_NODE_CLEANUP) {
+ list_del(&send_entry->list);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ kfree(send_entry);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+ if ((send_entry->seq_num < cm_node->tcp_cntxt.rem_ack_num) ||
+ (cm_node->state == NES_CM_STATE_TSA) ||
+ (cm_node->state == NES_CM_STATE_CLOSED)) {
+ skb = send_entry->skb;
+ list_del(&send_entry->list);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ kfree(send_entry);
+ dev_kfree_skb_any(skb);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+
+ if (!send_entry->retranscount || !send_entry->retrycount) {
+ cm_packets_dropped++;
+ skb = send_entry->skb;
+ list_del(&send_entry->list);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ dev_kfree_skb_any(skb);
+ kfree(send_entry);
+ if (cm_node->state == NES_CM_STATE_SYN_RCVD) {
+ /* this node never even generated an indication up to the cm */
+ rem_ref_cm_node(cm_core, cm_node);
+ } else {
+ cm_node->state = NES_CM_STATE_CLOSED;
+ create_event(cm_node, NES_CM_EVENT_ABORTED);
+ }
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+ /* this seems like the correct place, but leave send entry unprotected */
+ /* spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); */
+ atomic_inc(&send_entry->skb->users);
+ cm_packets_retrans++;
+ nes_debug(NES_DBG_CM, "Retransmitting send_entry %p for node %p,"
+ " jiffies = %lu, time to send = %lu, retranscount = %u, "
+ "send_entry->seq_num = 0x%08X, cm_node->tcp_cntxt.rem_ack_num = 0x%08X\n",
+ send_entry, cm_node, jiffies, send_entry->timetosend, send_entry->retranscount,
+ send_entry->seq_num, cm_node->tcp_cntxt.rem_ack_num);
+
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev);
+ if (ret != NETDEV_TX_OK) {
+ cm_packets_bounced++;
+ atomic_dec(&send_entry->skb->users);
+ send_entry->retrycount--;
+ nexttimeout = jiffies + NES_SHORT_TIME;
+ settimer = 1;
+ node_done = 1;
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ } else {
+ cm_packets_sent++;
+ }
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ list_del(&send_entry->list);
+ nes_debug(NES_DBG_CM, "Packet Sent: retrans count = %u, retry count = %u.\n",
+ send_entry->retranscount, send_entry->retrycount);
+ if (send_entry->send_retrans) {
+ send_entry->retranscount--;
+ send_entry->timetosend = jiffies + NES_RETRY_TIMEOUT;
+ if (nexttimeout > send_entry->timetosend || !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ list_add(&send_entry->list, &cm_node->retrans_list);
+ continue;
+ } else {
+ int close_when_complete;
+ skb = send_entry->skb;
+ close_when_complete = send_entry->close_when_complete;
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ if (close_when_complete) {
+ BUG_ON(atomic_read(&cm_node->ref_count) == 1);
+ rem_ref_cm_node(cm_core, cm_node);
+ }
+ dev_kfree_skb_any(skb);
+ kfree(send_entry);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+ }
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+
+ rem_ref_cm_node(cm_core, cm_node);
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (ret != NETDEV_TX_OK)
+ break;
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ if (settimer) {
+ if (!timer_pending(&cm_core->tcp_timer)) {
+ cm_core->tcp_timer.expires = nexttimeout;
+ add_timer(&cm_core->tcp_timer);
+ }
+ }
+}
+
+
+/**
+ * send_syn
+ */
+int send_syn(struct nes_cm_node *cm_node, u32 sendack)
+{
+ int ret;
+ int flags = SET_SYN;
+ struct sk_buff *skb;
+ char optionsbuffer[sizeof(struct option_mss) +
+ sizeof(struct option_windowscale) +
+ sizeof(struct option_base) + 1];
+
+ int optionssize = 0;
+ /* Sending MSS option */
+ union all_known_options *options;
+
+ if (!cm_node)
+ return -EINVAL;
+
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_mss.optionnum = OPTION_NUMBER_MSS;
+ options->as_mss.length = sizeof(struct option_mss);
+ options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
+ optionssize += sizeof(struct option_mss);
+
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
+ options->as_windowscale.length = sizeof(struct option_windowscale);
+ options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
+ optionssize += sizeof(struct option_windowscale);
+
+ if (sendack && !(NES_DRV_OPT_SUPRESS_OPTION_BC & nes_drv_opt)) {
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_base.optionnum = OPTION_NUMBER_WRITE0;
+ options->as_base.length = sizeof(struct option_base);
+ optionssize += sizeof(struct option_base);
+ /* we need the size to be a multiple of 4 */
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_end = 1;
+ optionssize += 1;
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_end = 1;
+ optionssize += 1;
+ }
+
+ options = (union all_known_options *)&optionsbuffer[optionssize];
+ options->as_end = OPTION_NUMBER_END;
+ optionssize += 1;
+
+ skb = get_free_pkt(cm_node);
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ if (sendack)
+ flags |= SET_ACK;
+
+ form_cm_frame(skb, cm_node, optionsbuffer, optionssize, NULL, 0, flags);
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+
+ return ret;
+}
+
+
+/**
+ * send_reset
+ */
+int send_reset(struct nes_cm_node *cm_node)
+{
+ int ret;
+ struct sk_buff *skb = get_free_pkt(cm_node);
+ int flags = SET_RST | SET_ACK;
+
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ add_ref_cm_node(cm_node);
+ form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags);
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 1);
+
+ return ret;
+}
+
+
+/**
+ * send_ack
+ */
+int send_ack(struct nes_cm_node *cm_node)
+{
+ int ret;
+ struct sk_buff *skb = get_free_pkt(cm_node);
+
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK);
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 0);
+
+ return ret;
+}
+
+
+/**
+ * send_fin
+ */
+int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
+{
+ int ret;
+
+ /* if we didn't get a frame get one */
+ if (!skb)
+ skb = get_free_pkt(cm_node);
+
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK | SET_FIN);
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+
+ return ret;
+}
+
+
+/**
+ * get_free_pkt
+ */
+struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node)
+{
+ struct sk_buff *skb, *new_skb;
+
+ /* check to see if we need to repopulate the free tx pkt queue */
+ if (skb_queue_len(&cm_node->cm_core->tx_free_list) < NES_CM_FREE_PKT_LO_WATERMARK) {
+ while (skb_queue_len(&cm_node->cm_core->tx_free_list) <
+ cm_node->cm_core->free_tx_pkt_max) {
+ /* replace the frame we took, we won't get it back */
+ new_skb = dev_alloc_skb(cm_node->cm_core->mtu);
+ BUG_ON(!new_skb);
+ /* add a replacement frame to the free tx list head */
+ skb_queue_head(&cm_node->cm_core->tx_free_list, new_skb);
+ }
+ }
+
+ skb = skb_dequeue(&cm_node->cm_core->tx_free_list);
+
+ return skb;
+}
+
+
+/**
+ * make_hashkey - generate hash key from node tuple
+ */
+static inline int make_hashkey(u16 loc_port, nes_addr_t loc_addr, u16 rem_port,
+ nes_addr_t rem_addr)
+{
+ u32 hashkey = 0;
+
+ hashkey = loc_addr + rem_addr + loc_port + rem_port;
+ hashkey = (hashkey % NES_CM_HASHTABLE_SIZE);
+
+ return hashkey;
+}
+
+
+/**
+ * find_node - find a cm node that matches the reference cm node
+ */
+static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
+ u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr)
+{
+ unsigned long flags;
+ u32 hashkey;
+ struct list_head *list_pos;
+ struct list_head *hte;
+ struct nes_cm_node *cm_node;
+
+ /* make a hash index key for this packet */
+ hashkey = make_hashkey(loc_port, loc_addr, rem_port, rem_addr);
+
+ /* get a handle on the hte */
+ hte = &cm_core->connected_nodes;
+
+ nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n",
+ loc_addr, loc_port, cm_core, hte);
+
+ /* walk list and find cm_node associated with this session ID */
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each(list_pos, hte) {
+ cm_node = container_of(list_pos, struct nes_cm_node, list);
+ /* compare quad, return node handle if a match */
+ nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n",
+ cm_node->loc_addr, cm_node->loc_port,
+ loc_addr, loc_port,
+ cm_node->rem_addr, cm_node->rem_port,
+ rem_addr, rem_port);
+ if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) &&
+ (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) {
+ add_ref_cm_node(cm_node);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ return cm_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ /* no owner node */
+ return NULL;
+}
+
+
+/**
+ * find_listener - find a cm node listening on this addr-port pair
+ */
+static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
+ nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)
+{
+ unsigned long flags;
+ struct list_head *listen_list;
+ struct nes_cm_listener *listen_node;
+
+ /* walk list and find cm_node associated with this session ID */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each(listen_list, &cm_core->listen_list.list) {
+ listen_node = container_of(listen_list, struct nes_cm_listener, list);
+ /* compare node pair, return node handle if a match */
+ if (((listen_node->loc_addr == dst_addr) ||
+ listen_node->loc_addr == 0x00000000) &&
+ (listen_node->loc_port == dst_port) &&
+ (listener_state & listen_node->listener_state)) {
+ atomic_inc(&listen_node->ref_count);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ return listen_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n",
+ dst_addr, dst_port);
+
+ /* no listener */
+ return NULL;
+}
+
+
+/**
+ * add_hte_node - add a cm node to the hash table
+ */
+static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
+{
+ unsigned long flags;
+ u32 hashkey;
+ struct list_head *hte;
+
+ if (!cm_node || !cm_core)
+ return -EINVAL;
+
+ nes_debug(NES_DBG_CM, "Adding Node to Active Connection HT\n");
+
+ /* first, make an index into our hash table */
+ hashkey = make_hashkey(cm_node->loc_port, cm_node->loc_addr,
+ cm_node->rem_port, cm_node->rem_addr);
+ cm_node->hashkey = hashkey;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ /* get a handle on the hash table element (list head for this slot) */
+ hte = &cm_core->connected_nodes;
+ list_add_tail(&cm_node->list, hte);
+ atomic_inc(&cm_core->ht_node_cnt);
+
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ return 0;
+}
+
+
+/**
+ * mini_cm_dec_refcnt_listen
+ */
+static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
+ struct nes_cm_listener *listener, int free_hanging_nodes)
+{
+ int ret = 1;
+ unsigned long flags;
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ if (!atomic_dec_return(&listener->ref_count)) {
+ list_del(&listener->list);
+
+ /* decrement our listen node count */
+ atomic_dec(&cm_core->listen_node_cnt);
+
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ if (listener->nesvnic) {
+ nes_manage_apbvt(listener->nesvnic, listener->loc_port,
+ PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);
+ }
+
+ nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);
+
+ kfree(listener);
+ ret = 0;
+ cm_listens_destroyed++;
+ } else {
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+ if (listener) {
+ if (atomic_read(&listener->pend_accepts_cnt) > 0)
+ nes_debug(NES_DBG_CM, "destroying listener (%p)"
+ " with non-zero pending accepts=%u\n",
+ listener, atomic_read(&listener->pend_accepts_cnt));
+ }
+
+ return ret;
+}
+
+
+/**
+ * mini_cm_del_listen
+ */
+static int mini_cm_del_listen(struct nes_cm_core *cm_core,
+ struct nes_cm_listener *listener)
+{
+ listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE;
+ listener->cm_id = NULL; /* going to be destroyed pretty soon */
+ return mini_cm_dec_refcnt_listen(cm_core, listener, 1);
+}
+
+
+/**
+ * mini_cm_accelerated
+ */
+static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
+ struct nes_cm_node *cm_node)
+{
+ u32 was_timer_set;
+ cm_node->accelerated = 1;
+
+ if (cm_node->accept_pend) {
+ BUG_ON(!cm_node->listener);
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
+ }
+
+ was_timer_set = timer_pending(&cm_core->tcp_timer);
+ if (!was_timer_set) {
+ cm_core->tcp_timer.expires = jiffies + NES_SHORT_TIME;
+ add_timer(&cm_core->tcp_timer);
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_addr_send_arp
+ */
+static void nes_addr_send_arp(u32 dst_ip)
+{
+ struct rtable *rt;
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof fl);
+ fl.nl_u.ip4_u.daddr = htonl(dst_ip);
+ if (ip_route_output_key(&init_net, &rt, &fl)) {
+ printk("%s: ip_route_output_key failed for 0x%08X\n",
+ __FUNCTION__, dst_ip);
+ return;
+ }
+
+ neigh_event_send(rt->u.dst.neighbour, NULL);
+ ip_rt_put(rt);
+}
+
+
+/**
+ * make_cm_node - create a new instance of a cm node
+ */
+static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
+ struct nes_vnic *nesvnic, struct nes_cm_info *cm_info,
+ struct nes_cm_listener *listener)
+{
+ struct nes_cm_node *cm_node;
+ struct timespec ts;
+ int arpindex = 0;
+ struct nes_device *nesdev;
+ struct nes_adapter *nesadapter;
+
+ /* create an hte and cm_node for this instance */
+ cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
+ if (!cm_node)
+ return NULL;
+
+ /* set our node specific transport info */
+ cm_node->loc_addr = cm_info->loc_addr;
+ cm_node->rem_addr = cm_info->rem_addr;
+ cm_node->loc_port = cm_info->loc_port;
+ cm_node->rem_port = cm_info->rem_port;
+ cm_node->send_write0 = send_first;
+ nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n",
+ cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port);
+ cm_node->listener = listener;
+ cm_node->netdev = nesvnic->netdev;
+ cm_node->cm_id = cm_info->cm_id;
+ memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN);
+
+ nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n",
+ cm_node->listener, cm_node->cm_id);
+
+ INIT_LIST_HEAD(&cm_node->retrans_list);
+ spin_lock_init(&cm_node->retrans_list_lock);
+ INIT_LIST_HEAD(&cm_node->recv_list);
+ spin_lock_init(&cm_node->recv_list_lock);
+
+ cm_node->loopbackpartner = NULL;
+ atomic_set(&cm_node->ref_count, 1);
+ /* associate our parent CM core */
+ cm_node->cm_core = cm_core;
+ cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID;
+ cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
+ cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >>
+ NES_CM_DEFAULT_RCV_WND_SCALE;
+ ts = current_kernel_time();
+ cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
+ cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) -
+ sizeof(struct tcphdr) - ETH_HLEN;
+ cm_node->tcp_cntxt.rcv_nxt = 0;
+ /* get a unique session ID; add thread_id to an upcounter to handle races */
+ atomic_inc(&cm_core->node_cnt);
+ atomic_inc(&cm_core->session_id);
+ cm_node->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
+ cm_node->conn_type = cm_info->conn_type;
+ cm_node->apbvt_set = 0;
+ cm_node->accept_pend = 0;
+
+ cm_node->nesvnic = nesvnic;
+ /* get some device handles, for arp lookup */
+ nesdev = nesvnic->nesdev;
+ nesadapter = nesdev->nesadapter;
+
+ cm_node->loopbackpartner = NULL;
+ /* get the mac addr for the remote node */
+ arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
+ if (arpindex < 0) {
+ kfree(cm_node);
+ nes_addr_send_arp(cm_info->rem_addr);
+ return NULL;
+ }
+
+ /* copy the mac addr to node context */
+ memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
+ nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x,"
+ " %02x, %02x, %02x, %02x, %02x\n",
+ cm_node->rem_mac[0], cm_node->rem_mac[1],
+ cm_node->rem_mac[2], cm_node->rem_mac[3],
+ cm_node->rem_mac[4], cm_node->rem_mac[5]);
+
+ add_hte_node(cm_core, cm_node);
+ atomic_inc(&cm_nodes_created);
+
+ return cm_node;
+}
+
+
+/**
+ * add_ref_cm_node - take a reference on a cm node
+ */
+static int add_ref_cm_node(struct nes_cm_node *cm_node)
+{
+ atomic_inc(&cm_node->ref_count);
+ return 0;
+}
+
+
+/**
+ * rem_ref_cm_node - destroy an instance of a cm node
+ */
+static int rem_ref_cm_node(struct nes_cm_core *cm_core,
+ struct nes_cm_node *cm_node)
+{
+ unsigned long flags, qplockflags;
+ struct nes_timer_entry *send_entry;
+ struct nes_timer_entry *recv_entry;
+ struct iw_cm_id *cm_id;
+ struct list_head *list_core, *list_node_temp;
+ struct nes_qp *nesqp;
+
+ if (!cm_node)
+ return -EINVAL;
+
+ spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
+ if (atomic_dec_return(&cm_node->ref_count)) {
+ spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
+ return 0;
+ }
+ list_del(&cm_node->list);
+ atomic_dec(&cm_core->ht_node_cnt);
+ spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
+
+ /* if the node is destroyed before connection was accelerated */
+ if (!cm_node->accelerated && cm_node->accept_pend) {
+ BUG_ON(!cm_node->listener);
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
+ send_entry = container_of(list_core, struct nes_timer_entry, list);
+ list_del(&send_entry->list);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ dev_kfree_skb_any(send_entry->skb);
+ kfree(send_entry);
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
+ recv_entry = container_of(list_core, struct nes_timer_entry, list);
+ list_del(&recv_entry->list);
+ cm_id = cm_node->cm_id;
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+ if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
+ nesqp = (struct nes_qp *)recv_entry->skb;
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+ if (nesqp->cm_id) {
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
+ " with something to do!!! ******\n",
+ nesqp->hwqp.qp_id, cm_id);
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+ nesqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_cm_disconn(nesqp);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
+ " with nothing to do!!! ******\n",
+ nesqp->hwqp.qp_id, cm_id);
+ nes_rem_ref(&nesqp->ibqp);
+ }
+ cm_id->rem_ref(cm_id);
+ } else if (recv_entry->type == NES_TIMER_TYPE_RECV) {
+ dev_kfree_skb_any(recv_entry->skb);
+ }
+ kfree(recv_entry);
+ spin_lock_irqsave(&cm_node->recv_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
+
+ if (cm_node->listener) {
+ mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
+ } else {
+ if (cm_node->apbvt_set && cm_node->nesvnic) {
+ nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
+ PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_DEL);
+ }
+ }
+
+ kfree(cm_node);
+ atomic_dec(&cm_core->node_cnt);
+ atomic_inc(&cm_nodes_destroyed);
+
+ return 0;
+}
+
+
+/**
+ * process_options
+ */
+static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 optionsize, u32 syn_packet)
+{
+ u32 tmp;
+ u32 offset = 0;
+ union all_known_options *all_options;
+ char got_mss_option = 0;
+
+ while (offset < optionsize) {
+ all_options = (union all_known_options *)(optionsloc + offset);
+ switch (all_options->as_base.optionnum) {
+ case OPTION_NUMBER_END:
+ offset = optionsize;
+ break;
+ case OPTION_NUMBER_NONE:
+ offset += 1;
+ continue;
+ case OPTION_NUMBER_MSS:
+ nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n",
+ __FUNCTION__,
+ all_options->as_mss.length, offset, optionsize);
+ got_mss_option = 1;
+ if (all_options->as_mss.length != 4) {
+ return 1;
+ } else {
+ tmp = ntohs(all_options->as_mss.mss);
+ if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
+ cm_node->tcp_cntxt.mss = tmp;
+ }
+ break;
+ case OPTION_NUMBER_WINDOW_SCALE:
+ cm_node->tcp_cntxt.snd_wscale = all_options->as_windowscale.shiftcount;
+ break;
+ case OPTION_NUMBER_WRITE0:
+ cm_node->send_write0 = 1;
+ break;
+ default:
+ nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n",
+ all_options->as_base.optionnum);
+ break;
+ }
+ offset += all_options->as_base.length;
+ }
+ if ((!got_mss_option) && (syn_packet))
+ cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
+ return 0;
+}
+
+
+/**
+ * process_packet
+ */
+int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
+ struct nes_cm_core *cm_core)
+{
+ int optionsize;
+ int datasize;
+ int ret = 0;
+ struct tcphdr *tcph = tcp_hdr(skb);
+ u32 inc_sequence;
+
+ /* check the header and state before dereferencing tcph below */
+ if ((!tcph) || (cm_node->state == NES_CM_STATE_TSA)) {
+ BUG_ON(!tcph);
+ atomic_inc(&cm_accel_dropped_pkts);
+ return -1;
+ }
+
+ if (cm_node->state == NES_CM_STATE_SYN_SENT && tcph->syn) {
+ inc_sequence = ntohl(tcph->seq);
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence;
+ }
+
+ if (tcph->rst) {
+ atomic_inc(&cm_resets_recvd);
+ nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u. refcnt=%d\n",
+ cm_node, cm_node->state, atomic_read(&cm_node->ref_count));
+ switch (cm_node->state) {
+ case NES_CM_STATE_LISTENING:
+ rem_ref_cm_node(cm_core, cm_node);
+ break;
+ case NES_CM_STATE_TSA:
+ case NES_CM_STATE_CLOSED:
+ break;
+ case NES_CM_STATE_SYN_RCVD:
+ nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
+ " remote 0x%08X:%04X, node state = %u\n",
+ cm_node->loc_addr, cm_node->loc_port,
+ cm_node->rem_addr, cm_node->rem_port,
+ cm_node->state);
+ rem_ref_cm_node(cm_core, cm_node);
+ break;
+ case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_MPAREQ_SENT:
+ default:
+ nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
+ " remote 0x%08X:%04X, node state = %u refcnt=%d\n",
+ cm_node->loc_addr, cm_node->loc_port,
+ cm_node->rem_addr, cm_node->rem_port,
+ cm_node->state, atomic_read(&cm_node->ref_count));
+ /* create event */
+ cm_node->state = NES_CM_STATE_CLOSED;
+
+ create_event(cm_node, NES_CM_EVENT_ABORTED);
+ break;
+
+ }
+ return -1;
+ }
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+
+ skb_pull(skb, ip_hdr(skb)->ihl << 2);
+ skb_pull(skb, tcph->doff << 2);
+
+ datasize = skb->len;
+ inc_sequence = ntohl(tcph->seq);
+ nes_debug(NES_DBG_CM, "datasize = %u, sequence = 0x%08X, ack_seq = 0x%08X,"
+ " rcv_nxt = 0x%08X Flags: %s %s.\n",
+ datasize, inc_sequence, ntohl(tcph->ack_seq),
+ cm_node->tcp_cntxt.rcv_nxt, (tcph->syn ? "SYN":""),
+ (tcph->ack ? "ACK":""));
+
+ if (!tcph->syn && (inc_sequence != cm_node->tcp_cntxt.rcv_nxt)) {
+ nes_debug(NES_DBG_CM, "dropping packet, datasize = %u, sequence = 0x%08X,"
+ " ack_seq = 0x%08X, rcv_nxt = 0x%08X Flags: %s.\n",
+ datasize, inc_sequence, ntohl(tcph->ack_seq),
+ cm_node->tcp_cntxt.rcv_nxt, (tcph->ack ? "ACK":""));
+ if (cm_node->state == NES_CM_STATE_LISTENING) {
+ rem_ref_cm_node(cm_core, cm_node);
+ }
+ return -1;
+ }
+
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+
+
+ if (optionsize) {
+ u8 *optionsloc = (u8 *)&tcph[1];
+ if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) {
+ nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n", __FUNCTION__, cm_node);
+ send_reset(cm_node);
+ if (cm_node->state != NES_CM_STATE_SYN_SENT)
+ rem_ref_cm_node(cm_core, cm_node);
+ return 0;
+ }
+ } else if (tcph->syn)
+ cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
+
+ cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
+ cm_node->tcp_cntxt.snd_wscale;
+
+ if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) {
+ cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+ }
+
+ if (tcph->ack) {
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ switch (cm_node->state) {
+ case NES_CM_STATE_SYN_RCVD:
+ case NES_CM_STATE_SYN_SENT:
+ /* read and stash current sequence number */
+ if (cm_node->tcp_cntxt.rem_ack_num != cm_node->tcp_cntxt.loc_seq_num) {
+ nes_debug(NES_DBG_CM, "ERROR - cm_node->tcp_cntxt.rem_ack_num !="
+ " cm_node->tcp_cntxt.loc_seq_num\n");
+ send_reset(cm_node);
+ return 0;
+ }
+ if (cm_node->state == NES_CM_STATE_SYN_SENT)
+ cm_node->state = NES_CM_STATE_ONE_SIDE_ESTABLISHED;
+ else {
+ cm_node->state = NES_CM_STATE_ESTABLISHED;
+ }
+ break;
+ case NES_CM_STATE_LAST_ACK:
+ cm_node->state = NES_CM_STATE_CLOSED;
+ break;
+ case NES_CM_STATE_FIN_WAIT1:
+ cm_node->state = NES_CM_STATE_FIN_WAIT2;
+ break;
+ case NES_CM_STATE_CLOSING:
+ cm_node->state = NES_CM_STATE_TIME_WAIT;
+ /* need to schedule this to happen in 2MSL timeouts */
+ cm_node->state = NES_CM_STATE_CLOSED;
+ break;
+ case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_MPAREQ_SENT:
+ case NES_CM_STATE_CLOSE_WAIT:
+ case NES_CM_STATE_TIME_WAIT:
+ case NES_CM_STATE_CLOSED:
+ break;
+ case NES_CM_STATE_LISTENING:
+ nes_debug(NES_DBG_CM, "Received an ACK on a listening port (SYN %d)\n", tcph->syn);
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ send_reset(cm_node);
+ /* send_reset bumps refcount, this should have been a new node */
+ rem_ref_cm_node(cm_core, cm_node);
+ return -1;
+ break;
+ case NES_CM_STATE_TSA:
+ nes_debug(NES_DBG_CM, "Received a packet with the ack bit set while in TSA state\n");
+ break;
+ case NES_CM_STATE_UNKNOWN:
+ case NES_CM_STATE_INITED:
+ case NES_CM_STATE_ACCEPTING:
+ case NES_CM_STATE_FIN_WAIT2:
+ default:
+ nes_debug(NES_DBG_CM, "Received ack from unknown state: %x\n",
+ cm_node->state);
+ send_reset(cm_node);
+ break;
+ }
+ }
+
+ if (tcph->syn) {
+ if (cm_node->state == NES_CM_STATE_LISTENING) {
+ /* do not exceed backlog */
+ atomic_inc(&cm_node->listener->pend_accepts_cnt);
+ if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
+ cm_node->listener->backlog) {
+ nes_debug(NES_DBG_CM, "drop syn due to backlog pressure \n");
+ cm_backlog_drops++;
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ rem_ref_cm_node(cm_core, cm_node);
+ return 0;
+ }
+ cm_node->accept_pend = 1;
+
+ }
+ if (datasize == 0)
+ cm_node->tcp_cntxt.rcv_nxt++;
+
+ if (cm_node->state == NES_CM_STATE_LISTENING) {
+ cm_node->state = NES_CM_STATE_SYN_RCVD;
+ send_syn(cm_node, 1);
+ }
+ if (cm_node->state == NES_CM_STATE_ONE_SIDE_ESTABLISHED) {
+ cm_node->state = NES_CM_STATE_ESTABLISHED;
+ /* send final handshake ACK */
+ ret = send_ack(cm_node);
+ if (ret < 0)
+ return ret;
+
+ cm_node->state = NES_CM_STATE_MPAREQ_SENT;
+ ret = send_mpa_request(cm_node);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
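+ /* FIN processing: walk the software TCP teardown states (CLOSE_WAIT/LAST_ACK, FIN_WAIT, CLOSING, TIME_WAIT) */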
+ if (tcph->fin) {
+ cm_node->tcp_cntxt.rcv_nxt++;
+ switch (cm_node->state) {
+ case NES_CM_STATE_SYN_RCVD:
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_ACCEPTING:
+ case NES_CM_STATE_MPAREQ_SENT:
+ cm_node->state = NES_CM_STATE_CLOSE_WAIT;
+ cm_node->state = NES_CM_STATE_LAST_ACK;
+ ret = send_fin(cm_node, NULL);
+ break;
+ case NES_CM_STATE_FIN_WAIT1:
+ cm_node->state = NES_CM_STATE_CLOSING;
+ ret = send_ack(cm_node);
+ break;
+ case NES_CM_STATE_FIN_WAIT2:
+ cm_node->state = NES_CM_STATE_TIME_WAIT;
+ cm_node->tcp_cntxt.loc_seq_num++;
+ ret = send_ack(cm_node);
+ /* need to schedule this to happen in 2MSL timeouts */
+ cm_node->state = NES_CM_STATE_CLOSED;
+ break;
+ case NES_CM_STATE_CLOSE_WAIT:
+ case NES_CM_STATE_LAST_ACK:
+ case NES_CM_STATE_CLOSING:
+ case NES_CM_STATE_TSA:
+ default:
+ nes_debug(NES_DBG_CM, "Received a fin while in %x state\n",
+ cm_node->state);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
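+ /* payload processing: TCP data here carries the MPA negotiation; anything arriving after TSA is treated as an exception packet */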
+ if (datasize) {
+ u8 *dataloc = skb->data;
+ /* figure out what state we are in and handle transition to next state */
+ switch (cm_node->state) {
+ case NES_CM_STATE_LISTENING:
+ case NES_CM_STATE_SYN_RCVD:
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_FIN_WAIT1:
+ case NES_CM_STATE_FIN_WAIT2:
+ case NES_CM_STATE_CLOSE_WAIT:
+ case NES_CM_STATE_LAST_ACK:
+ case NES_CM_STATE_CLOSING:
+ break;
+ case NES_CM_STATE_MPAREQ_SENT:
+ /* recv the mpa res frame, ret=frame len (incl priv data) */
+ ret = parse_mpa(cm_node, dataloc, datasize);
+ if (ret < 0)
+ break;
+ /* set the req frame payload len in skb */
+ /* we are done handling this state, set node to a TSA state */
+ cm_node->state = NES_CM_STATE_TSA;
+ send_ack(cm_node);
+ create_event(cm_node, NES_CM_EVENT_CONNECTED);
+ break;
+
+ case NES_CM_STATE_ESTABLISHED:
+ /* we are expecting an MPA req frame */
+ ret = parse_mpa(cm_node, dataloc, datasize);
+ if (ret < 0) {
+ break;
+ }
+ cm_node->state = NES_CM_STATE_TSA;
+ send_ack(cm_node);
+ /* we got a valid MPA request, create an event */
+ create_event(cm_node, NES_CM_EVENT_MPA_REQ);
+ break;
+ case NES_CM_STATE_TSA:
+ handle_exception_pkt(cm_node, skb);
+ break;
+ case NES_CM_STATE_UNKNOWN:
+ case NES_CM_STATE_INITED:
+ default:
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+
+/**
+ * mini_cm_listen - create a listen node with params
+ */
+static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
+ struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
+{
+ struct nes_cm_listener *listener;
+ unsigned long flags;
+
+ nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",
+ cm_info->loc_addr, cm_info->loc_port);
+
+ /* cannot have multiple matching listeners */
+ listener = find_listener(cm_core, htonl(cm_info->loc_addr),
+ htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE);
+ if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {
+ /* find_listener already incremented the ref count; drop it here */
+ atomic_dec(&listener->ref_count);
+ nes_debug(NES_DBG_CM, "Not creating listener since it already exists\n");
+ return NULL;
+ }
+
+ if (!listener) {
+ /* create a CM listen node (1/2 node to compare incoming traffic to) */
+ listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
+ if (!listener) {
+ nes_debug(NES_DBG_CM, "Not creating listener memory allocation failed\n");
+ return NULL;
+ }
+
+ listener->loc_addr = htonl(cm_info->loc_addr);
+ listener->loc_port = htons(cm_info->loc_port);
+ listener->reused_node = 0;
+
+ atomic_set(&listener->ref_count, 1);
+ } else {
+ /* passive case */
+ /* find_listener already incremented the ref count */
+ listener->reused_node = 1;
+ }
+
+ listener->cm_id = cm_info->cm_id;
+ atomic_set(&listener->pend_accepts_cnt, 0);
+ listener->cm_core = cm_core;
+ listener->nesvnic = nesvnic;
+ atomic_inc(&cm_core->node_cnt);
+ atomic_inc(&cm_core->session_id);
+
+ listener->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
+ listener->conn_type = cm_info->conn_type;
+ listener->backlog = cm_info->backlog;
+ listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE;
+
+ if (!listener->reused_node) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_add(&listener->list, &cm_core->listen_list.list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ atomic_inc(&cm_core->listen_node_cnt);
+ }
+
+ nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x,"
+ " listener = %p, backlog = %d, cm_id = %p.\n",
+ cm_info->loc_addr, cm_info->loc_port,
+ listener, listener->backlog, listener->cm_id);
+
+ return listener;
+}
+
+
+/**
+ * mini_cm_connect - make a connection node with params
+ */
+struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
+ struct nes_vnic *nesvnic, struct ietf_mpa_frame *mpa_frame,
+ struct nes_cm_info *cm_info)
+{
+ int ret = 0;
+ struct nes_cm_node *cm_node;
+ struct nes_cm_listener *loopbackremotelistener;
+ struct nes_cm_node *loopbackremotenode;
+ struct nes_cm_info loopback_cm_info;
+
+ u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) +
+ ntohs(mpa_frame->priv_data_len);
+
+ cm_info->loc_addr = htonl(cm_info->loc_addr);
+ cm_info->rem_addr = htonl(cm_info->rem_addr);
+ cm_info->loc_port = htons(cm_info->loc_port);
+ cm_info->rem_port = htons(cm_info->rem_port);
+
+ /* create a CM connection node */
+ cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL);
+ if (!cm_node)
+ return NULL;
+
+ /* set our node side to client (active) side */
+ cm_node->tcp_cntxt.client = 1;
+ cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
+
+ if (cm_info->loc_addr == cm_info->rem_addr) {
+ loopbackremotelistener = find_listener(cm_core, cm_node->rem_addr,
+ cm_node->rem_port, NES_CM_LISTENER_ACTIVE_STATE);
+ if (loopbackremotelistener == NULL) {
+ create_event(cm_node, NES_CM_EVENT_ABORTED);
+ } else {
+ atomic_inc(&cm_loopbacks);
+ loopback_cm_info = *cm_info;
+ loopback_cm_info.loc_port = cm_info->rem_port;
+ loopback_cm_info.rem_port = cm_info->loc_port;
+ loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
+ loopbackremotenode = make_cm_node(cm_core, nesvnic, &loopback_cm_info,
+ loopbackremotelistener);
+ loopbackremotenode->loopbackpartner = cm_node;
+ loopbackremotenode->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
+ cm_node->loopbackpartner = loopbackremotenode;
+ memcpy(loopbackremotenode->mpa_frame_buf, &mpa_frame->priv_data,
+ mpa_frame_size);
+ loopbackremotenode->mpa_frame_size = mpa_frame_size -
+ sizeof(struct ietf_mpa_frame);
+
+ /* we are done handling this state, set node to a TSA state */
+ cm_node->state = NES_CM_STATE_TSA;
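+ /* cross-wire the two TCP contexts so the loopback pair looks fully established without putting any packets on the wire */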
+ cm_node->tcp_cntxt.rcv_nxt = loopbackremotenode->tcp_cntxt.loc_seq_num;
+ loopbackremotenode->tcp_cntxt.rcv_nxt = cm_node->tcp_cntxt.loc_seq_num;
+ cm_node->tcp_cntxt.max_snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd;
+ loopbackremotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd;
+ loopbackremotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wscale = loopbackremotenode->tcp_cntxt.rcv_wscale;
+ loopbackremotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
+
+ create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ);
+ }
+ return cm_node;
+ }
+
+ /* set our node side to client (active) side */
+ cm_node->tcp_cntxt.client = 1;
+ /* init our MPA frame ptr */
+ memcpy(&cm_node->mpa_frame, mpa_frame, mpa_frame_size);
+ cm_node->mpa_frame_size = mpa_frame_size;
+
+ /* send a syn and goto syn sent state */
+ cm_node->state = NES_CM_STATE_SYN_SENT;
+ ret = send_syn(cm_node, 0);
+
+ nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X, port=0x%04x,"
+ " cm_node=%p, cm_id = %p.\n",
+ cm_node->rem_addr, cm_node->rem_port, cm_node, cm_node->cm_id);
+
+ return cm_node;
+}
+
+
+/**
+ * mini_cm_accept - accept a connection
+ * This function is never called
+ */
+int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
+ struct nes_cm_node *cm_node)
+{
+ return 0;
+}
+
+
+/**
+ * mini_cm_reject - reject and teardown a connection
+ */
+int mini_cm_reject(struct nes_cm_core *cm_core,
+ struct ietf_mpa_frame *mpa_frame,
+ struct nes_cm_node *cm_node)
+{
+ int ret = 0;
+ struct sk_buff *skb;
+ u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) +
+ ntohs(mpa_frame->priv_data_len);
+
+ skb = get_free_pkt(cm_node);
+ if (!skb) {
+ nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
+ return -1;
+ }
+
+ /* send the MPA reject frame */
+ form_cm_frame(skb, cm_node, NULL, 0, mpa_frame, mpa_frame_size, SET_ACK | SET_FIN);
+ ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
+
+ cm_node->state = NES_CM_STATE_CLOSED;
+ ret = send_fin(cm_node, NULL);
+
+ if (ret < 0) {
+ printk(KERN_INFO PFX "failed to send MPA Reply (reject)\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+
+/**
+ * mini_cm_close
+ */
+int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
+{
+ int ret = 0;
+
+ if (!cm_core || !cm_node)
+ return -EINVAL;
+
+ switch (cm_node->state) {
+ /* initiate a TCP close appropriate to the current connection state */
+ case NES_CM_STATE_SYN_RCVD:
+ case NES_CM_STATE_SYN_SENT:
+ case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case NES_CM_STATE_ESTABLISHED:
+ case NES_CM_STATE_ACCEPTING:
+ case NES_CM_STATE_MPAREQ_SENT:
+ cm_node->state = NES_CM_STATE_FIN_WAIT1;
+ send_fin(cm_node, NULL);
+ break;
+ case NES_CM_STATE_CLOSE_WAIT:
+ cm_node->state = NES_CM_STATE_LAST_ACK;
+ send_fin(cm_node, NULL);
+ break;
+ case NES_CM_STATE_FIN_WAIT1:
+ case NES_CM_STATE_FIN_WAIT2:
+ case NES_CM_STATE_LAST_ACK:
+ case NES_CM_STATE_TIME_WAIT:
+ case NES_CM_STATE_CLOSING:
+ ret = -1;
+ break;
+ case NES_CM_STATE_LISTENING:
+ case NES_CM_STATE_UNKNOWN:
+ case NES_CM_STATE_INITED:
+ case NES_CM_STATE_CLOSED:
+ case NES_CM_STATE_TSA:
+ ret = rem_ref_cm_node(cm_core, cm_node);
+ break;
+ }
+ cm_node->cm_id = NULL;
+ return ret;
+}
+
+
+/**
+ * mini_cm_recv_pkt - receive an ethernet packet and process it
+ * through the CM node state machine
+ */
+int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
+ struct sk_buff *skb)
+{
+ struct nes_cm_node *cm_node = NULL;
+ struct nes_cm_listener *listener = NULL;
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ struct nes_cm_info nfo;
+ int ret = 0;
+
+ if (!skb || skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
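+ /* the skb handed to the CM starts at the IP header; pull out the addressing needed for the node lookup */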
+ iph = (struct iphdr *)skb->data;
+ tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+ skb_set_transport_header(skb, sizeof(*tcph));
+ skb->len = ntohs(iph->tot_len);
+
+ nfo.loc_addr = ntohl(iph->daddr);
+ nfo.loc_port = ntohs(tcph->dest);
+ nfo.rem_addr = ntohl(iph->saddr);
+ nfo.rem_port = ntohs(tcph->source);
+
+ nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n",
+ iph->daddr, tcph->dest, iph->saddr, tcph->source);
+
+ /* note: this call is going to increment cm_node ref count */
+ cm_node = find_node(cm_core,
+ nfo.rem_port, nfo.rem_addr,
+ nfo.loc_port, nfo.loc_addr);
+
+ if (!cm_node) {
+ listener = find_listener(cm_core, nfo.loc_addr, nfo.loc_port,
+ NES_CM_LISTENER_ACTIVE_STATE);
+ if (listener) {
+ nfo.cm_id = listener->cm_id;
+ nfo.conn_type = listener->conn_type;
+ } else {
+ nfo.cm_id = NULL;
+ nfo.conn_type = 0;
+ }
+
+ cm_node = make_cm_node(cm_core, nesvnic, &nfo, listener);
+ if (!cm_node) {
+ nes_debug(NES_DBG_CM, "Unable to allocate node\n");
+ if (listener) {
+ nes_debug(NES_DBG_CM, "unable to allocate node and decrementing listener refcount\n");
+ atomic_dec(&listener->ref_count);
+ }
+ ret = -1;
+ goto out;
+ }
+ if (!listener) {
+ nes_debug(NES_DBG_CM, "Packet found for unknown port %x refcnt=%d\n",
+ nfo.loc_port, atomic_read(&cm_node->ref_count));
+ if (!tcph->rst) {
+ nes_debug(NES_DBG_CM, "Packet found for unknown port=%d"
+ " rem_port=%d refcnt=%d\n",
+ nfo.loc_port, nfo.rem_port, atomic_read(&cm_node->ref_count));
+
+ cm_node->tcp_cntxt.rcv_nxt = ntohl(tcph->seq);
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ send_reset(cm_node);
+ }
+ rem_ref_cm_node(cm_core, cm_node);
+ ret = -1;
+ goto out;
+ }
+ add_ref_cm_node(cm_node);
+ cm_node->state = NES_CM_STATE_LISTENING;
+ }
+
+ nes_debug(NES_DBG_CM, "Processing Packet for node %p, data = (%p):\n",
+ cm_node, skb->data);
+ process_packet(cm_node, skb, cm_core);
+
+ rem_ref_cm_node(cm_core, cm_node);
+ out:
+ if (skb)
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
+
+/**
+ * nes_cm_alloc_core - allocate a top level instance of a cm core
+ */
+struct nes_cm_core *nes_cm_alloc_core(void)
+{
+ int i;
+
+ struct nes_cm_core *cm_core;
+ struct sk_buff *skb = NULL;
+
+ /* setup the CM core */
+ /* alloc top level core control structure */
+ cm_core = kzalloc(sizeof(*cm_core), GFP_KERNEL);
+ if (!cm_core)
+ return NULL;
+
+ INIT_LIST_HEAD(&cm_core->connected_nodes);
+ init_timer(&cm_core->tcp_timer);
+ cm_core->tcp_timer.function = nes_cm_timer_tick;
+
+ cm_core->mtu = NES_CM_DEFAULT_MTU;
+ cm_core->state = NES_CM_STATE_INITED;
+ cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS;
+
+ atomic_set(&cm_core->session_id, 0);
+ atomic_set(&cm_core->events_posted, 0);
+
+ /* init the packet lists */
+ skb_queue_head_init(&cm_core->tx_free_list);
+
+ for (i = 0; i < NES_CM_DEFAULT_FRAME_CNT; i++) {
+ skb = dev_alloc_skb(cm_core->mtu);
+ if (!skb) {
+ /* free any frames already queued before bailing out */
+ skb_queue_purge(&cm_core->tx_free_list);
+ kfree(cm_core);
+ return NULL;
+ }
+ /* add 'raw' skb to free frame list */
+ skb_queue_head(&cm_core->tx_free_list, skb);
+ }
+
+ cm_core->api = &nes_cm_api;
+
+ spin_lock_init(&cm_core->ht_lock);
+ spin_lock_init(&cm_core->listen_list_lock);
+
+ INIT_LIST_HEAD(&cm_core->listen_list.list);
+
+ nes_debug(NES_DBG_CM, "Init CM Core completed -- cm_core=%p\n", cm_core);
+
+ nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
+ cm_core->event_wq = create_singlethread_workqueue("nesewq");
+ cm_core->post_event = nes_cm_post_event;
+ nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
+ cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");
+
+ print_core(cm_core);
+ return cm_core;
+}
+
+
+/**
+ * mini_cm_dealloc_core - deallocate a top level instance of a cm core
+ */
+int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
+{
+ nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core);
+
+ if (!cm_core)
+ return -EINVAL;
+
+ barrier();
+
+ if (timer_pending(&cm_core->tcp_timer))
+ del_timer(&cm_core->tcp_timer);
+
+ destroy_workqueue(cm_core->event_wq);
+ destroy_workqueue(cm_core->disconn_wq);
+ nes_debug(NES_DBG_CM, "\n");
+ kfree(cm_core);
+
+ return 0;
+}
+
+
+/**
+ * mini_cm_get
+ */
+int mini_cm_get(struct nes_cm_core *cm_core)
+{
+ return cm_core->state;
+}
+
+
+/**
+ * mini_cm_set
+ */
+int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
+{
+ int ret = 0;
+
+ switch (type) {
+ case NES_CM_SET_PKT_SIZE:
+ cm_core->mtu = value;
+ break;
+ case NES_CM_SET_FREE_PKT_Q_SIZE:
+ cm_core->free_tx_pkt_max = value;
+ break;
+ default:
+ /* unknown set option */
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+
+/**
+ * nes_cm_init_tsa_conn - set up the HW QP context; the MPA exchange
+ * must have completed successfully before this is called
+ */
+static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_node)
+{
+ int ret = 0;
+
+ if (!nesqp)
+ return -EINVAL;
+
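+ /* transfer the software-tracked TCP connection state into the hardware QP context so the RNIC can take over the offloaded connection */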
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 |
+ NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG |
+ NES_QPCONTEXT_MISC_DROS);
+
+ if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale)
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE);
+
+ nesqp->nesqp_context->misc2 |= cpu_to_le32(64 << NES_QPCONTEXT_MISC2_TTL_SHIFT);
+
+ nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16);
+
+ nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32(
+ (u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT);
+
+ nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
+ (cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) &
+ NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK);
+
+ nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
+ (cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) &
+ NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK);
+
+ nesqp->nesqp_context->keepalive = cpu_to_le32(0x80);
+ nesqp->nesqp_context->ts_recent = 0;
+ nesqp->nesqp_context->ts_age = 0;
+ nesqp->nesqp_context->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
+ nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
+ nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
+ cm_node->tcp_cntxt.rcv_wscale);
+ nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ nesqp->nesqp_context->srtt = 0;
+ nesqp->nesqp_context->rttvar = cpu_to_le32(0x6);
+ nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000);
+ nesqp->nesqp_context->cwnd = cpu_to_le32(2*cm_node->tcp_cntxt.mss);
+ nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
+ nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+ nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
+
+ nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X,"
+ " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n",
+ nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
+ le32_to_cpu(nesqp->nesqp_context->snd_nxt),
+ cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale),
+ le32_to_cpu(nesqp->nesqp_context->rcv_wnd),
+ le32_to_cpu(nesqp->nesqp_context->misc));
+ nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->snd_wnd));
+ nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->cwnd));
+ nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->max_snd_wnd));
+
+ nes_debug(NES_DBG_CM, "Change cm_node state to TSA\n");
+ cm_node->state = NES_CM_STATE_TSA;
+
+ return ret;
+}
+
+
+/**
+ * nes_cm_disconn
+ */
+int nes_cm_disconn(struct nes_qp *nesqp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+ if (nesqp->disconn_pending == 0) {
+ nesqp->disconn_pending++;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ /* nes_add_ref(&nesqp->ibqp); */
+ /* init our disconnect work element */
+ INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
+
+ queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_rem_ref(&nesqp->ibqp);
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_disconnect_worker
+ */
+void nes_disconnect_worker(struct work_struct *work)
+{
+ struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
+
+ nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
+ nesqp->last_aeq, nesqp->hwqp.qp_id);
+ nes_cm_disconn_true(nesqp);
+}
+
+
+/**
+ * nes_cm_disconn_true
+ */
+int nes_cm_disconn_true(struct nes_qp *nesqp)
+{
+ unsigned long flags;
+ int ret = 0;
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ struct nes_vnic *nesvnic;
+ u16 last_ae;
+ u8 original_hw_tcp_state;
+ u8 original_ibqp_state;
+ u8 issued_disconnect_reset = 0;
+
+ if (!nesqp) {
+ nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
+ return -1;
+ }
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+ cm_id = nesqp->cm_id;
+ /* make sure we haven't already closed this connection */
+ if (!cm_id) {
+ nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n",
+ nesqp->hwqp.qp_id);
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -1;
+ }
+
+ nesvnic = to_nesvnic(nesqp->ibqp.device);
+ nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id);
+
+ original_hw_tcp_state = nesqp->hw_tcp_state;
+ original_ibqp_state = nesqp->ibqp_state;
+ last_ae = nesqp->last_aeq;
+
+
+ nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state);
+
+ if ((nesqp->cm_id) && (cm_id->event_handler)) {
+ if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+ ((original_ibqp_state == IB_QPS_RTS) &&
+ (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ atomic_inc(&cm_disconnects);
+ cm_event.event = IW_CM_EVENT_DISCONNECT;
+ if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
+ issued_disconnect_reset = 1;
+ cm_event.status = IW_CM_EVENT_STATUS_RESET;
+ nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event (status reset) for "
+ " QP%u, cm_id = %p. \n",
+ nesqp->hwqp.qp_id, cm_id);
+ } else {
+ cm_event.status = IW_CM_EVENT_STATUS_OK;
+ }
+
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+
+ nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event for "
+ " QP%u, SQ Head = %u, SQ Tail = %u. cm_id = %p, refcount = %u.\n",
+ nesqp->hwqp.qp_id,
+ nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail, cm_id,
+ atomic_read(&nesqp->refcount));
+
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ if (ret)
+ nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ }
+
+ nesqp->disconn_pending = 0;
+ /* There might have been another AE while the lock was released */
+ original_hw_tcp_state = nesqp->hw_tcp_state;
+ original_ibqp_state = nesqp->ibqp_state;
+ last_ae = nesqp->last_aeq;
+
+ if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
+ ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
+ (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
+ (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
+ (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ atomic_inc(&cm_closes);
+ nesqp->cm_id = NULL;
+ nesqp->in_disconnect = 0;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_disconnect(nesqp, 1);
+
+ cm_id->provider_data = nesqp;
+ /* Send up the close complete event */
+ cm_event.event = IW_CM_EVENT_CLOSE;
+ cm_event.status = IW_CM_EVENT_STATUS_OK;
+ cm_event.provider_data = cm_id->provider_data;
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ if (ret) {
+ nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
+ }
+
+ cm_id->rem_ref(cm_id);
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+ if (nesqp->flush_issued == 0) {
+ nesqp->flush_issued = 1;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ flush_wqes(nesvnic->nesdev, nesqp, NES_CQP_FLUSH_RQ, 1);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ }
+
+ /* This reference is from either ModifyQP or the AE processing;
+ * there is still a race here with modifyqp */
+ nes_rem_ref(&nesqp->ibqp);
+
+ } else {
+ cm_id = nesqp->cm_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ /* check to see if the inbound reset beat the outbound reset */
+ if ((!cm_id) && (last_ae == NES_AEQE_AEID_RESET_SENT)) {
+ nes_debug(NES_DBG_CM, "QP%u: Decrementing refcount due to inbound reset"
+ " beating the outbound reset.\n",
+ nesqp->hwqp.qp_id);
+ nes_rem_ref(&nesqp->ibqp);
+ }
+ }
+ } else {
+ nesqp->disconn_pending = 0;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ }
+ nes_rem_ref(&nesqp->ibqp);
+
+ return 0;
+}
+
+
+/**
+ * nes_disconnect
+ */
+int nes_disconnect(struct nes_qp *nesqp, int abrupt)
+{
+ int ret = 0;
+ struct nes_vnic *nesvnic;
+ struct nes_device *nesdev;
+
+ nesvnic = to_nesvnic(nesqp->ibqp.device);
+ if (!nesvnic)
+ return -EINVAL;
+
+ nesdev = nesvnic->nesdev;
+
+ nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
+ atomic_read(&nesvnic->netdev->refcnt));
+
+ if (nesqp->active_conn) {
+ /* indicate this connection is NOT active */
+ nesqp->active_conn = 0;
+ } else {
+ /* Need to free the Last Streaming Mode Message */
+ if (nesqp->ietf_frame) {
+ pci_free_consistent(nesdev->pcidev,
+ nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
+ nesqp->ietf_frame, nesqp->ietf_frame_pbase);
+ }
+ }
+
+ /* close the CM node down if it is still active */
+ if (nesqp->cm_node) {
+ nes_debug(NES_DBG_CM, "Call close API\n");
+
+ g_cm_core->api->close(g_cm_core, nesqp->cm_node);
+ nesqp->cm_node = NULL;
+ }
+
+ return ret;
+}
+
+
+/**
+ * nes_accept
+ */
+int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ u64 u64temp;
+ struct ib_qp *ibqp;
+ struct nes_qp *nesqp;
+ struct nes_vnic *nesvnic;
+ struct nes_device *nesdev;
+ struct nes_cm_node *cm_node;
+ struct nes_adapter *adapter;
+ struct ib_qp_attr attr;
+ struct iw_cm_event cm_event;
+ struct nes_hw_qp_wqe *wqe;
+ struct nes_v4_quad nes_quad;
+ int ret;
+
+ ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+
+ /* get all our handles */
+ nesqp = to_nesqp(ibqp);
+ nesvnic = to_nesvnic(nesqp->ibqp.device);
+ nesdev = nesvnic->nesdev;
+ adapter = nesdev->nesadapter;
+
+ nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
+ nesvnic, nesvnic->netdev, nesvnic->netdev->name);
+
+ /* since this is from a listen, we were able to put node handle into cm_id */
+ cm_node = (struct nes_cm_node *)cm_id->provider_data;
+
+ /* associate the node with the QP */
+ nesqp->cm_node = (void *)cm_node;
+
+ nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu\n",
+ nesqp->hwqp.qp_id, cm_node, jiffies);
+ atomic_inc(&cm_accepts);
+
+ nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
+ atomic_read(&nesvnic->netdev->refcnt));
+
+ /* allocate the ietf frame and space for private data */
+ nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
+ sizeof(struct ietf_mpa_frame) + conn_param->private_data_len,
+ &nesqp->ietf_frame_pbase);
+
+ if (!nesqp->ietf_frame) {
+ nes_debug(NES_DBG_CM, "Unable to allocate memory for private data\n");
+ return -ENOMEM;
+ }
+
+ /* setup the MPA frame */
+ nesqp->private_data_len = conn_param->private_data_len;
+ memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
+
+ memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
+ conn_param->private_data_len);
+
+ nesqp->ietf_frame->priv_data_len = cpu_to_be16(conn_param->private_data_len);
+ nesqp->ietf_frame->rev = mpa_version;
+ nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
+
+ /* setup our first outgoing iWarp send WQE (the IETF frame response) */
+ wqe = &nesqp->hwqp.sq_vbase[0];
+
+ if (cm_id->remote_addr.sin_addr.s_addr != cm_id->local_addr.sin_addr.s_addr) {
+ u64temp = (unsigned long)nesqp;
+ u64temp |= NES_SW_CONTEXT_ALIGN>>1;
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
+ u64temp);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
+ cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING | NES_IWARP_SQ_WQE_WRPDU);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
+ cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] =
+ cpu_to_le32((u32)nesqp->ietf_frame_pbase);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] =
+ cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
+ cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame));
+ wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
+ NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | NES_QPCONTEXT_ORDIRD_WRPDU);
+ } else {
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
+ NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM));
+ }
+ nesqp->skip_lsmm = 1;
+
+
+ /* Cache the cm_id in the qp */
+ nesqp->cm_id = cm_id;
+ cm_node->cm_id = cm_id;
+
+ /* nesqp->cm_node = (void *)cm_id->provider_data; */
+ cm_id->provider_data = nesqp;
+ nesqp->active_conn = 0;
+
+ nes_cm_init_tsa_conn(nesqp, cm_node);
+
+ nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
+ nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
+ nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+
+ nesqp->nesqp_context->misc2 |= cpu_to_le32(
+ (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
+
+ nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
+ nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0), NULL,
+ NES_ARP_RESOLVE) << 16);
+
+ nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
+ jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
+
+ nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
+
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
+ ((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT));
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
+
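+ /* build the connection 4-tuple; its crc32c hash selects the hash table entry (HTE) index programmed into the QP context */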
+ memset(&nes_quad, 0, sizeof(nes_quad));
+ nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
+ nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
+ nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
+ nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
+
+ /* Produce hash key */
+ nesqp->hte_index = cpu_to_be32(
+ crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff);
+ nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n",
+ nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
+
+ nesqp->hte_index &= adapter->hte_index_mask;
+ nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
+
+ cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
+
+ nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X,"
+ " rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + private data length=%zu.\n",
+ nesqp->hwqp.qp_id,
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port),
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port),
+ le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
+ le32_to_cpu(nesqp->nesqp_context->snd_nxt),
+ conn_param->private_data_len+sizeof(struct ietf_mpa_frame));
+
+ attr.qp_state = IB_QPS_RTS;
+ nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+ /* notify OF layer that accept event was successful */
+ cm_id->add_ref(cm_id);
+
+ cm_event.event = IW_CM_EVENT_ESTABLISHED;
+ cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
+ cm_event.provider_data = (void *)nesqp;
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ if (cm_node->loopbackpartner) {
+ cm_node->loopbackpartner->mpa_frame_size = nesqp->private_data_len;
+ /* copy entire MPA frame to our cm_node's frame */
+ memcpy(cm_node->loopbackpartner->mpa_frame_buf, nesqp->ietf_frame->priv_data,
+ nesqp->private_data_len);
+ create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED);
+ }
+ if (ret)
+ printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
+ __FUNCTION__, __LINE__, ret);
+
+ return 0;
+}
+
+
+/**
+ * nes_reject
+ */
+int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct nes_cm_node *cm_node;
+ struct nes_cm_core *cm_core;
+
+ atomic_inc(&cm_rejects);
+ cm_node = (struct nes_cm_node *) cm_id->provider_data;
+ cm_core = cm_node->cm_core;
+ cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len;
+
+ strcpy(&cm_node->mpa_frame.key[0], IEFT_MPA_KEY_REP);
+ memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len);
+
+ cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
+ cm_node->mpa_frame.rev = mpa_version;
+ cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT;
+
+ cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node);
+
+ return 0;
+}
+
+
+/**
+ * nes_connect
+ * setup and launch cm connect node
+ */
+int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct nes_qp *nesqp;
+ struct nes_vnic *nesvnic;
+ struct nes_device *nesdev;
+ struct nes_cm_node *cm_node;
+ struct nes_cm_info cm_info;
+
+ ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+ nesqp = to_nesqp(ibqp);
+ if (!nesqp)
+ return -EINVAL;
+ nesvnic = to_nesvnic(nesqp->ibqp.device);
+ if (!nesvnic)
+ return -EINVAL;
+ nesdev = nesvnic->nesdev;
+ if (!nesdev)
+ return -EINVAL;
+
+ atomic_inc(&cm_connects);
+
+ nesqp->ietf_frame = kzalloc(sizeof(struct ietf_mpa_frame) +
+ conn_param->private_data_len, GFP_KERNEL);
+ if (!nesqp->ietf_frame)
+ return -ENOMEM;
+
+ /* set qp as having an active connection */
+ nesqp->active_conn = 1;
+
+ nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X.\n",
+ nesqp->hwqp.qp_id,
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port),
+ ntohl(cm_id->local_addr.sin_addr.s_addr),
+ ntohs(cm_id->local_addr.sin_port));
+
+ /* cache the cm_id in the qp */
+ nesqp->cm_id = cm_id;
+
+ cm_id->provider_data = nesqp;
+
+ /* copy the private data */
+ if (conn_param->private_data_len) {
+ memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
+ conn_param->private_data_len);
+ }
+
+ nesqp->private_data_len = conn_param->private_data_len;
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
+ nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
+ nes_debug(NES_DBG_CM, "mpa private data len =%u\n", conn_param->private_data_len);
+
+ strcpy(&nesqp->ietf_frame->key[0], IEFT_MPA_KEY_REQ);
+ nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
+ nesqp->ietf_frame->rev = IETF_MPA_VERSION;
+ nesqp->ietf_frame->priv_data_len = htons(conn_param->private_data_len);
+
+ if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr)
+ nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
+ PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+
+ /* set up the connection params for the node */
+ cm_info.loc_addr = (cm_id->local_addr.sin_addr.s_addr);
+ cm_info.loc_port = (cm_id->local_addr.sin_port);
+ cm_info.rem_addr = (cm_id->remote_addr.sin_addr.s_addr);
+ cm_info.rem_port = (cm_id->remote_addr.sin_port);
+ cm_info.cm_id = cm_id;
+ cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
+
+ cm_id->add_ref(cm_id);
+ nes_add_ref(&nesqp->ibqp);
+
+ /* create a connect CM node connection */
+ cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, nesqp->ietf_frame, &cm_info);
+ if (!cm_node) {
+ if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr)
+ nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
+ PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);
+ nes_rem_ref(&nesqp->ibqp);
+ kfree(nesqp->ietf_frame);
+ nesqp->ietf_frame = NULL;
+ cm_id->rem_ref(cm_id);
+ return -ENOMEM;
+ }
+
+ cm_node->apbvt_set = 1;
+ nesqp->cm_node = cm_node;
+
+ return 0;
+}
+
+
+/**
+ * nes_create_listen
+ */
+int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct nes_vnic *nesvnic;
+ struct nes_cm_listener *cm_node;
+ struct nes_cm_info cm_info;
+ struct nes_adapter *adapter;
+ int err;
+
+
+ nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
+ cm_id, ntohs(cm_id->local_addr.sin_port));
+
+ nesvnic = to_nesvnic(cm_id->device);
+ if (!nesvnic)
+ return -EINVAL;
+ adapter = nesvnic->nesdev->nesadapter;
+ nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
+ nesvnic, nesvnic->netdev, nesvnic->netdev->name);
+
+ nes_debug(NES_DBG_CM, "nesvnic->local_ipaddr=0x%08x, sin_addr.s_addr=0x%08x\n",
+ nesvnic->local_ipaddr, cm_id->local_addr.sin_addr.s_addr);
+
+ /* setup listen params in our api call struct */
+ cm_info.loc_addr = nesvnic->local_ipaddr;
+ cm_info.loc_port = cm_id->local_addr.sin_port;
+ cm_info.backlog = backlog;
+ cm_info.cm_id = cm_id;
+
+ cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
+
+
+ cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
+ if (!cm_node) {
+ printk("%s[%u] Error returned from listen API call\n",
+ __FUNCTION__, __LINE__);
+ return -ENOMEM;
+ }
+
+ cm_id->provider_data = cm_node;
+
+ if (!cm_node->reused_node) {
+ err = nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
+ PCI_FUNC(nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+ if (err) {
+ printk("nes_manage_apbvt call returned %d.\n", err);
+ g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
+ return err;
+ }
+ cm_listens_created++;
+ }
+
+ cm_id->add_ref(cm_id);
+
+ return 0;
+}
+
+
+/**
+ * nes_destroy_listen
+ */
+int nes_destroy_listen(struct iw_cm_id *cm_id)
+{
+ if (cm_id->provider_data)
+ g_cm_core->api->stop_listener(g_cm_core, cm_id->provider_data);
+ else
+ nes_debug(NES_DBG_CM, "cm_id->provider_data was NULL\n");
+
+ cm_id->rem_ref(cm_id);
+
+ return 0;
+}
+
+
+/**
+ * nes_cm_recv
+ */
+int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice)
+{
+ cm_packets_received++;
+ if ((g_cm_core) && (g_cm_core->api)) {
+ g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb);
+ } else {
+ nes_debug(NES_DBG_CM, "Unable to process packet for CM,"
+ " cm is not setup properly.\n");
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_cm_start
+ * Start and init a cm core module
+ */
+int nes_cm_start(void)
+{
+ nes_debug(NES_DBG_CM, "\n");
+ /* create the primary CM core, pass this handle to subsequent core inits */
+ g_cm_core = nes_cm_alloc_core();
+ if (g_cm_core) {
+ return 0;
+ } else {
+ return -ENOMEM;
+ }
+}
+
+
+/**
+ * nes_cm_stop
+ * stop and dealloc all cm core instances
+ */
+int nes_cm_stop(void)
+{
+ g_cm_core->api->destroy_cm_core(g_cm_core);
+ return 0;
+}
+
+
+/**
+ * cm_event_connected
+ * handle a connected event, setup QPs and HW
+ */
+void cm_event_connected(struct nes_cm_event *event)
+{
+ u64 u64temp;
+ struct nes_qp *nesqp;
+ struct nes_vnic *nesvnic;
+ struct nes_device *nesdev;
+ struct nes_cm_node *cm_node;
+ struct nes_adapter *nesadapter;
+ struct ib_qp_attr attr;
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ struct nes_hw_qp_wqe *wqe;
+ struct nes_v4_quad nes_quad;
+ int ret;
+
+ /* get all our handles */
+ cm_node = event->cm_node;
+ cm_id = cm_node->cm_id;
+ nes_debug(NES_DBG_CM, "cm_event_connected - %p - cm_id = %p\n", cm_node, cm_id);
+ nesqp = (struct nes_qp *)cm_id->provider_data;
+ nesvnic = to_nesvnic(nesqp->ibqp.device);
+ nesdev = nesvnic->nesdev;
+ nesadapter = nesdev->nesadapter;
+
+ if (nesqp->destroyed) {
+ return;
+ }
+ atomic_inc(&cm_connecteds);
+ nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
+ " local port 0x%04X. jiffies = %lu.\n",
+ nesqp->hwqp.qp_id,
+ ntohl(cm_id->remote_addr.sin_addr.s_addr),
+ ntohs(cm_id->remote_addr.sin_port),
+ ntohs(cm_id->local_addr.sin_port),
+ jiffies);
+
+ nes_cm_init_tsa_conn(nesqp, cm_node);
+
+ /* set the QP tsa context */
+ nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
+ nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
+ nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+
+ nesqp->nesqp_context->misc2 |= cpu_to_le32(
+ (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
+ nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
+ nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0),
+ NULL, NES_ARP_RESOLVE) << 16);
+ nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
+ jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
+ nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
+ nesqp->nesqp_context->ird_ord_sizes |=
+ cpu_to_le32((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
+
+ /* Adjust tail for not having a LSMM */
+ nesqp->hwqp.sq_tail = 1;
+
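+ /* optionally post a zero-length RDMA Write as the first WQE in place of an LSMM */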
+#if defined(NES_SEND_FIRST_WRITE)
+ if (cm_node->send_write0) {
+ nes_debug(NES_DBG_CM, "Sending first write.\n");
+ wqe = &nesqp->hwqp.sq_vbase[0];
+ u64temp = (unsigned long)nesqp;
+ u64temp |= NES_SW_CONTEXT_ALIGN>>1;
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
+ u64temp);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(NES_IWARP_SQ_OP_RDMAW);
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
+ wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+
+ /* use the reserved spot on the WQ for the extra first WQE */
+ nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
+ NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM));
+ nesqp->skip_lsmm = 1;
+ nesqp->hwqp.sq_tail = 0;
+ nes_write32(nesdev->regs + NES_WQE_ALLOC,
+ (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
+ }
+#endif
+
+ memset(&nes_quad, 0, sizeof(nes_quad));
+
+ nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
+ nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
+ nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
+ nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
+
+ /* Produce hash key */
+ nesqp->hte_index = cpu_to_be32(
+ crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff);
+ nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n",
+ nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);
+
+ nesqp->hte_index &= nesadapter->hte_index_mask;
+ nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
+
+ nesqp->ietf_frame = &cm_node->mpa_frame;
+ nesqp->private_data_len = (u8) cm_node->mpa_frame_size;
+ cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
+
+ /* modify QP state to rts */
+ attr.qp_state = IB_QPS_RTS;
+ nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+ /* notify OF layer we successfully created the requested connection */
+ cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
+ cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
+ cm_event.provider_data = cm_id->provider_data;
+ cm_event.local_addr.sin_family = AF_INET;
+ cm_event.local_addr.sin_port = cm_id->local_addr.sin_port;
+ cm_event.remote_addr = cm_id->remote_addr;
+
+ cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
+ cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size;
+
+ cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr;
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
+
+ if (ret)
+ printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
+ __FUNCTION__, __LINE__, ret);
+ nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n",
+ nesqp->hwqp.qp_id, jiffies );
+
+ nes_rem_ref(&nesqp->ibqp);
+
+ return;
+}
+
+
+/**
+ * cm_event_connect_error
+ */
+void cm_event_connect_error(struct nes_cm_event *event)
+{
+ struct nes_qp *nesqp;
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ /* struct nes_cm_info cm_info; */
+ int ret;
+
+ if (!event->cm_node)
+ return;
+
+ cm_id = event->cm_node->cm_id;
+ if (!cm_id) {
+ return;
+ }
+
+ nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id);
+ nesqp = cm_id->provider_data;
+
+ if (!nesqp) {
+ return;
+ }
+
+ /* notify OF layer about this connection error event */
+ /* cm_id->rem_ref(cm_id); */
+ nesqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
+ cm_event.status = IW_CM_EVENT_STATUS_REJECTED;
+ cm_event.provider_data = cm_id->provider_data;
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+
+ nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, remove_addr=%08x\n",
+ cm_event.local_addr.sin_addr.s_addr, cm_event.remote_addr.sin_addr.s_addr);
+
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
+ if (ret)
+ printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
+ __FUNCTION__, __LINE__, ret);
+ nes_rem_ref(&nesqp->ibqp);
+ cm_id->rem_ref(cm_id);
+
+ return;
+}
+
+
+/**
+ * cm_event_reset
+ */
+void cm_event_reset(struct nes_cm_event *event)
+{
+ struct nes_qp *nesqp;
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ /* struct nes_cm_info cm_info; */
+ int ret;
+
+ if (!event->cm_node)
+ return;
+
+ if (!event->cm_node->cm_id)
+ return;
+
+ cm_id = event->cm_node->cm_id;
+
+ nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id);
+ nesqp = cm_id->provider_data;
+
+ nesqp->cm_id = NULL;
+ /* cm_id->provider_data = NULL; */
+ cm_event.event = IW_CM_EVENT_DISCONNECT;
+ cm_event.status = IW_CM_EVENT_STATUS_RESET;
+ cm_event.provider_data = cm_id->provider_data;
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
+
+
+ /* notify OF layer about this connection error event */
+ cm_id->rem_ref(cm_id);
+
+ return;
+}
+
+
+/**
+ * cm_event_mpa_req
+ */
+void cm_event_mpa_req(struct nes_cm_event *event)
+{
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ int ret;
+ struct nes_cm_node *cm_node;
+
+ cm_node = event->cm_node;
+ if (!cm_node)
+ return;
+ cm_id = cm_node->cm_id;
+
+ atomic_inc(&cm_connect_reqs);
+ nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
+ cm_node, cm_id, jiffies);
+
+ cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
+ cm_event.status = IW_CM_EVENT_STATUS_OK;
+ cm_event.provider_data = (void *)cm_node;
+
+ cm_event.local_addr.sin_family = AF_INET;
+ cm_event.local_addr.sin_port = htons(event->cm_info.loc_port);
+ cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr);
+
+ cm_event.remote_addr.sin_family = AF_INET;
+ cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
+ cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
+
+ cm_event.private_data = cm_node->mpa_frame_buf;
+ cm_event.private_data_len = (u8) cm_node->mpa_frame_size;
+
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ if (ret)
+ printk("%s[%u] OFA CM event_handler returned, ret=%d\n",
+ __FUNCTION__, __LINE__, ret);
+
+ return;
+}
+
+
+static void nes_cm_event_handler(struct work_struct *);
+
+/**
+ * nes_cm_post_event
+ * post an event to the cm event handler
+ */
+int nes_cm_post_event(struct nes_cm_event *event)
+{
+ atomic_inc(&event->cm_node->cm_core->events_posted);
+ add_ref_cm_node(event->cm_node);
+ event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
+ INIT_WORK(&event->event_work, nes_cm_event_handler);
+ nes_debug(NES_DBG_CM, "queue_work, event=%p\n", event);
+
+ queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
+
+ nes_debug(NES_DBG_CM, "Exit\n");
+ return 0;
+}
+
+
+/**
+ * nes_cm_event_handler
+ * worker function to handle cm events
+ * will free instance of nes_cm_event
+ */
+static void nes_cm_event_handler(struct work_struct *work)
+{
+ struct nes_cm_event *event = container_of(work, struct nes_cm_event, event_work);
+ struct nes_cm_core *cm_core;
+
+ if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) {
+ return;
+ }
+ cm_core = event->cm_node->cm_core;
+ nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n",
+ event, event->type, atomic_read(&cm_core->events_posted));
+
+ switch (event->type) {
+ case NES_CM_EVENT_MPA_REQ:
+ cm_event_mpa_req(event);
+ nes_debug(NES_DBG_CM, "CM Event: MPA REQUEST\n");
+ break;
+ case NES_CM_EVENT_RESET:
+ nes_debug(NES_DBG_CM, "CM Event: RESET\n");
+ cm_event_reset(event);
+ break;
+ case NES_CM_EVENT_CONNECTED:
+ if ((!event->cm_node->cm_id) ||
+ (event->cm_node->state != NES_CM_STATE_TSA)) {
+ break;
+ }
+ cm_event_connected(event);
+ nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
+ break;
+ case NES_CM_EVENT_ABORTED:
+ if ((!event->cm_node->cm_id) || (event->cm_node->state == NES_CM_STATE_TSA)) {
+ break;
+ }
+ cm_event_connect_error(event);
+ nes_debug(NES_DBG_CM, "CM Event: ABORTED\n");
+ break;
+ case NES_CM_EVENT_DROPPED_PKT:
+ nes_debug(NES_DBG_CM, "CM Event: DROPPED PKT\n");
+ break;
+ default:
+ nes_debug(NES_DBG_CM, "CM Event: UNKNOWN EVENT TYPE\n");
+ break;
+ }
+
+ atomic_dec(&cm_core->events_posted);
+ event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
+ rem_ref_cm_node(cm_core, event->cm_node);
+ kfree(event);
+
+ return;
+}
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
new file mode 100644
index 000000000000..a59f0a7fb278
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef NES_CM_H
+#define NES_CM_H
+
+#define QUEUE_EVENTS
+
+#define NES_MANAGE_APBVT_DEL 0
+#define NES_MANAGE_APBVT_ADD 1
+
+/* IETF MPA -- defines, enums, structs */
+#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
+#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
+#define IETF_MPA_KEY_SIZE 16
+#define IETF_MPA_VERSION 1
+
+enum ietf_mpa_flags {
+ IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
+ IETF_MPA_FLAGS_CRC = 0x40, /* CRC enabled */
+ IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */
+};
+
+struct ietf_mpa_frame {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ u8 priv_data[0];
+};
+
+#define ietf_mpa_req_resp_frame ietf_mpa_frame
+
+struct nes_v4_quad {
+ u32 rsvd0;
+ __le32 DstIpAdrIndex; /* Only most significant 5 bits are valid */
+ __be32 SrcIpadr;
+ __be16 TcpPorts[2]; /* src is low, dest is high */
+};
+
+struct nes_cm_node;
+enum nes_timer_type {
+ NES_TIMER_TYPE_SEND,
+ NES_TIMER_TYPE_RECV,
+ NES_TIMER_NODE_CLEANUP,
+ NES_TIMER_TYPE_CLOSE,
+};
+
+#define MAX_NES_IFS 4
+
+#define SET_ACK 1
+#define SET_SYN 2
+#define SET_FIN 4
+#define SET_RST 8
+
+struct option_base {
+ u8 optionnum;
+ u8 length;
+};
+
+enum option_numbers {
+ OPTION_NUMBER_END,
+ OPTION_NUMBER_NONE,
+ OPTION_NUMBER_MSS,
+ OPTION_NUMBER_WINDOW_SCALE,
+ OPTION_NUMBER_SACK_PERM,
+ OPTION_NUMBER_SACK,
+ OPTION_NUMBER_WRITE0 = 0xbc
+};
+
+struct option_mss {
+ u8 optionnum;
+ u8 length;
+ __be16 mss;
+};
+
+struct option_windowscale {
+ u8 optionnum;
+ u8 length;
+ u8 shiftcount;
+};
+
+union all_known_options {
+ char as_end;
+ struct option_base as_base;
+ struct option_mss as_mss;
+ struct option_windowscale as_windowscale;
+};
+
+struct nes_timer_entry {
+ struct list_head list;
+ unsigned long timetosend; /* jiffies */
+ struct sk_buff *skb;
+ u32 type;
+ u32 retrycount;
+ u32 retranscount;
+ u32 context;
+ u32 seq_num;
+ u32 send_retrans;
+ int close_when_complete;
+ struct net_device *netdev;
+};
+
+#define NES_DEFAULT_RETRYS 64
+#define NES_DEFAULT_RETRANS 8
+#ifdef CONFIG_INFINIBAND_NES_DEBUG
+#define NES_RETRY_TIMEOUT (1000*HZ/1000)
+#else
+#define NES_RETRY_TIMEOUT (3000*HZ/1000)
+#endif
+#define NES_SHORT_TIME (10)
+#define NES_LONG_TIME (2000*HZ/1000)
+
+#define NES_CM_HASHTABLE_SIZE 1024
+#define NES_CM_TCP_TIMER_INTERVAL 3000
+#define NES_CM_DEFAULT_MTU 1540
+#define NES_CM_DEFAULT_FRAME_CNT 10
+#define NES_CM_THREAD_STACK_SIZE 256
+#define NES_CM_DEFAULT_RCV_WND 64240 /* before we know that window scaling is allowed */
+#define NES_CM_DEFAULT_RCV_WND_SCALED 256960 /* after we know that window scaling is allowed */
+#define NES_CM_DEFAULT_RCV_WND_SCALE 2
+#define NES_CM_DEFAULT_FREE_PKTS 0x000A
+#define NES_CM_FREE_PKT_LO_WATERMARK 2
+
+#define NES_CM_DEFAULT_MSS 536
+
+#define NES_CM_DEF_SEQ 0x159bf75f
+#define NES_CM_DEF_LOCAL_ID 0x3b47
+
+#define NES_CM_DEF_SEQ2 0x18ed5740
+#define NES_CM_DEF_LOCAL_ID2 0xb807
+
+typedef u32 nes_addr_t;
+
+#define nes_cm_tsa_context nes_qp_context
+
+struct nes_qp;
+
+/* cm node transition states */
+enum nes_cm_node_state {
+ NES_CM_STATE_UNKNOWN,
+ NES_CM_STATE_INITED,
+ NES_CM_STATE_LISTENING,
+ NES_CM_STATE_SYN_RCVD,
+ NES_CM_STATE_SYN_SENT,
+ NES_CM_STATE_ONE_SIDE_ESTABLISHED,
+ NES_CM_STATE_ESTABLISHED,
+ NES_CM_STATE_ACCEPTING,
+ NES_CM_STATE_MPAREQ_SENT,
+ NES_CM_STATE_TSA,
+ NES_CM_STATE_FIN_WAIT1,
+ NES_CM_STATE_FIN_WAIT2,
+ NES_CM_STATE_CLOSE_WAIT,
+ NES_CM_STATE_TIME_WAIT,
+ NES_CM_STATE_LAST_ACK,
+ NES_CM_STATE_CLOSING,
+ NES_CM_STATE_CLOSED
+};
+
+/* type of nes connection */
+enum nes_cm_conn_type {
+ NES_CM_IWARP_CONN_TYPE,
+};
+
+/* CM context params */
+struct nes_cm_tcp_context {
+ u8 client;
+
+ u32 loc_seq_num;
+ u32 loc_ack_num;
+ u32 rem_ack_num;
+ u32 rcv_nxt;
+
+ u32 loc_id;
+ u32 rem_id;
+
+ u32 snd_wnd;
+ u32 max_snd_wnd;
+
+ u32 rcv_wnd;
+ u32 mss;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+
+ struct nes_cm_tsa_context tsa_cntxt;
+ struct timeval sent_ts;
+};
+
+
+enum nes_cm_listener_state {
+ NES_CM_LISTENER_PASSIVE_STATE = 1,
+ NES_CM_LISTENER_ACTIVE_STATE = 2,
+ NES_CM_LISTENER_EITHER_STATE = 3
+};
+
+struct nes_cm_listener {
+ struct list_head list;
+ u64 session_id;
+ struct nes_cm_core *cm_core;
+ u8 loc_mac[ETH_ALEN];
+ nes_addr_t loc_addr;
+ u16 loc_port;
+ struct iw_cm_id *cm_id;
+ enum nes_cm_conn_type conn_type;
+ atomic_t ref_count;
+ struct nes_vnic *nesvnic;
+ atomic_t pend_accepts_cnt;
+ int backlog;
+ enum nes_cm_listener_state listener_state;
+ u32 reused_node;
+};
+
+/* per connection node and node state information */
+struct nes_cm_node {
+ u64 session_id;
+ u32 hashkey;
+
+ nes_addr_t loc_addr, rem_addr;
+ u16 loc_port, rem_port;
+
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+
+ enum nes_cm_node_state state;
+ struct nes_cm_tcp_context tcp_cntxt;
+ struct nes_cm_core *cm_core;
+ struct sk_buff_head resend_list;
+ atomic_t ref_count;
+ struct net_device *netdev;
+
+ struct nes_cm_node *loopbackpartner;
+ struct list_head retrans_list;
+ spinlock_t retrans_list_lock;
+ struct list_head recv_list;
+ spinlock_t recv_list_lock;
+
+ int send_write0;
+ union {
+ struct ietf_mpa_frame mpa_frame;
+ u8 mpa_frame_buf[NES_CM_DEFAULT_MTU];
+ };
+ u16 mpa_frame_size;
+ struct iw_cm_id *cm_id;
+ struct list_head list;
+ int accelerated;
+ struct nes_cm_listener *listener;
+ enum nes_cm_conn_type conn_type;
+ struct nes_vnic *nesvnic;
+ int apbvt_set;
+ int accept_pend;
+};
+
+/* structure for client or CM to fill when making CM api calls. */
+/* - only need to set relevant data, based on op. */
+struct nes_cm_info {
+ union {
+ struct iw_cm_id *cm_id;
+ struct net_device *netdev;
+ };
+
+ u16 loc_port;
+ u16 rem_port;
+ nes_addr_t loc_addr;
+ nes_addr_t rem_addr;
+
+ enum nes_cm_conn_type conn_type;
+ int backlog;
+};
+
+/* CM event codes */
+enum nes_cm_event_type {
+ NES_CM_EVENT_UNKNOWN,
+ NES_CM_EVENT_ESTABLISHED,
+ NES_CM_EVENT_MPA_REQ,
+ NES_CM_EVENT_MPA_CONNECT,
+ NES_CM_EVENT_MPA_ACCEPT,
+ NES_CM_EVENT_MPA_ESTABLISHED,
+ NES_CM_EVENT_CONNECTED,
+ NES_CM_EVENT_CLOSED,
+ NES_CM_EVENT_RESET,
+ NES_CM_EVENT_DROPPED_PKT,
+ NES_CM_EVENT_CLOSE_IMMED,
+ NES_CM_EVENT_CLOSE_HARD,
+ NES_CM_EVENT_CLOSE_CLEAN,
+ NES_CM_EVENT_ABORTED,
+ NES_CM_EVENT_SEND_FIRST
+};
+
+/* event to post to CM event handler */
+struct nes_cm_event {
+ enum nes_cm_event_type type;
+
+ struct nes_cm_info cm_info;
+ struct work_struct event_work;
+ struct nes_cm_node *cm_node;
+};
+
+struct nes_cm_core {
+ enum nes_cm_node_state state;
+ atomic_t session_id;
+
+ atomic_t listen_node_cnt;
+ struct nes_cm_node listen_list;
+ spinlock_t listen_list_lock;
+
+ u32 mtu;
+ u32 free_tx_pkt_max;
+ u32 rx_pkt_posted;
+ struct sk_buff_head tx_free_list;
+ atomic_t ht_node_cnt;
+ struct list_head connected_nodes;
+ /* struct list_head hashtable[NES_CM_HASHTABLE_SIZE]; */
+ spinlock_t ht_lock;
+
+ struct timer_list tcp_timer;
+
+ struct nes_cm_ops *api;
+
+ int (*post_event)(struct nes_cm_event *event);
+ atomic_t events_posted;
+ struct workqueue_struct *event_wq;
+ struct workqueue_struct *disconn_wq;
+
+ atomic_t node_cnt;
+ u64 aborted_connects;
+ u32 options;
+
+ struct nes_cm_node *current_listen_node;
+};
+
+
+#define NES_CM_SET_PKT_SIZE (1 << 1)
+#define NES_CM_SET_FREE_PKT_Q_SIZE (1 << 2)
+
+/* CM ops/API for client interface */
+struct nes_cm_ops {
+ int (*accelerated)(struct nes_cm_core *, struct nes_cm_node *);
+ struct nes_cm_listener * (*listen)(struct nes_cm_core *, struct nes_vnic *,
+ struct nes_cm_info *);
+ int (*stop_listener)(struct nes_cm_core *, struct nes_cm_listener *);
+ struct nes_cm_node * (*connect)(struct nes_cm_core *,
+ struct nes_vnic *, struct ietf_mpa_frame *,
+ struct nes_cm_info *);
+ int (*close)(struct nes_cm_core *, struct nes_cm_node *);
+ int (*accept)(struct nes_cm_core *, struct ietf_mpa_frame *,
+ struct nes_cm_node *);
+ int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *,
+ struct nes_cm_node *);
+ int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *,
+ struct sk_buff *);
+ int (*destroy_cm_core)(struct nes_cm_core *);
+ int (*get)(struct nes_cm_core *);
+ int (*set)(struct nes_cm_core *, u32, u32);
+};
+
+
+int send_mpa_request(struct nes_cm_node *);
+struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
+ void *, u32, void *, u32, u8);
+int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
+ enum nes_timer_type, int, int);
+void nes_cm_timer_tick(unsigned long);
+int send_syn(struct nes_cm_node *, u32);
+int send_reset(struct nes_cm_node *);
+int send_ack(struct nes_cm_node *);
+int send_fin(struct nes_cm_node *, struct sk_buff *);
+struct sk_buff *get_free_pkt(struct nes_cm_node *);
+int process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *);
+
+struct nes_cm_node * mini_cm_connect(struct nes_cm_core *,
+ struct nes_vnic *, struct ietf_mpa_frame *, struct nes_cm_info *);
+int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
+int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
+int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
+int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *);
+struct nes_cm_core *mini_cm_alloc_core(struct nes_cm_info *);
+int mini_cm_dealloc_core(struct nes_cm_core *);
+int mini_cm_get(struct nes_cm_core *);
+int mini_cm_set(struct nes_cm_core *, u32, u32);
+
+int nes_cm_disconn(struct nes_qp *);
+void nes_disconnect_worker(struct work_struct *);
+int nes_cm_disconn_true(struct nes_qp *);
+int nes_disconnect(struct nes_qp *, int);
+
+int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
+int nes_reject(struct iw_cm_id *, const void *, u8);
+int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
+int nes_create_listen(struct iw_cm_id *, int);
+int nes_destroy_listen(struct iw_cm_id *);
+
+int nes_cm_recv(struct sk_buff *, struct net_device *);
+int nes_cm_start(void);
+int nes_cm_stop(void);
+
+/* CM event handler functions */
+void cm_event_connected(struct nes_cm_event *);
+void cm_event_connect_error(struct nes_cm_event *);
+void cm_event_reset(struct nes_cm_event *);
+void cm_event_mpa_req(struct nes_cm_event *);
+int nes_cm_post_event(struct nes_cm_event *);
+
+#endif /* NES_CM_H */
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
new file mode 100644
index 000000000000..da9daba8e668
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_context.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NES_CONTEXT_H
+#define NES_CONTEXT_H
+
+struct nes_qp_context {
+ __le32 misc;
+ __le32 cqs;
+ __le32 sq_addr_low;
+ __le32 sq_addr_high;
+ __le32 rq_addr_low;
+ __le32 rq_addr_high;
+ __le32 misc2;
+ __le16 tcpPorts[2];
+ __le32 ip0;
+ __le32 ip1;
+ __le32 ip2;
+ __le32 ip3;
+ __le32 mss;
+ __le32 arp_index_vlan;
+ __le32 tcp_state_flow_label;
+ __le32 pd_index_wscale;
+ __le32 keepalive;
+ u32 ts_recent;
+ u32 ts_age;
+ __le32 snd_nxt;
+ __le32 snd_wnd;
+ __le32 rcv_nxt;
+ __le32 rcv_wnd;
+ __le32 snd_max;
+ __le32 snd_una;
+ u32 srtt;
+ __le32 rttvar;
+ __le32 ssthresh;
+ __le32 cwnd;
+ __le32 snd_wl1;
+ __le32 snd_wl2;
+ __le32 max_snd_wnd;
+ __le32 ts_val_delta;
+ u32 retransmit;
+ u32 probe_cnt;
+ u32 hte_index;
+ __le32 q2_addr_low;
+ __le32 q2_addr_high;
+ __le32 ird_index;
+ u32 Rsvd3;
+ __le32 ird_ord_sizes;
+ u32 mrkr_offset;
+ __le32 aeq_token_low;
+ __le32 aeq_token_high;
+};
+
+/* QP Context Misc Field */
+
+#define NES_QPCONTEXT_MISC_IWARP_VER_MASK 0x00000003
+#define NES_QPCONTEXT_MISC_IWARP_VER_SHIFT 0
+#define NES_QPCONTEXT_MISC_EFB_SIZE_MASK 0x000000C0
+#define NES_QPCONTEXT_MISC_EFB_SIZE_SHIFT 6
+#define NES_QPCONTEXT_MISC_RQ_SIZE_MASK 0x00000300
+#define NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT 8
+#define NES_QPCONTEXT_MISC_SQ_SIZE_MASK 0x00000c00
+#define NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT 10
+#define NES_QPCONTEXT_MISC_PCI_FCN_MASK 0x00007000
+#define NES_QPCONTEXT_MISC_PCI_FCN_SHIFT 12
+#define NES_QPCONTEXT_MISC_DUP_ACKS_MASK 0x00070000
+#define NES_QPCONTEXT_MISC_DUP_ACKS_SHIFT 16
+
+enum nes_qp_context_misc_bits {
+ NES_QPCONTEXT_MISC_RX_WQE_SIZE = 0x00000004,
+ NES_QPCONTEXT_MISC_IPV4 = 0x00000008,
+ NES_QPCONTEXT_MISC_DO_NOT_FRAG = 0x00000010,
+ NES_QPCONTEXT_MISC_INSERT_VLAN = 0x00000020,
+ NES_QPCONTEXT_MISC_DROS = 0x00008000,
+ NES_QPCONTEXT_MISC_WSCALE = 0x00080000,
+ NES_QPCONTEXT_MISC_KEEPALIVE = 0x00100000,
+ NES_QPCONTEXT_MISC_TIMESTAMP = 0x00200000,
+ NES_QPCONTEXT_MISC_SACK = 0x00400000,
+ NES_QPCONTEXT_MISC_RDMA_WRITE_EN = 0x00800000,
+ NES_QPCONTEXT_MISC_RDMA_READ_EN = 0x01000000,
+ NES_QPCONTEXT_MISC_WBIND_EN = 0x10000000,
+ NES_QPCONTEXT_MISC_FAST_REGISTER_EN = 0x20000000,
+ NES_QPCONTEXT_MISC_PRIV_EN = 0x40000000,
+ NES_QPCONTEXT_MISC_NO_NAGLE = 0x80000000
+};
+
+enum nes_qp_acc_wq_sizes {
+ HCONTEXT_TSA_WQ_SIZE_4 = 0,
+ HCONTEXT_TSA_WQ_SIZE_32 = 1,
+ HCONTEXT_TSA_WQ_SIZE_128 = 2,
+ HCONTEXT_TSA_WQ_SIZE_512 = 3
+};
+
+/* QP Context Misc2 Fields */
+#define NES_QPCONTEXT_MISC2_TTL_MASK 0x000000ff
+#define NES_QPCONTEXT_MISC2_TTL_SHIFT 0
+#define NES_QPCONTEXT_MISC2_HOP_LIMIT_MASK 0x000000ff
+#define NES_QPCONTEXT_MISC2_HOP_LIMIT_SHIFT 0
+#define NES_QPCONTEXT_MISC2_LIMIT_MASK 0x00000300
+#define NES_QPCONTEXT_MISC2_LIMIT_SHIFT 8
+#define NES_QPCONTEXT_MISC2_NIC_INDEX_MASK 0x0000fc00
+#define NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT 10
+#define NES_QPCONTEXT_MISC2_SRC_IP_MASK 0x001f0000
+#define NES_QPCONTEXT_MISC2_SRC_IP_SHIFT 16
+#define NES_QPCONTEXT_MISC2_TOS_MASK 0xff000000
+#define NES_QPCONTEXT_MISC2_TOS_SHIFT 24
+#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_MASK 0xff000000
+#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_SHIFT 24
+
+/* QP Context Tcp State/Flow Label Fields */
+#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_MASK 0x000fffff
+#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_SHIFT 0
+#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_MASK 0xf0000000
+#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT 28
+
+enum nes_qp_tcp_state {
+ NES_QPCONTEXT_TCPSTATE_CLOSED = 1,
+ NES_QPCONTEXT_TCPSTATE_EST = 5,
+ NES_QPCONTEXT_TCPSTATE_TIME_WAIT = 11,
+};
+
+/* QP Context PD Index/wscale Fields */
+#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK 0x0000000f
+#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT 0
+#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK 0x00000f00
+#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT 8
+#define NES_QPCONTEXT_PDWSCALE_PDINDEX_MASK 0xffff0000
+#define NES_QPCONTEXT_PDWSCALE_PDINDEX_SHIFT 16
+
+/* QP Context Keepalive Fields */
+#define NES_QPCONTEXT_KEEPALIVE_DELTA_MASK 0x0000ffff
+#define NES_QPCONTEXT_KEEPALIVE_DELTA_SHIFT 0
+#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_MASK 0x00ff0000
+#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_SHIFT 16
+#define NES_QPCONTEXT_KEEPALIVE_INTV_MASK 0xff000000
+#define NES_QPCONTEXT_KEEPALIVE_INTV_SHIFT 24
+
+/* QP Context ORD/IRD Fields */
+#define NES_QPCONTEXT_ORDIRD_ORDSIZE_MASK 0x0000007f
+#define NES_QPCONTEXT_ORDIRD_ORDSIZE_SHIFT 0
+#define NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK 0x00030000
+#define NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT 16
+#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_MASK 0x30000000
+#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT 28
+
+enum nes_ord_ird_bits {
+ NES_QPCONTEXT_ORDIRD_WRPDU = 0x02000000,
+ NES_QPCONTEXT_ORDIRD_LSMM_PRESENT = 0x04000000,
+ NES_QPCONTEXT_ORDIRD_ALSMM = 0x08000000,
+ NES_QPCONTEXT_ORDIRD_AAH = 0x40000000,
+ NES_QPCONTEXT_ORDIRD_RNMC = 0x80000000
+};
+
+enum nes_iwarp_qp_state {
+ NES_QPCONTEXT_IWARP_STATE_NONEXIST = 0,
+ NES_QPCONTEXT_IWARP_STATE_IDLE = 1,
+ NES_QPCONTEXT_IWARP_STATE_RTS = 2,
+ NES_QPCONTEXT_IWARP_STATE_CLOSING = 3,
+ NES_QPCONTEXT_IWARP_STATE_TERMINATE = 5,
+ NES_QPCONTEXT_IWARP_STATE_ERROR = 6
+};
+
+
+#endif /* NES_CONTEXT_H */
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
new file mode 100644
index 000000000000..7c4c0fbf0abd
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -0,0 +1,3080 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+
+#include "nes.h"
+
+u32 crit_err_count = 0;
+u32 int_mod_timer_init;
+u32 int_mod_cq_depth_256;
+u32 int_mod_cq_depth_128;
+u32 int_mod_cq_depth_32;
+u32 int_mod_cq_depth_24;
+u32 int_mod_cq_depth_16;
+u32 int_mod_cq_depth_4;
+u32 int_mod_cq_depth_1;
+
+#include "nes_cm.h"
+
+
+#ifdef CONFIG_INFINIBAND_NES_DEBUG
+static unsigned char *nes_iwarp_state_str[] = {
+ "Non-Existant",
+ "Idle",
+ "RTS",
+ "Closing",
+ "RSVD1",
+ "Terminate",
+ "Error",
+ "RSVD2",
+};
+
+static unsigned char *nes_tcp_state_str[] = {
+ "Non-Existant",
+ "Closed",
+ "Listen",
+ "SYN Sent",
+ "SYN Rcvd",
+ "Established",
+ "Close Wait",
+ "FIN Wait 1",
+ "Closing",
+ "Last Ack",
+ "FIN Wait 2",
+ "Time Wait",
+ "RSVD1",
+ "RSVD2",
+ "RSVD3",
+ "RSVD4",
+};
+#endif
+
+
+/**
+ * nes_nic_init_timer_defaults
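+ * Set the shared interrupt moderation timer bounds and the CQ depth
+ * thresholds, using the jumbo-frame defaults when jumbomode is set.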
+ */
+void nes_nic_init_timer_defaults(struct nes_device *nesdev, u8 jumbomode)
+{
+ unsigned long flags;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
+
+ spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
+
+ shared_timer->timer_in_use_min = NES_NIC_FAST_TIMER_LOW;
+ shared_timer->timer_in_use_max = NES_NIC_FAST_TIMER_HIGH;
+ if (jumbomode) {
+ shared_timer->threshold_low = DEFAULT_JUMBO_NES_QL_LOW;
+ shared_timer->threshold_target = DEFAULT_JUMBO_NES_QL_TARGET;
+ shared_timer->threshold_high = DEFAULT_JUMBO_NES_QL_HIGH;
+ } else {
+ shared_timer->threshold_low = DEFAULT_NES_QL_LOW;
+ shared_timer->threshold_target = DEFAULT_NES_QL_TARGET;
+ shared_timer->threshold_high = DEFAULT_NES_QL_HIGH;
+ }
+
+ /* todo use netdev->mtu to set thresholds */
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+}
+
+
+/**
+ * nes_nic_init_timer
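+ * Program the periodic control register from the shared timer value,
+ * initializing the timer to NES_NIC_FAST_TIMER on first use.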
+ */
+static void nes_nic_init_timer(struct nes_device *nesdev)
+{
+ unsigned long flags;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
+
+ spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
+
+ if (shared_timer->timer_in_use_old == 0) {
+ nesdev->deepcq_count = 0;
+ shared_timer->timer_direction_upward = 0;
+ shared_timer->timer_direction_downward = 0;
+ shared_timer->timer_in_use = NES_NIC_FAST_TIMER;
+ shared_timer->timer_in_use_old = 0;
+
+ }
+ if (shared_timer->timer_in_use != shared_timer->timer_in_use_old) {
+ shared_timer->timer_in_use_old = shared_timer->timer_in_use;
+ nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
+ 0x80000000 | ((u32)(shared_timer->timer_in_use*8)));
+ }
+ /* todo use netdev->mtu to set thresholds */
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+}
+
+
+/**
+ * nes_nic_tune_timer
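+ * Adapt the shared interrupt moderation timer to the recent CQ depth:
+ * raise it gently while the queues stay shallow, lower it as they
+ * deepen, and clamp the result to the fast-timer bounds.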
+ */
+static void nes_nic_tune_timer(struct nes_device *nesdev)
+{
+ unsigned long flags;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
+ u16 cq_count = nesdev->currcq_count;
+
+ spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
+
+ if (shared_timer->cq_count_old < cq_count) {
+ if (cq_count > shared_timer->threshold_low)
+ shared_timer->cq_direction_downward=0;
+ }
+ if (shared_timer->cq_count_old >= cq_count)
+ shared_timer->cq_direction_downward++;
+ shared_timer->cq_count_old = cq_count;
+ if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) {
+ if (cq_count <= shared_timer->threshold_low) {
+ shared_timer->threshold_low = shared_timer->threshold_low/2;
+ shared_timer->cq_direction_downward=0;
+ nesdev->currcq_count = 0;
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+ return;
+ }
+ }
+
+ if (cq_count > 1) {
+ nesdev->deepcq_count += cq_count;
+ if (cq_count <= shared_timer->threshold_low) { /* increase timer gently */
+ shared_timer->timer_direction_upward++;
+ shared_timer->timer_direction_downward = 0;
+ } else if (cq_count <= shared_timer->threshold_target) { /* balanced */
+ shared_timer->timer_direction_upward = 0;
+ shared_timer->timer_direction_downward = 0;
+ } else if (cq_count <= shared_timer->threshold_high) { /* decrease timer gently */
+ shared_timer->timer_direction_downward++;
+ shared_timer->timer_direction_upward = 0;
+ } else if (cq_count <= (shared_timer->threshold_high) * 2) {
+ shared_timer->timer_in_use -= 2;
+ shared_timer->timer_direction_upward = 0;
+ shared_timer->timer_direction_downward++;
+ } else {
+ shared_timer->timer_in_use -= 4;
+ shared_timer->timer_direction_upward = 0;
+ shared_timer->timer_direction_downward++;
+ }
+
+ if (shared_timer->timer_direction_upward > 3 ) { /* using history */
+ shared_timer->timer_in_use += 3;
+ shared_timer->timer_direction_upward = 0;
+ shared_timer->timer_direction_downward = 0;
+ }
+ if (shared_timer->timer_direction_downward > 5) { /* using history */
+ shared_timer->timer_in_use -= 4 ;
+ shared_timer->timer_direction_downward = 0;
+ shared_timer->timer_direction_upward = 0;
+ }
+ }
+
+ /* boundary checking */
+ if (shared_timer->timer_in_use > NES_NIC_FAST_TIMER_HIGH)
+ shared_timer->timer_in_use = NES_NIC_FAST_TIMER_HIGH;
+ else if (shared_timer->timer_in_use < NES_NIC_FAST_TIMER_LOW) {
+ shared_timer->timer_in_use = NES_NIC_FAST_TIMER_LOW;
+ }
+
+ nesdev->currcq_count = 0;
+
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+}
+
+
+/**
+ * nes_init_adapter - initialize adapter
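+ * Reuse an existing nes_adapter that matches this PCI slot/bus, or
+ * reset the NE020, size its resources from hardware registers and
+ * allocate and populate a new adapter structure.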
+ */
+struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev)
+{
+ struct nes_adapter *nesadapter = NULL;
+ unsigned long num_pds;
+ u32 u32temp;
+ u32 port_count;
+ u16 max_rq_wrs;
+ u16 max_sq_wrs;
+ u32 max_mr;
+ u32 max_256pbl;
+ u32 max_4kpbl;
+ u32 max_qp;
+ u32 max_irrq;
+ u32 max_cq;
+ u32 hte_index_mask;
+ u32 adapter_size;
+ u32 arp_table_size;
+ u16 vendor_id;
+ u8 OneG_Mode;
+ u8 func_index;
+
+ /* search the list of existing adapters */
+ list_for_each_entry(nesadapter, &nes_adapter_list, list) {
+ nes_debug(NES_DBG_INIT, "Searching Adapter list for PCI devfn = 0x%X,"
+ " adapter PCI slot/bus = %u/%u, pci devices PCI slot/bus = %u/%u, .\n",
+ nesdev->pcidev->devfn,
+ PCI_SLOT(nesadapter->devfn),
+ nesadapter->bus_number,
+ PCI_SLOT(nesdev->pcidev->devfn),
+ nesdev->pcidev->bus->number );
+ if ((PCI_SLOT(nesadapter->devfn) == PCI_SLOT(nesdev->pcidev->devfn)) &&
+ (nesadapter->bus_number == nesdev->pcidev->bus->number)) {
+ nesadapter->ref_count++;
+ return nesadapter;
+ }
+ }
+
+ /* no adapter found */
+ num_pds = pci_resource_len(nesdev->pcidev, BAR_1) >> PAGE_SHIFT;
+ if ((hw_rev != NE020_REV) && (hw_rev != NE020_REV1)) {
+ nes_debug(NES_DBG_INIT, "NE020 driver detected unknown hardware revision 0x%x\n",
+ hw_rev);
+ return NULL;
+ }
+
+ nes_debug(NES_DBG_INIT, "Determine Soft Reset, QP_control=0x%x, CPU0=0x%x, CPU1=0x%x, CPU2=0x%x\n",
+ nes_read_indexed(nesdev, NES_IDX_QP_CONTROL + PCI_FUNC(nesdev->pcidev->devfn) * 8),
+ nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS),
+ nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 4),
+ nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 8));
+
+ nes_debug(NES_DBG_INIT, "Reset and init NE020\n");
+
+
+ if ((port_count = nes_reset_adapter_ne020(nesdev, &OneG_Mode)) == 0)
+ return NULL;
+ if (nes_init_serdes(nesdev, hw_rev, port_count, OneG_Mode))
+ return NULL;
+ nes_init_csr_ne020(nesdev, hw_rev, port_count);
+
+ max_qp = nes_read_indexed(nesdev, NES_IDX_QP_CTX_SIZE);
+ nes_debug(NES_DBG_INIT, "QP_CTX_SIZE=%u\n", max_qp);
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_QUAD_HASH_TABLE_SIZE);
+ if (max_qp > ((u32)1 << (u32temp & 0x001f))) {
+ nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to hash table size = 0x%08X\n",
+ max_qp, u32temp);
+ max_qp = (u32)1 << (u32temp & 0x001f);
+ }
+
+ hte_index_mask = ((u32)1 << ((u32temp & 0x001f)+1))-1;
+ nes_debug(NES_DBG_INIT, "Max QP = %u, hte_index_mask = 0x%08X.\n",
+ max_qp, hte_index_mask);
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_IRRQ_COUNT);
+
+ max_irrq = 1 << (u32temp & 0x001f);
+
+ if (max_qp > max_irrq) {
+ max_qp = max_irrq;
+ nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to Available Q1s.\n",
+ max_qp);
+ }
+
+ /* there should be no reason to allocate more pds than qps */
+ if (num_pds > max_qp)
+ num_pds = max_qp;
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MRT_SIZE);
+ max_mr = (u32)8192 << (u32temp & 0x7);
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_PBL_REGION_SIZE);
+ max_256pbl = (u32)1 << (u32temp & 0x0000001f);
+ max_4kpbl = (u32)1 << ((u32temp >> 16) & 0x0000001f);
+ max_cq = nes_read_indexed(nesdev, NES_IDX_CQ_CTX_SIZE);
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_ARP_CACHE_SIZE);
+ arp_table_size = 1 << u32temp;
+
+ adapter_size = (sizeof(struct nes_adapter) +
+ (sizeof(unsigned long)-1)) & (~(sizeof(unsigned long)-1));
+ adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
+ adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
+ adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
+ adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
+ adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
+ adapter_size += sizeof(struct nes_qp **) * max_qp;
+
+ /* allocate a new adapter struct */
+ nesadapter = kzalloc(adapter_size, GFP_KERNEL);
+ if (nesadapter == NULL) {
+ return NULL;
+ }
+
+ nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n",
+ nesadapter, (u32)sizeof(struct nes_adapter), adapter_size);
+
+ /* populate the new nesadapter */
+ nesadapter->devfn = nesdev->pcidev->devfn;
+ nesadapter->bus_number = nesdev->pcidev->bus->number;
+ nesadapter->ref_count = 1;
+ nesadapter->timer_int_req = 0xffff0000;
+ nesadapter->OneG_Mode = OneG_Mode;
+ nesadapter->doorbell_start = nesdev->doorbell_region;
+
+ /* nesadapter->tick_delta = clk_divisor; */
+ nesadapter->hw_rev = hw_rev;
+ nesadapter->port_count = port_count;
+
+ nesadapter->max_qp = max_qp;
+ nesadapter->hte_index_mask = hte_index_mask;
+ nesadapter->max_irrq = max_irrq;
+ nesadapter->max_mr = max_mr;
+ nesadapter->max_256pbl = max_256pbl - 1;
+ nesadapter->max_4kpbl = max_4kpbl - 1;
+ nesadapter->max_cq = max_cq;
+ nesadapter->free_256pbl = max_256pbl - 1;
+ nesadapter->free_4kpbl = max_4kpbl - 1;
+ nesadapter->max_pd = num_pds;
+ nesadapter->arp_table_size = arp_table_size;
+
+ nesadapter->et_pkt_rate_low = NES_TIMER_ENABLE_LIMIT;
+ if (nes_drv_opt & NES_DRV_OPT_DISABLE_INT_MOD) {
+ nesadapter->et_use_adaptive_rx_coalesce = 0;
+ nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
+ nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
+ } else {
+ nesadapter->et_use_adaptive_rx_coalesce = 1;
+ nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
+ nesadapter->et_rx_coalesce_usecs_irq = 0;
+ printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __FUNCTION__);
+ }
+ /* Setup and enable the periodic timer */
+ if (nesadapter->et_rx_coalesce_usecs_irq)
+ nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x80000000 |
+ ((u32)(nesadapter->et_rx_coalesce_usecs_irq * 8)));
+ else
+ nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x00000000);
+
+ nesadapter->base_pd = 1;
+
+ nesadapter->device_cap_flags =
+ IB_DEVICE_ZERO_STAG | IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW;
+
+ nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
+ [(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
+ nesadapter->allocated_cqs = &nesadapter->allocated_qps[BITS_TO_LONGS(max_qp)];
+ nesadapter->allocated_mrs = &nesadapter->allocated_cqs[BITS_TO_LONGS(max_cq)];
+ nesadapter->allocated_pds = &nesadapter->allocated_mrs[BITS_TO_LONGS(max_mr)];
+ nesadapter->allocated_arps = &nesadapter->allocated_pds[BITS_TO_LONGS(num_pds)];
+ nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
+
+
+ /* mark the usual suspect QPs and CQs as in use */
+ for (u32temp = 0; u32temp < NES_FIRST_QPN; u32temp++) {
+ set_bit(u32temp, nesadapter->allocated_qps);
+ set_bit(u32temp, nesadapter->allocated_cqs);
+ }
+
+ for (u32temp = 0; u32temp < 20; u32temp++)
+ set_bit(u32temp, nesadapter->allocated_pds);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_QP_MAX_CFG_SIZES);
+
+ max_rq_wrs = ((u32temp >> 8) & 3);
+ switch (max_rq_wrs) {
+ case 0:
+ max_rq_wrs = 4;
+ break;
+ case 1:
+ max_rq_wrs = 16;
+ break;
+ case 2:
+ max_rq_wrs = 32;
+ break;
+ case 3:
+ max_rq_wrs = 512;
+ break;
+ }
+
+ max_sq_wrs = (u32temp & 3);
+ switch (max_sq_wrs) {
+ case 0:
+ max_sq_wrs = 4;
+ break;
+ case 1:
+ max_sq_wrs = 16;
+ break;
+ case 2:
+ max_sq_wrs = 32;
+ break;
+ case 3:
+ max_sq_wrs = 512;
+ break;
+ }
+ nesadapter->max_qp_wr = min(max_rq_wrs, max_sq_wrs);
+ nesadapter->max_irrq_wr = (u32temp >> 16) & 3;
+
+ nesadapter->max_sge = 4;
+ nesadapter->max_cqe = 32767;
+
+ if (nes_read_eeprom_values(nesdev, nesadapter)) {
+ printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
+ kfree(nesadapter);
+ return NULL;
+ }
+
+ u32temp = nes_read_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG);
+ nes_write_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG,
+ (u32temp & 0xff000000) | (nesadapter->tcp_timer_core_clk_divisor & 0x00ffffff));
+
+ /* setup port configuration */
+ if (nesadapter->port_count == 1) {
+ u32temp = 0x00000000;
+ if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT)
+ nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000002);
+ else
+ nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
+ } else {
+ if (nesadapter->port_count == 2)
+ u32temp = 0x00000044;
+ else
+ u32temp = 0x000000e4;
+ nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
+ }
+
+ nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT, u32temp);
+ nes_debug(NES_DBG_INIT, "Probe time, LOG2PHY=%u\n",
+ nes_read_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT));
+
+ spin_lock_init(&nesadapter->resource_lock);
+ spin_lock_init(&nesadapter->phy_lock);
+ spin_lock_init(&nesadapter->pbl_lock);
+ spin_lock_init(&nesadapter->periodic_timer_lock);
+
+ INIT_LIST_HEAD(&nesadapter->nesvnic_list[0]);
+ INIT_LIST_HEAD(&nesadapter->nesvnic_list[1]);
+ INIT_LIST_HEAD(&nesadapter->nesvnic_list[2]);
+ INIT_LIST_HEAD(&nesadapter->nesvnic_list[3]);
+
+ if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) {
+ u32 pcs_control_status0, pcs_control_status1;
+ u32 reset_value;
+ u32 i = 0;
+ u32 int_cnt = 0;
+ u32 ext_cnt = 0;
+ unsigned long flags;
+ u32 j = 0;
+
+ pcs_control_status0 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0);
+ pcs_control_status1 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+
+ for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
+ pcs_control_status0 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0);
+ pcs_control_status1 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+ if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
+ || (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
+ int_cnt++;
+ msleep(1);
+ }
+ if (int_cnt > 1) {
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
+ mh_detected++;
+ reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+ reset_value |= 0x0000003d;
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
+
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
+ & 0x00000040) != 0x00000040) && (j++ < 5000));
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+
+ pcs_control_status0 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0);
+ pcs_control_status1 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+
+ for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
+ pcs_control_status0 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0);
+ pcs_control_status1 = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+ if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
+ || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) {
+ if (++ext_cnt > int_cnt) {
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1,
+ 0x0000F0C8);
+ mh_detected++;
+ reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+ reset_value |= 0x0000003d;
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
+
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
+ & 0x00000040) != 0x00000040) && (j++ < 5000));
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ break;
+ }
+ }
+ msleep(1);
+ }
+ }
+ }
+
+ if (nesadapter->hw_rev == NE020_REV) {
+ init_timer(&nesadapter->mh_timer);
+ nesadapter->mh_timer.function = nes_mh_fix;
+ nesadapter->mh_timer.expires = jiffies + (HZ/5); /* 200 ms */
+ nesadapter->mh_timer.data = (unsigned long)nesdev;
+ add_timer(&nesadapter->mh_timer);
+ } else {
+ nes_write32(nesdev->regs+NES_INTF_INT_STAT, 0x0f000000);
+ }
+
+ init_timer(&nesadapter->lc_timer);
+ nesadapter->lc_timer.function = nes_clc;
+ nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
+ nesadapter->lc_timer.data = (unsigned long)nesdev;
+ add_timer(&nesadapter->lc_timer);
+
+ list_add_tail(&nesadapter->list, &nes_adapter_list);
+
+ for (func_index = 0; func_index < 8; func_index++) {
+ pci_bus_read_config_word(nesdev->pcidev->bus,
+ PCI_DEVFN(PCI_SLOT(nesdev->pcidev->devfn),
+ func_index), 0, &vendor_id);
+ if (vendor_id == 0xffff)
+ break;
+ }
+ nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __FUNCTION__,
+ func_index, pci_name(nesdev->pcidev));
+ nesadapter->adapter_fcn_count = func_index;
+
+ return nesadapter;
+}
+
+
+/**
+ * nes_reset_adapter_ne020
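+ * Issue full and port soft resets as needed, wait for the SERDES
+ * blocks and the internal CPU to become ready, and return the port
+ * count (0 on failure).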
+ */
+unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
+{
+ u32 port_count;
+ u32 u32temp;
+ u32 i;
+
+ u32temp = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+ port_count = ((u32temp & 0x00000300) >> 8) + 1;
+ /* TODO: assuming that both SERDES are set the same for now */
+ *OneG_Mode = (u32temp & 0x00003c00) ? 0 : 1;
+ nes_debug(NES_DBG_INIT, "Initial Software Reset = 0x%08X, port_count=%u\n",
+ u32temp, port_count);
+ if (*OneG_Mode)
+ nes_debug(NES_DBG_INIT, "Running in 1G mode.\n");
+ u32temp &= 0xff00ffc0;
+ switch (port_count) {
+ case 1:
+ u32temp |= 0x00ee0000;
+ break;
+ case 2:
+ u32temp |= 0x00cc0000;
+ break;
+ case 4:
+ u32temp |= 0x00000000;
+ break;
+ default:
+ return 0;
+ }
+
+ /* check and do full reset if needed */
+ if (nes_read_indexed(nesdev, NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))) {
+ nes_debug(NES_DBG_INIT, "Issuing Full Soft reset = 0x%08X\n", u32temp | 0xd);
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd);
+
+ i = 0;
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
+ mdelay(1);
+ if (i >= 10000) {
+ nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
+ return 0;
+ }
+ }
+
+ /* port reset */
+ switch (port_count) {
+ case 1:
+ u32temp |= 0x00ee0010;
+ break;
+ case 2:
+ u32temp |= 0x00cc0030;
+ break;
+ case 4:
+ u32temp |= 0x00000030;
+ break;
+ }
+
+ nes_debug(NES_DBG_INIT, "Issuing Port Soft reset = 0x%08X\n", u32temp | 0xd);
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd);
+
+ i = 0;
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
+ mdelay(1);
+ if (i >= 10000) {
+ nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n");
+ return 0;
+ }
+
+ /* serdes 0 */
+ i = 0;
+ while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
+ & 0x0000000f)) != 0x0000000f) && i++ < 5000)
+ mdelay(1);
+ if (i >= 5000) {
+ nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp);
+ return 0;
+ }
+
+ /* serdes 1 */
+ if (port_count > 1) {
+ i = 0;
+ while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
+ & 0x0000000f)) != 0x0000000f) && i++ < 5000)
+ mdelay(1);
+ if (i >= 5000) {
+ nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp);
+ return 0;
+ }
+ }
+
+
+
+ i = 0;
+ while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
+ mdelay(1);
+ if (i >= 10000) {
+ printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
+ nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
+ return 0;
+ }
+
+ return port_count;
+}
+
+
+/**
+ * nes_init_serdes
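+ * Bring up SERDES 0 (and SERDES 1 on multi-port adapters) with
+ * register settings that depend on the hardware revision and on
+ * whether the adapter runs in 1G mode.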
+ */
+int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 OneG_Mode)
+{
+ int i;
+ u32 u32temp;
+
+ if (hw_rev != NE020_REV) {
+ /* init serdes 0 */
+
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ if (!OneG_Mode)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
+ if (port_count > 1) {
+ /* init serdes 1 */
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
+ if (!OneG_Mode)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
+ }
+ } else {
+ /* init serdes 0 */
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
+ i = 0;
+ while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
+ & 0x0000000f)) != 0x0000000f) && i++ < 5000)
+ mdelay(1);
+ if (i >= 5000) {
+ nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp);
+ return 1;
+ }
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000);
+ if (OneG_Mode)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222);
+ else
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
+
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
+ if (port_count > 1) {
+ /* init serdes 1 */
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x00000048);
+ i = 0;
+ while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
+ & 0x0000000f)) != 0x0000000f) && (i++ < 5000))
+ mdelay(1);
+ if (i >= 5000) {
+ printk("%s: Init: serdes 1 not ready, status=%x\n", __FUNCTION__, u32temp);
+ /* return 1; */
+ }
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x000bdef7);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE1, 0x9ce73000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE1, 0x0ff00000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET1, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS1, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL1, 0xf0002222);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000ff);
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * nes_init_csr_ne020
+ * Initialize registers for ne020 hardware
+ */
+void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
+{
+ u32 u32temp;
+
+ nes_debug(NES_DBG_INIT, "port_count=%d\n", port_count);
+
+ nes_write_indexed(nesdev, 0x000001E4, 0x00000007);
+ /* nes_write_indexed(nesdev, 0x000001E8, 0x000208C4); */
+ nes_write_indexed(nesdev, 0x000001E8, 0x00020874);
+ nes_write_indexed(nesdev, 0x000001D8, 0x00048002);
+ /* nes_write_indexed(nesdev, 0x000001D8, 0x0004B002); */
+ nes_write_indexed(nesdev, 0x000001FC, 0x00050005);
+ nes_write_indexed(nesdev, 0x00000600, 0x55555555);
+ nes_write_indexed(nesdev, 0x00000604, 0x55555555);
+
+ /* TODO: move these MAC register settings to NIC bringup */
+ nes_write_indexed(nesdev, 0x00002000, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002004, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002008, 0x0000FFFF);
+ nes_write_indexed(nesdev, 0x0000200C, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002010, 0x000003c1);
+ nes_write_indexed(nesdev, 0x0000201C, 0x75345678);
+ if (port_count > 1) {
+ nes_write_indexed(nesdev, 0x00002200, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002204, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002208, 0x0000FFFF);
+ nes_write_indexed(nesdev, 0x0000220C, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002210, 0x000003c1);
+ nes_write_indexed(nesdev, 0x0000221C, 0x75345678);
+ nes_write_indexed(nesdev, 0x00000908, 0x20000001);
+ }
+ if (port_count > 2) {
+ nes_write_indexed(nesdev, 0x00002400, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002404, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002408, 0x0000FFFF);
+ nes_write_indexed(nesdev, 0x0000240C, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002410, 0x000003c1);
+ nes_write_indexed(nesdev, 0x0000241C, 0x75345678);
+ nes_write_indexed(nesdev, 0x00000910, 0x20000001);
+
+ nes_write_indexed(nesdev, 0x00002600, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002604, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002608, 0x0000FFFF);
+ nes_write_indexed(nesdev, 0x0000260C, 0x00000001);
+ nes_write_indexed(nesdev, 0x00002610, 0x000003c1);
+ nes_write_indexed(nesdev, 0x0000261C, 0x75345678);
+ nes_write_indexed(nesdev, 0x00000918, 0x20000001);
+ }
+
+ nes_write_indexed(nesdev, 0x00005000, 0x00018000);
+ /* nes_write_indexed(nesdev, 0x00005000, 0x00010000); */
+ nes_write_indexed(nesdev, 0x00005004, 0x00020001);
+ nes_write_indexed(nesdev, 0x00005008, 0x1F1F1F1F);
+ nes_write_indexed(nesdev, 0x00005010, 0x1F1F1F1F);
+ nes_write_indexed(nesdev, 0x00005018, 0x1F1F1F1F);
+ nes_write_indexed(nesdev, 0x00005020, 0x1F1F1F1F);
+ nes_write_indexed(nesdev, 0x00006090, 0xFFFFFFFF);
+
+ /* TODO: move this to code, get from EEPROM */
+ nes_write_indexed(nesdev, 0x00000900, 0x20000001);
+ nes_write_indexed(nesdev, 0x000060C0, 0x0000028e);
+ nes_write_indexed(nesdev, 0x000060C8, 0x00000020);
+ nes_write_indexed(nesdev, 0x000001EC, 0x7b2625a0);
+ /* nes_write_indexed(nesdev, 0x000001EC, 0x5f2625a0); */
+
+ if (hw_rev != NE020_REV) {
+ u32temp = nes_read_indexed(nesdev, 0x000008e8);
+ u32temp |= 0x80000000;
+ nes_write_indexed(nesdev, 0x000008e8, u32temp);
+ u32temp = nes_read_indexed(nesdev, 0x000021f8);
+ u32temp &= 0x7fffffff;
+ u32temp |= 0x7fff0010;
+ nes_write_indexed(nesdev, 0x000021f8, u32temp);
+ }
+}
+
+
+/**
+ * nes_destroy_adapter - destroy the adapter structure
+ */
+void nes_destroy_adapter(struct nes_adapter *nesadapter)
+{
+ struct nes_adapter *tmp_adapter;
+
+ list_for_each_entry(tmp_adapter, &nes_adapter_list, list) {
+ nes_debug(NES_DBG_SHUTDOWN, "Nes Adapter list entry = 0x%p.\n",
+ tmp_adapter);
+ }
+
+ nesadapter->ref_count--;
+ if (!nesadapter->ref_count) {
+ if (nesadapter->hw_rev == NE020_REV) {
+ del_timer(&nesadapter->mh_timer);
+ }
+ del_timer(&nesadapter->lc_timer);
+
+ list_del(&nesadapter->list);
+ kfree(nesadapter);
+ }
+}
+
+
+/**
+ * nes_init_cqp
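+ * Allocate the DMA region holding the CQP SQ, CCQ, CEQs, AEQ and the
+ * CQP QP context, post the corresponding create WQEs and poll until
+ * the hardware reports them created.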
+ */
+int nes_init_cqp(struct nes_device *nesdev)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_cqp_qp_context *cqp_qp_context;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_hw_ceq *ceq;
+ struct nes_hw_ceq *nic_ceq;
+ struct nes_hw_aeq *aeq;
+ void *vmem;
+ dma_addr_t pmem;
+ u32 count=0;
+ u32 cqp_head;
+ u64 u64temp;
+ u32 u32temp;
+
+ /* allocate CQP memory */
+ /* Need to add max_cq to the aeq size once cq overflow checking is added back */
+ /* SQ is 512 byte aligned, others are 256 byte aligned */
+ nesdev->cqp_mem_size = 512 +
+ (sizeof(struct nes_hw_cqp_wqe) * NES_CQP_SQ_SIZE) +
+ (sizeof(struct nes_hw_cqe) * NES_CCQ_SIZE) +
+ max(((u32)sizeof(struct nes_hw_ceqe) * NES_CCEQ_SIZE), (u32)256) +
+ max(((u32)sizeof(struct nes_hw_ceqe) * NES_NIC_CEQ_SIZE), (u32)256) +
+ (sizeof(struct nes_hw_aeqe) * nesadapter->max_qp) +
+ sizeof(struct nes_hw_cqp_qp_context);
+
+ nesdev->cqp_vbase = pci_alloc_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
+ &nesdev->cqp_pbase);
+ if (!nesdev->cqp_vbase) {
+ nes_debug(NES_DBG_INIT, "Unable to allocate memory for host descriptor rings\n");
+ return -ENOMEM;
+ }
+ memset(nesdev->cqp_vbase, 0, nesdev->cqp_mem_size);
+
+ /* Allocate twice as many CQP requests as the SQ size */
+ nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) *
+ 2 * NES_CQP_SQ_SIZE, GFP_KERNEL);
+ if (nesdev->nes_cqp_requests == NULL) {
+ nes_debug(NES_DBG_INIT, "Unable to allocate memory CQP request entries.\n");
+ pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase,
+ nesdev->cqp.sq_pbase);
+ return -ENOMEM;
+ }
+
+ nes_debug(NES_DBG_INIT, "Allocated CQP structures at %p (phys = %016lX), size = %u.\n",
+ nesdev->cqp_vbase, (unsigned long)nesdev->cqp_pbase, nesdev->cqp_mem_size);
+
+ spin_lock_init(&nesdev->cqp.lock);
+ init_waitqueue_head(&nesdev->cqp.waitq);
+
+ /* Setup Various Structures */
+ vmem = (void *)(((unsigned long)nesdev->cqp_vbase + (512 - 1)) &
+ ~(unsigned long)(512 - 1));
+ pmem = (dma_addr_t)(((unsigned long long)nesdev->cqp_pbase + (512 - 1)) &
+ ~(unsigned long long)(512 - 1));
+
+ nesdev->cqp.sq_vbase = vmem;
+ nesdev->cqp.sq_pbase = pmem;
+ nesdev->cqp.sq_size = NES_CQP_SQ_SIZE;
+ nesdev->cqp.sq_head = 0;
+ nesdev->cqp.sq_tail = 0;
+ nesdev->cqp.qp_id = PCI_FUNC(nesdev->pcidev->devfn);
+
+ vmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size);
+ pmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size);
+
+ nesdev->ccq.cq_vbase = vmem;
+ nesdev->ccq.cq_pbase = pmem;
+ nesdev->ccq.cq_size = NES_CCQ_SIZE;
+ nesdev->ccq.cq_head = 0;
+ nesdev->ccq.ce_handler = nes_cqp_ce_handler;
+ nesdev->ccq.cq_number = PCI_FUNC(nesdev->pcidev->devfn);
+
+ vmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size);
+ pmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size);
+
+ nesdev->ceq_index = PCI_FUNC(nesdev->pcidev->devfn);
+ ceq = &nesadapter->ceq[nesdev->ceq_index];
+ ceq->ceq_vbase = vmem;
+ ceq->ceq_pbase = pmem;
+ ceq->ceq_size = NES_CCEQ_SIZE;
+ ceq->ceq_head = 0;
+
+ vmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256);
+ pmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256);
+
+ nesdev->nic_ceq_index = PCI_FUNC(nesdev->pcidev->devfn) + 8;
+ nic_ceq = &nesadapter->ceq[nesdev->nic_ceq_index];
+ nic_ceq->ceq_vbase = vmem;
+ nic_ceq->ceq_pbase = pmem;
+ nic_ceq->ceq_size = NES_NIC_CEQ_SIZE;
+ nic_ceq->ceq_head = 0;
+
+ vmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256);
+ pmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256);
+
+ aeq = &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)];
+ aeq->aeq_vbase = vmem;
+ aeq->aeq_pbase = pmem;
+ aeq->aeq_size = nesadapter->max_qp;
+ aeq->aeq_head = 0;
+
+ /* Setup QP Context */
+ vmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size);
+ pmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size);
+
+ cqp_qp_context = vmem;
+ cqp_qp_context->context_words[0] =
+ cpu_to_le32((PCI_FUNC(nesdev->pcidev->devfn) << 12) + (2 << 10));
+ cqp_qp_context->context_words[1] = 0;
+ cqp_qp_context->context_words[2] = cpu_to_le32((u32)nesdev->cqp.sq_pbase);
+ cqp_qp_context->context_words[3] = cpu_to_le32(((u64)nesdev->cqp.sq_pbase) >> 32);
+
+
+ /* Write the address to Create CQP */
+ if ((sizeof(dma_addr_t) > 4)) {
+ nes_write_indexed(nesdev,
+ NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8),
+ ((u64)pmem) >> 32);
+ } else {
+ nes_write_indexed(nesdev,
+ NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8), 0);
+ }
+ nes_write_indexed(nesdev,
+ NES_IDX_CREATE_CQP_LOW + (PCI_FUNC(nesdev->pcidev->devfn) * 8),
+ (u32)pmem);
+
+ INIT_LIST_HEAD(&nesdev->cqp_avail_reqs);
+ INIT_LIST_HEAD(&nesdev->cqp_pending_reqs);
+
+ for (count = 0; count < 2*NES_CQP_SQ_SIZE; count++) {
+ init_waitqueue_head(&nesdev->nes_cqp_requests[count].waitq);
+ list_add_tail(&nesdev->nes_cqp_requests[count].list, &nesdev->cqp_avail_reqs);
+ }
+
+ /* Write Create CCQ WQE */
+ cqp_head = nesdev->cqp.sq_head++;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
+ NES_CQP_CQ_CHK_OVERFLOW | ((u32)nesdev->ccq.cq_size << 16)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ (nesdev->ccq.cq_number |
+ ((u32)nesdev->ceq_index << 16)));
+ u64temp = (u64)nesdev->ccq.cq_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
+ u64temp = (unsigned long)&nesdev->ccq;
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] =
+ cpu_to_le32((u32)(u64temp >> 1));
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
+ cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
+
+ /* Write Create CEQ WQE */
+ cqp_head = nesdev->cqp.sq_head++;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_CREATE_CEQ + ((u32)nesdev->ceq_index << 8)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, ceq->ceq_size);
+ u64temp = (u64)ceq->ceq_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+
+ /* Write Create AEQ WQE */
+ cqp_head = nesdev->cqp.sq_head++;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_CREATE_AEQ + ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX, aeq->aeq_size);
+ u64temp = (u64)aeq->aeq_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+
+ /* Write Create NIC CEQ WQE */
+ cqp_head = nesdev->cqp.sq_head++;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_CREATE_CEQ + ((u32)nesdev->nic_ceq_index << 8)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, nic_ceq->ceq_size);
+ u64temp = (u64)nic_ceq->ceq_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+
+ /* Poll until CCQP done */
+ count = 0;
+ do {
+ if (count++ > 1000) {
+ printk(KERN_ERR PFX "Error creating CQP\n");
+ pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
+ nesdev->cqp_vbase, nesdev->cqp_pbase);
+ return -1;
+ }
+ udelay(10);
+ } while (!(nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn) * 8)) & (1 << 8)));
+
+ nes_debug(NES_DBG_INIT, "CQP Status = 0x%08X\n", nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
+
+ u32temp = 0x04800000;
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, u32temp | nesdev->cqp.qp_id);
+
+ /* wait for the CCQ, CEQ, and AEQ to get created */
+ count = 0;
+ do {
+ if (count++ > 1000) {
+ printk(KERN_ERR PFX "Error creating CCQ, CEQ, and AEQ\n");
+ pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
+ nesdev->cqp_vbase, nesdev->cqp_pbase);
+ return -1;
+ }
+ udelay(10);
+ } while (((nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15<<8)) != (15<<8)));
+
+ /* dump the QP status value */
+ nes_debug(NES_DBG_INIT, "QP Status = 0x%08X\n", nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
+
+ nesdev->cqp.sq_tail++;
+
+ return 0;
+}
+
+
+/**
+ * nes_destroy_cqp
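+ * Post destroy WQEs for the AEQ, CEQs, CCQ and the CQP itself, wait
+ * for the hardware to tear them down, then free the CQP request pool
+ * and the control structure memory.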
+ */
+int nes_destroy_cqp(struct nes_device *nesdev)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ u32 count = 0;
+ u32 cqp_head;
+ unsigned long flags;
+
+ do {
+ if (count++ > 1000)
+ break;
+ udelay(10);
+ } while (!(nesdev->cqp.sq_head == nesdev->cqp.sq_tail));
+
+ /* Reset CCQ */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_RESET |
+ nesdev->ccq.cq_number);
+
+ /* Disable device interrupts */
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff);
+
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+
+ /* Destroy the AEQ */
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_AEQ |
+ ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = 0;
+
+ /* Destroy the NIC CEQ */
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ |
+ ((u32)nesdev->nic_ceq_index << 8));
+
+ /* Destroy the CEQ */
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ |
+ (nesdev->ceq_index << 8));
+
+ /* Destroy the CCQ */
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CQ);
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->ccq.cq_number |
+ ((u32)nesdev->ceq_index << 16));
+
+ /* Destroy CQP */
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_QP |
+ NES_CQP_QP_TYPE_CQP);
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->cqp.qp_id);
+
+ barrier();
+ /* Ring doorbell (5 WQEs) */
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x05800000 | nesdev->cqp.qp_id);
+
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+
+ /* wait for the CCQ, CEQ, and AEQ to get destroyed */
+ count = 0;
+ do {
+ if (count++ > 1000) {
+ printk(KERN_ERR PFX "Function%d: Error destroying CCQ, CEQ, and AEQ\n",
+ PCI_FUNC(nesdev->pcidev->devfn));
+ break;
+ }
+ udelay(10);
+ } while (((nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15 << 8)) != 0));
+
+ /* dump the QP status value */
+ nes_debug(NES_DBG_SHUTDOWN, "Function%d: QP Status = 0x%08X\n",
+ PCI_FUNC(nesdev->pcidev->devfn),
+ nes_read_indexed(nesdev,
+ NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
+
+ kfree(nesdev->nes_cqp_requests);
+
+ /* Free the control structures */
+ pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase,
+ nesdev->cqp.sq_pbase);
+
+ return 0;
+}
+
+
+/**
+ * nes_init_phy
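+ * For 1G PHYs: program the MDC configuration, reset the PHY, enable
+ * flow control and unmask interrupts. For 10G IRIS PHYs only the
+ * MDIO operation is set up.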
+ */
+int nes_init_phy(struct nes_device *nesdev)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 counter = 0;
+ u32 mac_index = nesdev->mac_index;
+ u32 tx_config;
+ u16 phy_data;
+
+ if (nesadapter->OneG_Mode) {
+ nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
+ if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
+ printk(PFX "%s: Programming mdc config for 1G\n", __FUNCTION__);
+ tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ tx_config |= 0x04;
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+ }
+
+ nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 1 phy address %u = 0x%X.\n",
+ nesadapter->phy_index[mac_index], phy_data);
+ nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index], 0xb000);
+
+ /* Reset the PHY */
+ nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], 0x8000);
+ udelay(100);
+ counter = 0;
+ do {
+ nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data);
+ if (counter++ > 100) break;
+ } while (phy_data & 0x8000);
+
+ /* Setting no phy loopback */
+ phy_data &= 0xbfff;
+ phy_data |= 0x1140;
+ nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data);
+ nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 0x17, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x17 = 0x%X.\n", phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 0x1e, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x1e = 0x%X.\n", phy_data);
+
+ /* Setting the interrupt mask */
+ nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data);
+ nes_write_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], 0xffee);
+
+ nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data);
+
+ /* turning on flow control */
+ nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data);
+ nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index],
+ (phy_data & ~(0x03E0)) | 0xc00);
+ /* nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index],
+ phy_data | 0xc00); */
+ nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data);
+ /* Clear Half duplex */
+ nes_write_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index],
+ phy_data & ~(0x0100));
+ nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
+ nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data | 0x0300);
+ } else {
+ if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) {
+ /* setup 10G MDIO operation */
+ tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ tx_config |= 0x14;
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * nes_replenish_nic_rq
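+ * Refill the NIC receive queue with newly allocated skbs, ringing the
+ * WQE doorbell in batches; if allocation fails, arm rq_wqes_timer to
+ * retry the replenish later.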
+ */
+static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
+{
+ unsigned long flags;
+ dma_addr_t bus_address;
+ struct sk_buff *skb;
+ struct nes_hw_nic_rq_wqe *nic_rqe;
+ struct nes_hw_nic *nesnic;
+ struct nes_device *nesdev;
+ u32 rx_wqes_posted = 0;
+
+ nesnic = &nesvnic->nic;
+ nesdev = nesvnic->nesdev;
+ spin_lock_irqsave(&nesnic->rq_lock, flags);
+ if (nesnic->replenishing_rq !=0) {
+ if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) &&
+ (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) {
+ atomic_set(&nesvnic->rx_skb_timer_running, 1);
+ spin_unlock_irqrestore(&nesnic->rq_lock, flags);
+ nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */
+ add_timer(&nesvnic->rq_wqes_timer);
+ } else
+ spin_unlock_irqrestore(&nesnic->rq_lock, flags);
+ return;
+ }
+ nesnic->replenishing_rq = 1;
+ spin_unlock_irqrestore(&nesnic->rq_lock, flags);
+ do {
+ skb = dev_alloc_skb(nesvnic->max_frame_size);
+ if (skb) {
+ skb->dev = nesvnic->netdev;
+
+ bus_address = pci_map_single(nesdev->pcidev,
+ skb->data, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+
+ nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_head];
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] =
+ cpu_to_le32(nesvnic->max_frame_size);
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] =
+ cpu_to_le32((u32)bus_address);
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] =
+ cpu_to_le32((u32)((u64)bus_address >> 32));
+ nesnic->rx_skb[nesnic->rq_head] = skb;
+ nesnic->rq_head++;
+ nesnic->rq_head &= nesnic->rq_size - 1;
+ atomic_dec(&nesvnic->rx_skbs_needed);
+ barrier();
+ if (++rx_wqes_posted == 255) {
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id);
+ rx_wqes_posted = 0;
+ }
+ } else {
+ spin_lock_irqsave(&nesnic->rq_lock, flags);
+ if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) &&
+ (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) {
+ atomic_set(&nesvnic->rx_skb_timer_running, 1);
+ spin_unlock_irqrestore(&nesnic->rq_lock, flags);
+ nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */
+ add_timer(&nesvnic->rq_wqes_timer);
+ } else
+ spin_unlock_irqrestore(&nesnic->rq_lock, flags);
+ break;
+ }
+ } while (atomic_read(&nesvnic->rx_skbs_needed));
+ barrier();
+ if (rx_wqes_posted)
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id);
+ nesnic->replenishing_rq = 0;
+}
+
+
+/**
+ * nes_rq_wqes_timeout
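+ * Timer callback that retries RQ replenishment after a previous
+ * skb allocation failure.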
+ */
+static void nes_rq_wqes_timeout(unsigned long parm)
+{
+ struct nes_vnic *nesvnic = (struct nes_vnic *)parm;
+ printk("%s: Timer fired.\n", __FUNCTION__);
+ atomic_set(&nesvnic->rx_skb_timer_running, 0);
+ if (atomic_read(&nesvnic->rx_skbs_needed))
+ nes_replenish_nic_rq(nesvnic);
+}
+
+
+/**
+ * nes_init_nic_qp
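+ * Allocate and initialize the NIC QP resources (first fragment
+ * buffers, SQ, RQ, CQ and QP context) and submit the corresponding
+ * create requests to the CQP.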
+ */
+int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_hw_nic_sq_wqe *nic_sqe;
+ struct nes_hw_nic_qp_context *nic_context;
+ struct sk_buff *skb;
+ struct nes_hw_nic_rq_wqe *nic_rqe;
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ unsigned long flags;
+ void *vmem;
+ dma_addr_t pmem;
+ u64 u64temp;
+ int ret;
+ u32 cqp_head;
+ u32 counter;
+ u32 wqe_count;
+ u8 jumbomode=0;
+
+ /* Allocate fragment, SQ, RQ, and CQ; Reuse CEQ based on the PCI function */
+ nesvnic->nic_mem_size = 256 +
+ (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag)) +
+ (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe)) +
+ (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe)) +
+ (NES_NIC_WQ_SIZE * 2 * sizeof(struct nes_hw_nic_cqe)) +
+ sizeof(struct nes_hw_nic_qp_context);
+
+ nesvnic->nic_vbase = pci_alloc_consistent(nesdev->pcidev, nesvnic->nic_mem_size,
+ &nesvnic->nic_pbase);
+ if (!nesvnic->nic_vbase) {
+ nes_debug(NES_DBG_INIT, "Unable to allocate memory for NIC host descriptor rings\n");
+ return -ENOMEM;
+ }
+ memset(nesvnic->nic_vbase, 0, nesvnic->nic_mem_size);
+ nes_debug(NES_DBG_INIT, "Allocated NIC QP structures at %p (phys = %016lX), size = %u.\n",
+ nesvnic->nic_vbase, (unsigned long)nesvnic->nic_pbase, nesvnic->nic_mem_size);
+
+ vmem = (void *)(((unsigned long)nesvnic->nic_vbase + (256 - 1)) &
+ ~(unsigned long)(256 - 1));
+ pmem = (dma_addr_t)(((unsigned long long)nesvnic->nic_pbase + (256 - 1)) &
+ ~(unsigned long long)(256 - 1));
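+ /* Round the shared allocation up to a 256-byte boundary before carving out the rings */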
+
+ /* Setup the first Fragment buffers */
+ nesvnic->nic.first_frag_vbase = vmem;
+
+ for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) {
+ nesvnic->nic.frag_paddr[counter] = pmem;
+ pmem += sizeof(struct nes_first_frag);
+ }
+
+ /* setup the SQ */
+ vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag));
+
+ nesvnic->nic.sq_vbase = (void *)vmem;
+ nesvnic->nic.sq_pbase = pmem;
+ nesvnic->nic.sq_head = 0;
+ nesvnic->nic.sq_tail = 0;
+ nesvnic->nic.sq_size = NES_NIC_WQ_SIZE;
+ for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) {
+ nic_sqe = &nesvnic->nic.sq_vbase[counter];
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_MISC_IDX] =
+ cpu_to_le32(NES_NIC_SQ_WQE_DISABLE_CHKSUM |
+ NES_NIC_SQ_WQE_COMPLETION);
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX] =
+ cpu_to_le32((u32)NES_FIRST_FRAG_SIZE << 16);
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX] =
+ cpu_to_le32((u32)nesvnic->nic.frag_paddr[counter]);
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX] =
+ cpu_to_le32((u32)((u64)nesvnic->nic.frag_paddr[counter] >> 32));
+ }
+
+ nesvnic->get_cqp_request = nes_get_cqp_request;
+ nesvnic->post_cqp_request = nes_post_cqp_request;
+ nesvnic->mcrq_mcast_filter = NULL;
+
+ spin_lock_init(&nesvnic->nic.sq_lock);
+ spin_lock_init(&nesvnic->nic.rq_lock);
+
+ /* setup the RQ */
+ vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe));
+ pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe));
+
+
+ nesvnic->nic.rq_vbase = vmem;
+ nesvnic->nic.rq_pbase = pmem;
+ nesvnic->nic.rq_head = 0;
+ nesvnic->nic.rq_tail = 0;
+ nesvnic->nic.rq_size = NES_NIC_WQ_SIZE;
+
+ /* setup the CQ */
+ vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe));
+ pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe));
+
+ if (nesdev->nesadapter->netdev_count > 2)
+ nesvnic->mcrq_qp_id = nesvnic->nic_index + 32;
+ else
+ nesvnic->mcrq_qp_id = nesvnic->nic.qp_id + 4;
+
+ nesvnic->nic_cq.cq_vbase = vmem;
+ nesvnic->nic_cq.cq_pbase = pmem;
+ nesvnic->nic_cq.cq_head = 0;
+ nesvnic->nic_cq.cq_size = NES_NIC_WQ_SIZE * 2;
+
+ nesvnic->nic_cq.ce_handler = nes_nic_napi_ce_handler;
+
+ /* Send CreateCQ request to CQP */
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ cqp_head = nesdev->cqp.sq_head;
+
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
+ NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
+ ((u32)nesvnic->nic_cq.cq_size << 16));
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(
+ nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16));
+ u64temp = (u64)nesvnic->nic_cq.cq_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
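+ /* Store the driver's CQ pointer shifted right by one bit; nes_process_ceq() shifts it back */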
+ u64temp = (unsigned long)&nesvnic->nic_cq;
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1));
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
+ cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ /* Send CreateQP request to CQP */
+ nic_context = (void *)(&nesvnic->nic_cq.cq_vbase[nesvnic->nic_cq.cq_size]);
+ nic_context->context_words[NES_NIC_CTX_MISC_IDX] =
+ cpu_to_le32((u32)NES_NIC_CTX_SIZE |
+ ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12));
+ nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n",
+ nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE),
+ nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE));
+ if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0) {
+ nic_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE);
+ }
+
+ u64temp = (u64)nesvnic->nic.sq_pbase;
+ nic_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
+ nic_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
+ u64temp = (u64)nesvnic->nic.rq_pbase;
+ nic_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
+ nic_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
+ NES_CQP_QP_TYPE_NIC);
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesvnic->nic.qp_id);
+ u64temp = (u64)nesvnic->nic_cq.cq_pbase +
+ (nesvnic->nic_cq.cq_size * sizeof(struct nes_hw_nic_cqe));
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
+
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+ nesdev->cqp.sq_head = cqp_head;
+
+ barrier();
+
+ /* Ring doorbell (2 WQEs) */
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
+
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ nes_debug(NES_DBG_INIT, "Waiting for create NIC QP%u to complete.\n",
+ nesvnic->nic.qp_id);
+
+ ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_INIT, "Create NIC QP%u completed, wait_event_timeout ret = %u.\n",
+ nesvnic->nic.qp_id, ret);
+ if (!ret) {
+ nes_debug(NES_DBG_INIT, "NIC QP%u create timeout expired\n", nesvnic->nic.qp_id);
+ pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase,
+ nesvnic->nic_pbase);
+ return -EIO;
+ }
+
+ /* Populate the RQ */
+ for (counter = 0; counter < (NES_NIC_WQ_SIZE - 1); counter++) {
+ skb = dev_alloc_skb(nesvnic->max_frame_size);
+ if (!skb) {
+ nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name);
+
+ nes_destroy_nic_qp(nesvnic);
+ return -ENOMEM;
+ }
+
+ skb->dev = netdev;
+
+ pmem = pci_map_single(nesdev->pcidev, skb->data,
+ nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+
+ nic_rqe = &nesvnic->nic.rq_vbase[counter];
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size);
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
+ nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
+ nesvnic->nic.rx_skb[counter] = skb;
+ }
+
+ wqe_count = NES_NIC_WQ_SIZE - 1;
+ nesvnic->nic.rq_head = wqe_count;
+ barrier();
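+ /* Post the initial RQ WQEs to the hardware, at most 255 per doorbell write */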
+ do {
+ counter = min(wqe_count, ((u32)255));
+ wqe_count -= counter;
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter << 24) | nesvnic->nic.qp_id);
+ } while (wqe_count);
+ init_timer(&nesvnic->rq_wqes_timer);
+ nesvnic->rq_wqes_timer.function = nes_rq_wqes_timeout;
+ nesvnic->rq_wqes_timer.data = (unsigned long)nesvnic;
+ nes_debug(NES_DBG_INIT, "NAPI support Enabled\n");
+
+ if (nesdev->nesadapter->et_use_adaptive_rx_coalesce) {
+ nes_nic_init_timer(nesdev);
+ if (netdev->mtu > 1500)
+ jumbomode = 1;
+ nes_nic_init_timer_defaults(nesdev, jumbomode);
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_destroy_nic_qp - free outstanding receive buffers and destroy the NIC QP and CQ via the CQP
+ */
+void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
+{
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_hw_nic_rq_wqe *nic_rqe;
+ u64 wqe_frag;
+ u32 cqp_head;
+ unsigned long flags;
+ int ret;
+
+ /* Free remaining NIC receive buffers */
+ while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) {
+ nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail];
+ wqe_frag = (u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
+ wqe_frag |= ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32;
+ pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag,
+ nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]);
+ nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1);
+ }
+
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+
+ /* Destroy NIC QP */
+ cqp_head = nesdev->cqp.sq_head;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ nesvnic->nic.qp_id);
+
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+
+ /* Destroy NIC CQ */
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ (NES_CQP_DESTROY_CQ | ((u32)nesvnic->nic_cq.cq_size << 16)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ (nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16)));
+
+ if (++cqp_head >= nesdev->cqp.sq_size)
+ cqp_head = 0;
+
+ nesdev->cqp.sq_head = cqp_head;
+ barrier();
+
+ /* Ring doorbell (2 WQEs) */
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
+
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u,"
+ " cqp.sq_tail=%u, cqp.sq_size=%u\n",
+ cqp_head, nesdev->cqp.sq_head,
+ nesdev->cqp.sq_tail, nesdev->cqp.sq_size);
+
+ ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
+ NES_EVENT_TIMEOUT);
+
+ nes_debug(NES_DBG_SHUTDOWN, "Destroy NIC QP returned, wait_event_timeout ret = %u, cqp_head=%u,"
+ " cqp.sq_head=%u, cqp.sq_tail=%u\n",
+ ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
+ if (!ret) {
+ nes_debug(NES_DBG_SHUTDOWN, "NIC QP%u destroy timeout expired\n",
+ nesvnic->nic.qp_id);
+ }
+
+ pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase,
+ nesvnic->nic_pbase);
+}
+
+/**
+ * nes_napi_isr - interrupt handler for NAPI mode; handles NIC completion interrupts directly and leaves the rest to the DPC
+ */
+int nes_napi_isr(struct nes_device *nesdev)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 int_stat;
+
+ if (nesdev->napi_isr_ran) {
+ /* interrupt status has already been read in ISR */
+ int_stat = nesdev->int_stat;
+ } else {
+ int_stat = nes_read32(nesdev->regs + NES_INT_STAT);
+ nesdev->int_stat = int_stat;
+ nesdev->napi_isr_ran = 1;
+ }
+
+ int_stat &= nesdev->int_req;
+ /* if only NIC completion interrupts are pending, handle them here; otherwise leave them for the DPC */
+ if ((int_stat) && ((int_stat & 0x0000ff00) == int_stat)) {
+ nesdev->napi_isr_ran = 0;
+ nes_write32(nesdev->regs+NES_INT_STAT,
+ (int_stat &
+ ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)));
+
+ /* Process the CEQs */
+ nes_process_ceq(nesdev, &nesdev->nesadapter->ceq[nesdev->nic_ceq_index]);
+
+ if (unlikely((((nesadapter->et_rx_coalesce_usecs_irq) &&
+ (!nesadapter->et_use_adaptive_rx_coalesce)) ||
+ ((nesadapter->et_use_adaptive_rx_coalesce) &&
+ (nesdev->deepcq_count > nesadapter->et_pkt_rate_low)))) ) {
+ if ((nesdev->int_req & NES_INT_TIMER) == 0) {
+ /* Enable Periodic timer interrupts */
+ nesdev->int_req |= NES_INT_TIMER;
+ /* ack any pending periodic timer interrupts so we don't get an immediate interrupt */
+ /* TODO: need to also ack other unused periodic timer values, get from nesadapter */
+ nes_write32(nesdev->regs+NES_TIMER_STAT,
+ nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
+ nes_write32(nesdev->regs+NES_INTF_INT_MASK,
+ ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER));
+ }
+
+ if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) {
+ nes_nic_init_timer(nesdev);
+ }
+ /* Enable interrupts, except CEQs */
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
+ } else {
+ /* Enable interrupts, make sure timer is off */
+ nesdev->int_req &= ~NES_INT_TIMER;
+ nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
+ nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
+ nesadapter->tune_timer.timer_in_use_old = 0;
+ }
+ nesdev->deepcq_count = 0;
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+
+/**
+ * nes_dpc - deferred interrupt processing for CEQ, AEQ, MAC, and interface interrupts
+ */
+void nes_dpc(unsigned long param)
+{
+ struct nes_device *nesdev = (struct nes_device *)param;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 counter;
+ u32 loop_counter = 0;
+ u32 int_status_bit;
+ u32 int_stat;
+ u32 timer_stat;
+ u32 temp_int_stat;
+ u32 intf_int_stat;
+ u32 debug_error;
+ u32 processed_intf_int = 0;
+ u16 processed_timer_int = 0;
+ u16 completion_ints = 0;
+ u16 timer_ints = 0;
+
+ /* nes_debug(NES_DBG_ISR, "\n"); */
+
+ do {
+ timer_stat = 0;
+ if (nesdev->napi_isr_ran) {
+ nesdev->napi_isr_ran = 0;
+ int_stat = nesdev->int_stat;
+ } else
+ int_stat = nes_read32(nesdev->regs+NES_INT_STAT);
+ if (processed_intf_int != 0)
+ int_stat &= nesdev->int_req & ~NES_INT_INTF;
+ else
+ int_stat &= nesdev->int_req;
+ if (processed_timer_int == 0) {
+ processed_timer_int = 1;
+ if (int_stat & NES_INT_TIMER) {
+ timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT);
+ if ((timer_stat & nesdev->timer_int_req) == 0) {
+ int_stat &= ~NES_INT_TIMER;
+ }
+ }
+ } else {
+ int_stat &= ~NES_INT_TIMER;
+ }
+
+ if (int_stat) {
+ if (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|
+ NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)) {
+ /* Ack the interrupts */
+ nes_write32(nesdev->regs+NES_INT_STAT,
+ (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|
+ NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)));
+ }
+
+ temp_int_stat = int_stat;
+ for (counter = 0, int_status_bit = 1; counter < 16; counter++) {
+ if (int_stat & int_status_bit) {
+ nes_process_ceq(nesdev, &nesadapter->ceq[counter]);
+ temp_int_stat &= ~int_status_bit;
+ completion_ints = 1;
+ }
+ if (!(temp_int_stat & 0x0000ffff))
+ break;
+ int_status_bit <<= 1;
+ }
+
+ /* Process the AEQ for this pci function */
+ int_status_bit = 1 << (16 + PCI_FUNC(nesdev->pcidev->devfn));
+ if (int_stat & int_status_bit) {
+ nes_process_aeq(nesdev, &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)]);
+ }
+
+ /* Process the MAC interrupt for this pci function */
+ int_status_bit = 1 << (24 + nesdev->mac_index);
+ if (int_stat & int_status_bit) {
+ nes_process_mac_intr(nesdev, nesdev->mac_index);
+ }
+
+ if (int_stat & NES_INT_TIMER) {
+ if (timer_stat & nesdev->timer_int_req) {
+ nes_write32(nesdev->regs + NES_TIMER_STAT,
+ (timer_stat & nesdev->timer_int_req) |
+ ~(nesdev->nesadapter->timer_int_req));
+ timer_ints = 1;
+ }
+ }
+
+ if (int_stat & NES_INT_INTF) {
+ processed_intf_int = 1;
+ intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
+ intf_int_stat &= nesdev->intf_int_req;
+ if (NES_INTF_INT_CRITERR & intf_int_stat) {
+ debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS);
+ printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n",
+ (u16)debug_error);
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS,
+ 0x01010000 | (debug_error & 0x0000ffff));
+ /* BUG(); */
+ if (crit_err_count++ > 10)
+ nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17);
+ }
+ if (NES_INTF_INT_PCIERR & intf_int_stat) {
+ printk(KERN_ERR PFX "PCI Error reported by device!!!\n");
+ BUG();
+ }
+ if (NES_INTF_INT_AEQ_OFLOW & intf_int_stat) {
+ printk(KERN_ERR PFX "AEQ Overflow reported by device!!!\n");
+ BUG();
+ }
+ nes_write32(nesdev->regs+NES_INTF_INT_STAT, intf_int_stat);
+ }
+
+ if (int_stat & NES_INT_TSW) {
+ }
+ }
+ /* Don't let the interface interrupt bit keep us in the loop */
+ int_stat &= ~NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|
+ NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3;
+ } while ((int_stat != 0) && (loop_counter++ < MAX_DPC_ITERATIONS));
+
+ if (timer_ints == 1) {
+ if ((nesadapter->et_rx_coalesce_usecs_irq) || (nesadapter->et_use_adaptive_rx_coalesce)) {
+ if (completion_ints == 0) {
+ nesdev->timer_only_int_count++;
+ if (nesdev->timer_only_int_count>=nesadapter->timer_int_limit) {
+ nesdev->timer_only_int_count = 0;
+ nesdev->int_req &= ~NES_INT_TIMER;
+ nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
+ nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
+ nesdev->nesadapter->tune_timer.timer_in_use_old = 0;
+ } else {
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req));
+ }
+ } else {
+ if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) {
+ nes_nic_init_timer(nesdev);
+ }
+ nesdev->timer_only_int_count = 0;
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req));
+ }
+ } else {
+ nesdev->timer_only_int_count = 0;
+ nesdev->int_req &= ~NES_INT_TIMER;
+ nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
+ nes_write32(nesdev->regs+NES_TIMER_STAT,
+ nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
+ nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
+ }
+ } else {
+ if ( (completion_ints == 1) &&
+ (((nesadapter->et_rx_coalesce_usecs_irq) &&
+ (!nesadapter->et_use_adaptive_rx_coalesce)) ||
+ ((nesdev->deepcq_count > nesadapter->et_pkt_rate_low) &&
+ (nesadapter->et_use_adaptive_rx_coalesce) )) ) {
+ /* nes_debug(NES_DBG_ISR, "Enabling periodic timer interrupt.\n" ); */
+ nesdev->timer_only_int_count = 0;
+ nesdev->int_req |= NES_INT_TIMER;
+ nes_write32(nesdev->regs+NES_TIMER_STAT,
+ nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
+ nes_write32(nesdev->regs+NES_INTF_INT_MASK,
+ ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER));
+ nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
+ } else {
+ nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
+ }
+ }
+ nesdev->deepcq_count = 0;
+}
+
+
+/**
+ * nes_process_ceq - walk the CEQ and invoke the completion handler of each CQ with a valid CEQE
+ */
+void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
+{
+ u64 u64temp;
+ struct nes_hw_cq *cq;
+ u32 head;
+ u32 ceq_size;
+
+ /* nes_debug(NES_DBG_CQ, "\n"); */
+ head = ceq->ceq_head;
+ ceq_size = ceq->ceq_size;
+
+ do {
+ if (le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]) &
+ NES_CEQE_VALID) {
+ u64temp = (((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX])))<<32) |
+ ((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_LOW_IDX])));
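+ /* the stored context was shifted right by one bit at CQ creation; shift it back to recover the CQ pointer */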
+ u64temp <<= 1;
+ cq = *((struct nes_hw_cq **)&u64temp);
+ /* nes_debug(NES_DBG_CQ, "pCQ = %p\n", cq); */
+ barrier();
+ ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX] = 0;
+
+ /* call the event handler */
+ cq->ce_handler(nesdev, cq);
+
+ if (++head >= ceq_size)
+ head = 0;
+ } else {
+ break;
+ }
+
+ } while (1);
+
+ ceq->ceq_head = head;
+}
+
+
+/**
+ * nes_process_aeq - walk the AEQ, dispatching QP-related asynchronous events to nes_process_iwarp_aeqe
+ */
+void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
+{
+// u64 u64temp;
+ u32 head;
+ u32 aeq_size;
+ u32 aeqe_misc;
+ u32 aeqe_cq_id;
+ struct nes_hw_aeqe volatile *aeqe;
+
+ head = aeq->aeq_head;
+ aeq_size = aeq->aeq_size;
+
+ do {
+ aeqe = &aeq->aeq_vbase[head];
+ if ((le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]) & NES_AEQE_VALID) == 0)
+ break;
+ aeqe_misc = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+ aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
+ if (aeqe_misc & (NES_AEQE_QP|NES_AEQE_CQ)) {
+ if (aeqe_cq_id >= NES_FIRST_QPN) {
+ /* dealing with an accelerated QP related AE */
+// u64temp = (((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])))<<32) |
+// ((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX])));
+ nes_process_iwarp_aeqe(nesdev, (struct nes_hw_aeqe *)aeqe);
+ } else {
+ /* TODO: dealing with a CQP related AE */
+ nes_debug(NES_DBG_AEQ, "Processing CQP related AE, misc = 0x%04X\n",
+ (u16)(aeqe_misc >> 16));
+ }
+ }
+
+ aeqe->aeqe_words[NES_AEQE_MISC_IDX] = 0;
+
+ if (++head >= aeq_size)
+ head = 0;
+ } while (1);
+ aeq->aeq_head = head;
+}
+
+static void nes_reset_link(struct nes_device *nesdev, u32 mac_index)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 reset_value;
+ u32 i=0;
+ u32 u32temp;
+
+ if (nesadapter->hw_rev == NE020_REV) {
+ return;
+ }
+ mh_detected++;
+
+ reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+
+ if ((mac_index == 0) || ((mac_index == 1) && (nesadapter->OneG_Mode)))
+ reset_value |= 0x0000001d;
+ else
+ reset_value |= 0x0000002d;
+
+ if (4 <= (nesadapter->link_interrupt_count[mac_index] / ((u16)NES_MAX_LINK_INTERRUPTS))) {
+ if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) {
+ nesadapter->link_interrupt_count[0] = 0;
+ nesadapter->link_interrupt_count[1] = 0;
+ u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
+ if (0x00000040 & u32temp)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
+ else
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8);
+
+ reset_value |= 0x0000003d;
+ }
+ nesadapter->link_interrupt_count[mac_index] = 0;
+ }
+
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
+
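+ /* Poll for the reset-done bit, giving up after 5000 reads */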
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
+ & 0x00000040) != 0x00000040) && (i++ < 5000));
+
+ if (0x0000003d == (reset_value & 0x0000003d)) {
+ u32 pcs_control_status0, pcs_control_status1;
+
+ for (i = 0; i < 10; i++) {
+ pcs_control_status0 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0);
+ pcs_control_status1 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+ if (((0x0F000000 == (pcs_control_status0 & 0x0F000000))
+ && (pcs_control_status0 & 0x00100000))
+ || ((0x0F000000 == (pcs_control_status1 & 0x0F000000))
+ && (pcs_control_status1 & 0x00100000)))
+ continue;
+ else
+ break;
+ }
+ if (10 == i) {
+ u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
+ if (0x00000040 & u32temp)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
+ else
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8);
+
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
+
+ while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET)
+ & 0x00000040) != 0x00000040) && (i++ < 5000));
+ }
+ }
+}
+
+/**
+ * nes_process_mac_intr - handle a MAC/PHY interrupt and update link state for the affected netdevs
+ */
+void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
+{
+ unsigned long flags;
+ u32 pcs_control_status;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_vnic *nesvnic;
+ u32 mac_status;
+ u32 mac_index = nesdev->mac_index;
+ u32 u32temp;
+ u16 phy_data;
+ u16 temp_phy_data;
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) {
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ return;
+ }
+ nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT;
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+
+ /* ack the MAC interrupt */
+ mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200));
+ /* Clear the interrupt */
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200), mac_status);
+
+ nes_debug(NES_DBG_PHY, "MAC%u interrupt status = 0x%X.\n", mac_number, mac_status);
+
+ if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) {
+ nesdev->link_status_interrupts++;
+ if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) {
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ nes_reset_link(nesdev, mac_index);
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ }
+ /* read the PHY interrupt status register */
+ if (nesadapter->OneG_Mode) {
+ do {
+ nes_read_1G_phy_reg(nesdev, 0x1a,
+ nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1a = 0x%X.\n",
+ nesadapter->phy_index[mac_index], phy_data);
+ } while (phy_data&0x8000);
+
+ temp_phy_data = 0;
+ do {
+ nes_read_1G_phy_reg(nesdev, 0x11,
+ nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy%d data from register 0x11 = 0x%X.\n",
+ nesadapter->phy_index[mac_index], phy_data);
+ if (temp_phy_data == phy_data)
+ break;
+ temp_phy_data = phy_data;
+ } while (1);
+
+ nes_read_1G_phy_reg(nesdev, 0x1e,
+ nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1e = 0x%X.\n",
+ nesadapter->phy_index[mac_index], phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 1,
+ nesadapter->phy_index[mac_index], &phy_data);
+ nes_debug(NES_DBG_PHY, "1G phy%u data from register 1 = 0x%X\n",
+ nesadapter->phy_index[mac_index], phy_data);
+
+ if (temp_phy_data & 0x1000) {
+ nes_debug(NES_DBG_PHY, "The Link is up according to the PHY\n");
+ phy_data = 4;
+ } else {
+ nes_debug(NES_DBG_PHY, "The Link is down according to the PHY\n");
+ }
+ }
+ nes_debug(NES_DBG_PHY, "Eth SERDES Common Status: 0=0x%08X, 1=0x%08X\n",
+ nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0),
+ nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0+0x200));
+ pcs_control_status = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200));
+ pcs_control_status = nes_read_indexed(nesdev,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200));
+ nes_debug(NES_DBG_PHY, "PCS PHY Control/Status%u: 0x%08X\n",
+ mac_index, pcs_control_status);
+ if (nesadapter->OneG_Mode) {
+ u32temp = 0x01010000;
+ if (nesadapter->port_count > 2) {
+ u32temp |= 0x02020000;
+ }
+ if ((pcs_control_status & u32temp)!= u32temp) {
+ phy_data = 0;
+ nes_debug(NES_DBG_PHY, "PCS says the link is down\n");
+ }
+ } else if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) {
+ nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]);
+ temp_phy_data = (u16)nes_read_indexed(nesdev,
+ NES_IDX_MAC_MDIO_CONTROL);
+ u32temp = 20;
+ do {
+ nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]);
+ phy_data = (u16)nes_read_indexed(nesdev,
+ NES_IDX_MAC_MDIO_CONTROL);
+ if ((phy_data == temp_phy_data) || (!(--u32temp)))
+ break;
+ temp_phy_data = phy_data;
+ } while (1);
+ nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
+ __FUNCTION__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
+
+ } else {
+ phy_data = (0x0f0f0000 == (pcs_control_status & 0x0f1f0000)) ? 4 : 0;
+ }
+
+ if (phy_data & 0x0004) {
+ nesadapter->mac_link_down[mac_index] = 0;
+ list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+ nes_debug(NES_DBG_PHY, "The Link is UP!!. linkup was %d\n",
+ nesvnic->linkup);
+ if (nesvnic->linkup == 0) {
+ printk(PFX "The Link is now up for port %u, netdev %p.\n",
+ mac_index, nesvnic->netdev);
+ if (netif_queue_stopped(nesvnic->netdev))
+ netif_start_queue(nesvnic->netdev);
+ nesvnic->linkup = 1;
+ netif_carrier_on(nesvnic->netdev);
+ }
+ }
+ } else {
+ nesadapter->mac_link_down[mac_index] = 1;
+ list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
+ nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n",
+ nesvnic->linkup);
+ if (nesvnic->linkup == 1) {
+ printk(PFX "The Link is now down for port %u, netdev %p.\n",
+ mac_index, nesvnic->netdev);
+ if (!(netif_queue_stopped(nesvnic->netdev)))
+ netif_stop_queue(nesvnic->netdev);
+ nesvnic->linkup = 0;
+ netif_carrier_off(nesvnic->netdev);
+ }
+ }
+ }
+ }
+
+ nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
+}
+
+
+
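+/**
+ * nes_nic_napi_ce_handler - schedule NAPI polling for the netdev associated with this CQ
+ */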
+void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
+{
+ struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
+
+ netif_rx_schedule(nesdev->netdev[nesvnic->netdev_index], &nesvnic->napi);
+}
+
+
+/*
+ * MAX_RQES_TO_PROCESS defines the maximum number of receive queue entries to
+ * process before getting out of nes_nic_ce_handler
+ */
+#define MAX_RQES_TO_PROCESS 384
+
+/**
+ * nes_nic_ce_handler - NIC CQ completion handler; reclaims transmit buffers and delivers received packets to the stack
+ */
+void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
+{
+ u64 u64temp;
+ dma_addr_t bus_address;
+ struct nes_hw_nic *nesnic;
+ struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_nic_rq_wqe *nic_rqe;
+ struct nes_hw_nic_sq_wqe *nic_sqe;
+ struct sk_buff *skb;
+ struct sk_buff *rx_skb;
+ __le16 *wqe_fragment_length;
+ u32 head;
+ u32 cq_size;
+ u32 rx_pkt_size;
+ u32 cqe_count=0;
+ u32 cqe_errv;
+ u32 cqe_misc;
+ u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */
+ u16 vlan_tag;
+ u16 pkt_type;
+ u16 rqes_processed = 0;
+ u8 sq_cqes = 0;
+
+ head = cq->cq_head;
+ cq_size = cq->cq_size;
+ cq->cqes_pending = 1;
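+ /* Walk the CQ: reclaim transmit buffers for SQ completions and hand received frames to the stack for RQ completions */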
+ do {
+ if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
+ NES_NIC_CQE_VALID) {
+ nesnic = &nesvnic->nic;
+ cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]);
+ if (cqe_misc & NES_NIC_CQE_SQ) {
+ sq_cqes++;
+ wqe_fragment_index = 1;
+ nic_sqe = &nesnic->sq_vbase[nesnic->sq_tail];
+ skb = nesnic->tx_skb[nesnic->sq_tail];
+ wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
+ /* bump past the vlan tag */
+ wqe_fragment_length++;
+ if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) {
+ u64temp = (u64) le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]);
+ u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32;
+ bus_address = (dma_addr_t)u64temp;
+ if (test_and_clear_bit(nesnic->sq_tail, nesnic->first_frag_overflow)) {
+ pci_unmap_single(nesdev->pcidev,
+ bus_address,
+ le16_to_cpu(wqe_fragment_length[wqe_fragment_index++]),
+ PCI_DMA_TODEVICE);
+ }
+ for (; wqe_fragment_index < 5; wqe_fragment_index++) {
+ if (wqe_fragment_length[wqe_fragment_index]) {
+ u64temp = le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]);
+ u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32;
+ bus_address = (dma_addr_t)u64temp;
+ pci_unmap_page(nesdev->pcidev,
+ bus_address,
+ le16_to_cpu(wqe_fragment_length[wqe_fragment_index]),
+ PCI_DMA_TODEVICE);
+ } else
+ break;
+ }
+ if (skb)
+ dev_kfree_skb_any(skb);
+ }
+ nesnic->sq_tail++;
+ nesnic->sq_tail &= nesnic->sq_size-1;
+ if (sq_cqes > 128) {
+ barrier();
+ /* restart the queue if it had been stopped */
+ if (netif_queue_stopped(nesvnic->netdev))
+ netif_wake_queue(nesvnic->netdev);
+ sq_cqes = 0;
+ }
+ } else {
+ rqes_processed++;
+
+ cq->rx_cqes_completed++;
+ cq->rx_pkts_indicated++;
+ rx_pkt_size = cqe_misc & 0x0000ffff;
+ nic_rqe = &nesnic->rq_vbase[nesnic->rq_tail];
+ /* Get the skb */
+ rx_skb = nesnic->rx_skb[nesnic->rq_tail];
+ nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_tail];
+ bus_address = (dma_addr_t)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
+ bus_address += ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32;
+ pci_unmap_single(nesdev->pcidev, bus_address,
+ nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
+ /* rx_skb->tail = rx_skb->data + rx_pkt_size; */
+ /* rx_skb->len = rx_pkt_size; */
+ rx_skb->len = 0; /* TODO: see if this is necessary */
+ skb_put(rx_skb, rx_pkt_size);
+ rx_skb->protocol = eth_type_trans(rx_skb, nesvnic->netdev);
+ nesnic->rq_tail++;
+ nesnic->rq_tail &= nesnic->rq_size - 1;
+
+ atomic_inc(&nesvnic->rx_skbs_needed);
+ if (atomic_read(&nesvnic->rx_skbs_needed) > (nesvnic->nic.rq_size>>1)) {
+ nes_write32(nesdev->regs+NES_CQE_ALLOC,
+ cq->cq_number | (cqe_count << 16));
+// nesadapter->tune_timer.cq_count += cqe_count;
+ nesdev->currcq_count += cqe_count;
+ cqe_count = 0;
+ nes_replenish_nic_rq(nesvnic);
+ }
+ pkt_type = (u16)(le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX]));
+ cqe_errv = (cqe_misc & NES_NIC_CQE_ERRV_MASK) >> NES_NIC_CQE_ERRV_SHIFT;
+ rx_skb->ip_summed = CHECKSUM_NONE;
+
+ if ((NES_PKT_TYPE_TCPV4_BITS == (pkt_type & NES_PKT_TYPE_TCPV4_MASK)) ||
+ (NES_PKT_TYPE_UDPV4_BITS == (pkt_type & NES_PKT_TYPE_UDPV4_MASK))) {
+ if ((cqe_errv &
+ (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR |
+ NES_NIC_ERRV_BITS_IPH_ERR | NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) {
+ if (nesvnic->rx_checksum_disabled == 0) {
+ rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ } else
+ nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed TCP or UDP packet."
+ " errv = 0x%X, pkt_type = 0x%X.\n",
+ nesvnic->netdev->name, cqe_errv, pkt_type);
+
+ } else if ((pkt_type & NES_PKT_TYPE_IPV4_MASK) == NES_PKT_TYPE_IPV4_BITS) {
+ if ((cqe_errv &
+ (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_IPH_ERR |
+ NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) {
+ if (nesvnic->rx_checksum_disabled == 0) {
+ rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* nes_debug(NES_DBG_CQ, "%s: Reporting successfully checksummed IPv4 packet.\n",
+ nesvnic->netdev->name); */
+ }
+ } else
+ nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed TCP or UDP packet."
+ " errv = 0x%X, pkt_type = 0x%X.\n",
+ nesvnic->netdev->name, cqe_errv, pkt_type);
+ }
+ /* nes_debug(NES_DBG_CQ, "pkt_type=%x, APBVT_MASK=%x\n",
+ pkt_type, (pkt_type & NES_PKT_TYPE_APBVT_MASK)); */
+
+ if ((pkt_type & NES_PKT_TYPE_APBVT_MASK) == NES_PKT_TYPE_APBVT_BITS) {
+ nes_cm_recv(rx_skb, nesvnic->netdev);
+ } else {
+ if ((cqe_misc & NES_NIC_CQE_TAG_VALID) && (nesvnic->vlan_grp != NULL)) {
+ vlan_tag = (u16)(le32_to_cpu(
+ cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX])
+ >> 16);
+ nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
+ nesvnic->netdev->name, vlan_tag);
+ nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag);
+ } else {
+ nes_netif_rx(rx_skb);
+ }
+ }
+
+ nesvnic->netdev->last_rx = jiffies;
+ /* nesvnic->netstats.rx_packets++; */
+ /* nesvnic->netstats.rx_bytes += rx_pkt_size; */
+ }
+
+ cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0;
+ /* Accounting... */
+ cqe_count++;
+ if (++head >= cq_size)
+ head = 0;
+ if (cqe_count == 255) {
+ /* Replenish Nic CQ */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC,
+ cq->cq_number | (cqe_count << 16));
+// nesdev->nesadapter->tune_timer.cq_count += cqe_count;
+ nesdev->currcq_count += cqe_count;
+ cqe_count = 0;
+ }
+
+ if (cq->rx_cqes_completed >= nesvnic->budget)
+ break;
+ } else {
+ cq->cqes_pending = 0;
+ break;
+ }
+
+ } while (1);
+
+ if (sq_cqes) {
+ barrier();
+ /* restart the queue if it had been stopped */
+ if (netif_queue_stopped(nesvnic->netdev))
+ netif_wake_queue(nesvnic->netdev);
+ }
+
+ cq->cq_head = head;
+ /* nes_debug(NES_DBG_CQ, "CQ%u Processed = %u cqes, new head = %u.\n",
+ cq->cq_number, cqe_count, cq->cq_head); */
+ cq->cqe_allocs_pending = cqe_count;
+ if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) {
+// nesdev->nesadapter->tune_timer.cq_count += cqe_count;
+ nesdev->currcq_count += cqe_count;
+ nes_nic_tune_timer(nesdev);
+ }
+ if (atomic_read(&nesvnic->rx_skbs_needed))
+ nes_replenish_nic_rq(nesvnic);
+ }
+
+
+/**
+ * nes_cqp_ce_handler - CQP CQ completion handler; completes outstanding CQP requests and submits any pending ones
+ */
+void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
+{
+ u64 u64temp;
+ unsigned long flags;
+ struct nes_hw_cqp *cqp = NULL;
+ struct nes_cqp_request *cqp_request;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ u32 head;
+ u32 cq_size;
+ u32 cqe_count=0;
+ u32 error_code;
+ /* u32 counter; */
+
+ head = cq->cq_head;
+ cq_size = cq->cq_size;
+
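+ /* For each valid CQP CQE: record the completion status, wake any waiter or run the callback, then release the request */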
+ do {
+ /* process the CQE */
+ /* nes_debug(NES_DBG_CQP, "head=%u cqe_words=%08X\n", head,
+ le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])); */
+
+ if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
+ u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head].
+ cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
+ ((u64)(le32_to_cpu(cq->cq_vbase[head].
+ cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX])));
+ cqp = *((struct nes_hw_cqp **)&u64temp);
+
+ error_code = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX]);
+ if (error_code) {
+ nes_debug(NES_DBG_CQP, "Bad Completion code for opcode 0x%02X from CQP,"
+ " Major/Minor codes = 0x%04X:%04X.\n",
+ le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])&0x3f,
+ (u16)(error_code >> 16),
+ (u16)error_code);
+ nes_debug(NES_DBG_CQP, "cqp: qp_id=%u, sq_head=%u, sq_tail=%u\n",
+ cqp->qp_id, cqp->sq_head, cqp->sq_tail);
+ }
+
+ u64temp = (((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
+ wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX])))<<32) |
+ ((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
+ wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX])));
+ cqp_request = *((struct nes_cqp_request **)&u64temp);
+ if (cqp_request) {
+ if (cqp_request->waiting) {
+ /* nes_debug(NES_DBG_CQP, "%s: Waking up requestor\n"); */
+ cqp_request->major_code = (u16)(error_code >> 16);
+ cqp_request->minor_code = (u16)error_code;
+ barrier();
+ cqp_request->request_done = 1;
+ wake_up(&cqp_request->waitq);
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
+ cqp_request,
+ le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f);
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ } else if (cqp_request->callback) {
+ /* Invoke the callback routine */
+ cqp_request->cqp_callback(nesdev, cqp_request);
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ } else {
+ nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
+ cqp_request,
+ le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f);
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ } else {
+ wake_up(&nesdev->cqp.waitq);
+ }
+
+ cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, cq->cq_number | (1 << 16));
+ if (++cqp->sq_tail >= cqp->sq_size)
+ cqp->sq_tail = 0;
+
+ /* Accounting... */
+ cqe_count++;
+ if (++head >= cq_size)
+ head = 0;
+ } else {
+ break;
+ }
+ } while (1);
+ cq->cq_head = head;
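+ /* With SQ space possibly freed up, move any queued CQP requests onto the CQP SQ */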
+
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ while ((!list_empty(&nesdev->cqp_pending_reqs)) &&
+ ((((nesdev->cqp.sq_tail+nesdev->cqp.sq_size)-nesdev->cqp.sq_head) &
+ (nesdev->cqp.sq_size - 1)) != 1)) {
+ cqp_request = list_entry(nesdev->cqp_pending_reqs.next,
+ struct nes_cqp_request, list);
+ list_del_init(&cqp_request->list);
+ head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[head];
+ memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe));
+ barrier();
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] =
+ cpu_to_le32((u32)((unsigned long)cqp_request));
+ cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] =
+ cpu_to_le32((u32)(upper_32_bits((unsigned long)cqp_request)));
+ nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) put on CQPs SQ wqe%u.\n",
+ cqp_request, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, head);
+ /* Ring doorbell (1 WQEs) */
+ barrier();
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id);
+ }
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+
+ /* Arm the CCQ */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
+ cq->cq_number);
+ nes_read32(nesdev->regs+NES_CQE_ALLOC);
+}
+
+
+/**
+ * nes_process_iwarp_aeqe - handle an iWARP asynchronous event, updating QP state and notifying the consumer as needed
+ */
+void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe)
+{
+ u64 context;
+ u64 aeqe_context = 0;
+ unsigned long flags;
+ struct nes_qp *nesqp;
+ int resource_allocated;
+ /* struct iw_cm_id *cm_id; */
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct ib_event ibevent;
+ /* struct iw_cm_event cm_event; */
+ u32 aeq_info;
+ u32 next_iwarp_state = 0;
+ u16 async_event_id;
+ u8 tcp_state;
+ u8 iwarp_state;
+
+ nes_debug(NES_DBG_AEQ, "\n");
+ aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
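+ /* For inbound RDMA or non-QP events the AEQE carries the context directly; otherwise look the QP up by its ID */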
+ if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) {
+ context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
+ context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
+ } else {
+ aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
+ aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
+ context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
+ aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
+ BUG_ON(!context);
+ }
+
+ async_event_id = (u16)aeq_info;
+ tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
+ iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
+ nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
+ " Tcp state = %s, iWARP state = %s\n",
+ async_event_id,
+ le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
+ nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
+
+
+ switch (async_event_id) {
+ case NES_AEQE_AEID_LLP_FIN_RECEIVED:
+ nesqp = *((struct nes_qp **)&context);
+ if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+ nesqp->cm_id->add_ref(nesqp->cm_id);
+ nes_add_ref(&nesqp->ibqp);
+ schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
+ NES_TIMER_TYPE_CLOSE, 1, 0);
+ nes_debug(NES_DBG_AEQ, "QP%u Not decrementing QP refcount (%d),"
+ " need ae to finish up, original_last_aeq = 0x%04X."
+ " last_aeq = 0x%04X, scheduling timer. TCP state = %d\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ async_event_id, nesqp->last_aeq, tcp_state);
+ }
+ if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+ (nesqp->ibqp_state != IB_QPS_RTS)) {
+ /* FIN Received but tcp state or IB state moved on,
+ should expect a close complete */
+ return;
+ }
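+ /* fall through */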
+ case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
+ case NES_AEQE_AEID_LLP_CONNECTION_RESET:
+ case NES_AEQE_AEID_TERMINATE_SENT:
+ case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
+ case NES_AEQE_AEID_RESET_SENT:
+ nesqp = *((struct nes_qp **)&context);
+ if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
+ tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ }
+ nes_add_ref(&nesqp->ibqp);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+
+ if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
+ (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
+ nesqp->hte_added = 0;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n",
+ nesqp->hwqp.qp_id);
+ nes_hw_modify_qp(nesdev, nesqp,
+ NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ }
+
+ if ((nesqp->ibqp_state == IB_QPS_RTS) &&
+ ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+ (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ switch (nesqp->hw_iwarp_state) {
+ case NES_AEQE_IWARP_STATE_RTS:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+ break;
+ case NES_AEQE_IWARP_STATE_TERMINATE:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
+ if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
+ next_iwarp_state |= 0x02000000;
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ }
+ break;
+ default:
+ next_iwarp_state = 0;
+ }
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ if (next_iwarp_state) {
+ nes_add_ref(&nesqp->ibqp);
+ nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
+ " also added another reference\n",
+ nesqp->hwqp.qp_id, next_iwarp_state);
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
+ }
+ nes_cm_disconn(nesqp);
+ } else {
+ if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
+ /* FIN Received but ib state not RTS,
+ close complete will be on its way */
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_rem_ref(&nesqp->ibqp);
+ return;
+ }
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000;
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
+ " also added another reference\n",
+ nesqp->hwqp.qp_id, next_iwarp_state);
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
+ }
+ nes_cm_disconn(nesqp);
+ }
+ break;
+ case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
+ nesqp = *((struct nes_qp **)&context);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED"
+ " event on QP%u \n Q2 Data:\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_FATAL;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+ ((nesqp->ibqp_state == IB_QPS_RTS)&&
+ (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+ nes_add_ref(&nesqp->ibqp);
+ nes_cm_disconn(nesqp);
+ } else {
+ nesqp->in_disconnect = 0;
+ wake_up(&nesqp->kick_waitq);
+ }
+ break;
+ case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
+ nesqp = *((struct nes_qp **)&context);
+ nes_add_ref(&nesqp->ibqp);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nesqp->last_aeq = async_event_id;
+ if (nesqp->cm_id) {
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
+ " event on QP%u, remote IP = 0x%08X \n",
+ nesqp->hwqp.qp_id,
+ ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr));
+ } else {
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
+ " event on QP%u \n",
+ nesqp->hwqp.qp_id);
+ }
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET;
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_FATAL;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ break;
+ case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
+ if (NES_AEQE_INBOUND_RDMA&aeq_info) {
+ nesqp = nesadapter->qp_table[le32_to_cpu(
+ aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
+ } else {
+ /* TODO: get the actual WQE and mask off wqe index */
+ context &= ~((u64)511);
+ nesqp = *((struct nes_qp **)&context);
+ }
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ break;
+ case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
+ nesqp = *((struct nes_qp **)&context);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ break;
+ case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
+ nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words
+ [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u,"
+ " nesqp = %p, AE reported %p\n",
+ nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context));
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ break;
+ case NES_AEQE_AEID_CQ_OPERATION_ERROR:
+ context <<= 1;
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n",
+ le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), (void *)(unsigned long)context);
+ resource_allocated = nes_is_resource_allocated(nesadapter, nesadapter->allocated_cqs,
+ le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
+ if (resource_allocated) {
+ printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
+ __FUNCTION__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
+ }
+ break;
+ case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ nesqp = nesadapter->qp_table[le32_to_cpu(
+ aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG"
+ "_FOR_AVAILABLE_BUFFER event on QP%u\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ /* tell cm to disconnect, cm will queue work to thread */
+ nes_add_ref(&nesqp->ibqp);
+ nes_cm_disconn(nesqp);
+ break;
+ case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ nesqp = *((struct nes_qp **)&context);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN"
+ "_NO_BUFFER_AVAILABLE event on QP%u\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_FATAL;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ /* tell cm to disconnect, cm will queue work to thread */
+ nes_add_ref(&nesqp->ibqp);
+ nes_cm_disconn(nesqp);
+ break;
+ case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
+ nesqp = *((struct nes_qp **)&context);
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR"
+ " event on QP%u \n Q2 Data:\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->ibqp.event_handler) {
+ ibevent.device = nesqp->ibqp.device;
+ ibevent.element.qp = &nesqp->ibqp;
+ ibevent.event = IB_EVENT_QP_FATAL;
+ nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+ }
+ /* tell cm to disconnect, cm will queue work to thread */
+ nes_add_ref(&nesqp->ibqp);
+ nes_cm_disconn(nesqp);
+ break;
+ /* TODO: additional AEs need to be here */
+ default:
+ nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
+ async_event_id);
+ break;
+ }
+
+}
+
+
+/**
+ * nes_iwarp_ce_handler - completion handler for iWARP CQs; acknowledges the CQ and calls the consumer's completion handler
+ */
+void nes_iwarp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *hw_cq)
+{
+ struct nes_cq *nescq = container_of(hw_cq, struct nes_cq, hw_cq);
+
+ /* nes_debug(NES_DBG_CQ, "Processing completion event for iWARP CQ%u.\n",
+ nescq->hw_cq.cq_number); */
+ nes_write32(nesdev->regs+NES_CQ_ACK, nescq->hw_cq.cq_number);
+
+ if (nescq->ibcq.comp_handler)
+ nescq->ibcq.comp_handler(&nescq->ibcq, nescq->ibcq.cq_context);
+
+ return;
+}
+
+
+/**
+ * nes_manage_apbvt - add or remove an accelerated port entry through a CQP manage-APBVT request
+ */
+int nes_manage_apbvt(struct nes_vnic *nesvnic, u32 accel_local_port,
+ u32 nic_index, u32 add_port)
+{
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ unsigned long flags;
+ struct nes_cqp_request *cqp_request;
+ int ret = 0;
+ u16 major_code;
+
+ /* Send manage APBVT request to CQP */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ nes_debug(NES_DBG_QP, "%s APBV for local port=%u(0x%04x), nic_index=%u\n",
+ (add_port == NES_MANAGE_APBVT_ADD) ? "ADD" : "DEL",
+ accel_local_port, accel_local_port, nic_index);
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_MANAGE_APBVT |
+ ((add_port == NES_MANAGE_APBVT_ADD) ? NES_CQP_APBVT_ADD : 0)));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ ((nic_index << NES_CQP_APBVT_NIC_SHIFT) | accel_local_port));
+
+ nes_debug(NES_DBG_QP, "Waiting for CQP completion for APBVT.\n");
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ if (add_port == NES_MANAGE_APBVT_ADD)
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_QP, "Completed, ret=%u, CQP Major:Minor codes = 0x%04X:0x%04X\n",
+ ret, cqp_request->major_code, cqp_request->minor_code);
+ major_code = cqp_request->major_code;
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ if (!ret)
+ return -ETIME;
+ else if (major_code)
+ return -EIO;
+ else
+ return 0;
+}
+
+
+/**
+ * nes_manage_arp_cache - update the local ARP table and issue a CQP request to update the hardware ARP cache
+ */
+void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
+ u32 ip_addr, u32 action)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev;
+ struct nes_cqp_request *cqp_request;
+ int arp_index;
+
+ nesdev = nesvnic->nesdev;
+ arp_index = nes_arp_table(nesdev, ip_addr, mac_addr, action);
+ if (arp_index == -1) {
+ return;
+ }
+
+ /* update the ARP entry */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_NETDEV, "Failed to get a cqp_request.\n");
+ return;
+ }
+ cqp_request->waiting = 0;
+ cqp_wqe = &cqp_request->cqp_wqe;
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
+ NES_CQP_MANAGE_ARP_CACHE | NES_CQP_ARP_PERM);
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(
+ (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_CQP_ARP_AEQ_INDEX_SHIFT);
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(arp_index);
+
+ if (action == NES_ARP_ADD) {
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_ARP_VALID);
+ cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = cpu_to_le32(
+ (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
+ (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
+ cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
+ (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]);
+ } else {
+ cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
+ cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
+ }
+
+ nes_debug(NES_DBG_NETDEV, "Not waiting for CQP, cqp.sq_head=%u, cqp.sq_tail=%u\n",
+ nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
+
+ atomic_set(&cqp_request->refcount, 1);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+}
+
+
+/**
+ * flush_wqes - post a CQP FLUSH_WQES request for the given QP; which_wq
+ * selects the SQ and/or RQ and wait_completion blocks until the CQP
+ * completes or times out.
+ */
+void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
+ u32 which_wq, u32 wait_completion)
+{
+ unsigned long flags;
+ struct nes_cqp_request *cqp_request;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ int ret;
+
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
+ return;
+ }
+ if (wait_completion) {
+ cqp_request->waiting = 1;
+ atomic_set(&cqp_request->refcount, 2);
+ } else {
+ cqp_request->waiting = 0;
+ }
+ cqp_wqe = &cqp_request->cqp_wqe;
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
+ cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
+ cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
+
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ if (wait_completion) {
+ /* Wait for CQP */
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_QP, "Flush SQ QP WQEs completed, ret=%u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X\n",
+ ret, cqp_request->major_code, cqp_request->minor_code);
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+}
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
new file mode 100644
index 000000000000..1e10df550c9e
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -0,0 +1,1206 @@
+/*
+* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenIB.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*/
+
+#ifndef __NES_HW_H
+#define __NES_HW_H
+
+#define NES_PHY_TYPE_1G 2
+#define NES_PHY_TYPE_IRIS 3
+#define NES_PHY_TYPE_PUMA_10G 6
+
+#define NES_MULTICAST_PF_MAX 8
+
+enum pci_regs {
+ NES_INT_STAT = 0x0000,
+ NES_INT_MASK = 0x0004,
+ NES_INT_PENDING = 0x0008,
+ NES_INTF_INT_STAT = 0x000C,
+ NES_INTF_INT_MASK = 0x0010,
+ NES_TIMER_STAT = 0x0014,
+ NES_PERIODIC_CONTROL = 0x0018,
+ NES_ONE_SHOT_CONTROL = 0x001C,
+ NES_EEPROM_COMMAND = 0x0020,
+ NES_EEPROM_DATA = 0x0024,
+ NES_FLASH_COMMAND = 0x0028,
+ NES_FLASH_DATA = 0x002C,
+ NES_SOFTWARE_RESET = 0x0030,
+ NES_CQ_ACK = 0x0034,
+ NES_WQE_ALLOC = 0x0040,
+ NES_CQE_ALLOC = 0x0044,
+};
+
+enum indexed_regs {
+ NES_IDX_CREATE_CQP_LOW = 0x0000,
+ NES_IDX_CREATE_CQP_HIGH = 0x0004,
+ NES_IDX_QP_CONTROL = 0x0040,
+ NES_IDX_FLM_CONTROL = 0x0080,
+ NES_IDX_INT_CPU_STATUS = 0x00a0,
+ NES_IDX_GPIO_CONTROL = 0x00f0,
+ NES_IDX_GPIO_DATA = 0x00f4,
+ NES_IDX_TCP_CONFIG0 = 0x01e4,
+ NES_IDX_TCP_TIMER_CONFIG = 0x01ec,
+ NES_IDX_TCP_NOW = 0x01f0,
+ NES_IDX_QP_MAX_CFG_SIZES = 0x0200,
+ NES_IDX_QP_CTX_SIZE = 0x0218,
+ NES_IDX_TCP_TIMER_SIZE0 = 0x0238,
+ NES_IDX_TCP_TIMER_SIZE1 = 0x0240,
+ NES_IDX_ARP_CACHE_SIZE = 0x0258,
+ NES_IDX_CQ_CTX_SIZE = 0x0260,
+ NES_IDX_MRT_SIZE = 0x0278,
+ NES_IDX_PBL_REGION_SIZE = 0x0280,
+ NES_IDX_IRRQ_COUNT = 0x02b0,
+ NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x02f0,
+ NES_IDX_RX_WINDOW_BUFFER_SIZE = 0x0300,
+ NES_IDX_DST_IP_ADDR = 0x0400,
+ NES_IDX_PCIX_DIAG = 0x08e8,
+ NES_IDX_MPP_DEBUG = 0x0a00,
+ NES_IDX_PORT_RX_DISCARDS = 0x0a30,
+ NES_IDX_PORT_TX_DISCARDS = 0x0a34,
+ NES_IDX_MPP_LB_DEBUG = 0x0b00,
+ NES_IDX_DENALI_CTL_22 = 0x1058,
+ NES_IDX_MAC_TX_CONTROL = 0x2000,
+ NES_IDX_MAC_TX_CONFIG = 0x2004,
+ NES_IDX_MAC_TX_PAUSE_QUANTA = 0x2008,
+ NES_IDX_MAC_RX_CONTROL = 0x200c,
+ NES_IDX_MAC_RX_CONFIG = 0x2010,
+ NES_IDX_MAC_EXACT_MATCH_BOTTOM = 0x201c,
+ NES_IDX_MAC_MDIO_CONTROL = 0x2084,
+ NES_IDX_MAC_TX_OCTETS_LOW = 0x2100,
+ NES_IDX_MAC_TX_OCTETS_HIGH = 0x2104,
+ NES_IDX_MAC_TX_FRAMES_LOW = 0x2108,
+ NES_IDX_MAC_TX_FRAMES_HIGH = 0x210c,
+ NES_IDX_MAC_TX_PAUSE_FRAMES = 0x2118,
+ NES_IDX_MAC_TX_ERRORS = 0x2138,
+ NES_IDX_MAC_RX_OCTETS_LOW = 0x213c,
+ NES_IDX_MAC_RX_OCTETS_HIGH = 0x2140,
+ NES_IDX_MAC_RX_FRAMES_LOW = 0x2144,
+ NES_IDX_MAC_RX_FRAMES_HIGH = 0x2148,
+ NES_IDX_MAC_RX_BC_FRAMES_LOW = 0x214c,
+ NES_IDX_MAC_RX_MC_FRAMES_HIGH = 0x2150,
+ NES_IDX_MAC_RX_PAUSE_FRAMES = 0x2154,
+ NES_IDX_MAC_RX_SHORT_FRAMES = 0x2174,
+ NES_IDX_MAC_RX_OVERSIZED_FRAMES = 0x2178,
+ NES_IDX_MAC_RX_JABBER_FRAMES = 0x217c,
+ NES_IDX_MAC_RX_CRC_ERR_FRAMES = 0x2180,
+ NES_IDX_MAC_RX_LENGTH_ERR_FRAMES = 0x2184,
+ NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES = 0x2188,
+ NES_IDX_MAC_INT_STATUS = 0x21f0,
+ NES_IDX_MAC_INT_MASK = 0x21f4,
+ NES_IDX_PHY_PCS_CONTROL_STATUS0 = 0x2800,
+ NES_IDX_PHY_PCS_CONTROL_STATUS1 = 0x2a00,
+ NES_IDX_ETH_SERDES_COMMON_CONTROL0 = 0x2808,
+ NES_IDX_ETH_SERDES_COMMON_CONTROL1 = 0x2a08,
+ NES_IDX_ETH_SERDES_COMMON_STATUS0 = 0x280c,
+ NES_IDX_ETH_SERDES_COMMON_STATUS1 = 0x2a0c,
+ NES_IDX_ETH_SERDES_TX_EMP0 = 0x2810,
+ NES_IDX_ETH_SERDES_TX_EMP1 = 0x2a10,
+ NES_IDX_ETH_SERDES_TX_DRIVE0 = 0x2814,
+ NES_IDX_ETH_SERDES_TX_DRIVE1 = 0x2a14,
+ NES_IDX_ETH_SERDES_RX_MODE0 = 0x2818,
+ NES_IDX_ETH_SERDES_RX_MODE1 = 0x2a18,
+ NES_IDX_ETH_SERDES_RX_SIGDET0 = 0x281c,
+ NES_IDX_ETH_SERDES_RX_SIGDET1 = 0x2a1c,
+ NES_IDX_ETH_SERDES_BYPASS0 = 0x2820,
+ NES_IDX_ETH_SERDES_BYPASS1 = 0x2a20,
+ NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0 = 0x2824,
+ NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1 = 0x2a24,
+ NES_IDX_ETH_SERDES_RX_EQ_CONTROL0 = 0x2828,
+ NES_IDX_ETH_SERDES_RX_EQ_CONTROL1 = 0x2a28,
+ NES_IDX_ETH_SERDES_RX_EQ_STATUS0 = 0x282c,
+ NES_IDX_ETH_SERDES_RX_EQ_STATUS1 = 0x2a2c,
+ NES_IDX_ETH_SERDES_CDR_RESET0 = 0x2830,
+ NES_IDX_ETH_SERDES_CDR_RESET1 = 0x2a30,
+ NES_IDX_ETH_SERDES_CDR_CONTROL0 = 0x2834,
+ NES_IDX_ETH_SERDES_CDR_CONTROL1 = 0x2a34,
+ NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0 = 0x2838,
+ NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1 = 0x2a38,
+ NES_IDX_ENDNODE0_NSTAT_RX_DISCARD = 0x3080,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO = 0x3000,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI = 0x3004,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO = 0x3008,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI = 0x300c,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO = 0x7000,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI = 0x7004,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO = 0x7008,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI = 0x700c,
+ NES_IDX_CM_CONFIG = 0x5100,
+ NES_IDX_NIC_LOGPORT_TO_PHYPORT = 0x6000,
+ NES_IDX_NIC_PHYPORT_TO_USW = 0x6008,
+ NES_IDX_NIC_ACTIVE = 0x6010,
+ NES_IDX_NIC_UNICAST_ALL = 0x6018,
+ NES_IDX_NIC_MULTICAST_ALL = 0x6020,
+ NES_IDX_NIC_MULTICAST_ENABLE = 0x6028,
+ NES_IDX_NIC_BROADCAST_ON = 0x6030,
+ NES_IDX_USED_CHUNKS_TX = 0x60b0,
+ NES_IDX_TX_POOL_SIZE = 0x60b8,
+ NES_IDX_QUAD_HASH_TABLE_SIZE = 0x6148,
+ NES_IDX_PERFECT_FILTER_LOW = 0x6200,
+ NES_IDX_PERFECT_FILTER_HIGH = 0x6204,
+ NES_IDX_IPV4_TCP_REXMITS = 0x7080,
+ NES_IDX_DEBUG_ERROR_CONTROL_STATUS = 0x913c,
+ NES_IDX_DEBUG_ERROR_MASKS0 = 0x9140,
+ NES_IDX_DEBUG_ERROR_MASKS1 = 0x9144,
+ NES_IDX_DEBUG_ERROR_MASKS2 = 0x9148,
+ NES_IDX_DEBUG_ERROR_MASKS3 = 0x914c,
+ NES_IDX_DEBUG_ERROR_MASKS4 = 0x9150,
+ NES_IDX_DEBUG_ERROR_MASKS5 = 0x9154,
+};
+
+#define NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE 1
+#define NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE (1 << 17)
+
+enum nes_cqp_opcodes {
+ NES_CQP_CREATE_QP = 0x00,
+ NES_CQP_MODIFY_QP = 0x01,
+ NES_CQP_DESTROY_QP = 0x02,
+ NES_CQP_CREATE_CQ = 0x03,
+ NES_CQP_MODIFY_CQ = 0x04,
+ NES_CQP_DESTROY_CQ = 0x05,
+ NES_CQP_ALLOCATE_STAG = 0x09,
+ NES_CQP_REGISTER_STAG = 0x0a,
+ NES_CQP_QUERY_STAG = 0x0b,
+ NES_CQP_REGISTER_SHARED_STAG = 0x0c,
+ NES_CQP_DEALLOCATE_STAG = 0x0d,
+ NES_CQP_MANAGE_ARP_CACHE = 0x0f,
+ NES_CQP_SUSPEND_QPS = 0x11,
+ NES_CQP_UPLOAD_CONTEXT = 0x13,
+ NES_CQP_CREATE_CEQ = 0x16,
+ NES_CQP_DESTROY_CEQ = 0x18,
+ NES_CQP_CREATE_AEQ = 0x19,
+ NES_CQP_DESTROY_AEQ = 0x1b,
+ NES_CQP_LMI_ACCESS = 0x20,
+ NES_CQP_FLUSH_WQES = 0x22,
+ NES_CQP_MANAGE_APBVT = 0x23
+};
+
+enum nes_cqp_wqe_word_idx {
+ NES_CQP_WQE_OPCODE_IDX = 0,
+ NES_CQP_WQE_ID_IDX = 1,
+ NES_CQP_WQE_COMP_CTX_LOW_IDX = 2,
+ NES_CQP_WQE_COMP_CTX_HIGH_IDX = 3,
+ NES_CQP_WQE_COMP_SCRATCH_LOW_IDX = 4,
+ NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX = 5,
+};
+
+enum nes_cqp_cq_wqeword_idx {
+ NES_CQP_CQ_WQE_PBL_LOW_IDX = 6,
+ NES_CQP_CQ_WQE_PBL_HIGH_IDX = 7,
+ NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX = 8,
+ NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX = 9,
+ NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX = 10,
+};
+
+enum nes_cqp_stag_wqeword_idx {
+ NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX = 1,
+ NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX = 6,
+ NES_CQP_STAG_WQE_LEN_LOW_IDX = 7,
+ NES_CQP_STAG_WQE_STAG_IDX = 8,
+ NES_CQP_STAG_WQE_VA_LOW_IDX = 10,
+ NES_CQP_STAG_WQE_VA_HIGH_IDX = 11,
+ NES_CQP_STAG_WQE_PA_LOW_IDX = 12,
+ NES_CQP_STAG_WQE_PA_HIGH_IDX = 13,
+ NES_CQP_STAG_WQE_PBL_LEN_IDX = 14
+};
+
+#define NES_CQP_OP_IWARP_STATE_SHIFT 28
+
+enum nes_cqp_qp_bits {
+ NES_CQP_QP_ARP_VALID = (1<<8),
+ NES_CQP_QP_WINBUF_VALID = (1<<9),
+ NES_CQP_QP_CONTEXT_VALID = (1<<10),
+ NES_CQP_QP_ORD_VALID = (1<<11),
+ NES_CQP_QP_WINBUF_DATAIND_EN = (1<<12),
+ NES_CQP_QP_VIRT_WQS = (1<<13),
+ NES_CQP_QP_DEL_HTE = (1<<14),
+ NES_CQP_QP_CQS_VALID = (1<<15),
+ NES_CQP_QP_TYPE_TSA = 0,
+ NES_CQP_QP_TYPE_IWARP = (1<<16),
+ NES_CQP_QP_TYPE_CQP = (4<<16),
+ NES_CQP_QP_TYPE_NIC = (5<<16),
+ NES_CQP_QP_MSS_CHG = (1<<20),
+ NES_CQP_QP_STATIC_RESOURCES = (1<<21),
+ NES_CQP_QP_IGNORE_MW_BOUND = (1<<22),
+ NES_CQP_QP_VWQ_USE_LMI = (1<<23),
+ NES_CQP_QP_IWARP_STATE_IDLE = (1<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_IWARP_STATE_RTS = (2<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_IWARP_STATE_CLOSING = (3<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
+ NES_CQP_QP_RESET = (1<<31),
+};
+
+enum nes_cqp_qp_wqe_word_idx {
+ NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
+ NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
+ NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
+};
+
+enum nes_nic_ctx_bits {
+ NES_NIC_CTX_RQ_SIZE_32 = (3<<8),
+ NES_NIC_CTX_RQ_SIZE_512 = (3<<8),
+ NES_NIC_CTX_SQ_SIZE_32 = (1<<10),
+ NES_NIC_CTX_SQ_SIZE_512 = (3<<10),
+};
+
+enum nes_nic_qp_ctx_word_idx {
+ NES_NIC_CTX_MISC_IDX = 0,
+ NES_NIC_CTX_SQ_LOW_IDX = 2,
+ NES_NIC_CTX_SQ_HIGH_IDX = 3,
+ NES_NIC_CTX_RQ_LOW_IDX = 4,
+ NES_NIC_CTX_RQ_HIGH_IDX = 5,
+};
+
+enum nes_cqp_cq_bits {
+ NES_CQP_CQ_CEQE_MASK = (1<<9),
+ NES_CQP_CQ_CEQ_VALID = (1<<10),
+ NES_CQP_CQ_RESIZE = (1<<11),
+ NES_CQP_CQ_CHK_OVERFLOW = (1<<12),
+ NES_CQP_CQ_4KB_CHUNK = (1<<14),
+ NES_CQP_CQ_VIRT = (1<<15),
+};
+
+enum nes_cqp_stag_bits {
+ NES_CQP_STAG_VA_TO = (1<<9),
+ NES_CQP_STAG_DEALLOC_PBLS = (1<<10),
+ NES_CQP_STAG_PBL_BLK_SIZE = (1<<11),
+ NES_CQP_STAG_MR = (1<<13),
+ NES_CQP_STAG_RIGHTS_LOCAL_READ = (1<<16),
+ NES_CQP_STAG_RIGHTS_LOCAL_WRITE = (1<<17),
+ NES_CQP_STAG_RIGHTS_REMOTE_READ = (1<<18),
+ NES_CQP_STAG_RIGHTS_REMOTE_WRITE = (1<<19),
+ NES_CQP_STAG_RIGHTS_WINDOW_BIND = (1<<20),
+ NES_CQP_STAG_REM_ACC_EN = (1<<21),
+ NES_CQP_STAG_LEAVE_PENDING = (1<<31),
+};
+
+enum nes_cqp_ceq_wqeword_idx {
+ NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX = 1,
+ NES_CQP_CEQ_WQE_PBL_LOW_IDX = 6,
+ NES_CQP_CEQ_WQE_PBL_HIGH_IDX = 7,
+};
+
+enum nes_cqp_ceq_bits {
+ NES_CQP_CEQ_4KB_CHUNK = (1<<14),
+ NES_CQP_CEQ_VIRT = (1<<15),
+};
+
+enum nes_cqp_aeq_wqeword_idx {
+ NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX = 1,
+ NES_CQP_AEQ_WQE_PBL_LOW_IDX = 6,
+ NES_CQP_AEQ_WQE_PBL_HIGH_IDX = 7,
+};
+
+enum nes_cqp_aeq_bits {
+ NES_CQP_AEQ_4KB_CHUNK = (1<<14),
+ NES_CQP_AEQ_VIRT = (1<<15),
+};
+
+enum nes_cqp_lmi_wqeword_idx {
+ NES_CQP_LMI_WQE_LMI_OFFSET_IDX = 1,
+ NES_CQP_LMI_WQE_FRAG_LOW_IDX = 8,
+ NES_CQP_LMI_WQE_FRAG_HIGH_IDX = 9,
+ NES_CQP_LMI_WQE_FRAG_LEN_IDX = 10,
+};
+
+enum nes_cqp_arp_wqeword_idx {
+ NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX = 6,
+ NES_CQP_ARP_WQE_MAC_HIGH_IDX = 7,
+ NES_CQP_ARP_WQE_REACHABILITY_MAX_IDX = 1,
+};
+
+enum nes_cqp_upload_wqeword_idx {
+ NES_CQP_UPLOAD_WQE_CTXT_LOW_IDX = 6,
+ NES_CQP_UPLOAD_WQE_CTXT_HIGH_IDX = 7,
+ NES_CQP_UPLOAD_WQE_HTE_IDX = 8,
+};
+
+enum nes_cqp_arp_bits {
+ NES_CQP_ARP_VALID = (1<<8),
+ NES_CQP_ARP_PERM = (1<<9),
+};
+
+enum nes_cqp_flush_bits {
+ NES_CQP_FLUSH_SQ = (1<<30),
+ NES_CQP_FLUSH_RQ = (1<<31),
+};
+
+enum nes_cqe_opcode_bits {
+ NES_CQE_STAG_VALID = (1<<6),
+ NES_CQE_ERROR = (1<<7),
+ NES_CQE_SQ = (1<<8),
+ NES_CQE_SE = (1<<9),
+ NES_CQE_PSH = (1<<29),
+ NES_CQE_FIN = (1<<30),
+ NES_CQE_VALID = (1<<31),
+};
+
+
+enum nes_cqe_word_idx {
+ NES_CQE_PAYLOAD_LENGTH_IDX = 0,
+ NES_CQE_COMP_COMP_CTX_LOW_IDX = 2,
+ NES_CQE_COMP_COMP_CTX_HIGH_IDX = 3,
+ NES_CQE_INV_STAG_IDX = 4,
+ NES_CQE_QP_ID_IDX = 5,
+ NES_CQE_ERROR_CODE_IDX = 6,
+ NES_CQE_OPCODE_IDX = 7,
+};
+
+enum nes_ceqe_word_idx {
+ NES_CEQE_CQ_CTX_LOW_IDX = 0,
+ NES_CEQE_CQ_CTX_HIGH_IDX = 1,
+};
+
+enum nes_ceqe_status_bit {
+ NES_CEQE_VALID = (1<<31),
+};
+
+enum nes_int_bits {
+ NES_INT_CEQ0 = (1<<0),
+ NES_INT_CEQ1 = (1<<1),
+ NES_INT_CEQ2 = (1<<2),
+ NES_INT_CEQ3 = (1<<3),
+ NES_INT_CEQ4 = (1<<4),
+ NES_INT_CEQ5 = (1<<5),
+ NES_INT_CEQ6 = (1<<6),
+ NES_INT_CEQ7 = (1<<7),
+ NES_INT_CEQ8 = (1<<8),
+ NES_INT_CEQ9 = (1<<9),
+ NES_INT_CEQ10 = (1<<10),
+ NES_INT_CEQ11 = (1<<11),
+ NES_INT_CEQ12 = (1<<12),
+ NES_INT_CEQ13 = (1<<13),
+ NES_INT_CEQ14 = (1<<14),
+ NES_INT_CEQ15 = (1<<15),
+ NES_INT_AEQ0 = (1<<16),
+ NES_INT_AEQ1 = (1<<17),
+ NES_INT_AEQ2 = (1<<18),
+ NES_INT_AEQ3 = (1<<19),
+ NES_INT_AEQ4 = (1<<20),
+ NES_INT_AEQ5 = (1<<21),
+ NES_INT_AEQ6 = (1<<22),
+ NES_INT_AEQ7 = (1<<23),
+ NES_INT_MAC0 = (1<<24),
+ NES_INT_MAC1 = (1<<25),
+ NES_INT_MAC2 = (1<<26),
+ NES_INT_MAC3 = (1<<27),
+ NES_INT_TSW = (1<<28),
+ NES_INT_TIMER = (1<<29),
+ NES_INT_INTF = (1<<30),
+};
+
+enum nes_intf_int_bits {
+ NES_INTF_INT_PCIERR = (1<<0),
+ NES_INTF_PERIODIC_TIMER = (1<<2),
+ NES_INTF_ONE_SHOT_TIMER = (1<<3),
+ NES_INTF_INT_CRITERR = (1<<14),
+ NES_INTF_INT_AEQ0_OFLOW = (1<<16),
+ NES_INTF_INT_AEQ1_OFLOW = (1<<17),
+ NES_INTF_INT_AEQ2_OFLOW = (1<<18),
+ NES_INTF_INT_AEQ3_OFLOW = (1<<19),
+ NES_INTF_INT_AEQ4_OFLOW = (1<<20),
+ NES_INTF_INT_AEQ5_OFLOW = (1<<21),
+ NES_INTF_INT_AEQ6_OFLOW = (1<<22),
+ NES_INTF_INT_AEQ7_OFLOW = (1<<23),
+ NES_INTF_INT_AEQ_OFLOW = (0xff<<16),
+};
+
+enum nes_mac_int_bits {
+ NES_MAC_INT_LINK_STAT_CHG = (1<<1),
+ NES_MAC_INT_XGMII_EXT = (1<<2),
+ NES_MAC_INT_TX_UNDERFLOW = (1<<6),
+ NES_MAC_INT_TX_ERROR = (1<<7),
+};
+
+enum nes_cqe_allocate_bits {
+ NES_CQE_ALLOC_INC_SELECT = (1<<28),
+ NES_CQE_ALLOC_NOTIFY_NEXT = (1<<29),
+ NES_CQE_ALLOC_NOTIFY_SE = (1<<30),
+ NES_CQE_ALLOC_RESET = (1<<31),
+};
+
+enum nes_nic_rq_wqe_word_idx {
+ NES_NIC_RQ_WQE_LENGTH_1_0_IDX = 0,
+ NES_NIC_RQ_WQE_LENGTH_3_2_IDX = 1,
+ NES_NIC_RQ_WQE_FRAG0_LOW_IDX = 2,
+ NES_NIC_RQ_WQE_FRAG0_HIGH_IDX = 3,
+ NES_NIC_RQ_WQE_FRAG1_LOW_IDX = 4,
+ NES_NIC_RQ_WQE_FRAG1_HIGH_IDX = 5,
+ NES_NIC_RQ_WQE_FRAG2_LOW_IDX = 6,
+ NES_NIC_RQ_WQE_FRAG2_HIGH_IDX = 7,
+ NES_NIC_RQ_WQE_FRAG3_LOW_IDX = 8,
+ NES_NIC_RQ_WQE_FRAG3_HIGH_IDX = 9,
+};
+
+enum nes_nic_sq_wqe_word_idx {
+ NES_NIC_SQ_WQE_MISC_IDX = 0,
+ NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX = 1,
+ NES_NIC_SQ_WQE_LSO_INFO_IDX = 2,
+ NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX = 3,
+ NES_NIC_SQ_WQE_LENGTH_2_1_IDX = 4,
+ NES_NIC_SQ_WQE_LENGTH_4_3_IDX = 5,
+ NES_NIC_SQ_WQE_FRAG0_LOW_IDX = 6,
+ NES_NIC_SQ_WQE_FRAG0_HIGH_IDX = 7,
+ NES_NIC_SQ_WQE_FRAG1_LOW_IDX = 8,
+ NES_NIC_SQ_WQE_FRAG1_HIGH_IDX = 9,
+ NES_NIC_SQ_WQE_FRAG2_LOW_IDX = 10,
+ NES_NIC_SQ_WQE_FRAG2_HIGH_IDX = 11,
+ NES_NIC_SQ_WQE_FRAG3_LOW_IDX = 12,
+ NES_NIC_SQ_WQE_FRAG3_HIGH_IDX = 13,
+ NES_NIC_SQ_WQE_FRAG4_LOW_IDX = 14,
+ NES_NIC_SQ_WQE_FRAG4_HIGH_IDX = 15,
+};
+
+enum nes_iwarp_sq_wqe_word_idx {
+ NES_IWARP_SQ_WQE_MISC_IDX = 0,
+ NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX = 1,
+ NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX = 2,
+ NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX = 3,
+ NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
+ NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
+ NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX = 7,
+ NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX = 8,
+ NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX = 9,
+ NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX = 10,
+ NES_IWARP_SQ_WQE_RDMA_STAG_IDX = 11,
+ NES_IWARP_SQ_WQE_IMM_DATA_START_IDX = 12,
+ NES_IWARP_SQ_WQE_FRAG0_LOW_IDX = 16,
+ NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX = 17,
+ NES_IWARP_SQ_WQE_LENGTH0_IDX = 18,
+ NES_IWARP_SQ_WQE_STAG0_IDX = 19,
+ NES_IWARP_SQ_WQE_FRAG1_LOW_IDX = 20,
+ NES_IWARP_SQ_WQE_FRAG1_HIGH_IDX = 21,
+ NES_IWARP_SQ_WQE_LENGTH1_IDX = 22,
+ NES_IWARP_SQ_WQE_STAG1_IDX = 23,
+ NES_IWARP_SQ_WQE_FRAG2_LOW_IDX = 24,
+ NES_IWARP_SQ_WQE_FRAG2_HIGH_IDX = 25,
+ NES_IWARP_SQ_WQE_LENGTH2_IDX = 26,
+ NES_IWARP_SQ_WQE_STAG2_IDX = 27,
+ NES_IWARP_SQ_WQE_FRAG3_LOW_IDX = 28,
+ NES_IWARP_SQ_WQE_FRAG3_HIGH_IDX = 29,
+ NES_IWARP_SQ_WQE_LENGTH3_IDX = 30,
+ NES_IWARP_SQ_WQE_STAG3_IDX = 31,
+};
+
+enum nes_iwarp_sq_bind_wqe_word_idx {
+ NES_IWARP_SQ_BIND_WQE_MR_IDX = 6,
+ NES_IWARP_SQ_BIND_WQE_MW_IDX = 7,
+ NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX = 8,
+ NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX = 9,
+ NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX = 10,
+ NES_IWARP_SQ_BIND_WQE_VA_FBO_HIGH_IDX = 11,
+};
+
+enum nes_iwarp_sq_fmr_wqe_word_idx {
+ NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX = 7,
+ NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX = 8,
+ NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX = 9,
+ NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX = 10,
+ NES_IWARP_SQ_FMR_WQE_VA_FBO_HIGH_IDX = 11,
+ NES_IWARP_SQ_FMR_WQE_PBL_ADDR_LOW_IDX = 12,
+ NES_IWARP_SQ_FMR_WQE_PBL_ADDR_HIGH_IDX = 13,
+ NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14,
+};
+
+enum nes_iwarp_sq_locinv_wqe_word_idx {
+ NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6,
+};
+
+
+enum nes_iwarp_rq_wqe_word_idx {
+ NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1,
+ NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2,
+ NES_IWARP_RQ_WQE_COMP_CTX_HIGH_IDX = 3,
+ NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
+ NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
+ NES_IWARP_RQ_WQE_FRAG0_LOW_IDX = 8,
+ NES_IWARP_RQ_WQE_FRAG0_HIGH_IDX = 9,
+ NES_IWARP_RQ_WQE_LENGTH0_IDX = 10,
+ NES_IWARP_RQ_WQE_STAG0_IDX = 11,
+ NES_IWARP_RQ_WQE_FRAG1_LOW_IDX = 12,
+ NES_IWARP_RQ_WQE_FRAG1_HIGH_IDX = 13,
+ NES_IWARP_RQ_WQE_LENGTH1_IDX = 14,
+ NES_IWARP_RQ_WQE_STAG1_IDX = 15,
+ NES_IWARP_RQ_WQE_FRAG2_LOW_IDX = 16,
+ NES_IWARP_RQ_WQE_FRAG2_HIGH_IDX = 17,
+ NES_IWARP_RQ_WQE_LENGTH2_IDX = 18,
+ NES_IWARP_RQ_WQE_STAG2_IDX = 19,
+ NES_IWARP_RQ_WQE_FRAG3_LOW_IDX = 20,
+ NES_IWARP_RQ_WQE_FRAG3_HIGH_IDX = 21,
+ NES_IWARP_RQ_WQE_LENGTH3_IDX = 22,
+ NES_IWARP_RQ_WQE_STAG3_IDX = 23,
+};
+
+enum nes_nic_sq_wqe_bits {
+ NES_NIC_SQ_WQE_PHDR_CS_READY = (1<<21),
+ NES_NIC_SQ_WQE_LSO_ENABLE = (1<<22),
+ NES_NIC_SQ_WQE_TAGVALUE_ENABLE = (1<<23),
+ NES_NIC_SQ_WQE_DISABLE_CHKSUM = (1<<30),
+ NES_NIC_SQ_WQE_COMPLETION = (1<<31),
+};
+
+enum nes_nic_cqe_word_idx {
+ NES_NIC_CQE_ACCQP_ID_IDX = 0,
+ NES_NIC_CQE_TAG_PKT_TYPE_IDX = 2,
+ NES_NIC_CQE_MISC_IDX = 3,
+};
+
+#define NES_PKT_TYPE_APBVT_BITS 0xC112
+#define NES_PKT_TYPE_APBVT_MASK 0xff3e
+
+#define NES_PKT_TYPE_PVALID_BITS 0x10000000
+#define NES_PKT_TYPE_PVALID_MASK 0x30000000
+
+#define NES_PKT_TYPE_TCPV4_BITS 0x0110
+#define NES_PKT_TYPE_TCPV4_MASK 0x3f30
+
+#define NES_PKT_TYPE_UDPV4_BITS 0x0210
+#define NES_PKT_TYPE_UDPV4_MASK 0x3f30
+
+#define NES_PKT_TYPE_IPV4_BITS 0x0010
+#define NES_PKT_TYPE_IPV4_MASK 0x3f30
+
+#define NES_PKT_TYPE_OTHER_BITS 0x0000
+#define NES_PKT_TYPE_OTHER_MASK 0x0030
+
+#define NES_NIC_CQE_ERRV_SHIFT 16
+enum nes_nic_ev_bits {
+ NES_NIC_ERRV_BITS_MODE = (1<<0),
+ NES_NIC_ERRV_BITS_IPV4_CSUM_ERR = (1<<1),
+ NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR = (1<<2),
+ NES_NIC_ERRV_BITS_WQE_OVERRUN = (1<<3),
+ NES_NIC_ERRV_BITS_IPH_ERR = (1<<4),
+};
+
+enum nes_nic_cqe_bits {
+ NES_NIC_CQE_ERRV_MASK = (0xff<<NES_NIC_CQE_ERRV_SHIFT),
+ NES_NIC_CQE_SQ = (1<<24),
+ NES_NIC_CQE_ACCQP_PORT = (1<<28),
+ NES_NIC_CQE_ACCQP_VALID = (1<<29),
+ NES_NIC_CQE_TAG_VALID = (1<<30),
+ NES_NIC_CQE_VALID = (1<<31),
+};
+
+enum nes_aeqe_word_idx {
+ NES_AEQE_COMP_CTXT_LOW_IDX = 0,
+ NES_AEQE_COMP_CTXT_HIGH_IDX = 1,
+ NES_AEQE_COMP_QP_CQ_ID_IDX = 2,
+ NES_AEQE_MISC_IDX = 3,
+};
+
+enum nes_aeqe_bits {
+ NES_AEQE_QP = (1<<16),
+ NES_AEQE_CQ = (1<<17),
+ NES_AEQE_SQ = (1<<18),
+ NES_AEQE_INBOUND_RDMA = (1<<19),
+ NES_AEQE_IWARP_STATE_MASK = (7<<20),
+ NES_AEQE_TCP_STATE_MASK = (0xf<<24),
+ NES_AEQE_VALID = (1<<31),
+};
+
+#define NES_AEQE_IWARP_STATE_SHIFT 20
+#define NES_AEQE_TCP_STATE_SHIFT 24
+
+enum nes_aeqe_iwarp_state {
+ NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
+ NES_AEQE_IWARP_STATE_IDLE = 1,
+ NES_AEQE_IWARP_STATE_RTS = 2,
+ NES_AEQE_IWARP_STATE_CLOSING = 3,
+ NES_AEQE_IWARP_STATE_TERMINATE = 5,
+ NES_AEQE_IWARP_STATE_ERROR = 6
+};
+
+enum nes_aeqe_tcp_state {
+ NES_AEQE_TCP_STATE_NON_EXISTANT = 0,
+ NES_AEQE_TCP_STATE_CLOSED = 1,
+ NES_AEQE_TCP_STATE_LISTEN = 2,
+ NES_AEQE_TCP_STATE_SYN_SENT = 3,
+ NES_AEQE_TCP_STATE_SYN_RCVD = 4,
+ NES_AEQE_TCP_STATE_ESTABLISHED = 5,
+ NES_AEQE_TCP_STATE_CLOSE_WAIT = 6,
+ NES_AEQE_TCP_STATE_FIN_WAIT_1 = 7,
+ NES_AEQE_TCP_STATE_CLOSING = 8,
+ NES_AEQE_TCP_STATE_LAST_ACK = 9,
+ NES_AEQE_TCP_STATE_FIN_WAIT_2 = 10,
+ NES_AEQE_TCP_STATE_TIME_WAIT = 11
+};
+
+enum nes_aeqe_aeid {
+ NES_AEQE_AEID_AMP_UNALLOCATED_STAG = 0x0102,
+ NES_AEQE_AEID_AMP_INVALID_STAG = 0x0103,
+ NES_AEQE_AEID_AMP_BAD_QP = 0x0104,
+ NES_AEQE_AEID_AMP_BAD_PD = 0x0105,
+ NES_AEQE_AEID_AMP_BAD_STAG_KEY = 0x0106,
+ NES_AEQE_AEID_AMP_BAD_STAG_INDEX = 0x0107,
+ NES_AEQE_AEID_AMP_BOUNDS_VIOLATION = 0x0108,
+ NES_AEQE_AEID_AMP_RIGHTS_VIOLATION = 0x0109,
+ NES_AEQE_AEID_AMP_TO_WRAP = 0x010a,
+ NES_AEQE_AEID_AMP_FASTREG_SHARED = 0x010b,
+ NES_AEQE_AEID_AMP_FASTREG_VALID_STAG = 0x010c,
+ NES_AEQE_AEID_AMP_FASTREG_MW_STAG = 0x010d,
+ NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS = 0x010e,
+ NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW = 0x010f,
+ NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH = 0x0110,
+ NES_AEQE_AEID_AMP_INVALIDATE_SHARED = 0x0111,
+ NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS = 0x0112,
+ NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS = 0x0113,
+ NES_AEQE_AEID_AMP_MWBIND_VALID_STAG = 0x0114,
+ NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG = 0x0115,
+ NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG = 0x0116,
+ NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG = 0x0117,
+ NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS = 0x0118,
+ NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS = 0x0119,
+ NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT = 0x011a,
+ NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED = 0x011b,
+ NES_AEQE_AEID_BAD_CLOSE = 0x0201,
+ NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE = 0x0202,
+ NES_AEQE_AEID_CQ_OPERATION_ERROR = 0x0203,
+ NES_AEQE_AEID_PRIV_OPERATION_DENIED = 0x0204,
+ NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO = 0x0205,
+ NES_AEQE_AEID_STAG_ZERO_INVALID = 0x0206,
+ NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN = 0x0301,
+ NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID = 0x0302,
+ NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER = 0x0303,
+ NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION = 0x0304,
+ NES_AEQE_AEID_DDP_UBE_INVALID_MO = 0x0305,
+ NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE = 0x0306,
+ NES_AEQE_AEID_DDP_UBE_INVALID_QN = 0x0307,
+ NES_AEQE_AEID_DDP_NO_L_BIT = 0x0308,
+ NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION = 0x0311,
+ NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE = 0x0312,
+ NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST = 0x0313,
+ NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP = 0x0314,
+ NES_AEQE_AEID_INVALID_ARP_ENTRY = 0x0401,
+ NES_AEQE_AEID_INVALID_TCP_OPTION_RCVD = 0x0402,
+ NES_AEQE_AEID_STALE_ARP_ENTRY = 0x0403,
+ NES_AEQE_AEID_LLP_CLOSE_COMPLETE = 0x0501,
+ NES_AEQE_AEID_LLP_CONNECTION_RESET = 0x0502,
+ NES_AEQE_AEID_LLP_FIN_RECEIVED = 0x0503,
+ NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH = 0x0504,
+ NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR = 0x0505,
+ NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE = 0x0506,
+ NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL = 0x0507,
+ NES_AEQE_AEID_LLP_SYN_RECEIVED = 0x0508,
+ NES_AEQE_AEID_LLP_TERMINATE_RECEIVED = 0x0509,
+ NES_AEQE_AEID_LLP_TOO_MANY_RETRIES = 0x050a,
+ NES_AEQE_AEID_LLP_TOO_MANY_KEEPALIVE_RETRIES = 0x050b,
+ NES_AEQE_AEID_RESET_SENT = 0x0601,
+ NES_AEQE_AEID_TERMINATE_SENT = 0x0602,
+ NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC = 0x0700
+};
+
+enum nes_iwarp_sq_opcodes {
+ NES_IWARP_SQ_WQE_WRPDU = (1<<15),
+ NES_IWARP_SQ_WQE_PSH = (1<<21),
+ NES_IWARP_SQ_WQE_STREAMING = (1<<23),
+ NES_IWARP_SQ_WQE_IMM_DATA = (1<<28),
+ NES_IWARP_SQ_WQE_READ_FENCE = (1<<29),
+ NES_IWARP_SQ_WQE_LOCAL_FENCE = (1<<30),
+ NES_IWARP_SQ_WQE_SIGNALED_COMPL = (1<<31),
+};
+
+enum nes_iwarp_sq_wqe_bits {
+ NES_IWARP_SQ_OP_RDMAW = 0,
+ NES_IWARP_SQ_OP_RDMAR = 1,
+ NES_IWARP_SQ_OP_SEND = 3,
+ NES_IWARP_SQ_OP_SENDINV = 4,
+ NES_IWARP_SQ_OP_SENDSE = 5,
+ NES_IWARP_SQ_OP_SENDSEINV = 6,
+ NES_IWARP_SQ_OP_BIND = 8,
+ NES_IWARP_SQ_OP_FAST_REG = 9,
+ NES_IWARP_SQ_OP_LOCINV = 10,
+ NES_IWARP_SQ_OP_RDMAR_LOCINV = 11,
+ NES_IWARP_SQ_OP_NOP = 12,
+};
+
+#define NES_EEPROM_READ_REQUEST (1<<16)
+#define NES_MAC_ADDR_VALID (1<<20)
+
+/*
+ * NES index registers init values.
+ */
+struct nes_init_values {
+ u32 index;
+ u32 data;
+ u8 wrt;
+};
+
+/*
+ * NES registers in BAR0.
+ */
+struct nes_pci_regs {
+ u32 int_status;
+ u32 int_mask;
+ u32 int_pending;
+ u32 intf_int_status;
+ u32 intf_int_mask;
+ u32 other_regs[59]; /* pad out to 256 bytes for now */
+};
+
+#define NES_CQP_SQ_SIZE 128
+#define NES_CCQ_SIZE 128
+#define NES_NIC_WQ_SIZE 512
+#define NES_NIC_CTX_SIZE ((NES_NIC_CTX_RQ_SIZE_512) | (NES_NIC_CTX_SQ_SIZE_512))
+#define NES_NIC_BACK_STORE 0x00038000
+
+struct nes_device;
+
+struct nes_hw_nic_qp_context {
+ __le32 context_words[6];
+};
+
+struct nes_hw_nic_sq_wqe {
+ __le32 wqe_words[16];
+};
+
+struct nes_hw_nic_rq_wqe {
+ __le32 wqe_words[16];
+};
+
+struct nes_hw_nic_cqe {
+ __le32 cqe_words[4];
+};
+
+struct nes_hw_cqp_qp_context {
+ __le32 context_words[4];
+};
+
+struct nes_hw_cqp_wqe {
+ __le32 wqe_words[16];
+};
+
+struct nes_hw_qp_wqe {
+ __le32 wqe_words[32];
+};
+
+struct nes_hw_cqe {
+ __le32 cqe_words[8];
+};
+
+struct nes_hw_ceqe {
+ __le32 ceqe_words[2];
+};
+
+struct nes_hw_aeqe {
+ __le32 aeqe_words[4];
+};
+
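+/*
+ * Tracks an outstanding CQP (control QP) request.  Waited requests are
+ * posted with waiting = 1 and refcount = 2; when the reference count
+ * drops to zero the request is either kfree()d (dynamic) or returned to
+ * the adapter's cqp_avail_reqs free list.
+ */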
+struct nes_cqp_request {
+ union {
+ u64 cqp_callback_context;
+ void *cqp_callback_pointer;
+ };
+ wait_queue_head_t waitq;
+ struct nes_hw_cqp_wqe cqp_wqe;
+ struct list_head list;
+ atomic_t refcount;
+ void (*cqp_callback)(struct nes_device *nesdev, struct nes_cqp_request *cqp_request);
+ u16 major_code;
+ u16 minor_code;
+ u8 waiting;
+ u8 request_done;
+ u8 dynamic;
+ u8 callback;
+};
+
+struct nes_hw_cqp {
+ struct nes_hw_cqp_wqe *sq_vbase;
+ dma_addr_t sq_pbase;
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+ u16 qp_id;
+ u16 sq_head;
+ u16 sq_tail;
+ u16 sq_size;
+};
+
+#define NES_FIRST_FRAG_SIZE 128
+struct nes_first_frag {
+ u8 buffer[NES_FIRST_FRAG_SIZE];
+};
+
+struct nes_hw_nic {
+ struct nes_first_frag *first_frag_vbase; /* virtual address of first frags */
+ struct nes_hw_nic_sq_wqe *sq_vbase; /* virtual address of sq */
+ struct nes_hw_nic_rq_wqe *rq_vbase; /* virtual address of rq */
+ struct sk_buff *tx_skb[NES_NIC_WQ_SIZE];
+ struct sk_buff *rx_skb[NES_NIC_WQ_SIZE];
+ dma_addr_t frag_paddr[NES_NIC_WQ_SIZE];
+ unsigned long first_frag_overflow[BITS_TO_LONGS(NES_NIC_WQ_SIZE)];
+ dma_addr_t sq_pbase; /* PCI memory for host rings */
+ dma_addr_t rq_pbase; /* PCI memory for host rings */
+
+ u16 qp_id;
+ u16 sq_head;
+ u16 sq_tail;
+ u16 sq_size;
+ u16 rq_head;
+ u16 rq_tail;
+ u16 rq_size;
+ u8 replenishing_rq;
+ u8 reserved;
+
+ spinlock_t sq_lock;
+ spinlock_t rq_lock;
+};
+
+struct nes_hw_nic_cq {
+ struct nes_hw_nic_cqe volatile *cq_vbase; /* PCI memory for host rings */
+ void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
+ dma_addr_t cq_pbase; /* PCI memory for host rings */
+ int rx_cqes_completed;
+ int cqe_allocs_pending;
+ int rx_pkts_indicated;
+ u16 cq_head;
+ u16 cq_size;
+ u16 cq_number;
+ u8 cqes_pending;
+};
+
+struct nes_hw_qp {
+ struct nes_hw_qp_wqe *sq_vbase; /* PCI memory for host rings */
+ struct nes_hw_qp_wqe *rq_vbase; /* PCI memory for host rings */
+ void *q2_vbase; /* PCI memory for host rings */
+ dma_addr_t sq_pbase; /* PCI memory for host rings */
+ dma_addr_t rq_pbase; /* PCI memory for host rings */
+ dma_addr_t q2_pbase; /* PCI memory for host rings */
+ u32 qp_id;
+ u16 sq_head;
+ u16 sq_tail;
+ u16 sq_size;
+ u16 rq_head;
+ u16 rq_tail;
+ u16 rq_size;
+ u8 rq_encoded_size;
+ u8 sq_encoded_size;
+};
+
+struct nes_hw_cq {
+ struct nes_hw_cqe volatile *cq_vbase; /* PCI memory for host rings */
+ void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
+ dma_addr_t cq_pbase; /* PCI memory for host rings */
+ u16 cq_head;
+ u16 cq_size;
+ u16 cq_number;
+};
+
+struct nes_hw_ceq {
+ struct nes_hw_ceqe volatile *ceq_vbase; /* PCI memory for host rings */
+ dma_addr_t ceq_pbase; /* PCI memory for host rings */
+ u16 ceq_head;
+ u16 ceq_size;
+};
+
+struct nes_hw_aeq {
+ struct nes_hw_aeqe volatile *aeq_vbase; /* PCI memory for host rings */
+ dma_addr_t aeq_pbase; /* PCI memory for host rings */
+ u16 aeq_head;
+ u16 aeq_size;
+};
+
+struct nic_qp_map {
+ u8 qpid;
+ u8 nic_index;
+ u8 logical_port;
+ u8 is_hnic;
+};
+
+#define NES_CQP_ARP_AEQ_INDEX_MASK 0x000f0000
+#define NES_CQP_ARP_AEQ_INDEX_SHIFT 16
+
+#define NES_CQP_APBVT_ADD 0x00008000
+#define NES_CQP_APBVT_NIC_SHIFT 16
+
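+/* ARP table actions passed to nes_arp_table() and nes_manage_arp_cache() */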
+#define NES_ARP_ADD 1
+#define NES_ARP_DELETE 2
+#define NES_ARP_RESOLVE 3
+
+#define NES_MAC_SW_IDLE 0
+#define NES_MAC_SW_INTERRUPT 1
+#define NES_MAC_SW_MH 2
+
+struct nes_arp_entry {
+ u32 ip_addr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+#define NES_NIC_FAST_TIMER 96
+#define NES_NIC_FAST_TIMER_LOW 40
+#define NES_NIC_FAST_TIMER_HIGH 1000
+#define DEFAULT_NES_QL_HIGH 256
+#define DEFAULT_NES_QL_LOW 16
+#define DEFAULT_NES_QL_TARGET 64
+#define DEFAULT_JUMBO_NES_QL_LOW 12
+#define DEFAULT_JUMBO_NES_QL_TARGET 40
+#define DEFAULT_JUMBO_NES_QL_HIGH 128
+#define NES_NIC_CQ_DOWNWARD_TREND 8
+
+struct nes_hw_tune_timer {
+ /* u16 cq_count; */
+ u16 threshold_low;
+ u16 threshold_target;
+ u16 threshold_high;
+ u16 timer_in_use;
+ u16 timer_in_use_old;
+ u16 timer_in_use_min;
+ u16 timer_in_use_max;
+ u8 timer_direction_upward;
+ u8 timer_direction_downward;
+ u16 cq_count_old;
+ u8 cq_direction_downward;
+};
+
+#define NES_TIMER_INT_LIMIT 2
+#define NES_TIMER_INT_LIMIT_DYNAMIC 10
+#define NES_TIMER_ENABLE_LIMIT 4
+#define NES_MAX_LINK_INTERRUPTS 128
+#define NES_MAX_LINK_CHECK 200
+
+struct nes_adapter {
+ u64 fw_ver;
+ unsigned long *allocated_qps;
+ unsigned long *allocated_cqs;
+ unsigned long *allocated_mrs;
+ unsigned long *allocated_pds;
+ unsigned long *allocated_arps;
+ struct nes_qp **qp_table;
+ struct workqueue_struct *work_q;
+
+ struct list_head list;
+ struct list_head active_listeners;
+ /* list of the netdev's associated with each logical port */
+ struct list_head nesvnic_list[4];
+
+ struct timer_list mh_timer;
+ struct timer_list lc_timer;
+ struct work_struct work;
+ spinlock_t resource_lock;
+ spinlock_t phy_lock;
+ spinlock_t pbl_lock;
+ spinlock_t periodic_timer_lock;
+
+ struct nes_arp_entry arp_table[NES_MAX_ARP_TABLE_SIZE];
+
+ /* Adapter CEQ and AEQs */
+ struct nes_hw_ceq ceq[16];
+ struct nes_hw_aeq aeq[8];
+
+ struct nes_hw_tune_timer tune_timer;
+
+ unsigned long doorbell_start;
+
+ u32 hw_rev;
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 device_cap_flags;
+ u32 tick_delta;
+ u32 timer_int_req;
+ u32 arp_table_size;
+ u32 next_arp_index;
+
+ u32 max_mr;
+ u32 max_256pbl;
+ u32 max_4kpbl;
+ u32 free_256pbl;
+ u32 free_4kpbl;
+ u32 max_mr_size;
+ u32 max_qp;
+ u32 next_qp;
+ u32 max_irrq;
+ u32 max_qp_wr;
+ u32 max_sge;
+ u32 max_cq;
+ u32 next_cq;
+ u32 max_cqe;
+ u32 max_pd;
+ u32 base_pd;
+ u32 next_pd;
+ u32 hte_index_mask;
+
+ /* EEPROM information */
+ u32 rx_pool_size;
+ u32 tx_pool_size;
+ u32 rx_threshold;
+ u32 tcp_timer_core_clk_divisor;
+ u32 iwarp_config;
+ u32 cm_config;
+ u32 sws_timer_config;
+ u32 tcp_config1;
+ u32 wqm_wat;
+ u32 core_clock;
+ u32 firmware_version;
+
+ u32 nic_rx_eth_route_err;
+
+ u32 et_rx_coalesce_usecs;
+ u32 et_rx_max_coalesced_frames;
+ u32 et_rx_coalesce_usecs_irq;
+ u32 et_rx_max_coalesced_frames_irq;
+ u32 et_pkt_rate_low;
+ u32 et_rx_coalesce_usecs_low;
+ u32 et_rx_max_coalesced_frames_low;
+ u32 et_pkt_rate_high;
+ u32 et_rx_coalesce_usecs_high;
+ u32 et_rx_max_coalesced_frames_high;
+ u32 et_rate_sample_interval;
+ u32 timer_int_limit;
+
+ /* Adapter base MAC address */
+ u32 mac_addr_low;
+ u16 mac_addr_high;
+
+ u16 firmware_eeprom_offset;
+ u16 software_eeprom_offset;
+
+ u16 max_irrq_wr;
+
+ /* pd config for each port */
+ u16 pd_config_size[4];
+ u16 pd_config_base[4];
+
+ u16 link_interrupt_count[4];
+
+ /* the phy index for each port */
+ u8 phy_index[4];
+ u8 mac_sw_state[4];
+ u8 mac_link_down[4];
+ u8 phy_type[4];
+
+ /* PCI information */
+ unsigned int devfn;
+ unsigned char bus_number;
+ unsigned char OneG_Mode;
+
+ unsigned char ref_count;
+ u8 netdev_count;
+ u8 netdev_max; /* from host nic address count in EEPROM */
+ u8 port_count;
+ u8 virtwq;
+ u8 et_use_adaptive_rx_coalesce;
+ u8 adapter_fcn_count;
+};
+
+struct nes_pbl {
+ u64 *pbl_vbase;
+ dma_addr_t pbl_pbase;
+ struct page *page;
+ unsigned long user_base;
+ u32 pbl_size;
+ struct list_head list;
+ /* TODO: need to add list for two level tables */
+};
+
+struct nes_listener {
+ struct work_struct work;
+ struct workqueue_struct *wq;
+ struct nes_vnic *nesvnic;
+ struct iw_cm_id *cm_id;
+ struct list_head list;
+ unsigned long socket;
+ u8 accept_failed;
+};
+
+struct nes_ib_device;
+
+struct nes_vnic {
+ struct nes_ib_device *nesibdev;
+ u64 sq_full;
+ u64 sq_locked;
+ u64 tso_requests;
+ u64 segmented_tso_requests;
+ u64 linearized_skbs;
+ u64 tx_sw_dropped;
+ u64 endnode_nstat_rx_discard;
+ u64 endnode_nstat_rx_octets;
+ u64 endnode_nstat_rx_frames;
+ u64 endnode_nstat_tx_octets;
+ u64 endnode_nstat_tx_frames;
+ u64 endnode_ipv4_tcp_retransmits;
+ /* void *mem; */
+ struct nes_device *nesdev;
+ struct net_device *netdev;
+ struct vlan_group *vlan_grp;
+ atomic_t rx_skbs_needed;
+ atomic_t rx_skb_timer_running;
+ int budget;
+ u32 msg_enable;
+ /* u32 tx_avail; */
+ __be32 local_ipaddr;
+ struct napi_struct napi;
+ spinlock_t tx_lock; /* could use netdev tx lock? */
+ struct timer_list rq_wqes_timer;
+ u32 nic_mem_size;
+ void *nic_vbase;
+ dma_addr_t nic_pbase;
+ struct nes_hw_nic nic;
+ struct nes_hw_nic_cq nic_cq;
+ u32 mcrq_qp_id;
+ struct nes_ucontext *mcrq_ucontext;
+ struct nes_cqp_request* (*get_cqp_request)(struct nes_device *nesdev);
+ void (*post_cqp_request)(struct nes_device*, struct nes_cqp_request *, int);
+ int (*mcrq_mcast_filter)( struct nes_vnic* nesvnic, __u8* dmi_addr );
+ struct net_device_stats netstats;
+ /* used to put the netdev on the adapters logical port list */
+ struct list_head list;
+ u16 max_frame_size;
+ u8 netdev_open;
+ u8 linkup;
+ u8 logical_port;
+ u8 netdev_index; /* might not be needed, indexes nesdev->netdev */
+ u8 perfect_filter_index;
+ u8 nic_index;
+ u8 qp_nic_index[4];
+ u8 next_qp_nic_index;
+ u8 of_device_registered;
+ u8 rdma_enabled;
+ u8 rx_checksum_disabled;
+};
+
+struct nes_ib_device {
+ struct ib_device ibdev;
+ struct nes_vnic *nesvnic;
+
+ /* Virtual RNIC Limits */
+ u32 max_mr;
+ u32 max_qp;
+ u32 max_cq;
+ u32 max_pd;
+ u32 num_mr;
+ u32 num_qp;
+ u32 num_cq;
+ u32 num_pd;
+};
+
+#define nes_vlan_rx vlan_hwaccel_receive_skb
+#define nes_netif_rx netif_receive_skb
+
+#endif /* __NES_HW_H */
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
new file mode 100644
index 000000000000..b6cc265aa9a4
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -0,0 +1,1703 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+#include <net/tcp.h>
+
+#include <net/inet_common.h>
+#include <linux/inet.h>
+
+#include "nes.h"
+
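+/*
+ * Per-PCI-function NIC QP maps: each nic_qp_map entry is a
+ * {qpid, nic_index, logical_port, is_hnic} tuple, and
+ * nic_qp_mapping_per_function[] selects the map for each function.
+ */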
+static struct nic_qp_map nic_qp_mapping_0[] = {
+ {16,0,0,1},{24,4,0,0},{28,8,0,0},{32,12,0,0},
+ {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0},
+ {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
+ {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_1[] = {
+ {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
+ {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_2[] = {
+ {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_3[] = {
+ {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_4[] = {
+ {28,8,0,0},{32,12,0,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_5[] = {
+ {29,9,1,0},{33,13,1,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_6[] = {
+ {30,10,2,0},{34,14,2,0}
+};
+
+static struct nic_qp_map nic_qp_mapping_7[] = {
+ {31,11,3,0},{35,15,3,0}
+};
+
+static struct nic_qp_map *nic_qp_mapping_per_function[] = {
+ nic_qp_mapping_0, nic_qp_mapping_1, nic_qp_mapping_2, nic_qp_mapping_3,
+ nic_qp_mapping_4, nic_qp_mapping_5, nic_qp_mapping_6, nic_qp_mapping_7
+};
+
+static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+static int debug = -1;
+
+
+static int nes_netdev_open(struct net_device *);
+static int nes_netdev_stop(struct net_device *);
+static int nes_netdev_start_xmit(struct sk_buff *, struct net_device *);
+static struct net_device_stats *nes_netdev_get_stats(struct net_device *);
+static void nes_netdev_tx_timeout(struct net_device *);
+static int nes_netdev_set_mac_address(struct net_device *, void *);
+static int nes_netdev_change_mtu(struct net_device *, int);
+
+/**
+ * nes_netdev_poll - NAPI poll handler; processes NIC CQ entries and
+ * re-arms the CQ once no completions remain pending.
+ */
+static int nes_netdev_poll(struct napi_struct *napi, int budget)
+{
+ struct nes_vnic *nesvnic = container_of(napi, struct nes_vnic, napi);
+ struct net_device *netdev = nesvnic->netdev;
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_hw_nic_cq *nescq = &nesvnic->nic_cq;
+
+ nesvnic->budget = budget;
+ nescq->cqes_pending = 0;
+ nescq->rx_cqes_completed = 0;
+ nescq->cqe_allocs_pending = 0;
+ nescq->rx_pkts_indicated = 0;
+
+ nes_nic_ce_handler(nesdev, nescq);
+
+ if (nescq->cqes_pending == 0) {
+ netif_rx_complete(netdev, napi);
+ /* clear out completed cqes and arm */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
+ nescq->cq_number | (nescq->cqe_allocs_pending << 16));
+ nes_read32(nesdev->regs+NES_CQE_ALLOC);
+ } else {
+ /* clear out completed cqes but don't arm */
+ nes_write32(nesdev->regs+NES_CQE_ALLOC,
+ nescq->cq_number | (nescq->cqe_allocs_pending << 16));
+ nes_debug(NES_DBG_NETDEV, "%s: exiting with work pending\n",
+ nesvnic->netdev->name);
+ }
+ return nescq->rx_pkts_indicated;
+}
+
+
+/**
+ * nes_netdev_open - Activate the network interface (ifconfig ethX up).
+ */
+static int nes_netdev_open(struct net_device *netdev)
+{
+ u32 macaddr_low;
+ u16 macaddr_high;
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ int ret;
+ int i;
+ struct nes_vnic *first_nesvnic;
+ u32 nic_active_bit;
+ u32 nic_active;
+
+ assert(nesdev != NULL);
+
+ first_nesvnic = list_entry(nesdev->nesadapter->nesvnic_list[nesdev->mac_index].next,
+ struct nes_vnic, list);
+
+ if (netif_msg_ifup(nesvnic))
+ printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name);
+
+ ret = nes_init_nic_qp(nesdev, netdev);
+ if (ret) {
+ return ret;
+ }
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ if ((!nesvnic->of_device_registered) && (nesvnic->rdma_enabled)) {
+ nesvnic->nesibdev = nes_init_ofa_device(netdev);
+ if (nesvnic->nesibdev == NULL) {
+ printk(KERN_ERR PFX "%s: nesvnic->nesibdev alloc failed", netdev->name);
+ } else {
+ nesvnic->nesibdev->nesvnic = nesvnic;
+ ret = nes_register_ofa_device(nesvnic->nesibdev);
+ if (ret) {
+ printk(KERN_ERR PFX "%s: Unable to register RDMA device, ret = %d\n",
+ netdev->name, ret);
+ }
+ }
+ }
+ /* Set packet filters */
+ nic_active_bit = 1 << nesvnic->nic_index;
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
+
+ macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
+ macaddr_high += (u16)netdev->dev_addr[1];
+ macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
+ macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
+ macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
+ macaddr_low += (u32)netdev->dev_addr[5];
+
+ /* Program the various MAC regs */
+ for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
+ if (nesvnic->qp_nic_index[i] == 0xf) {
+ break;
+ }
+ nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW"
+ " (Addr:%08X) = %08X, HIGH = %08X.\n",
+ i, nesvnic->qp_nic_index[i],
+ NES_IDX_PERFECT_FILTER_LOW+((nesvnic->perfect_filter_index + i) * 8),
+ macaddr_low,
+ (u32)macaddr_high | NES_MAC_ADDR_VALID |
+ ((((u32)nesvnic->nic_index) << 16)));
+ nes_write_indexed(nesdev,
+ NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
+ macaddr_low);
+ nes_write_indexed(nesdev,
+ NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
+ (u32)macaddr_high | NES_MAC_ADDR_VALID |
+ ((((u32)nesvnic->nic_index) << 16)));
+ }
+
+
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
+ nesvnic->nic_cq.cq_number);
+ nes_read32(nesdev->regs+NES_CQE_ALLOC);
+
+ if (first_nesvnic->linkup) {
+ /* Enable network packets */
+ nesvnic->linkup = 1;
+ netif_start_queue(netdev);
+ netif_carrier_on(netdev);
+ }
+ napi_enable(&nesvnic->napi);
+ nesvnic->netdev_open = 1;
+
+ return 0;
+}
+
+
+/**
+ * nes_netdev_stop - deactivate the network interface: clear the packet
+ * filters, tear down the RDMA device (if registered) and destroy the NIC QP.
+ */
+static int nes_netdev_stop(struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u32 nic_active_mask;
+ u32 nic_active;
+
+ nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
+ nesvnic, nesdev, netdev, netdev->name);
+ if (nesvnic->netdev_open == 0)
+ return 0;
+
+ if (netif_msg_ifdown(nesvnic))
+ printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
+
+ /* Disable network packets */
+ napi_disable(&nesvnic->napi);
+ netif_stop_queue(netdev);
+ if ((nesdev->netdev[0] == netdev) && (nesvnic->logical_port == nesdev->mac_index)) {
+ nes_write_indexed(nesdev,
+ NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
+ }
+
+ nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
+ nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH+
+ (nesvnic->perfect_filter_index*8), 0);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
+ nic_active &= nic_active_mask;
+ nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
+ nic_active &= nic_active_mask;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
+ nic_active &= nic_active_mask;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
+ nic_active &= nic_active_mask;
+ nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
+ nic_active &= nic_active_mask;
+ nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
+
+
+ if (nesvnic->of_device_registered) {
+ nes_destroy_ofa_device(nesvnic->nesibdev);
+ nesvnic->nesibdev = NULL;
+ nesvnic->of_device_registered = 0;
+ }
+ nes_destroy_nic_qp(nesvnic);
+
+ nesvnic->netdev_open = 0;
+
+ return 0;
+}
+
+
+/**
+ * nes_nic_send - build one NIC SQ WQE for an outgoing skb, using the
+ * 128-byte copy buffer plus up to four mapped fragments.
+ */
+static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_hw_nic *nesnic = &nesvnic->nic;
+ struct nes_hw_nic_sq_wqe *nic_sqe;
+ struct tcphdr *tcph;
+ __le16 *wqe_fragment_length;
+ u32 wqe_misc;
+ u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */
+ u16 skb_fragment_index;
+ dma_addr_t bus_address;
+
+ nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
+ wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
+
+ /* setup the VLAN tag if present */
+ if (vlan_tx_tag_present(skb)) {
+ nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
+ netdev->name, vlan_tx_tag_get(skb));
+ wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
+ wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+ } else
+ wqe_misc = 0;
+
+ /* bump past the vlan tag */
+ wqe_fragment_length++;
+ /* wqe_fragment_address = (u64 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX]; */
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ tcph = tcp_hdr(skb);
+ if (skb_is_gso(skb)) {
+ /* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n",
+ netdev->name, skb_is_gso(skb)); */
+ wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE |
+ NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
+ ((u32)tcph->doff) |
+ (((u32)(((unsigned char *)tcph) - skb->data)) << 4));
+ } else {
+ wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
+ }
+ } else { /* CHECKSUM_HW */
+ wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM | NES_NIC_SQ_WQE_COMPLETION;
+ }
+
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
+ skb->len);
+ memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
+ skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE), skb_headlen(skb)));
+ wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
+ skb_headlen(skb)));
+ wqe_fragment_length[1] = 0;
+ if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
+ if ((skb_shinfo(skb)->nr_frags + 1) > 4) {
+ nes_debug(NES_DBG_NIC_TX, "%s: Packet with %u fragments not sent, skb_headlen=%u\n",
+ netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
+ kfree_skb(skb);
+ nesvnic->tx_sw_dropped++;
+ return NETDEV_TX_LOCKED;
+ }
+ set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
+ bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
+ skb_headlen(skb) - NES_FIRST_FRAG_SIZE, PCI_DMA_TODEVICE);
+ wqe_fragment_length[wqe_fragment_index++] =
+ cpu_to_le16(skb_headlen(skb) - NES_FIRST_FRAG_SIZE);
+ wqe_fragment_length[wqe_fragment_index] = 0;
+ set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
+ ((u64)(bus_address)));
+ nesnic->tx_skb[nesnic->sq_head] = skb;
+ }
+
+ if (skb_headlen(skb) == skb->len) {
+ if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
+ nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
+ nesnic->tx_skb[nesnic->sq_head] = NULL;
+ dev_kfree_skb(skb);
+ }
+ } else {
+ /* Deal with Fragments */
+ nesnic->tx_skb[nesnic->sq_head] = skb;
+ for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags;
+ skb_fragment_index++) {
+ bus_address = pci_map_page( nesdev->pcidev,
+ skb_shinfo(skb)->frags[skb_fragment_index].page,
+ skb_shinfo(skb)->frags[skb_fragment_index].page_offset,
+ skb_shinfo(skb)->frags[skb_fragment_index].size,
+ PCI_DMA_TODEVICE);
+ wqe_fragment_length[wqe_fragment_index] =
+ cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size);
+ set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
+ bus_address);
+ wqe_fragment_index++;
+ if (wqe_fragment_index < 5)
+ wqe_fragment_length[wqe_fragment_index] = 0;
+ }
+ }
+
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
+ nesnic->sq_head++;
+ nesnic->sq_head &= nesnic->sq_size - 1;
+
+ return NETDEV_TX_OK;
+}
+
+
+/**
+ * nes_netdev_start_xmit - transmit entry point; segments large TSO
+ * requests across multiple WQEs, otherwise hands the skb to nes_nic_send().
+ */
+static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_hw_nic *nesnic = &nesvnic->nic;
+ struct nes_hw_nic_sq_wqe *nic_sqe;
+ struct tcphdr *tcph;
+ /* struct udphdr *udph; */
+#define NES_MAX_TSO_FRAGS 18
+ /* 64K segment plus overflow on each side */
+ dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS];
+ dma_addr_t bus_address;
+ u32 tso_frag_index;
+ u32 tso_frag_count;
+ u32 tso_wqe_length;
+ u32 curr_tcp_seq;
+ u32 wqe_count=1;
+ u32 send_rc;
+ struct iphdr *iph;
+ unsigned long flags;
+ __le16 *wqe_fragment_length;
+ u32 nr_frags;
+ u32 original_first_length;
+/* u64 *wqe_fragment_address; */
+ /* first fragment (0) is used by copy buffer */
+ u16 wqe_fragment_index=1;
+ u16 hoffset;
+ u16 nhoffset;
+ u16 wqes_needed;
+ u16 wqes_available;
+ u32 old_head;
+ u32 wqe_misc;
+
+ /* nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
+ " (%u frags), tso_size=%u\n",
+ netdev->name, skb->len, skb_headlen(skb),
+ skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
+ */
+
+ if (!netif_carrier_ok(netdev))
+ return NETDEV_TX_OK;
+
+ if (netif_queue_stopped(netdev))
+ return NETDEV_TX_BUSY;
+
+ local_irq_save(flags);
+ if (!spin_trylock(&nesnic->sq_lock)) {
+ local_irq_restore(flags);
+ nesvnic->sq_locked++;
+ return NETDEV_TX_LOCKED;
+ }
+
+ /* Check if SQ is full */
+ if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
+ if (!netif_queue_stopped(netdev)) {
+ netif_stop_queue(netdev);
+ barrier();
+ if ((((((volatile u16)nesnic->sq_tail)+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) != 1) {
+ netif_start_queue(netdev);
+ goto sq_no_longer_full;
+ }
+ }
+ nesvnic->sq_full++;
+ spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+sq_no_longer_full:
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
+ nr_frags++;
+ }
+ /* Check if too many fragments */
+ if (unlikely((nr_frags > 4))) {
+ if (skb_is_gso(skb)) {
+ nesvnic->segmented_tso_requests++;
+ nesvnic->tso_requests++;
+ old_head = nesnic->sq_head;
+ /* Basically 4 fragments available per WQE with extended fragments */
+ wqes_needed = nr_frags >> 2;
+ wqes_needed += (nr_frags&3)?1:0;
+ wqes_available = (((nesnic->sq_tail+nesnic->sq_size)-nesnic->sq_head) - 1) &
+ (nesnic->sq_size - 1);
+
+ if (unlikely(wqes_needed > wqes_available)) {
+ if (!netif_queue_stopped(netdev)) {
+ netif_stop_queue(netdev);
+ barrier();
+ wqes_available = (((((volatile u16)nesnic->sq_tail)+nesnic->sq_size)-nesnic->sq_head) - 1) &
+ (nesnic->sq_size - 1);
+ if (wqes_needed <= wqes_available) {
+ netif_start_queue(netdev);
+ goto tso_sq_no_longer_full;
+ }
+ }
+ nesvnic->sq_full++;
+ spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+ nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
+ netdev->name);
+ return NETDEV_TX_BUSY;
+ }
+tso_sq_no_longer_full:
+ /* Map all the buffers */
+ for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags;
+ tso_frag_count++) {
+ tso_bus_address[tso_frag_count] = pci_map_page( nesdev->pcidev,
+ skb_shinfo(skb)->frags[tso_frag_count].page,
+ skb_shinfo(skb)->frags[tso_frag_count].page_offset,
+ skb_shinfo(skb)->frags[tso_frag_count].size,
+ PCI_DMA_TODEVICE);
+ }
+
+ tso_frag_index = 0;
+ curr_tcp_seq = ntohl(tcp_hdr(skb)->seq);
+ hoffset = skb_transport_header(skb) - skb->data;
+ nhoffset = skb_network_header(skb) - skb->data;
+ original_first_length = hoffset + ((((struct tcphdr *)skb_transport_header(skb))->doff)<<2);
+
+ for (wqe_count=0; wqe_count<((u32)wqes_needed); wqe_count++) {
+ tso_wqe_length = 0;
+ nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
+ wqe_fragment_length =
+ (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
+ /* setup the VLAN tag if present */
+ if (vlan_tx_tag_present(skb)) {
+ nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
+ netdev->name, vlan_tx_tag_get(skb) );
+ wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
+ wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
+ } else
+ wqe_misc = 0;
+
+ /* bump past the vlan tag */
+ wqe_fragment_length++;
+
+ /* Assumes header totally fits in allocated buffer and is in first fragment */
+ if (original_first_length > NES_FIRST_FRAG_SIZE) {
+ nes_debug(NES_DBG_NIC_TX, "ERROR: SKB header too big, headlen=%u, FIRST_FRAG_SIZE=%u\n",
+ original_first_length, NES_FIRST_FRAG_SIZE);
+ nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
+ " (%u frags), tso_size=%u\n",
+ netdev->name,
+ skb->len, skb_headlen(skb),
+ skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
+ }
+ memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
+ skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
+ original_first_length));
+ iph = (struct iphdr *)
+ (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[nhoffset]);
+ tcph = (struct tcphdr *)
+ (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[hoffset]);
+ if ((wqe_count+1)!=(u32)wqes_needed) {
+ tcph->fin = 0;
+ tcph->psh = 0;
+ tcph->rst = 0;
+ tcph->urg = 0;
+ }
+ if (wqe_count) {
+ tcph->syn = 0;
+ }
+ tcph->seq = htonl(curr_tcp_seq);
+ wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
+ original_first_length));
+
+ wqe_fragment_index = 1;
+ if ((wqe_count==0) && (skb_headlen(skb) > original_first_length)) {
+ set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
+ bus_address = pci_map_single(nesdev->pcidev, skb->data + original_first_length,
+ skb_headlen(skb) - original_first_length, PCI_DMA_TODEVICE);
+ wqe_fragment_length[wqe_fragment_index++] =
+ cpu_to_le16(skb_headlen(skb) - original_first_length);
+ wqe_fragment_length[wqe_fragment_index] = 0;
+ set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
+ bus_address);
+ }
+ while (wqe_fragment_index < 5) {
+ wqe_fragment_length[wqe_fragment_index] =
+ cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size);
+ set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
+ (u64)tso_bus_address[tso_frag_index]);
+ wqe_fragment_index++;
+ tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size;
+ if (wqe_fragment_index < 5)
+ wqe_fragment_length[wqe_fragment_index] = 0;
+ if (tso_frag_index == tso_frag_count)
+ break;
+ }
+ if ((wqe_count+1) == (u32)wqes_needed) {
+ nesnic->tx_skb[nesnic->sq_head] = skb;
+ } else {
+ nesnic->tx_skb[nesnic->sq_head] = NULL;
+ }
+ wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
+ if ((tso_wqe_length + original_first_length) > skb_is_gso(skb)) {
+ wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE;
+ } else {
+ iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset);
+ }
+
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX,
+ wqe_misc);
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
+ ((u32)tcph->doff) | (((u32)hoffset) << 4));
+
+ set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
+ tso_wqe_length + original_first_length);
+ curr_tcp_seq += tso_wqe_length;
+ nesnic->sq_head++;
+ nesnic->sq_head &= nesnic->sq_size-1;
+ }
+ } else {
+ nesvnic->linearized_skbs++;
+ hoffset = skb_transport_header(skb) - skb->data;
+ nhoffset = skb_network_header(skb) - skb->data;
+ skb_linearize(skb);
+ skb_set_transport_header(skb, hoffset);
+ skb_set_network_header(skb, nhoffset);
+ send_rc = nes_nic_send(skb, netdev);
+ if (send_rc != NETDEV_TX_OK) {
+ spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+ return NETDEV_TX_OK;
+ }
+ }
+ } else {
+ send_rc = nes_nic_send(skb, netdev);
+ if (send_rc != NETDEV_TX_OK) {
+ spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ barrier();
+
+ if (wqe_count)
+ nes_write32(nesdev->regs+NES_WQE_ALLOC,
+ (wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
+
+ netdev->trans_start = jiffies;
+ spin_unlock_irqrestore(&nesnic->sq_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+
+/**
+ * nes_netdev_get_stats
+ */
+static struct net_device_stats *nes_netdev_get_stats(struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u64 u64temp;
+ u32 u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + (nesvnic->nic_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->endnode_nstat_rx_discard += u32temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + (nesvnic->nic_index*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
+
+ nesvnic->endnode_nstat_rx_octets += u64temp;
+ nesvnic->netstats.rx_bytes += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + (nesvnic->nic_index*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
+
+ nesvnic->endnode_nstat_rx_frames += u64temp;
+ nesvnic->netstats.rx_packets += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + (nesvnic->nic_index*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
+
+ nesvnic->endnode_nstat_tx_octets += u64temp;
+ nesvnic->netstats.tx_bytes += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + (nesvnic->nic_index*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
+
+ nesvnic->endnode_nstat_tx_frames += u64temp;
+ nesvnic->netstats.tx_packets += u64temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_short_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->netstats.rx_length_errors += u32temp;
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_rx_errors += u32temp;
+ nesvnic->nesdev->mac_rx_crc_errors += u32temp;
+ nesvnic->netstats.rx_crc_errors += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_tx_errors += u32temp;
+ nesvnic->netstats.tx_errors += u32temp;
+
+ return &nesvnic->netstats;
+}
+
+
+/**
+ * nes_netdev_tx_timeout
+ */
+static void nes_netdev_tx_timeout(struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ if (netif_msg_timer(nesvnic))
+ nes_debug(NES_DBG_NIC_TX, "%s: tx timeout\n", netdev->name);
+}
+
+
+/**
+ * nes_netdev_set_mac_address
+ */
+static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct sockaddr *mac_addr = p;
+ int i;
+ u32 macaddr_low;
+ u16 macaddr_high;
+
+ if (!is_valid_ether_addr(mac_addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
+ printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
+			__func__, netdev->addr_len,
+ mac_addr->sa_data[0], mac_addr->sa_data[1],
+ mac_addr->sa_data[2], mac_addr->sa_data[3],
+ mac_addr->sa_data[4], mac_addr->sa_data[5]);
+ macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
+ macaddr_high += (u16)netdev->dev_addr[1];
+ macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
+ macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
+ macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
+ macaddr_low += (u32)netdev->dev_addr[5];
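+	/* Illustrative example: for dev_addr 00:11:22:33:44:55 the packing */
+	/* above yields macaddr_high = 0x0011 and macaddr_low = 0x22334455.  */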
+
+ for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
+ if (nesvnic->qp_nic_index[i] == 0xf) {
+ break;
+ }
+ nes_write_indexed(nesdev,
+ NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
+ macaddr_low);
+ nes_write_indexed(nesdev,
+ NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
+ (u32)macaddr_high | NES_MAC_ADDR_VALID |
+ ((((u32)nesvnic->nic_index) << 16)));
+ }
+ return 0;
+}
+
+
+/**
+ * nes_netdev_set_multicast_list
+ */
+void nes_netdev_set_multicast_list(struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct dev_mc_list *multicast_addr;
+ u32 nic_active_bit;
+ u32 nic_active;
+ u32 perfect_filter_register_address;
+ u32 macaddr_low;
+ u16 macaddr_high;
+ u8 mc_all_on = 0;
+ u8 mc_index;
+ int mc_nic_index = -1;
+
+ nic_active_bit = 1 << nesvnic->nic_index;
+
+ if (netdev->flags & IFF_PROMISC) {
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
+ mc_all_on = 1;
+ } else if ((netdev->flags & IFF_ALLMULTI) || (netdev->mc_count > NES_MULTICAST_PF_MAX) ||
+ (nesvnic->nic_index > 3)) {
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
+ nic_active &= ~nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
+ mc_all_on = 1;
+ } else {
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
+ nic_active &= ~nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
+ nic_active &= ~nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
+ }
+
+ nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscous = %d, All Multicast = %d.\n",
+ netdev->mc_count, (netdev->flags & IFF_PROMISC)?1:0,
+ (netdev->flags & IFF_ALLMULTI)?1:0);
+ if (!mc_all_on) {
+ multicast_addr = netdev->mc_list;
+ perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + 0x80;
+ perfect_filter_register_address += nesvnic->nic_index*0x40;
+ for (mc_index=0; mc_index < NES_MULTICAST_PF_MAX; mc_index++) {
+ while (multicast_addr && nesvnic->mcrq_mcast_filter && ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic, multicast_addr->dmi_addr)) == 0))
+ multicast_addr = multicast_addr->next;
+
+ if (mc_nic_index < 0)
+ mc_nic_index = nesvnic->nic_index;
+ if (multicast_addr) {
+ nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n",
+ multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1],
+ multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3],
+ multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5],
+ perfect_filter_register_address+(mc_index * 8), mc_nic_index);
+ macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
+ macaddr_high += (u16)multicast_addr->dmi_addr[1];
+ macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
+ macaddr_low += ((u32)multicast_addr->dmi_addr[3]) << 16;
+ macaddr_low += ((u32)multicast_addr->dmi_addr[4]) << 8;
+ macaddr_low += (u32)multicast_addr->dmi_addr[5];
+ nes_write_indexed(nesdev,
+ perfect_filter_register_address+(mc_index * 8),
+ macaddr_low);
+ nes_write_indexed(nesdev,
+ perfect_filter_register_address+4+(mc_index * 8),
+ (u32)macaddr_high | NES_MAC_ADDR_VALID |
+ ((((u32)(1<<mc_nic_index)) << 16)));
+ multicast_addr = multicast_addr->next;
+ } else {
+ nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n",
+ perfect_filter_register_address+(mc_index * 8));
+ nes_write_indexed(nesdev,
+ perfect_filter_register_address+4+(mc_index * 8),
+ 0);
+ }
+ }
+ }
+}
+
+
+/**
+ * nes_netdev_change_mtu
+ */
+static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ int ret = 0;
+ u8 jumbomode=0;
+
+ if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
+ return -EINVAL;
+
+ netdev->mtu = new_mtu;
+ nesvnic->max_frame_size = new_mtu+ETH_HLEN;
+
+ if (netdev->mtu > 1500) {
+ jumbomode=1;
+ }
+ nes_nic_init_timer_defaults(nesdev, jumbomode);
+
+ if (netif_running(netdev)) {
+ nes_netdev_stop(netdev);
+ nes_netdev_open(netdev);
+ }
+
+ return ret;
+}
+
+
+/**
+ * nes_netdev_exit - destroy network device
+ */
+void nes_netdev_exit(struct nes_vnic *nesvnic)
+{
+ struct net_device *netdev = nesvnic->netdev;
+ struct nes_ib_device *nesibdev = nesvnic->nesibdev;
+
+ nes_debug(NES_DBG_SHUTDOWN, "\n");
+
+	/* destroy the ibdevice if RDMA enabled */
+	if ((nesvnic->rdma_enabled) && (nesvnic->of_device_registered)) {
+		nes_destroy_ofa_device(nesibdev);
+ nesvnic->of_device_registered = 0;
+ nesvnic->nesibdev = NULL;
+ }
+ unregister_netdev(netdev);
+ nes_debug(NES_DBG_SHUTDOWN, "\n");
+}
+
+
+#define NES_ETHTOOL_STAT_COUNT 55
+static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN] = {
+ "Link Change Interrupts",
+ "Linearized SKBs",
+ "T/GSO Requests",
+ "Pause Frames Sent",
+ "Pause Frames Received",
+ "Internal Routing Errors",
+ "SQ SW Dropped SKBs",
+ "SQ Locked",
+ "SQ Full",
+ "Segmented TSO Requests",
+ "Rx Symbol Errors",
+ "Rx Jabber Errors",
+ "Rx Oversized Frames",
+ "Rx Short Frames",
+ "Endnode Rx Discards",
+ "Endnode Rx Octets",
+ "Endnode Rx Frames",
+ "Endnode Tx Octets",
+ "Endnode Tx Frames",
+ "mh detected",
+ "mh pauses",
+ "Retransmission Count",
+ "CM Connects",
+ "CM Accepts",
+ "Disconnects",
+ "Connected Events",
+ "Connect Requests",
+ "CM Rejects",
+ "ModifyQP Timeouts",
+ "CreateQPs",
+ "SW DestroyQPs",
+ "DestroyQPs",
+ "CM Closes",
+ "CM Packets Sent",
+ "CM Packets Bounced",
+ "CM Packets Created",
+ "CM Packets Rcvd",
+ "CM Packets Dropped",
+ "CM Packets Retrans",
+ "CM Listens Created",
+ "CM Listens Destroyed",
+ "CM Backlog Drops",
+ "CM Loopbacks",
+ "CM Nodes Created",
+ "CM Nodes Destroyed",
+ "CM Accel Drops",
+ "CM Resets Received",
+ "Timer Inits",
+ "CQ Depth 1",
+ "CQ Depth 4",
+ "CQ Depth 16",
+ "CQ Depth 24",
+ "CQ Depth 32",
+ "CQ Depth 128",
+ "CQ Depth 256",
+};
+
+
+/**
+ * nes_netdev_get_rx_csum
+ */
+static u32 nes_netdev_get_rx_csum (struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ if (nesvnic->rx_checksum_disabled)
+ return 0;
+ else
+ return 1;
+}
+
+
+/**
+ * nes_netdev_set_rx_csum
+ */
+static int nes_netdev_set_rx_csum(struct net_device *netdev, u32 enable)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ if (enable)
+ nesvnic->rx_checksum_disabled = 0;
+ else
+ nesvnic->rx_checksum_disabled = 1;
+ return 0;
+}
+
+
+/**
+ * nes_netdev_get_stats_count
+ */
+static int nes_netdev_get_stats_count(struct net_device *netdev)
+{
+ return NES_ETHTOOL_STAT_COUNT;
+}
+
+
+/**
+ * nes_netdev_get_strings
+ */
+static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *ethtool_strings)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(ethtool_strings,
+ &nes_ethtool_stringset,
+ sizeof(nes_ethtool_stringset));
+}
+
+
+/**
+ * nes_netdev_get_ethtool_stats
+ */
+static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
+{
+ u64 u64temp;
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u32 nic_count;
+ u32 u32temp;
+
+ target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
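+	/* target_stat_values[] indices follow the order of nes_ethtool_stringset[] above */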
+ target_stat_values[0] = nesvnic->nesdev->link_status_interrupts;
+ target_stat_values[1] = nesvnic->linearized_skbs;
+ target_stat_values[2] = nesvnic->tso_requests;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_pause_frames_sent += u32temp;
+ target_stat_values[3] = nesvnic->nesdev->mac_pause_frames_sent;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
+ nesvnic->nesdev->mac_pause_frames_received += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_PORT_RX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
+ nesvnic->nesdev->port_rx_discards += u32temp;
+ nesvnic->netstats.rx_dropped += u32temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_PORT_TX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
+ nesvnic->nesdev->port_tx_discards += u32temp;
+ nesvnic->netstats.tx_dropped += u32temp;
+
+ for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
+ if (nesvnic->qp_nic_index[nic_count] == 0xf)
+ break;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_DISCARD +
+ (nesvnic->qp_nic_index[nic_count]*0x200));
+ nesvnic->netstats.rx_dropped += u32temp;
+ nesvnic->endnode_nstat_rx_discard += u32temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO +
+ (nesvnic->qp_nic_index[nic_count]*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI +
+ (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
+
+ nesvnic->endnode_nstat_rx_octets += u64temp;
+ nesvnic->netstats.rx_bytes += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO +
+ (nesvnic->qp_nic_index[nic_count]*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI +
+ (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
+
+ nesvnic->endnode_nstat_rx_frames += u64temp;
+ nesvnic->netstats.rx_packets += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO +
+ (nesvnic->qp_nic_index[nic_count]*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI +
+ (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
+
+ nesvnic->endnode_nstat_tx_octets += u64temp;
+ nesvnic->netstats.tx_bytes += u64temp;
+
+ u64temp = (u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO +
+ (nesvnic->qp_nic_index[nic_count]*0x200));
+ u64temp += ((u64)nes_read_indexed(nesdev,
+ NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI +
+ (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
+
+ nesvnic->endnode_nstat_tx_frames += u64temp;
+ nesvnic->netstats.tx_packets += u64temp;
+
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_IPV4_TCP_REXMITS + (nesvnic->qp_nic_index[nic_count]*0x200));
+ nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
+ }
+
+ target_stat_values[4] = nesvnic->nesdev->mac_pause_frames_received;
+ target_stat_values[5] = nesdev->nesadapter->nic_rx_eth_route_err;
+ target_stat_values[6] = nesvnic->tx_sw_dropped;
+ target_stat_values[7] = nesvnic->sq_locked;
+ target_stat_values[8] = nesvnic->sq_full;
+ target_stat_values[9] = nesvnic->segmented_tso_requests;
+ target_stat_values[10] = nesvnic->nesdev->mac_rx_symbol_err_frames;
+ target_stat_values[11] = nesvnic->nesdev->mac_rx_jabber_frames;
+ target_stat_values[12] = nesvnic->nesdev->mac_rx_oversized_frames;
+ target_stat_values[13] = nesvnic->nesdev->mac_rx_short_frames;
+ target_stat_values[14] = nesvnic->endnode_nstat_rx_discard;
+ target_stat_values[15] = nesvnic->endnode_nstat_rx_octets;
+ target_stat_values[16] = nesvnic->endnode_nstat_rx_frames;
+ target_stat_values[17] = nesvnic->endnode_nstat_tx_octets;
+ target_stat_values[18] = nesvnic->endnode_nstat_tx_frames;
+ target_stat_values[19] = mh_detected;
+ target_stat_values[20] = mh_pauses_sent;
+ target_stat_values[21] = nesvnic->endnode_ipv4_tcp_retransmits;
+ target_stat_values[22] = atomic_read(&cm_connects);
+ target_stat_values[23] = atomic_read(&cm_accepts);
+ target_stat_values[24] = atomic_read(&cm_disconnects);
+ target_stat_values[25] = atomic_read(&cm_connecteds);
+ target_stat_values[26] = atomic_read(&cm_connect_reqs);
+ target_stat_values[27] = atomic_read(&cm_rejects);
+ target_stat_values[28] = atomic_read(&mod_qp_timouts);
+ target_stat_values[29] = atomic_read(&qps_created);
+ target_stat_values[30] = atomic_read(&sw_qps_destroyed);
+ target_stat_values[31] = atomic_read(&qps_destroyed);
+ target_stat_values[32] = atomic_read(&cm_closes);
+ target_stat_values[33] = cm_packets_sent;
+ target_stat_values[34] = cm_packets_bounced;
+ target_stat_values[35] = cm_packets_created;
+ target_stat_values[36] = cm_packets_received;
+ target_stat_values[37] = cm_packets_dropped;
+ target_stat_values[38] = cm_packets_retrans;
+ target_stat_values[39] = cm_listens_created;
+ target_stat_values[40] = cm_listens_destroyed;
+ target_stat_values[41] = cm_backlog_drops;
+ target_stat_values[42] = atomic_read(&cm_loopbacks);
+ target_stat_values[43] = atomic_read(&cm_nodes_created);
+ target_stat_values[44] = atomic_read(&cm_nodes_destroyed);
+ target_stat_values[45] = atomic_read(&cm_accel_dropped_pkts);
+ target_stat_values[46] = atomic_read(&cm_resets_recvd);
+ target_stat_values[47] = int_mod_timer_init;
+ target_stat_values[48] = int_mod_cq_depth_1;
+ target_stat_values[49] = int_mod_cq_depth_4;
+ target_stat_values[50] = int_mod_cq_depth_16;
+ target_stat_values[51] = int_mod_cq_depth_24;
+ target_stat_values[52] = int_mod_cq_depth_32;
+ target_stat_values[53] = int_mod_cq_depth_128;
+ target_stat_values[54] = int_mod_cq_depth_256;
+
+}
+
+
+/**
+ * nes_netdev_get_drvinfo
+ */
+static void nes_netdev_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ strcpy(drvinfo->driver, DRV_NAME);
+ strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev));
+ strcpy(drvinfo->fw_version, "TBD");
+ strcpy(drvinfo->version, DRV_VERSION);
+ drvinfo->n_stats = nes_netdev_get_stats_count(netdev);
+ drvinfo->testinfo_len = 0;
+ drvinfo->eedump_len = 0;
+ drvinfo->regdump_len = 0;
+}
+
+
+/**
+ * nes_netdev_set_coalesce
+ */
+static int nes_netdev_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
+ if (et_coalesce->rx_max_coalesced_frames_low) {
+ shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low;
+ }
+ if (et_coalesce->rx_max_coalesced_frames_irq) {
+ shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq;
+ }
+ if (et_coalesce->rx_max_coalesced_frames_high) {
+ shared_timer->threshold_high = et_coalesce->rx_max_coalesced_frames_high;
+ }
+ if (et_coalesce->rx_coalesce_usecs_low) {
+ shared_timer->timer_in_use_min = et_coalesce->rx_coalesce_usecs_low;
+ }
+ if (et_coalesce->rx_coalesce_usecs_high) {
+ shared_timer->timer_in_use_max = et_coalesce->rx_coalesce_usecs_high;
+ }
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+
+ /* using this to drive total interrupt moderation */
+ nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq;
+ if (et_coalesce->use_adaptive_rx_coalesce) {
+ nesadapter->et_use_adaptive_rx_coalesce = 1;
+ nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
+ nesadapter->et_rx_coalesce_usecs_irq = 0;
+ if (et_coalesce->pkt_rate_low) {
+ nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low;
+ }
+ } else {
+ nesadapter->et_use_adaptive_rx_coalesce = 0;
+ nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
+ if (nesadapter->et_rx_coalesce_usecs_irq) {
+ nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
+ 0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8)));
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * nes_netdev_get_coalesce
+ */
+static int nes_netdev_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *et_coalesce)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct ethtool_coalesce temp_et_coalesce;
+ struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
+ unsigned long flags;
+
+ memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce));
+ temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq;
+ temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce;
+ temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval;
+ temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low;
+ spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
+ temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low;
+ temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target;
+ temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high;
+ temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min;
+ temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max;
+ if (nesadapter->et_use_adaptive_rx_coalesce) {
+ temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use;
+ }
+ spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
+ memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce));
+ return 0;
+}
+
+
+/**
+ * nes_netdev_get_pauseparam
+ */
+static void nes_netdev_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *et_pauseparam)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ et_pauseparam->autoneg = 0;
+ et_pauseparam->rx_pause = (nesvnic->nesdev->disable_rx_flow_control == 0) ? 1:0;
+ et_pauseparam->tx_pause = (nesvnic->nesdev->disable_tx_flow_control == 0) ? 1:0;
+}
+
+
+/**
+ * nes_netdev_set_pauseparam
+ */
+static int nes_netdev_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *et_pauseparam)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u32 u32temp;
+
+ if (et_pauseparam->autoneg) {
+ /* TODO: should return unsupported */
+ return 0;
+ }
+ if ((et_pauseparam->tx_pause == 1) && (nesdev->disable_tx_flow_control == 1)) {
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
+ u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
+ nes_write_indexed(nesdev,
+				NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
+ nesdev->disable_tx_flow_control = 0;
+ } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
+ u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
+ nes_write_indexed(nesdev,
+				NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
+ nesdev->disable_tx_flow_control = 1;
+ }
+ if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
+ u32temp &= ~NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
+ nes_write_indexed(nesdev,
+ NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
+ nesdev->disable_rx_flow_control = 0;
+ } else if ((et_pauseparam->rx_pause == 0) && (nesdev->disable_rx_flow_control == 0)) {
+ u32temp = nes_read_indexed(nesdev,
+ NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
+ u32temp |= NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
+ nes_write_indexed(nesdev,
+ NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
+ nesdev->disable_rx_flow_control = 1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_netdev_get_settings
+ */
+static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u16 phy_data;
+
+ et_cmd->duplex = DUPLEX_FULL;
+ et_cmd->port = PORT_MII;
+ if (nesadapter->OneG_Mode) {
+ et_cmd->supported = SUPPORTED_1000baseT_Full|SUPPORTED_Autoneg;
+ et_cmd->advertising = ADVERTISED_1000baseT_Full|ADVERTISED_Autoneg;
+ et_cmd->speed = SPEED_1000;
+ nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
+ &phy_data);
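+		/* 0x1000 is the MII autoneg-enable bit (BMCR_ANENABLE) of the PHY control register */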
+ if (phy_data&0x1000) {
+ et_cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ et_cmd->autoneg = AUTONEG_DISABLE;
+ }
+ et_cmd->transceiver = XCVR_EXTERNAL;
+ et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
+ } else {
+ if (nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) {
+ et_cmd->transceiver = XCVR_EXTERNAL;
+ et_cmd->port = PORT_FIBRE;
+ et_cmd->supported = SUPPORTED_FIBRE;
+ et_cmd->advertising = ADVERTISED_FIBRE;
+ et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
+ } else {
+ et_cmd->transceiver = XCVR_INTERNAL;
+ et_cmd->supported = SUPPORTED_10000baseT_Full;
+ et_cmd->advertising = ADVERTISED_10000baseT_Full;
+ et_cmd->phy_address = nesdev->mac_index;
+ }
+ et_cmd->speed = SPEED_10000;
+ et_cmd->autoneg = AUTONEG_DISABLE;
+ }
+ et_cmd->maxtxpkt = 511;
+ et_cmd->maxrxpkt = 511;
+ return 0;
+}
+
+
+/**
+ * nes_netdev_set_settings
+ */
+static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u16 phy_data;
+
+ if (nesadapter->OneG_Mode) {
+ nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
+ &phy_data);
+ if (et_cmd->autoneg) {
+ /* Turn on Full duplex, Autoneg, and restart autonegotiation */
+ phy_data |= 0x1300;
+ } else {
+			/* Turn off autoneg */
+ phy_data &= ~0x1000;
+ }
+ nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
+ phy_data);
+ }
+
+ return 0;
+}
+
+
+static struct ethtool_ops nes_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_settings = nes_netdev_get_settings,
+ .set_settings = nes_netdev_set_settings,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .get_rx_csum = nes_netdev_get_rx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .get_strings = nes_netdev_get_strings,
+ .get_stats_count = nes_netdev_get_stats_count,
+ .get_ethtool_stats = nes_netdev_get_ethtool_stats,
+ .get_drvinfo = nes_netdev_get_drvinfo,
+ .get_coalesce = nes_netdev_get_coalesce,
+ .set_coalesce = nes_netdev_set_coalesce,
+ .get_pauseparam = nes_netdev_get_pauseparam,
+ .set_pauseparam = nes_netdev_set_pauseparam,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .set_rx_csum = nes_netdev_set_rx_csum,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+};
+
+
+static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ u32 u32temp;
+
+ nesvnic->vlan_grp = grp;
+
+ /* Enable/Disable VLAN Stripping */
+ u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
+ if (grp)
+ u32temp &= 0xfdffffff;
+ else
+ u32temp |= 0x02000000;
+
+ nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp);
+}
+
+
+/**
+ * nes_netdev_init - initialize network device
+ */
+struct net_device *nes_netdev_init(struct nes_device *nesdev,
+ void __iomem *mmio_addr)
+{
+ u64 u64temp;
+ struct nes_vnic *nesvnic = NULL;
+ struct net_device *netdev;
+ struct nic_qp_map *curr_qp_map;
+ u32 u32temp;
+ u16 phy_data;
+ u16 temp_phy_data;
+
+ netdev = alloc_etherdev(sizeof(struct nes_vnic));
+ if (!netdev) {
+ printk(KERN_ERR PFX "nesvnic etherdev alloc failed");
+ return NULL;
+ }
+
+ nes_debug(NES_DBG_INIT, "netdev = %p, %s\n", netdev, netdev->name);
+
+ SET_NETDEV_DEV(netdev, &nesdev->pcidev->dev);
+
+ nesvnic = netdev_priv(netdev);
+ memset(nesvnic, 0, sizeof(*nesvnic));
+
+ netdev->open = nes_netdev_open;
+ netdev->stop = nes_netdev_stop;
+ netdev->hard_start_xmit = nes_netdev_start_xmit;
+ netdev->get_stats = nes_netdev_get_stats;
+ netdev->tx_timeout = nes_netdev_tx_timeout;
+ netdev->set_mac_address = nes_netdev_set_mac_address;
+ netdev->set_multicast_list = nes_netdev_set_multicast_list;
+ netdev->change_mtu = nes_netdev_change_mtu;
+ netdev->watchdog_timeo = NES_TX_TIMEOUT;
+ netdev->irq = nesdev->pcidev->irq;
+ netdev->mtu = ETH_DATA_LEN;
+ netdev->hard_header_len = ETH_HLEN;
+ netdev->addr_len = ETH_ALEN;
+ netdev->type = ARPHRD_ETHER;
+ netdev->features = NETIF_F_HIGHDMA;
+ netdev->ethtool_ops = &nes_ethtool_ops;
+ netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
+ nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
+ netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
+ netdev->features |= NETIF_F_LLTX;
+
+ /* Fill in the port structure */
+ nesvnic->netdev = netdev;
+ nesvnic->nesdev = nesdev;
+ nesvnic->msg_enable = netif_msg_init(debug, default_msg);
+ nesvnic->netdev_index = nesdev->netdev_count;
+ nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count;
+ nesvnic->max_frame_size = netdev->mtu+netdev->hard_header_len;
+
+ curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)];
+ nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid;
+ nesvnic->nic_index = curr_qp_map[nesdev->netdev_count].nic_index;
+ nesvnic->logical_port = curr_qp_map[nesdev->netdev_count].logical_port;
+
+ /* Setup the burned in MAC address */
+ u64temp = (u64)nesdev->nesadapter->mac_addr_low;
+ u64temp += ((u64)nesdev->nesadapter->mac_addr_high) << 32;
+ u64temp += nesvnic->nic_index;
+ netdev->dev_addr[0] = (u8)(u64temp>>40);
+ netdev->dev_addr[1] = (u8)(u64temp>>32);
+ netdev->dev_addr[2] = (u8)(u64temp>>24);
+ netdev->dev_addr[3] = (u8)(u64temp>>16);
+ netdev->dev_addr[4] = (u8)(u64temp>>8);
+ netdev->dev_addr[5] = (u8)u64temp;
+ memcpy(netdev->perm_addr, netdev->dev_addr, 6);
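+	/* Worked example: mac_addr_high = 0x0012, mac_addr_low = 0x34567890 and nic_index = 1 */
+	/* give u64temp = 0x001234567891, i.e. dev_addr 00:12:34:56:78:91.                     */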
+
+ if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
+		netdev->features |= NETIF_F_GSO | NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
+ } else {
+ netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ }
+
+ nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
+ " nic_index = %d, logical_port = %d, mac_index = %d.\n",
+ nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id,
+ nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index);
+
+ if (nesvnic->nesdev->nesadapter->port_count == 1) {
+ nesvnic->qp_nic_index[0] = nesvnic->nic_index;
+ nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1;
+ if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) {
+ nesvnic->qp_nic_index[2] = 0xf;
+ nesvnic->qp_nic_index[3] = 0xf;
+ } else {
+ nesvnic->qp_nic_index[2] = nesvnic->nic_index + 2;
+ nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3;
+ }
+ } else {
+ if (nesvnic->nesdev->nesadapter->port_count == 2) {
+ nesvnic->qp_nic_index[0] = nesvnic->nic_index;
+ nesvnic->qp_nic_index[1] = nesvnic->nic_index + 2;
+ nesvnic->qp_nic_index[2] = 0xf;
+ nesvnic->qp_nic_index[3] = 0xf;
+ } else {
+ nesvnic->qp_nic_index[0] = nesvnic->nic_index;
+ nesvnic->qp_nic_index[1] = 0xf;
+ nesvnic->qp_nic_index[2] = 0xf;
+ nesvnic->qp_nic_index[3] = 0xf;
+ }
+ }
+ nesvnic->next_qp_nic_index = 0;
+
+ if (nesdev->netdev_count == 0) {
+ nesvnic->rdma_enabled = 1;
+ } else {
+ nesvnic->rdma_enabled = 0;
+ }
+ nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
+ spin_lock_init(&nesvnic->tx_lock);
+ nesdev->netdev[nesdev->netdev_count] = netdev;
+
+ nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
+ nesvnic, nesdev->mac_index);
+ list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]);
+
+ if ((nesdev->netdev_count == 0) &&
+ (PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index)) {
+ nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
+ NES_IDX_PHY_PCS_CONTROL_STATUS0+(0x200*(nesvnic->logical_port&1)));
+ u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
+ (0x200*(nesvnic->logical_port&1)));
+ u32temp |= 0x00200000;
+ nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
+ (0x200*(nesvnic->logical_port&1)), u32temp);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
+ (0x200*(nesvnic->logical_port&1)) );
+ if ((u32temp&0x0f1f0000) == 0x0f0f0000) {
+ if (nesdev->nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) {
+ nes_init_phy(nesdev);
+ nes_read_10G_phy_reg(nesdev, 1,
+ nesdev->nesadapter->phy_index[nesvnic->logical_port]);
+ temp_phy_data = (u16)nes_read_indexed(nesdev,
+ NES_IDX_MAC_MDIO_CONTROL);
+ u32temp = 20;
+ do {
+ nes_read_10G_phy_reg(nesdev, 1,
+ nesdev->nesadapter->phy_index[nesvnic->logical_port]);
+ phy_data = (u16)nes_read_indexed(nesdev,
+ NES_IDX_MAC_MDIO_CONTROL);
+ if ((phy_data == temp_phy_data) || (!(--u32temp)))
+ break;
+ temp_phy_data = phy_data;
+ } while (1);
+ if (phy_data & 4) {
+ nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
+ nesvnic->linkup = 1;
+ } else {
+ nes_debug(NES_DBG_INIT, "The Link is DOWN!!.\n");
+ }
+ } else {
+ nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
+ nesvnic->linkup = 1;
+ }
+ }
+ nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n");
+ /* clear the MAC interrupt status, assumes direct logical to physical mapping */
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port));
+ nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port), u32temp);
+
+ if (nesdev->nesadapter->phy_type[nesvnic->logical_port] != NES_PHY_TYPE_IRIS)
+ nes_init_phy(nesdev);
+
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesvnic->logical_port),
+ ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
+ NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
+ }
+
+ return netdev;
+}
+
+
+/**
+ * nes_netdev_destroy - destroy network device structure
+ */
+void nes_netdev_destroy(struct net_device *netdev)
+{
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+
+ /* make sure 'stop' method is called by Linux stack */
+ /* nes_netdev_stop(netdev); */
+
+ list_del(&nesvnic->list);
+
+ if (nesvnic->of_device_registered) {
+ nes_destroy_ofa_device(nesvnic->nesibdev);
+ }
+
+ free_netdev(netdev);
+}
+
+
+/**
+ * nes_nic_cm_xmit -- CM calls this to send out pkts
+ */
+int nes_nic_cm_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ int ret;
+
+ skb->dev = netdev;
+ ret = dev_queue_xmit(skb);
+ if (ret) {
+ nes_debug(NES_DBG_CM, "Bad return code from dev_queue_xmit %d\n", ret);
+ }
+
+ return ret;
+}
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
new file mode 100644
index 000000000000..e64306bce80b
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_user.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect. All rights reserved.
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef NES_USER_H
+#define NES_USER_H
+
+#include <linux/types.h>
+
+#define NES_ABI_USERSPACE_VER 1
+#define NES_ABI_KERNEL_VER 1
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
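+
+/*
+ * For example, userspace hands a queue buffer to the kernel by casting the
+ * pointer to __u64 (typically via (unsigned long)) and storing it in a field
+ * such as nes_create_qp_req.user_wqe_buffers below, rather than declaring a
+ * pointer member in the request struct.
+ */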
+
+struct nes_alloc_ucontext_req {
+ __u32 reserved32;
+ __u8 userspace_ver;
+ __u8 reserved8[3];
+};
+
+struct nes_alloc_ucontext_resp {
+ __u32 max_pds; /* maximum pds allowed for this user process */
+ __u32 max_qps; /* maximum qps allowed for this user process */
+ __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
+ __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
+ __u8 kernel_ver;
+ __u8 reserved[2];
+};
+
+struct nes_alloc_pd_resp {
+ __u32 pd_id;
+ __u32 mmap_db_index;
+};
+
+struct nes_create_cq_req {
+ __u64 user_cq_buffer;
+ __u32 mcrqf;
+ __u8 reserved[4];
+};
+
+struct nes_create_qp_req {
+ __u64 user_wqe_buffers;
+};
+
+enum iwnes_memreg_type {
+ IWNES_MEMREG_TYPE_MEM = 0x0000,
+ IWNES_MEMREG_TYPE_QP = 0x0001,
+ IWNES_MEMREG_TYPE_CQ = 0x0002,
+ IWNES_MEMREG_TYPE_MW = 0x0003,
+ IWNES_MEMREG_TYPE_FMR = 0x0004,
+};
+
+struct nes_mem_reg_req {
+ __u32 reg_type; /* indicates if id is memory, QP or CQ */
+ __u32 reserved;
+};
+
+struct nes_create_cq_resp {
+ __u32 cq_id;
+ __u32 cq_size;
+ __u32 mmap_db_index;
+ __u32 reserved;
+};
+
+struct nes_create_qp_resp {
+ __u32 qp_id;
+ __u32 actual_sq_size;
+ __u32 actual_rq_size;
+ __u32 mmap_sq_db_index;
+ __u32 mmap_rq_db_index;
+ __u32 nes_drv_opt;
+};
+
+#endif /* NES_USER_H */
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
new file mode 100644
index 000000000000..c4ec6ac63461
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -0,0 +1,917 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+
+#include "nes.h"
+
+
+
+static u16 nes_read16_eeprom(void __iomem *addr, u16 offset);
+
+u32 mh_detected;
+u32 mh_pauses_sent;
+
+/**
+ * nes_read_eeprom_values -
+ */
+int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesadapter)
+{
+ u32 mac_addr_low;
+ u16 mac_addr_high;
+ u16 eeprom_data;
+ u16 eeprom_offset;
+ u16 next_section_address;
+ u16 sw_section_ver;
+ u8 major_ver = 0;
+ u8 minor_ver = 0;
+
+ /* TODO: deal with EEPROM endian issues */
+ if (nesadapter->firmware_eeprom_offset == 0) {
+ /* Read the EEPROM Parameters */
+ eeprom_data = nes_read16_eeprom(nesdev->regs, 0);
+ nes_debug(NES_DBG_HW, "EEPROM Offset 0 = 0x%04X\n", eeprom_data);
+ eeprom_offset = 2 + (((eeprom_data & 0x007f) << 3) <<
+ ((eeprom_data & 0x0080) >> 7));
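+		/* e.g. eeprom_data = 0x0085: (0x05 << 3) = 0x28, shifted left once more */
+		/* because bit 7 is set, so eeprom_offset = 2 + 0x50 = 0x52              */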
+ nes_debug(NES_DBG_HW, "Firmware Offset = 0x%04X\n", eeprom_offset);
+ nesadapter->firmware_eeprom_offset = eeprom_offset;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4);
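+		/* section signatures are two ASCII characters, e.g. 0x5746 == "WF", 0x5753 == "WS" */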
+ if (eeprom_data != 0x5746) {
+ nes_debug(NES_DBG_HW, "Not a valid Firmware Image = 0x%04X\n", eeprom_data);
+ return -1;
+ }
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ eeprom_offset += ((eeprom_data & 0x00ff) << 3) << ((eeprom_data & 0x0100) >> 8);
+ nes_debug(NES_DBG_HW, "Software Offset = 0x%04X\n", eeprom_offset);
+ nesadapter->software_eeprom_offset = eeprom_offset;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4);
+ if (eeprom_data != 0x5753) {
+ printk("Not a valid Software Image = 0x%04X\n", eeprom_data);
+ return -1;
+ }
+ sw_section_ver = nes_read16_eeprom(nesdev->regs, nesadapter->software_eeprom_offset + 6);
+ nes_debug(NES_DBG_HW, "Software section version number = 0x%04X\n",
+ sw_section_ver);
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) <<
+ ((eeprom_data & 0x0100) >> 8));
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x414d) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_offset = next_section_address;
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) <<
+ ((eeprom_data & 0x0100) >> 8));
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x4f52) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x4f52 but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_offset = next_section_address;
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x5746) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5746 but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_offset = next_section_address;
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x5753) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5753 but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_offset = next_section_address;
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x414d) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_offset = next_section_address;
+
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
+ nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
+ eeprom_offset + 2, eeprom_data);
+ next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
+ if (eeprom_data != 0x464e) {
+ nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x464e but was 0x%04X\n",
+ eeprom_data);
+ goto no_fw_rev;
+ }
+ eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 8);
+ printk(PFX "Firmware version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data);
+ major_ver = (u8)(eeprom_data >> 8);
+ minor_ver = (u8)(eeprom_data);
+
+ if (nes_drv_opt & NES_DRV_OPT_DISABLE_VIRT_WQ) {
+ nes_debug(NES_DBG_HW, "Virtual WQs have been disabled\n");
+ } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
+ nesadapter->virtwq = 1;
+ }
+ nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
+ (u32)((u8)eeprom_data);
+
+no_fw_rev:
+ /* eeprom is valid */
+ eeprom_offset = nesadapter->software_eeprom_offset;
+ eeprom_offset += 8;
+ nesadapter->netdev_max = (u8)nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ mac_addr_high = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ mac_addr_low = (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ mac_addr_low <<= 16;
+ mac_addr_low += (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "Base MAC Address = 0x%04X%08X\n",
+ mac_addr_high, mac_addr_low);
+ nes_debug(NES_DBG_HW, "MAC Address count = %u\n", nesadapter->netdev_max);
+
+ nesadapter->mac_addr_low = mac_addr_low;
+ nesadapter->mac_addr_high = mac_addr_high;
+
+ /* Read the Phy Type array */
+ eeprom_offset += 10;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->phy_type[0] = (u8)(eeprom_data >> 8);
+ nesadapter->phy_type[1] = (u8)eeprom_data;
+
+ /* Read the port array */
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->phy_type[2] = (u8)(eeprom_data >> 8);
+ nesadapter->phy_type[3] = (u8)eeprom_data;
+ /* port_count is set by soft reset reg */
+ nes_debug(NES_DBG_HW, "port_count = %u, port 0 -> %u, port 1 -> %u,"
+ " port 2 -> %u, port 3 -> %u\n",
+ nesadapter->port_count,
+ nesadapter->phy_type[0], nesadapter->phy_type[1],
+ nesadapter->phy_type[2], nesadapter->phy_type[3]);
+
+ /* Read PD config array */
+ eeprom_offset += 10;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_size[0] = eeprom_data;
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_base[0] = eeprom_data;
+ nes_debug(NES_DBG_HW, "PD0 config, size=0x%04x, base=0x%04x\n",
+ nesadapter->pd_config_size[0], nesadapter->pd_config_base[0]);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_size[1] = eeprom_data;
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_base[1] = eeprom_data;
+ nes_debug(NES_DBG_HW, "PD1 config, size=0x%04x, base=0x%04x\n",
+ nesadapter->pd_config_size[1], nesadapter->pd_config_base[1]);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_size[2] = eeprom_data;
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_base[2] = eeprom_data;
+ nes_debug(NES_DBG_HW, "PD2 config, size=0x%04x, base=0x%04x\n",
+ nesadapter->pd_config_size[2], nesadapter->pd_config_base[2]);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_size[3] = eeprom_data;
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->pd_config_base[3] = eeprom_data;
+ nes_debug(NES_DBG_HW, "PD3 config, size=0x%04x, base=0x%04x\n",
+ nesadapter->pd_config_size[3], nesadapter->pd_config_base[3]);
+
+ /* Read Rx Pool Size */
+ eeprom_offset += 22; /* 46 */
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->rx_pool_size = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "rx_pool_size = 0x%08X\n", nesadapter->rx_pool_size);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->tx_pool_size = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "tx_pool_size = 0x%08X\n", nesadapter->tx_pool_size);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->rx_threshold = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "rx_threshold = 0x%08X\n", nesadapter->rx_threshold);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->tcp_timer_core_clk_divisor = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "tcp_timer_core_clk_divisor = 0x%08X\n",
+ nesadapter->tcp_timer_core_clk_divisor);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->iwarp_config = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "iwarp_config = 0x%08X\n", nesadapter->iwarp_config);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->cm_config = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "cm_config = 0x%08X\n", nesadapter->cm_config);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->sws_timer_config = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "sws_timer_config = 0x%08X\n", nesadapter->sws_timer_config);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->tcp_config1 = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "tcp_config1 = 0x%08X\n", nesadapter->tcp_config1);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->wqm_wat = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "wqm_wat = 0x%08X\n", nesadapter->wqm_wat);
+
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ eeprom_offset += 2;
+ nesadapter->core_clock = (((u32)eeprom_data) << 16) +
+ nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nes_debug(NES_DBG_HW, "core_clock = 0x%08X\n", nesadapter->core_clock);
+
+ if ((sw_section_ver) && (nesadapter->hw_rev != NE020_REV)) {
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->phy_index[0] = (eeprom_data & 0xff00)>>8;
+ nesadapter->phy_index[1] = eeprom_data & 0x00ff;
+ eeprom_offset += 2;
+ eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
+ nesadapter->phy_index[2] = (eeprom_data & 0xff00)>>8;
+ nesadapter->phy_index[3] = eeprom_data & 0x00ff;
+ } else {
+ nesadapter->phy_index[0] = 4;
+ nesadapter->phy_index[1] = 5;
+ nesadapter->phy_index[2] = 6;
+ nesadapter->phy_index[3] = 7;
+ }
+ nes_debug(NES_DBG_HW, "Phy address map = 0 > %u, 1 > %u, 2 > %u, 3 > %u\n",
+ nesadapter->phy_index[0],nesadapter->phy_index[1],
+ nesadapter->phy_index[2],nesadapter->phy_index[3]);
+ }
+
+ return 0;
+}
+
+
+/**
+ * nes_read16_eeprom
+ */
+static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
+{
+ writel(NES_EEPROM_READ_REQUEST + (offset >> 1),
+ (void __iomem *)addr + NES_EEPROM_COMMAND);
+
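+	/* busy-wait until the EEPROM controller clears the read-request bit */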
+ do {
+ } while (readl((void __iomem *)addr + NES_EEPROM_COMMAND) &
+ NES_EEPROM_READ_REQUEST);
+
+ return readw((void __iomem *)addr + NES_EEPROM_DATA);
+}
+
+
+/**
+ * nes_write_1G_phy_reg
+ */
+void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 u32temp;
+ u32 counter;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1))
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+}
+
+
+/**
+ * nes_read_1G_phy_reg
+ * This routine issues the read and returns the result through *data.
+ */
+void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 u32temp;
+ u32 counter;
+ unsigned long flags;
+
+ /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
+ phy_addr, nesdev->mac_index); */
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1)) {
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+ *data = 0xffff;
+ } else {
+ *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ }
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+}
+
+
+/**
+ * nes_write_10G_phy_reg - write @data to register @phy_reg of the 10G
+ * PHY at @phy_addr using an address cycle followed by a data cycle.
+ */
+void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg,
+ u8 phy_addr, u16 data)
+{
+ u32 dev_addr;
+ u32 port_addr;
+ u32 u32temp;
+ u32 counter;
+
+ dev_addr = 1;
+ port_addr = phy_addr;
+
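+	/* The 10G PHY is accessed with an indirect (clause-45 style) sequence:
+	 * first write the register address, then issue the data cycle.  Bit 0
+	 * of the MAC interrupt status signals completion of each cycle. */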
+ /* set address */
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1))
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+
+ /* set data */
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x10020000 | (u32)data | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1))
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+}
+
+
+/**
+ * nes_read_10G_phy_reg
+ * This routine only issues the read, the data must be read
+ * separately.
+ */
+void nes_read_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg, u8 phy_addr)
+{
+ u32 dev_addr;
+ u32 port_addr;
+ u32 u32temp;
+ u32 counter;
+
+ dev_addr = 1;
+ port_addr = phy_addr;
+
+ /* set address */
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1))
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+
+ /* issue read */
+ nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
+ 0x30020000 | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
+ for (counter = 0; counter < 100 ; counter++) {
+ udelay(30);
+ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
+ if (u32temp & 1) {
+ nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
+ break;
+ }
+ }
+ if (!(u32temp & 1))
+ nes_debug(NES_DBG_PHY, "Phy is not responding. interrupt status = 0x%X.\n",
+ u32temp);
+}
+
+
+/**
+ * nes_get_cqp_request - return a CQP request taken from the preallocated
+ * free list, or a dynamically allocated one (marked dynamic so that it is
+ * freed on completion) when the list is empty.
+ */
+struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
+{
+ unsigned long flags;
+ struct nes_cqp_request *cqp_request = NULL;
+
+ if (!list_empty(&nesdev->cqp_avail_reqs)) {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ cqp_request = list_entry(nesdev->cqp_avail_reqs.next,
+ struct nes_cqp_request, list);
+ list_del_init(&cqp_request->list);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ } else {
+ cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL);
+ if (cqp_request) {
+ cqp_request->dynamic = 1;
+ INIT_LIST_HEAD(&cqp_request->list);
+ }
+ }
+
+	if (cqp_request) {
+		init_waitqueue_head(&cqp_request->waitq);
+		cqp_request->waiting = 0;
+		cqp_request->request_done = 0;
+		cqp_request->callback = 0;
+		nes_debug(NES_DBG_CQP, "Got cqp request %p from the available list\n",
+				cqp_request);
+	} else
+		printk(KERN_ERR PFX "%s: Could not allocate a CQP request.\n",
+			__FUNCTION__);
+
+ return cqp_request;
+}
+
+
+/**
+ * nes_post_cqp_request - copy the request's WQE onto the CQP SQ and
+ * optionally ring the doorbell; if the SQ is full or other requests are
+ * already pending, the request is queued on the pending list instead.
+ */
+void nes_post_cqp_request(struct nes_device *nesdev,
+ struct nes_cqp_request *cqp_request, int ring_doorbell)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ unsigned long flags;
+ u32 cqp_head;
+ u64 u64temp;
+
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+
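+	/* Ring-space check: the head may only advance while at least one free
+	 * slot remains on the CQP SQ.  If the SQ is full, or requests are
+	 * already waiting, put this request on the pending list instead. */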
+ if (((((nesdev->cqp.sq_tail+(nesdev->cqp.sq_size*2))-nesdev->cqp.sq_head) &
+ (nesdev->cqp.sq_size - 1)) != 1)
+ && (list_empty(&nesdev->cqp_pending_reqs))) {
+ cqp_head = nesdev->cqp.sq_head++;
+ nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
+ cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
+ memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe));
+ barrier();
+ u64temp = (unsigned long)cqp_request;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_SCRATCH_LOW_IDX,
+ u64temp);
+ nes_debug(NES_DBG_CQP, "CQP request (opcode 0x%02X), line 1 = 0x%08X put on CQPs SQ,"
+ " request = %p, cqp_head = %u, cqp_tail = %u, cqp_size = %u,"
+ " waiting = %d, refcount = %d.\n",
+ le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f,
+ le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX]), cqp_request,
+ nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size,
+ cqp_request->waiting, atomic_read(&cqp_request->refcount));
+ barrier();
+ if (ring_doorbell) {
+ /* Ring doorbell (1 WQEs) */
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id);
+ }
+
+ barrier();
+ } else {
+ nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X), line 1 = 0x%08X"
+ " put on the pending queue.\n",
+ cqp_request,
+ le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f,
+ le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_ID_IDX]));
+ list_add_tail(&cqp_request->list, &nesdev->cqp_pending_reqs);
+ }
+
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+
+ return;
+}
+
+
+/**
+ * nes_arp_table - add, resolve, or delete an entry in the adapter's
+ * software ARP cache according to @action; returns the ARP index on
+ * success or -1 on failure.
+ */
+int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 action)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ int arp_index;
+ int err = 0;
+
+ for (arp_index = 0; (u32) arp_index < nesadapter->arp_table_size; arp_index++) {
+ if (nesadapter->arp_table[arp_index].ip_addr == ip_addr)
+ break;
+ }
+
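+	/* arp_index == arp_table_size means the address was not found:
+	 * required for ADD, an error for DELETE or RESOLVE. */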
+ if (action == NES_ARP_ADD) {
+ if (arp_index != nesadapter->arp_table_size) {
+ return -1;
+ }
+
+ arp_index = 0;
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_arps,
+ nesadapter->arp_table_size, (u32 *)&arp_index, &nesadapter->next_arp_index);
+ if (err) {
+ nes_debug(NES_DBG_NETDEV, "nes_alloc_resource returned error = %u\n", err);
+ return err;
+ }
+ nes_debug(NES_DBG_NETDEV, "ADD, arp_index=%d\n", arp_index);
+
+ nesadapter->arp_table[arp_index].ip_addr = ip_addr;
+ memcpy(nesadapter->arp_table[arp_index].mac_addr, mac_addr, ETH_ALEN);
+ return arp_index;
+ }
+
+ /* DELETE or RESOLVE */
+ if (arp_index == nesadapter->arp_table_size) {
+ nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n");
+ return -1;
+ }
+
+ if (action == NES_ARP_RESOLVE) {
+ nes_debug(NES_DBG_NETDEV, "RESOLVE, arp_index=%d\n", arp_index);
+ return arp_index;
+ }
+
+ if (action == NES_ARP_DELETE) {
+ nes_debug(NES_DBG_NETDEV, "DELETE, arp_index=%d\n", arp_index);
+ nesadapter->arp_table[arp_index].ip_addr = 0;
+ memset(nesadapter->arp_table[arp_index].mac_addr, 0x00, ETH_ALEN);
+ nes_free_resource(nesadapter, nesadapter->allocated_arps, arp_index);
+ return arp_index;
+ }
+
+ return -1;
+}
+
+
+/**
+ * nes_mh_fix - periodic (HZ/5) timer that checks whether MAC 0 is still
+ * making transmit progress; if the transmit path appears hung, it saves
+ * the MAC and SerDes configuration, resets them, and restores the saved
+ * settings.
+ */
+void nes_mh_fix(unsigned long parm)
+{
+ unsigned long flags;
+ struct nes_device *nesdev = (struct nes_device *)parm;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_vnic *nesvnic;
+ u32 used_chunks_tx;
+ u32 temp_used_chunks_tx;
+ u32 temp_last_used_chunks_tx;
+ u32 used_chunks_mask;
+ u32 mac_tx_frames_low;
+ u32 mac_tx_frames_high;
+ u32 mac_tx_pauses;
+ u32 serdes_status;
+ u32 reset_value;
+ u32 tx_control;
+ u32 tx_config;
+ u32 tx_pause_quanta;
+ u32 rx_control;
+ u32 rx_config;
+ u32 mac_exact_match;
+ u32 mpp_debug;
+ u32 i=0;
+ u32 chunks_tx_progress = 0;
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ if ((nesadapter->mac_sw_state[0] != NES_MAC_SW_IDLE) || (nesadapter->mac_link_down[0])) {
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ goto no_mh_work;
+ }
+ nesadapter->mac_sw_state[0] = NES_MAC_SW_MH;
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+ do {
+ mac_tx_frames_low = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_LOW);
+ mac_tx_frames_high = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_HIGH);
+ mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
+ used_chunks_tx = nes_read_indexed(nesdev, NES_IDX_USED_CHUNKS_TX);
+ nesdev->mac_pause_frames_sent += mac_tx_pauses;
+ used_chunks_mask = 0;
+ temp_used_chunks_tx = used_chunks_tx;
+ temp_last_used_chunks_tx = nesdev->last_used_chunks_tx;
+
+ if (nesdev->netdev[0]) {
+ nesvnic = netdev_priv(nesdev->netdev[0]);
+ } else {
+ break;
+ }
+
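+		/* Build a mask of the NIC TX queues in use and note whether any
+		 * of them consumed chunks since the last timer run. */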
+ for (i=0; i<4; i++) {
+ used_chunks_mask <<= 8;
+ if (nesvnic->qp_nic_index[i] != 0xff) {
+ used_chunks_mask |= 0xff;
+ if ((temp_used_chunks_tx&0xff)<(temp_last_used_chunks_tx&0xff)) {
+ chunks_tx_progress = 1;
+ }
+ }
+ temp_used_chunks_tx >>= 8;
+ temp_last_used_chunks_tx >>= 8;
+ }
+ if ((mac_tx_frames_low) || (mac_tx_frames_high) ||
+ (!(used_chunks_tx&used_chunks_mask)) ||
+ (!(nesdev->last_used_chunks_tx&used_chunks_mask)) ||
+ (chunks_tx_progress) ) {
+ nesdev->last_used_chunks_tx = used_chunks_tx;
+ break;
+ }
+ nesdev->last_used_chunks_tx = used_chunks_tx;
+ barrier();
+
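+		/* No TX progress seen: ask the MAC to send a pause frame (writing
+		 * 0x00000005 to TX_CONTROL presumably sets the pause-request bits).
+		 * If a pause frame goes out, the MAC is alive and no reset is needed. */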
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000005);
+ mh_pauses_sent++;
+ mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
+ if (mac_tx_pauses) {
+ nesdev->mac_pause_frames_sent += mac_tx_pauses;
+ break;
+ }
+
+ tx_control = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONTROL);
+ tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ tx_pause_quanta = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA);
+ rx_control = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONTROL);
+ rx_config = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONFIG);
+ mac_exact_match = nes_read_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM);
+ mpp_debug = nes_read_indexed(nesdev, NES_IDX_MPP_DEBUG);
+
+ /* one last ditch effort to avoid a false positive */
+ mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
+ if (mac_tx_pauses) {
+ nesdev->last_mac_tx_pauses = nesdev->mac_pause_frames_sent;
+ nes_debug(NES_DBG_HW, "failsafe caught slow outbound pause\n");
+ break;
+ }
+ mh_detected++;
+
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, 0x00000000);
+ reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
+
+ nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value | 0x0000001d);
+
+ while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
+ & 0x00000040) != 0x00000040) && (i++ < 5000)) {
+ /* mdelay(1); */
+ }
+
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
+ serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0);
+
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000);
+ if (nesadapter->OneG_Mode) {
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222);
+ } else {
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
+ }
+ serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_STATUS0);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
+
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, tx_control);
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA, tx_pause_quanta);
+ nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONTROL, rx_control);
+ nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONFIG, rx_config);
+ nes_write_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM, mac_exact_match);
+ nes_write_indexed(nesdev, NES_IDX_MPP_DEBUG, mpp_debug);
+
+ } while (0);
+
+ nesadapter->mac_sw_state[0] = NES_MAC_SW_IDLE;
+no_mh_work:
+ nesdev->nesadapter->mh_timer.expires = jiffies + (HZ/5);
+ add_timer(&nesdev->nesadapter->mh_timer);
+}
+
+/**
+ * nes_clc - hourly timer that clears the per-port link interrupt counters.
+ */
+void nes_clc(unsigned long parm)
+{
+ unsigned long flags;
+ struct nes_device *nesdev = (struct nes_device *)parm;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+ spin_lock_irqsave(&nesadapter->phy_lock, flags);
+ nesadapter->link_interrupt_count[0] = 0;
+ nesadapter->link_interrupt_count[1] = 0;
+ nesadapter->link_interrupt_count[2] = 0;
+ nesadapter->link_interrupt_count[3] = 0;
+ spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
+
+ nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
+ add_timer(&nesadapter->lc_timer);
+}
+
+
+/**
+ * nes_dump_mem - hex/ASCII dump of up to 256 bytes at @addr, emitted only
+ * when @dump_debug_level is enabled in nes_debug_level.
+ */
+void nes_dump_mem(unsigned int dump_debug_level, void *addr, int length)
+{
+ char xlate[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'a', 'b', 'c', 'd', 'e', 'f'};
+ char *ptr;
+ char hex_buf[80];
+ char ascii_buf[20];
+ int num_char;
+ int num_ascii;
+ int num_hex;
+
+ if (!(nes_debug_level & dump_debug_level)) {
+ return;
+ }
+
+ ptr = addr;
+ if (length > 0x100) {
+ nes_debug(dump_debug_level, "Length truncated from %x to %x\n", length, 0x100);
+ length = 0x100;
+ }
+ nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", ptr, length, length);
+
+ memset(ascii_buf, 0, 20);
+ memset(hex_buf, 0, 80);
+
+ num_ascii = 0;
+ num_hex = 0;
+ for (num_char = 0; num_char < length; num_char++) {
+ if (num_ascii == 8) {
+ ascii_buf[num_ascii++] = ' ';
+ hex_buf[num_hex++] = '-';
+ hex_buf[num_hex++] = ' ';
+ }
+
+ if (*ptr < 0x20 || *ptr > 0x7e)
+ ascii_buf[num_ascii++] = '.';
+ else
+ ascii_buf[num_ascii++] = *ptr;
+ hex_buf[num_hex++] = xlate[((*ptr & 0xf0) >> 4)];
+ hex_buf[num_hex++] = xlate[*ptr & 0x0f];
+ hex_buf[num_hex++] = ' ';
+ ptr++;
+
+ if (num_ascii >= 17) {
+ /* output line and reset */
+ nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
+ memset(ascii_buf, 0, 20);
+ memset(hex_buf, 0, 80);
+ num_ascii = 0;
+ num_hex = 0;
+ }
+ }
+
+ /* output the rest */
+ if (num_ascii) {
+ while (num_ascii < 17) {
+ if (num_ascii == 8) {
+ hex_buf[num_hex++] = ' ';
+ hex_buf[num_hex++] = ' ';
+ }
+ hex_buf[num_hex++] = ' ';
+ hex_buf[num_hex++] = ' ';
+ hex_buf[num_hex++] = ' ';
+ num_ascii++;
+ }
+
+ nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
+ }
+}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
new file mode 100644
index 000000000000..ffd4b425567f
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -0,0 +1,3917 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/random.h>
+#include <linux/highmem.h>
+#include <asm/byteorder.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "nes.h"
+
+#include <rdma/ib_umem.h>
+
+atomic_t mod_qp_timouts;
+atomic_t qps_created;
+atomic_t sw_qps_destroyed;
+
+
+/**
+ * nes_alloc_mw - allocate a memory window by issuing an ALLOCATE_STAG
+ * CQP request to the adapter.
+ */
+static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd)
+{
+ unsigned long flags;
+ struct nes_pd *nespd = to_nespd(ibpd);
+ struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_cqp_request *cqp_request;
+ struct nes_mr *nesmr;
+ struct ib_mw *ibmw;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ int ret;
+ u32 stag;
+ u32 stag_index = 0;
+ u32 next_stag_index = 0;
+ u32 driver_key = 0;
+ u8 stag_key = 0;
+
+ get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+ stag_key = (u8)next_stag_index;
+
+ driver_key = 0;
+
+ next_stag_index >>= 8;
+ next_stag_index %= nesadapter->max_mr;
+
+ ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
+ nesadapter->max_mr, &stag_index, &next_stag_index);
+ if (ret) {
+ return ERR_PTR(ret);
+ }
+
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
+ if (!nesmr) {
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ return ERR_PTR(-ENOMEM);
+ }
+
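+	/* Compose the STag: resource index in bits 31:8, the driver key OR'd
+	 * in, and a random 8-bit key in the low byte. */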
+ stag = stag_index << 8;
+ stag |= driver_key;
+ stag += (u32)stag_key;
+
+ nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n",
+ stag, stag_index);
+
+ /* Register the region with the adapter */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ kfree(nesmr);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
+ cpu_to_le32( NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_RIGHTS_REMOTE_READ |
+ NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_VA_TO |
+ NES_CQP_STAG_REM_ACC_EN);
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ stag, ret, cqp_request->major_code, cqp_request->minor_code);
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ kfree(nesmr);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ if (!ret) {
+ return ERR_PTR(-ETIME);
+ } else {
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ nesmr->ibmw.rkey = stag;
+ nesmr->mode = IWNES_MEMREG_TYPE_MW;
+ ibmw = &nesmr->ibmw;
+ nesmr->pbl_4k = 0;
+ nesmr->pbls_used = 0;
+
+ return ibmw;
+}
+
+
+/**
+ * nes_dealloc_mw
+ */
+static int nes_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct nes_mr *nesmr = to_nesmw(ibmw);
+ struct nes_vnic *nesvnic = to_nesvnic(ibmw->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ int err = 0;
+ unsigned long flags;
+ int ret;
+
+ /* Deallocate the window with the adapter */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, NES_CQP_DEALLOCATE_STAG);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ibmw->rkey);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X to complete.\n",
+ ibmw->rkey);
+ ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MR, "Deallocate STag completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ ret, cqp_request->major_code, cqp_request->minor_code);
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ if (!ret) {
+ err = -ETIME;
+ } else {
+ err = -EIO;
+ }
+ } else {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ (ibmw->rkey & 0x0fffff00) >> 8);
+ kfree(nesmr);
+
+ return err;
+}
+
+
+/**
+ * nes_bind_mw
+ */
+static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
+ struct ib_mw_bind *ibmw_bind)
+{
+ u64 u64temp;
+ struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ /* struct nes_mr *nesmr = to_nesmw(ibmw); */
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+ struct nes_hw_qp_wqe *wqe;
+ unsigned long flags = 0;
+ u32 head;
+ u32 wqe_misc = 0;
+ u32 qsize;
+
+ if (nesqp->ibqp_state > IB_QPS_RTS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+
+ head = nesqp->hwqp.sq_head;
+	qsize = nesqp->hwqp.sq_size;
+
+ /* Check for SQ overflow */
+ if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ return -EINVAL;
+ }
+
+ wqe = &nesqp->hwqp.sq_vbase[head];
+ /* nes_debug(NES_DBG_MR, "processing sq wqe at %p, head = %u.\n", wqe, head); */
+ nes_fill_init_qp_wqe(wqe, nesqp, head);
+ u64temp = ibmw_bind->wr_id;
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, u64temp);
+ wqe_misc = NES_IWARP_SQ_OP_BIND;
+
+ wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+
+ if (ibmw_bind->send_flags & IB_SEND_SIGNALED)
+ wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
+
+ if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_WRITE) {
+ wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE;
+ }
+ if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_READ) {
+ wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ;
+ }
+
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, ibmw_bind->mr->lkey);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX,
+ ibmw_bind->length);
+ wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0;
+ u64temp = (u64)ibmw_bind->addr;
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp);
+
+ head++;
+ if (head >= qsize)
+ head = 0;
+
+ nesqp->hwqp.sq_head = head;
+ barrier();
+
+ nes_write32(nesdev->regs+NES_WQE_ALLOC,
+ (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
+
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ return 0;
+}
+
+
+/**
+ * nes_alloc_fmr
+ */
+static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
+ int ibmr_access_flags,
+ struct ib_fmr_attr *ibfmr_attr)
+{
+ unsigned long flags;
+ struct nes_pd *nespd = to_nespd(ibpd);
+ struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_fmr *nesfmr;
+ struct nes_cqp_request *cqp_request;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ int ret;
+ u32 stag;
+ u32 stag_index = 0;
+ u32 next_stag_index = 0;
+ u32 driver_key = 0;
+ u32 opcode = 0;
+ u8 stag_key = 0;
+ int i=0;
+ struct nes_vpbl vpbl;
+
+ get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+ stag_key = (u8)next_stag_index;
+
+ driver_key = 0;
+
+ next_stag_index >>= 8;
+ next_stag_index %= nesadapter->max_mr;
+
+ ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
+ nesadapter->max_mr, &stag_index, &next_stag_index);
+ if (ret) {
+ goto failed_resource_alloc;
+ }
+
+ nesfmr = kzalloc(sizeof(*nesfmr), GFP_KERNEL);
+ if (!nesfmr) {
+ ret = -ENOMEM;
+ goto failed_fmr_alloc;
+ }
+
+ nesfmr->nesmr.mode = IWNES_MEMREG_TYPE_FMR;
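+	/* Choose the page-buffer-list layout from max_pages: no PBL for a
+	 * single page, one 256-byte PBL for up to 32 pages, one 4 KB PBL for
+	 * up to 512 pages, otherwise a two-level tree of 4 KB PBLs. */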
+ if (ibfmr_attr->max_pages == 1) {
+ /* use zero length PBL */
+ nesfmr->nesmr.pbl_4k = 0;
+ nesfmr->nesmr.pbls_used = 0;
+ } else if (ibfmr_attr->max_pages <= 32) {
+ /* use PBL 256 */
+ nesfmr->nesmr.pbl_4k = 0;
+ nesfmr->nesmr.pbls_used = 1;
+ } else if (ibfmr_attr->max_pages <= 512) {
+ /* use 4K PBLs */
+ nesfmr->nesmr.pbl_4k = 1;
+ nesfmr->nesmr.pbls_used = 1;
+ } else {
+ /* use two level 4K PBLs */
+ /* add support for two level 256B PBLs */
+ nesfmr->nesmr.pbl_4k = 1;
+ nesfmr->nesmr.pbls_used = 1 + (ibfmr_attr->max_pages >> 9) +
+ ((ibfmr_attr->max_pages & 511) ? 1 : 0);
+ }
+ /* Register the region with the adapter */
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+
+ /* track PBL resources */
+ if (nesfmr->nesmr.pbls_used != 0) {
+ if (nesfmr->nesmr.pbl_4k) {
+ if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_alloc;
+ } else {
+ nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
+ }
+ } else {
+ if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_alloc;
+ } else {
+ nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
+ }
+ }
+ }
+
+ /* one level pbl */
+ if (nesfmr->nesmr.pbls_used == 0) {
+ nesfmr->root_vpbl.pbl_vbase = NULL;
+ nes_debug(NES_DBG_MR, "zero level pbl \n");
+ } else if (nesfmr->nesmr.pbls_used == 1) {
+ /* can change it to kmalloc & dma_map_single */
+ nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &nesfmr->root_vpbl.pbl_pbase);
+ if (!nesfmr->root_vpbl.pbl_vbase) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_alloc;
+ }
+ nesfmr->leaf_pbl_cnt = 0;
+ nes_debug(NES_DBG_MR, "one level pbl, root_vpbl.pbl_vbase=%p \n",
+ nesfmr->root_vpbl.pbl_vbase);
+ }
+ /* two level pbl */
+ else {
+ nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
+ &nesfmr->root_vpbl.pbl_pbase);
+ if (!nesfmr->root_vpbl.pbl_vbase) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_vpbl_alloc;
+ }
+
+ nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
+ if (!nesfmr->root_vpbl.leaf_vpbl) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ ret = -ENOMEM;
+ goto failed_leaf_vpbl_alloc;
+ }
+
+ nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
+ nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
+ " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
+ nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
+
+ for (i=0; i<nesfmr->leaf_pbl_cnt; i++)
+ nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase = NULL;
+
+ for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
+ vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &vpbl.pbl_pbase);
+
+ if (!vpbl.pbl_vbase) {
+ ret = -ENOMEM;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ goto failed_leaf_vpbl_pages_alloc;
+ }
+
+ nesfmr->root_vpbl.pbl_vbase[i].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
+ nesfmr->root_vpbl.pbl_vbase[i].pa_high = cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
+ nesfmr->root_vpbl.leaf_vpbl[i] = vpbl;
+
+ nes_debug(NES_DBG_MR, "pbase_low=0x%x, pbase_high=0x%x, vpbl=%p\n",
+ nesfmr->root_vpbl.pbl_vbase[i].pa_low,
+ nesfmr->root_vpbl.pbl_vbase[i].pa_high,
+ &nesfmr->root_vpbl.leaf_vpbl[i]);
+ }
+ }
+ nesfmr->ib_qp = NULL;
+	nesfmr->access_rights = 0;
+
+ stag = stag_index << 8;
+ stag |= driver_key;
+ stag += (u32)stag_key;
+
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
+ ret = -ENOMEM;
+ goto failed_leaf_vpbl_pages_alloc;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n",
+ stag, stag_index);
+
+ opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
+
+ if (nesfmr->nesmr.pbl_4k == 1)
+ opcode |= NES_CQP_STAG_PBL_BLK_SIZE;
+
+ if (ibmr_access_flags & IB_ACCESS_REMOTE_WRITE) {
+ opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE |
+ NES_CQP_STAG_RIGHTS_LOCAL_WRITE | NES_CQP_STAG_REM_ACC_EN;
+ nesfmr->access_rights |=
+ NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_RIGHTS_LOCAL_WRITE |
+ NES_CQP_STAG_REM_ACC_EN;
+ }
+
+ if (ibmr_access_flags & IB_ACCESS_REMOTE_READ) {
+ opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ |
+ NES_CQP_STAG_RIGHTS_LOCAL_READ | NES_CQP_STAG_REM_ACC_EN;
+ nesfmr->access_rights |=
+ NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_RIGHTS_LOCAL_READ |
+ NES_CQP_STAG_REM_ACC_EN;
+ }
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff));
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
+
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] =
+ cpu_to_le32((nesfmr->nesmr.pbls_used>1) ?
+ (nesfmr->nesmr.pbls_used-1) : nesfmr->nesmr.pbls_used);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ stag, ret, cqp_request->major_code, cqp_request->minor_code);
+
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ ret = (!ret) ? -ETIME : -EIO;
+ goto failed_leaf_vpbl_pages_alloc;
+ } else {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ nesfmr->nesmr.ibfmr.lkey = stag;
+ nesfmr->nesmr.ibfmr.rkey = stag;
+ nesfmr->attr = *ibfmr_attr;
+
+ return &nesfmr->nesmr.ibfmr;
+
+ failed_leaf_vpbl_pages_alloc:
+ /* unroll all allocated pages */
+ for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
+ if (nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase) {
+ pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
+ nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
+ }
+ }
+	kfree(nesfmr->root_vpbl.leaf_vpbl);
+
+ failed_leaf_vpbl_alloc:
+ if (nesfmr->leaf_pbl_cnt == 0) {
+ if (nesfmr->root_vpbl.pbl_vbase)
+ pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
+ nesfmr->root_vpbl.pbl_pbase);
+ } else
+ pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
+ nesfmr->root_vpbl.pbl_pbase);
+
+ failed_vpbl_alloc:
+ kfree(nesfmr);
+
+ failed_fmr_alloc:
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+
+ failed_resource_alloc:
+ return ERR_PTR(ret);
+}
+
+
+/**
+ * nes_dealloc_fmr
+ */
+static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
+{
+ struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
+ struct nes_fmr *nesfmr = to_nesfmr(nesmr);
+ struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_mr temp_nesmr = *nesmr;
+ int i = 0;
+
+ temp_nesmr.ibmw.device = ibfmr->device;
+ temp_nesmr.ibmw.pd = ibfmr->pd;
+ temp_nesmr.ibmw.rkey = ibfmr->rkey;
+ temp_nesmr.ibmw.uobject = NULL;
+
+ /* free the resources */
+ if (nesfmr->leaf_pbl_cnt == 0) {
+ /* single PBL case */
+ if (nesfmr->root_vpbl.pbl_vbase)
+ pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
+ nesfmr->root_vpbl.pbl_pbase);
+ } else {
+ for (i = 0; i < nesfmr->leaf_pbl_cnt; i++) {
+ pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
+ nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
+ }
+ kfree(nesfmr->root_vpbl.leaf_vpbl);
+ pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
+ nesfmr->root_vpbl.pbl_pbase);
+ }
+
+ return nes_dealloc_mw(&temp_nesmr.ibmw);
+}
+
+
+/**
+ * nes_map_phys_fmr
+ */
+static int nes_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int list_len, u64 iova)
+{
+ return 0;
+}
+
+
+/**
+ * nes_unmap_fmr
+ */
+static int nes_unmap_fmr(struct list_head *ibfmr_list)
+{
+ return 0;
+}
+
+
+
+/**
+ * nes_query_device
+ */
+static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
+{
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_ib_device *nesibdev = nesvnic->nesibdev;
+
+ memset(props, 0, sizeof(*props));
+ memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6);
+
+ props->fw_ver = nesdev->nesadapter->fw_ver;
+ props->device_cap_flags = nesdev->nesadapter->device_cap_flags;
+ props->vendor_id = nesdev->nesadapter->vendor_id;
+ props->vendor_part_id = nesdev->nesadapter->vendor_part_id;
+ props->hw_ver = nesdev->nesadapter->hw_rev;
+ props->max_mr_size = 0x80000000;
+ props->max_qp = nesibdev->max_qp;
+ props->max_qp_wr = nesdev->nesadapter->max_qp_wr - 2;
+ props->max_sge = nesdev->nesadapter->max_sge;
+ props->max_cq = nesibdev->max_cq;
+ props->max_cqe = nesdev->nesadapter->max_cqe - 1;
+ props->max_mr = nesibdev->max_mr;
+ props->max_mw = nesibdev->max_mr;
+ props->max_pd = nesibdev->max_pd;
+ props->max_sge_rd = 1;
+ switch (nesdev->nesadapter->max_irrq_wr) {
+ case 0:
+ props->max_qp_rd_atom = 1;
+ break;
+ case 1:
+ props->max_qp_rd_atom = 4;
+ break;
+ case 2:
+ props->max_qp_rd_atom = 16;
+ break;
+ case 3:
+ props->max_qp_rd_atom = 32;
+ break;
+ default:
+ props->max_qp_rd_atom = 0;
+ }
+ props->max_qp_init_rd_atom = props->max_qp_wr;
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->max_map_per_fmr = 1;
+
+ return 0;
+}
+
+
+/**
+ * nes_query_port
+ */
+static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
+{
+ memset(props, 0, sizeof(*props));
+
+ props->max_mtu = IB_MTU_2048;
+ props->active_mtu = IB_MTU_2048;
+ props->lid = 1;
+ props->lmc = 0;
+ props->sm_lid = 0;
+ props->sm_sl = 0;
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = 0;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
+ props->gid_tbl_len = 1;
+ props->pkey_tbl_len = 1;
+ props->qkey_viol_cntr = 0;
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = 1;
+ props->max_msg_sz = 0x80000000;
+
+ return 0;
+}
+
+
+/**
+ * nes_modify_port
+ */
+static int nes_modify_port(struct ib_device *ibdev, u8 port,
+ int port_modify_mask, struct ib_port_modify *props)
+{
+ return 0;
+}
+
+
+/**
+ * nes_query_pkey
+ */
+static int nes_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+ *pkey = 0;
+ return 0;
+}
+
+
+/**
+ * nes_query_gid
+ */
+static int nes_query_gid(struct ib_device *ibdev, u8 port,
+ int index, union ib_gid *gid)
+{
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+
+ memset(&(gid->raw[0]), 0, sizeof(gid->raw));
+ memcpy(&(gid->raw[0]), nesvnic->netdev->dev_addr, 6);
+
+ return 0;
+}
+
+
+/**
+ * nes_alloc_ucontext - Allocate the user context data structure. This keeps track
+ * of all objects associated with a particular user-mode client.
+ */
+static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_alloc_ucontext_req req;
+ struct nes_alloc_ucontext_resp uresp;
+ struct nes_ucontext *nes_ucontext;
+ struct nes_ib_device *nesibdev = nesvnic->nesibdev;
+
+
+ if (ib_copy_from_udata(&req, udata, sizeof(struct nes_alloc_ucontext_req))) {
+ printk(KERN_ERR PFX "Invalid structure size on allocate user context.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (req.userspace_ver != NES_ABI_USERSPACE_VER) {
+ printk(KERN_ERR PFX "Invalid userspace driver version detected. Detected version %d, should be %d\n",
+ req.userspace_ver, NES_ABI_USERSPACE_VER);
+ return ERR_PTR(-EINVAL);
+ }
+
+
+ memset(&uresp, 0, sizeof uresp);
+
+ uresp.max_qps = nesibdev->max_qp;
+ uresp.max_pds = nesibdev->max_pd;
+ uresp.wq_size = nesdev->nesadapter->max_qp_wr * 2;
+ uresp.virtwq = nesadapter->virtwq;
+ uresp.kernel_ver = NES_ABI_KERNEL_VER;
+
+ nes_ucontext = kzalloc(sizeof *nes_ucontext, GFP_KERNEL);
+ if (!nes_ucontext)
+ return ERR_PTR(-ENOMEM);
+
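+	/* Userspace mmap() page-offset layout: the first max_pds pages are
+	 * doorbell regions, followed by the per-QP work-queue regions and
+	 * then the CQ regions. */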
+ nes_ucontext->nesdev = nesdev;
+ nes_ucontext->mmap_wq_offset = uresp.max_pds;
+ nes_ucontext->mmap_cq_offset = nes_ucontext->mmap_wq_offset +
+ ((sizeof(struct nes_hw_qp_wqe) * uresp.max_qps * 2) + PAGE_SIZE-1) /
+ PAGE_SIZE;
+
+
+ if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
+ kfree(nes_ucontext);
+ return ERR_PTR(-EFAULT);
+ }
+
+ INIT_LIST_HEAD(&nes_ucontext->cq_reg_mem_list);
+ INIT_LIST_HEAD(&nes_ucontext->qp_reg_mem_list);
+ atomic_set(&nes_ucontext->usecnt, 1);
+ return &nes_ucontext->ibucontext;
+}
+
+
+/**
+ * nes_dealloc_ucontext
+ */
+static int nes_dealloc_ucontext(struct ib_ucontext *context)
+{
+ /* struct nes_vnic *nesvnic = to_nesvnic(context->device); */
+ /* struct nes_device *nesdev = nesvnic->nesdev; */
+ struct nes_ucontext *nes_ucontext = to_nesucontext(context);
+
+ if (!atomic_dec_and_test(&nes_ucontext->usecnt))
+ return 0;
+ kfree(nes_ucontext);
+ return 0;
+}
+
+
+/**
+ * nes_mmap
+ */
+static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ unsigned long index;
+ struct nes_vnic *nesvnic = to_nesvnic(context->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ /* struct nes_adapter *nesadapter = nesdev->nesadapter; */
+ struct nes_ucontext *nes_ucontext;
+ struct nes_qp *nesqp;
+
+ nes_ucontext = to_nesucontext(context);
+
+
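+	/* Offsets at or above mmap_wq_offset map the kernel-allocated work
+	 * queues of a user QP; smaller offsets map the doorbell page recorded
+	 * for this context. */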
+ if (vma->vm_pgoff >= nes_ucontext->mmap_wq_offset) {
+ index = (vma->vm_pgoff - nes_ucontext->mmap_wq_offset) * PAGE_SIZE;
+ index /= ((sizeof(struct nes_hw_qp_wqe) * nesdev->nesadapter->max_qp_wr * 2) +
+ PAGE_SIZE-1) & (~(PAGE_SIZE-1));
+ if (!test_bit(index, nes_ucontext->allocated_wqs)) {
+ nes_debug(NES_DBG_MMAP, "wq %lu not allocated\n", index);
+ return -EFAULT;
+ }
+ nesqp = nes_ucontext->mmap_nesqp[index];
+ if (nesqp == NULL) {
+ nes_debug(NES_DBG_MMAP, "wq %lu has a NULL QP base.\n", index);
+ return -EFAULT;
+ }
+ if (remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(nesqp->hwqp.sq_vbase) >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ nes_debug(NES_DBG_MMAP, "remap_pfn_range failed.\n");
+ return -EAGAIN;
+ }
+ vma->vm_private_data = nesqp;
+ return 0;
+ } else {
+ index = vma->vm_pgoff;
+ if (!test_bit(index, nes_ucontext->allocated_doorbells))
+ return -EFAULT;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, vma->vm_start,
+ (nesdev->doorbell_start +
+ ((nes_ucontext->mmap_db_index[index] - nesdev->base_doorbell_index) * 4096))
+ >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot))
+ return -EAGAIN;
+ vma->vm_private_data = nes_ucontext;
+ return 0;
+ }
+
+ return -ENOSYS;
+}
+
+
+/**
+ * nes_alloc_pd
+ */
+static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ struct nes_pd *nespd;
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_ucontext *nesucontext;
+ struct nes_alloc_pd_resp uresp;
+ u32 pd_num = 0;
+ int err;
+
+ nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
+ nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context,
+ atomic_read(&nesvnic->netdev->refcnt));
+
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
+ nesadapter->max_pd, &pd_num, &nesadapter->next_pd);
+ if (err) {
+ return ERR_PTR(err);
+ }
+
+ nespd = kzalloc(sizeof (struct nes_pd), GFP_KERNEL);
+ if (!nespd) {
+ nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n",
+ nespd, nesvnic->nesibdev->ibdev.name);
+
+ nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd;
+
+ if (context) {
+ nesucontext = to_nesucontext(context);
+ nespd->mmap_db_index = find_next_zero_bit(nesucontext->allocated_doorbells,
+ NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db);
+		nes_debug(NES_DBG_PD, "find_next_zero_bit on doorbells returned %u, mapping pd_id %u.\n",
+ nespd->mmap_db_index, nespd->pd_id);
+		if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) {
+ nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n");
+ nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
+ kfree(nespd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ uresp.pd_id = nespd->pd_id;
+ uresp.mmap_db_index = nespd->mmap_db_index;
+ if (ib_copy_to_udata(udata, &uresp, sizeof (struct nes_alloc_pd_resp))) {
+ nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
+ kfree(nespd);
+ return ERR_PTR(-EFAULT);
+ }
+
+ set_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
+ nesucontext->mmap_db_index[nespd->mmap_db_index] = nespd->pd_id;
+ nesucontext->first_free_db = nespd->mmap_db_index + 1;
+ }
+
+ nes_debug(NES_DBG_PD, "PD%u structure located @%p.\n", nespd->pd_id, nespd);
+ return &nespd->ibpd;
+}
+
+
+/**
+ * nes_dealloc_pd
+ */
+static int nes_dealloc_pd(struct ib_pd *ibpd)
+{
+ struct nes_ucontext *nesucontext;
+ struct nes_pd *nespd = to_nespd(ibpd);
+ struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+ if ((ibpd->uobject) && (ibpd->uobject->context)) {
+ nesucontext = to_nesucontext(ibpd->uobject->context);
+ nes_debug(NES_DBG_PD, "Clearing bit %u from allocated doorbells\n",
+ nespd->mmap_db_index);
+ clear_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
+ nesucontext->mmap_db_index[nespd->mmap_db_index] = 0;
+ if (nesucontext->first_free_db > nespd->mmap_db_index) {
+ nesucontext->first_free_db = nespd->mmap_db_index;
+ }
+ }
+
+ nes_debug(NES_DBG_PD, "Deallocating PD%u structure located @%p.\n",
+ nespd->pd_id, nespd);
+ nes_free_resource(nesadapter, nesadapter->allocated_pds,
+ (nespd->pd_id-nesadapter->base_pd)>>(PAGE_SHIFT-12));
+ kfree(nespd);
+
+ return 0;
+}
+
+
+/**
+ * nes_create_ah
+ */
+static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+
+/**
+ * nes_destroy_ah
+ */
+static int nes_destroy_ah(struct ib_ah *ah)
+{
+ return -ENOSYS;
+}
+
+
+/**
+ * nes_get_encoded_size - round *size up to the next supported queue depth
+ * (32, 128, or 512) and return the hardware encoding (1, 2, or 3), or 0
+ * if the requested size is too large.
+ */
+static inline u8 nes_get_encoded_size(int *size)
+{
+ u8 encoded_size = 0;
+ if (*size <= 32) {
+ *size = 32;
+ encoded_size = 1;
+ } else if (*size <= 128) {
+ *size = 128;
+ encoded_size = 2;
+ } else if (*size <= 512) {
+ *size = 512;
+ encoded_size = 3;
+ }
+ return (encoded_size);
+}
+
+
+
+/**
+ * nes_setup_virt_qp - build the hardware PBL and QP context for a
+ * userspace QP whose work queues live in memory registered through
+ * nes_reg_user_mr().
+ */
+static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
+ struct nes_vnic *nesvnic, int sq_size, int rq_size)
+{
+ unsigned long flags;
+ void *mem;
+ __le64 *pbl = NULL;
+ __le64 *tpbl;
+ __le64 *pblbuffer;
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 pbl_entries;
+ u8 rq_pbl_entries;
+ u8 sq_pbl_entries;
+
+ pbl_entries = nespbl->pbl_size >> 3;
+ nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%p\n",
+ nespbl->pbl_size, pbl_entries,
+ (void *)nespbl->pbl_vbase,
+ (void *)nespbl->pbl_pbase);
+ pbl = (__le64 *) nespbl->pbl_vbase; /* points to first pbl entry */
+	/* now let's set the sq_vbase as well as rq_vbase addrs we will assign */
+	/* the first pbl to be for the rq_vbase... */
+ rq_pbl_entries = (rq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;
+ sq_pbl_entries = (sq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;
+ nesqp->hwqp.sq_pbase = (le32_to_cpu(((__le32 *)pbl)[0])) | ((u64)((le32_to_cpu(((__le32 *)pbl)[1]))) << 32);
+ if (!nespbl->page) {
+ nes_debug(NES_DBG_QP, "QP nespbl->page is NULL \n");
+ kfree(nespbl);
+ return -ENOMEM;
+ }
+
+ nesqp->hwqp.sq_vbase = kmap(nespbl->page);
+ nesqp->page = nespbl->page;
+ if (!nesqp->hwqp.sq_vbase) {
+ nes_debug(NES_DBG_QP, "QP sq_vbase kmap failed\n");
+ kfree(nespbl);
+ return -ENOMEM;
+ }
+
+ /* Now to get to sq.. we need to calculate how many */
+ /* PBL entries were used by the rq.. */
+ pbl += sq_pbl_entries;
+ nesqp->hwqp.rq_pbase = (le32_to_cpu(((__le32 *)pbl)[0])) | ((u64)((le32_to_cpu(((__le32 *)pbl)[1]))) << 32);
+ /* nesqp->hwqp.rq_vbase = bus_to_virt(*pbl); */
+ /*nesqp->hwqp.rq_vbase = phys_to_virt(*pbl); */
+
+ nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%p rq_vbase=%p rq_pbase=%p\n",
+ nesqp->hwqp.sq_vbase, (void *)nesqp->hwqp.sq_pbase,
+ nesqp->hwqp.rq_vbase, (void *)nesqp->hwqp.rq_pbase);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ if (!nesadapter->free_256pbl) {
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kunmap(nesqp->page);
+ kfree(nespbl);
+ return -ENOMEM;
+ }
+ nesadapter->free_256pbl--;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
+ nesqp->pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 256, &nesqp->pbl_pbase);
+ pblbuffer = nesqp->pbl_vbase;
+ if (!nesqp->pbl_vbase) {
+ /* memory allocated during nes_reg_user_mr() */
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ nesadapter->free_256pbl++;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kunmap(nesqp->page);
+ return -ENOMEM;
+ }
+ memset(nesqp->pbl_vbase, 0, 256);
+	/* fill in the page addresses in the 256-byte PBL buffer: SQ entries
+	 * at qword offset 16, RQ entries at the start */
+ tpbl = pblbuffer + 16;
+ pbl = (__le64 *)nespbl->pbl_vbase;
+ while (sq_pbl_entries--)
+ *tpbl++ = *pbl++;
+ tpbl = pblbuffer;
+ while (rq_pbl_entries--)
+ *tpbl++ = *pbl++;
+
+ /* done with memory allocated during nes_reg_user_mr() */
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+
+ nesqp->qp_mem_size =
+ max((u32)sizeof(struct nes_qp_context), ((u32)256)) + 256; /* this is Q2 */
+ /* Round up to a multiple of a page */
+ nesqp->qp_mem_size += PAGE_SIZE - 1;
+ nesqp->qp_mem_size &= ~(PAGE_SIZE - 1);
+
+ mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ &nesqp->hwqp.q2_pbase);
+
+ if (!mem) {
+ pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
+ nesqp->pbl_vbase = NULL;
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ nesadapter->free_256pbl++;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kunmap(nesqp->page);
+ return -ENOMEM;
+ }
+ nesqp->hwqp.q2_vbase = mem;
+ mem += 256;
+ memset(nesqp->hwqp.q2_vbase, 0, 256);
+ nesqp->nesqp_context = mem;
+ memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context));
+ nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256;
+
+ return 0;
+}
+
+
+/**
+ * nes_setup_mmap_qp - allocate one contiguous DMA region holding the
+ * QP's SQ, RQ, Q2, and context.
+ */
+static int nes_setup_mmap_qp(struct nes_qp *nesqp, struct nes_vnic *nesvnic,
+ int sq_size, int rq_size)
+{
+ void *mem;
+ struct nes_device *nesdev = nesvnic->nesdev;
+
+ nesqp->qp_mem_size = (sizeof(struct nes_hw_qp_wqe) * sq_size) +
+ (sizeof(struct nes_hw_qp_wqe) * rq_size) +
+ max((u32)sizeof(struct nes_qp_context), ((u32)256)) +
+ 256; /* this is Q2 */
+ /* Round up to a multiple of a page */
+ nesqp->qp_mem_size += PAGE_SIZE - 1;
+ nesqp->qp_mem_size &= ~(PAGE_SIZE - 1);
+
+ mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ &nesqp->hwqp.sq_pbase);
+ if (!mem)
+ return -ENOMEM;
+ nes_debug(NES_DBG_QP, "PCI consistent memory for "
+ "host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n",
+ mem, (unsigned long)nesqp->hwqp.sq_pbase, nesqp->qp_mem_size);
+
+ memset(mem, 0, nesqp->qp_mem_size);
+
+ nesqp->hwqp.sq_vbase = mem;
+ mem += sizeof(struct nes_hw_qp_wqe) * sq_size;
+
+ nesqp->hwqp.rq_vbase = mem;
+ nesqp->hwqp.rq_pbase = nesqp->hwqp.sq_pbase +
+ sizeof(struct nes_hw_qp_wqe) * sq_size;
+ mem += sizeof(struct nes_hw_qp_wqe) * rq_size;
+
+ nesqp->hwqp.q2_vbase = mem;
+ nesqp->hwqp.q2_pbase = nesqp->hwqp.rq_pbase +
+ sizeof(struct nes_hw_qp_wqe) * rq_size;
+ mem += 256;
+ memset(nesqp->hwqp.q2_vbase, 0, 256);
+
+ nesqp->nesqp_context = mem;
+ nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256;
+ memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context));
+ return 0;
+}
+
+
+/**
+ * nes_free_qp_mem() is to free up the qp's pci_alloc_consistent() memory.
+ */
+static inline void nes_free_qp_mem(struct nes_device *nesdev,
+ struct nes_qp *nesqp, int virt_wqs)
+{
+ unsigned long flags;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ if (!virt_wqs) {
+ pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
+ nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
+	} else {
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ nesadapter->free_256pbl++;
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
+ pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase );
+ nesqp->pbl_vbase = NULL;
+ kunmap(nesqp->page);
+ }
+}
+
+
+/**
+ * nes_create_qp
+ */
+static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
+ struct ib_qp_init_attr *init_attr, struct ib_udata *udata)
+{
+ u64 u64temp= 0;
+ u64 u64nesqp = 0;
+ struct nes_pd *nespd = to_nespd(ibpd);
+ struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_qp *nesqp;
+ struct nes_cq *nescq;
+ struct nes_ucontext *nes_ucontext;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ struct nes_create_qp_req req;
+ struct nes_create_qp_resp uresp;
+ struct nes_pbl *nespbl = NULL;
+ u32 qp_num = 0;
+ u32 opcode = 0;
+ /* u32 counter = 0; */
+ void *mem;
+ unsigned long flags;
+ int ret;
+ int err;
+ int virt_wqs = 0;
+ int sq_size;
+ int rq_size;
+ u8 sq_encoded_size;
+ u8 rq_encoded_size;
+ /* int counter; */
+
+ atomic_inc(&qps_created);
+ switch (init_attr->qp_type) {
+ case IB_QPT_RC:
+ if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
+ init_attr->cap.max_inline_data = 0;
+ } else {
+ init_attr->cap.max_inline_data = 64;
+ }
+ sq_size = init_attr->cap.max_send_wr;
+ rq_size = init_attr->cap.max_recv_wr;
+
+			/* check if the encoded sizes are OK or not... */
+ sq_encoded_size = nes_get_encoded_size(&sq_size);
+ rq_encoded_size = nes_get_encoded_size(&rq_size);
+
+ if ((!sq_encoded_size) || (!rq_encoded_size)) {
+ nes_debug(NES_DBG_QP, "ERROR bad rq (%u) or sq (%u) size\n",
+ rq_size, sq_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+			init_attr->cap.max_send_wr = sq_size - 2;
+			init_attr->cap.max_recv_wr = rq_size - 1;
+ nes_debug(NES_DBG_QP, "RQ size=%u, SQ Size=%u\n", rq_size, sq_size);
+
+ ret = nes_alloc_resource(nesadapter, nesadapter->allocated_qps,
+ nesadapter->max_qp, &qp_num, &nesadapter->next_qp);
+ if (ret) {
+ return ERR_PTR(ret);
+ }
+
+ /* Need 512 (actually now 1024) byte alignment on this structure */
+ mem = kzalloc(sizeof(*nesqp)+NES_SW_CONTEXT_ALIGN-1, GFP_KERNEL);
+ if (!mem) {
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ nes_debug(NES_DBG_QP, "Unable to allocate QP\n");
+ return ERR_PTR(-ENOMEM);
+ }
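+			/* The hardware requires the QP structure to be aligned to
+			 * NES_SW_CONTEXT_ALIGN, so over-allocate and round the pointer
+			 * up, keeping the original pointer in allocated_buffer for
+			 * kfree(). */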
+ u64nesqp = (unsigned long)mem;
+ u64nesqp += ((u64)NES_SW_CONTEXT_ALIGN) - 1;
+ u64temp = ((u64)NES_SW_CONTEXT_ALIGN) - 1;
+ u64nesqp &= ~u64temp;
+ nesqp = (struct nes_qp *)(unsigned long)u64nesqp;
+ /* nes_debug(NES_DBG_QP, "nesqp=%p, allocated buffer=%p. Rounded to closest %u\n",
+ nesqp, mem, NES_SW_CONTEXT_ALIGN); */
+ nesqp->allocated_buffer = mem;
+
+ if (udata) {
+ if (ib_copy_from_udata(&req, udata, sizeof(struct nes_create_qp_req))) {
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n");
+					return ERR_PTR(-EFAULT);
+ }
+ if (req.user_wqe_buffers) {
+ virt_wqs = 1;
+ }
+ if ((ibpd->uobject) && (ibpd->uobject->context)) {
+ nesqp->user_mode = 1;
+ nes_ucontext = to_nesucontext(ibpd->uobject->context);
+ if (virt_wqs) {
+ err = 1;
+ list_for_each_entry(nespbl, &nes_ucontext->qp_reg_mem_list, list) {
+ if (nespbl->user_base == (unsigned long )req.user_wqe_buffers) {
+ list_del(&nespbl->list);
+ err = 0;
+ nes_debug(NES_DBG_QP, "Found PBL for virtual QP. nespbl=%p. user_base=0x%lx\n",
+ nespbl, nespbl->user_base);
+ break;
+ }
+ }
+ if (err) {
+ nes_debug(NES_DBG_QP, "Didn't Find PBL for virtual QP. address = %llx.\n",
+ (long long unsigned int)req.user_wqe_buffers);
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ nes_ucontext = to_nesucontext(ibpd->uobject->context);
+ nesqp->mmap_sq_db_index =
+ find_next_zero_bit(nes_ucontext->allocated_wqs,
+ NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq);
+ /* nes_debug(NES_DBG_QP, "find_first_zero_biton wqs returned %u\n",
+ nespd->mmap_db_index); */
+					if (nesqp->mmap_sq_db_index >= NES_MAX_USER_WQ_REGIONS) {
+ nes_debug(NES_DBG_QP,
+ "db index > max user regions, failing create QP\n");
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ if (virt_wqs) {
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+ }
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+ set_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
+ nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = nesqp;
+ nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index + 1;
+ } else {
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+ err = (!virt_wqs) ? nes_setup_mmap_qp(nesqp, nesvnic, sq_size, rq_size) :
+ nes_setup_virt_qp(nesqp, nespbl, nesvnic, sq_size, rq_size);
+ if (err) {
+ nes_debug(NES_DBG_QP,
+						"error getting qp mem code = %d\n", err);
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ nesqp->hwqp.sq_size = sq_size;
+ nesqp->hwqp.sq_encoded_size = sq_encoded_size;
+ nesqp->hwqp.sq_head = 1;
+ nesqp->hwqp.rq_size = rq_size;
+ nesqp->hwqp.rq_encoded_size = rq_encoded_size;
+ /* nes_debug(NES_DBG_QP, "nesqp->nesqp_context_pbase = %p\n",
+ (void *)nesqp->nesqp_context_pbase);
+ */
+ nesqp->hwqp.qp_id = qp_num;
+ nesqp->ibqp.qp_num = nesqp->hwqp.qp_id;
+ nesqp->nespd = nespd;
+
+ nescq = to_nescq(init_attr->send_cq);
+ nesqp->nesscq = nescq;
+ nescq = to_nescq(init_attr->recv_cq);
+ nesqp->nesrcq = nescq;
+
+ nesqp->nesqp_context->misc |= cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) <<
+ NES_QPCONTEXT_MISC_PCI_FCN_SHIFT);
+ nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.rq_encoded_size <<
+ NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT);
+ nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size <<
+ NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT);
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN);
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN);
+ nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number +
+ ((u32)nesqp->nesrcq->hw_cq.cq_number << 16));
+ u64temp = (u64)nesqp->hwqp.sq_pbase;
+ nesqp->nesqp_context->sq_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->sq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+
+
+ if (!virt_wqs) {
+ u64temp = (u64)nesqp->hwqp.rq_pbase;
+ nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+ } else {
+ u64temp = (u64)nesqp->pbl_pbase;
+ nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+ }
+
+ /* nes_debug(NES_DBG_QP, "next_qp_nic_index=%u, using nic_index=%d\n",
+ nesvnic->next_qp_nic_index,
+ nesvnic->qp_nic_index[nesvnic->next_qp_nic_index]); */
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ nesqp->nesqp_context->misc2 |= cpu_to_le32(
+ (u32)nesvnic->qp_nic_index[nesvnic->next_qp_nic_index] <<
+ NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT);
+ nesvnic->next_qp_nic_index++;
+ if ((nesvnic->next_qp_nic_index > 3) ||
+ (nesvnic->qp_nic_index[nesvnic->next_qp_nic_index] == 0xf)) {
+ nesvnic->next_qp_nic_index = 0;
+ }
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+
+ nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32((u32)nesqp->nespd->pd_id << 16);
+ u64temp = (u64)nesqp->hwqp.q2_pbase;
+ nesqp->nesqp_context->q2_addr_low = cpu_to_le32((u32)u64temp);
+ nesqp->nesqp_context->q2_addr_high = cpu_to_le32((u32)(u64temp >> 32));
+ nesqp->nesqp_context->aeq_token_low = cpu_to_le32((u32)((unsigned long)(nesqp)));
+ nesqp->nesqp_context->aeq_token_high = cpu_to_le32((u32)(upper_32_bits((unsigned long)(nesqp))));
+ nesqp->nesqp_context->ird_ord_sizes = cpu_to_le32(NES_QPCONTEXT_ORDIRD_ALSMM |
+ ((((u32)nesadapter->max_irrq_wr) <<
+ NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT) & NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK));
+ if (disable_mpa_crc) {
+ nes_debug(NES_DBG_QP, "Disabling MPA crc checking due to module option.\n");
+ nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(NES_QPCONTEXT_ORDIRD_RNMC);
+ }
+
+
+ /* Create the QP */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_QP, "Failed to get a cqp_request\n");
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ nes_free_qp_mem(nesdev, nesqp, virt_wqs);
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-ENOMEM);
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ if (!virt_wqs) {
+ opcode = NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP |
+ NES_CQP_QP_IWARP_STATE_IDLE;
+ } else {
+ opcode = NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_VIRT_WQS |
+ NES_CQP_QP_IWARP_STATE_IDLE;
+ }
+ opcode |= NES_CQP_QP_CQS_VALID;
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
+
+ u64temp = (u64)nesqp->nesqp_context_pbase;
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
+
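+ /* One reference is dropped after the wait below; the second is expected to be released by the CQP completion handling */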
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ nes_debug(NES_DBG_QP, "Waiting for create iWARP QP%u to complete.\n",
+ nesqp->hwqp.qp_id);
+ ret = wait_event_timeout(cqp_request->waitq,
+ (cqp_request->request_done != 0), NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_QP, "Create iwarp QP%u completed, wait_event_timeout ret=%u,"
+ " nesdev->cqp_head = %u, nesdev->cqp.sq_tail = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ nesqp->hwqp.qp_id, ret, nesdev->cqp.sq_head, nesdev->cqp.sq_tail,
+ cqp_request->major_code, cqp_request->minor_code);
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ nes_free_qp_mem(nesdev, nesqp, virt_wqs);
+ kfree(nesqp->allocated_buffer);
+ if (!ret) {
+ return ERR_PTR(-ETIME);
+ } else {
+ return ERR_PTR(-EIO);
+ }
+ } else {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ if (ibpd->uobject) {
+ uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
+ uresp.actual_sq_size = sq_size;
+ uresp.actual_rq_size = rq_size;
+ uresp.qp_id = nesqp->hwqp.qp_id;
+ uresp.nes_drv_opt = nes_drv_opt;
+ if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
+ nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
+ nes_free_qp_mem(nesdev, nesqp, virt_wqs);
+ kfree(nesqp->allocated_buffer);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ nes_debug(NES_DBG_QP, "QP%u structure located @%p.Size = %u.\n",
+ nesqp->hwqp.qp_id, nesqp, (u32)sizeof(*nesqp));
+ spin_lock_init(&nesqp->lock);
+ init_waitqueue_head(&nesqp->state_waitq);
+ init_waitqueue_head(&nesqp->kick_waitq);
+ nes_add_ref(&nesqp->ibqp);
+ break;
+ default:
+ nes_debug(NES_DBG_QP, "Invalid QP type: %d\n", init_attr->qp_type);
+ return ERR_PTR(-EINVAL);
+ break;
+ }
+
+ /* update the QP table */
+ nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
+ nes_debug(NES_DBG_QP, "netdev refcnt=%u\n",
+ atomic_read(&nesvnic->netdev->refcnt));
+
+ return &nesqp->ibqp;
+}
+
+
+/**
+ * nes_destroy_qp
+ */
+static int nes_destroy_qp(struct ib_qp *ibqp)
+{
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+ /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
+ struct nes_ucontext *nes_ucontext;
+ struct ib_qp_attr attr;
+ struct iw_cm_id *cm_id;
+ struct iw_cm_event cm_event;
+ int ret;
+
+ atomic_inc(&sw_qps_destroyed);
+ nesqp->destroyed = 1;
+
+ /* Blow away the connection if it exists. */
+ if (nesqp->ibqp_state >= IB_QPS_INIT && nesqp->ibqp_state <= IB_QPS_RTS) {
+ /* if (nesqp->ibqp_state == IB_QPS_RTS) { */
+ attr.qp_state = IB_QPS_ERR;
+ nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
+ }
+
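+ /* QP is going away before the connection completed: report a timed-out CONNECT_REPLY to the CM consumer */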
+ if (((nesqp->ibqp_state == IB_QPS_INIT) ||
+ (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) {
+ cm_id = nesqp->cm_id;
+ cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
+ cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT;
+ cm_event.local_addr = cm_id->local_addr;
+ cm_event.remote_addr = cm_id->remote_addr;
+ cm_event.private_data = NULL;
+ cm_event.private_data_len = 0;
+
+ nes_debug(NES_DBG_QP, "Generating a CM Timeout Event for "
+ "QP%u. cm_id = %p, refcount = %u. \n",
+ nesqp->hwqp.qp_id, cm_id, atomic_read(&nesqp->refcount));
+
+ cm_id->rem_ref(cm_id);
+ ret = cm_id->event_handler(cm_id, &cm_event);
+ if (ret)
+ nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
+ }
+
+
+ if (nesqp->user_mode) {
+ if ((ibqp->uobject)&&(ibqp->uobject->context)) {
+ nes_ucontext = to_nesucontext(ibqp->uobject->context);
+ clear_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
+ nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = NULL;
+ if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) {
+ nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index;
+ }
+ }
+ if (nesqp->pbl_pbase)
+ kunmap(nesqp->page);
+ }
+
+ nes_rem_ref(&nesqp->ibqp);
+ return 0;
+}
+
+
+/**
+ * nes_create_cq
+ */
+static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
+ int comp_vector,
+ struct ib_ucontext *context, struct ib_udata *udata)
+{
+ u64 u64temp;
+ struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_cq *nescq;
+ struct nes_ucontext *nes_ucontext = NULL;
+ struct nes_cqp_request *cqp_request;
+ void *mem = NULL;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_pbl *nespbl = NULL;
+ struct nes_create_cq_req req;
+ struct nes_create_cq_resp resp;
+ u32 cq_num = 0;
+ u32 opcode = 0;
+ u32 pbl_entries = 1;
+ int err;
+ unsigned long flags;
+ int ret;
+
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_cqs,
+ nesadapter->max_cq, &cq_num, &nesadapter->next_cq);
+ if (err) {
+ return ERR_PTR(err);
+ }
+
+ nescq = kzalloc(sizeof(struct nes_cq), GFP_KERNEL);
+ if (!nescq) {
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ nes_debug(NES_DBG_CQ, "Unable to allocate nes_cq struct\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
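+ /* Size the CQ at entries + 1, with a floor of 5 */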
+ nescq->hw_cq.cq_size = max(entries + 1, 5);
+ nescq->hw_cq.cq_number = cq_num;
+ nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1;
+
+
+ if (context) {
+ nes_ucontext = to_nesucontext(context);
+ if (ib_copy_from_udata(&req, udata, sizeof (struct nes_create_cq_req))) {
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-EFAULT);
+ }
+ nesvnic->mcrq_ucontext = nes_ucontext;
+ nes_ucontext->mcrqf = req.mcrqf;
+ if (nes_ucontext->mcrqf) {
+ if (nes_ucontext->mcrqf & 0x80000000)
+ nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 12 + (nes_ucontext->mcrqf & 0xf) - 1;
+ else if (nes_ucontext->mcrqf & 0x40000000)
+ nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;
+ else
+ nescq->hw_cq.cq_number = nesvnic->mcrq_qp_id + nes_ucontext->mcrqf-1;
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ }
+ nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n",
+ (unsigned long)req.user_cq_buffer, entries);
+ list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) {
+ if (nespbl->user_base == (unsigned long)req.user_cq_buffer) {
+ list_del(&nespbl->list);
+ err = 0;
+ nes_debug(NES_DBG_CQ, "Found PBL for virtual CQ. nespbl=%p.\n",
+ nespbl);
+ break;
+ }
+ }
+ if (err) {
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(err);
+ }
+
+ pbl_entries = nespbl->pbl_size >> 3;
+ nescq->cq_mem_size = 0;
+ } else {
+ nescq->cq_mem_size = nescq->hw_cq.cq_size * sizeof(struct nes_hw_cqe);
+ nes_debug(NES_DBG_CQ, "Attempting to allocate pci memory (%u entries, %u bytes) for CQ%u.\n",
+ entries, nescq->cq_mem_size, nescq->hw_cq.cq_number);
+
+ /* allocate the physical buffer space */
+ mem = pci_alloc_consistent(nesdev->pcidev, nescq->cq_mem_size,
+ &nescq->hw_cq.cq_pbase);
+ if (!mem) {
+ printk(KERN_ERR PFX "Unable to allocate pci memory for cq\n");
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(mem, 0, nescq->cq_mem_size);
+ nescq->hw_cq.cq_vbase = mem;
+ nescq->hw_cq.cq_head = 0;
+ nes_debug(NES_DBG_CQ, "CQ%u virtual address @ %p, phys = 0x%08X\n",
+ nescq->hw_cq.cq_number, nescq->hw_cq.cq_vbase,
+ (u32)nescq->hw_cq.cq_pbase);
+ }
+
+ nescq->hw_cq.ce_handler = nes_iwarp_ce_handler;
+ spin_lock_init(&nescq->lock);
+
+ /* send CreateCQ request to CQP */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
+ if (!context)
+ pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ nescq->hw_cq.cq_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-ENOMEM);
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ opcode = NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
+ NES_CQP_CQ_CHK_OVERFLOW |
+ NES_CQP_CQ_CEQE_MASK | ((u32)nescq->hw_cq.cq_size << 16);
+
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+
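+ /* Userspace CQs are described by a PBL; pick a 256 byte or 4k PBL based on the entry count */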
+ if (pbl_entries != 1) {
+ if (pbl_entries > 32) {
+ /* use 4k pbl */
+ nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 4k PBL\n", pbl_entries);
+ if (nesadapter->free_4kpbl == 0) {
+ if (cqp_request->dynamic) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kfree(cqp_request);
+ } else {
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ }
+ if (!context)
+ pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ nescq->hw_cq.cq_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-ENOMEM);
+ } else {
+ opcode |= (NES_CQP_CQ_VIRT | NES_CQP_CQ_4KB_CHUNK);
+ nescq->virtual_cq = 2;
+ nesadapter->free_4kpbl--;
+ }
+ } else {
+ /* use 256 byte pbl */
+ nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 256 byte PBL\n", pbl_entries);
+ if (nesadapter->free_256pbl == 0) {
+ if (cqp_request->dynamic) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kfree(cqp_request);
+ } else {
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ }
+ if (!context)
+ pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ nescq->hw_cq.cq_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-ENOMEM);
+ } else {
+ opcode |= NES_CQP_CQ_VIRT;
+ nescq->virtual_cq = 1;
+ nesadapter->free_256pbl--;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ (nescq->hw_cq.cq_number | ((u32)nesdev->ceq_index << 16)));
+
+ if (context) {
+ if (pbl_entries != 1)
+ u64temp = (u64)nespbl->pbl_pbase;
+ else
+ u64temp = le64_to_cpu(nespbl->pbl_vbase[0]);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX,
+ nes_ucontext->mmap_db_index[0]);
+ } else {
+ u64temp = (u64)nescq->hw_cq.cq_pbase;
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
+ }
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
+ u64temp = (u64)(unsigned long)&nescq->hw_cq;
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] =
+ cpu_to_le32((u32)(u64temp >> 1));
+ cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
+ cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ nes_debug(NES_DBG_CQ, "Waiting for create iWARP CQ%u to complete.\n",
+ nescq->hw_cq.cq_number);
+ ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ NES_EVENT_TIMEOUT * 2);
+ nes_debug(NES_DBG_CQ, "Create iWARP CQ%u completed, wait_event_timeout ret = %d.\n",
+ nescq->hw_cq.cq_number, ret);
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ nes_debug(NES_DBG_CQ, "iWARP CQ%u create timeout expired, major code = 0x%04X,"
+ " minor code = 0x%04X\n",
+ nescq->hw_cq.cq_number, cqp_request->major_code, cqp_request->minor_code);
+ if (!context)
+ pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
+ nescq->hw_cq.cq_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-EIO);
+ } else {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ if (context) {
+ /* free the nespbl */
+ pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
+ nespbl->pbl_pbase);
+ kfree(nespbl);
+ resp.cq_id = nescq->hw_cq.cq_number;
+ resp.cq_size = nescq->hw_cq.cq_size;
+ resp.mmap_db_index = 0;
+ if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
+ kfree(nescq);
+ return ERR_PTR(-EFAULT);
+ }
+ }
+
+ return &nescq->ibcq;
+}
+
+
+/**
+ * nes_destroy_cq
+ */
+static int nes_destroy_cq(struct ib_cq *ib_cq)
+{
+ struct nes_cq *nescq;
+ struct nes_device *nesdev;
+ struct nes_vnic *nesvnic;
+ struct nes_adapter *nesadapter;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ unsigned long flags;
+ u32 opcode = 0;
+ int ret;
+
+ if (ib_cq == NULL)
+ return 0;
+
+ nescq = to_nescq(ib_cq);
+ nesvnic = to_nesvnic(ib_cq->device);
+ nesdev = nesvnic->nesdev;
+ nesadapter = nesdev->nesadapter;
+
+ nes_debug(NES_DBG_CQ, "Destroy CQ%u\n", nescq->hw_cq.cq_number);
+
+ /* Send DestroyCQ request to CQP */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+ opcode = NES_CQP_DESTROY_CQ | (nescq->hw_cq.cq_size << 16);
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
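+ /* Give the CQ's PBL back to its pool (virtual_cq: 1 = 256 byte PBL, 2 = 4k PBL) */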
+ if (nescq->virtual_cq == 1) {
+ nesadapter->free_256pbl++;
+ if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
+ printk(KERN_ERR PFX "%s: free 256B PBLs(%u) has exceeded the max(%u)\n",
+ __FUNCTION__, nesadapter->free_256pbl, nesadapter->max_256pbl);
+ }
+ } else if (nescq->virtual_cq == 2) {
+ nesadapter->free_4kpbl++;
+ if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
+ printk(KERN_ERR PFX "%s: free 4K PBLs(%u) has exceeded the max(%u)\n",
+ __FUNCTION__, nesadapter->free_4kpbl, nesadapter->max_4kpbl);
+ }
+ opcode |= NES_CQP_CQ_4KB_CHUNK;
+ }
+
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
+ (nescq->hw_cq.cq_number | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 16)));
+ nes_free_resource(nesadapter, nesadapter->allocated_cqs, nescq->hw_cq.cq_number);
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ nes_debug(NES_DBG_CQ, "Waiting for destroy iWARP CQ%u to complete.\n",
+ nescq->hw_cq.cq_number);
+ ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_CQ, "Destroy iWARP CQ%u completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ nescq->hw_cq.cq_number, ret, cqp_request->major_code,
+ cqp_request->minor_code);
+ if ((!ret) || (cqp_request->major_code)) {
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ if (!ret) {
+ nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy timeout expired\n",
+ nescq->hw_cq.cq_number);
+ ret = -ETIME;
+ } else {
+ nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy failed\n",
+ nescq->hw_cq.cq_number);
+ ret = -EIO;
+ }
+ } else {
+ ret = 0;
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ }
+
+ if (nescq->cq_mem_size)
+ pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
+ (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
+ kfree(nescq);
+
+ return ret;
+}
+
+
+/**
+ * nes_reg_mr
+ */
+static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
+ u32 stag, u64 region_length, struct nes_root_vpbl *root_vpbl,
+ dma_addr_t single_buffer, u16 pbl_count, u16 residual_page_count,
+ int acc, u64 *iova_start)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ unsigned long flags;
+ int ret;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ /* int count; */
+ u32 opcode = 0;
+ u16 major_code;
+
+ /* Register the region with the adapter */
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ /* track PBL resources */
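+ /* Two-level PBLs and large single-level PBLs come from the 4k pool, small single-level PBLs from the 256 byte pool */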
+ if (pbl_count != 0) {
+ if (pbl_count > 1) {
+ /* Two level PBL */
+ if ((pbl_count+1) > nesadapter->free_4kpbl) {
+ nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n");
+ if (cqp_request->dynamic) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kfree(cqp_request);
+ } else {
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ }
+ return -ENOMEM;
+ } else {
+ nesadapter->free_4kpbl -= pbl_count+1;
+ }
+ } else if (residual_page_count > 32) {
+ if (pbl_count > nesadapter->free_4kpbl) {
+ nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n");
+ if (cqp_request->dynamic) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kfree(cqp_request);
+ } else {
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ }
+ return -ENOMEM;
+ } else {
+ nesadapter->free_4kpbl -= pbl_count;
+ }
+ } else {
+ if (pbl_count > nesadapter->free_256pbl) {
+ nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n");
+ if (cqp_request->dynamic) {
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ kfree(cqp_request);
+ } else {
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ }
+ return -ENOMEM;
+ } else {
+ nesadapter->free_256pbl -= pbl_count;
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+
+ opcode = NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ |
+ NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
+ if (acc & IB_ACCESS_LOCAL_WRITE)
+ opcode |= NES_CQP_STAG_RIGHTS_LOCAL_WRITE;
+ if (acc & IB_ACCESS_REMOTE_WRITE)
+ opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_REM_ACC_EN;
+ if (acc & IB_ACCESS_REMOTE_READ)
+ opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_REM_ACC_EN;
+ if (acc & IB_ACCESS_MW_BIND)
+ opcode |= NES_CQP_STAG_RIGHTS_WINDOW_BIND | NES_CQP_STAG_REM_ACC_EN;
+
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_VA_LOW_IDX, *iova_start);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_LOW_IDX, region_length);
+
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] =
+ cpu_to_le32((u32)(region_length >> 8) & 0xff000000);
+ cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] |=
+ cpu_to_le32(nespd->pd_id & 0x00007fff);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
+
+ if (pbl_count == 0) {
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, single_buffer);
+ } else {
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, root_vpbl->pbl_pbase);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, pbl_count);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX,
+ (((pbl_count - 1) * 4096) + (residual_page_count*8)));
+
+ if ((pbl_count > 1) || (residual_page_count > 32))
+ cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
+ }
+ barrier();
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ stag, ret, cqp_request->major_code, cqp_request->minor_code);
+ major_code = cqp_request->major_code;
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ if (!ret)
+ return -ETIME;
+ else if (major_code)
+ return -EIO;
+ else
+ return 0;
+}
+
+
+/**
+ * nes_reg_phys_mr
+ */
+static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
+ struct ib_phys_buf *buffer_list, int num_phys_buf, int acc,
+ u64 * iova_start)
+{
+ u64 region_length;
+ struct nes_pd *nespd = to_nespd(ib_pd);
+ struct nes_vnic *nesvnic = to_nesvnic(ib_pd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_mr *nesmr;
+ struct ib_mr *ibmr;
+ struct nes_vpbl vpbl;
+ struct nes_root_vpbl root_vpbl;
+ u32 stag;
+ u32 i;
+ u32 stag_index = 0;
+ u32 next_stag_index = 0;
+ u32 driver_key = 0;
+ u32 root_pbl_index = 0;
+ u32 cur_pbl_index = 0;
+ int err = 0, pbl_depth = 0;
+ int ret = 0;
+ u16 pbl_count = 0;
+ u8 single_page = 1;
+ u8 stag_key = 0;
+
+ pbl_depth = 0;
+ region_length = 0;
+ vpbl.pbl_vbase = NULL;
+ root_vpbl.pbl_vbase = NULL;
+ root_vpbl.pbl_pbase = 0;
+
+ get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+ stag_key = (u8)next_stag_index;
+
+ driver_key = 0;
+
+ next_stag_index >>= 8;
+ next_stag_index %= nesadapter->max_mr;
+ if (num_phys_buf > (1024*512)) {
+ return ERR_PTR(-E2BIG);
+ }
+
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs, nesadapter->max_mr,
+ &stag_index, &next_stag_index);
+ if (err) {
+ return ERR_PTR(err);
+ }
+
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
+ if (!nesmr) {
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ return ERR_PTR(-ENOMEM);
+ }
+
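+ /* Build the PBL(s): start a new 4k leaf every 512 buffers and switch to a two-level PBL once a second leaf is needed */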
+ for (i = 0; i < num_phys_buf; i++) {
+
+ if ((i & 0x01FF) == 0) {
+ if (root_pbl_index == 1) {
+ /* Allocate the root PBL */
+ root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
+ &root_vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
+ root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
+ if (!root_vpbl.pbl_vbase) {
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ kfree(nesmr);
+ return ERR_PTR(-ENOMEM);
+ }
+ root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
+ if (!root_vpbl.leaf_vpbl) {
+ pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+ root_vpbl.pbl_pbase);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ kfree(nesmr);
+ return ERR_PTR(-ENOMEM);
+ }
+ root_vpbl.pbl_vbase[0].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[0].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+ root_vpbl.leaf_vpbl[0] = vpbl;
+ }
+ /* Allocate a 4K buffer for the PBL */
+ vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%016lX\n",
+ vpbl.pbl_vbase, (unsigned long)vpbl.pbl_pbase);
+ if (!vpbl.pbl_vbase) {
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ ibmr = ERR_PTR(-ENOMEM);
+ kfree(nesmr);
+ goto reg_phys_err;
+ }
+ /* Fill in the root table */
+ if (1 <= root_pbl_index) {
+ root_vpbl.pbl_vbase[root_pbl_index].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[root_pbl_index].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+ root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
+ }
+ root_pbl_index++;
+ cur_pbl_index = 0;
+ }
+ if (buffer_list[i].addr & ~PAGE_MASK) {
+ /* TODO: Unwind allocated buffers */
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
+ (unsigned int) buffer_list[i].addr);
+ ibmr = ERR_PTR(-EINVAL);
+ kfree(nesmr);
+ goto reg_phys_err;
+ }
+
+ if (!buffer_list[i].size) {
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
+ ibmr = ERR_PTR(-EINVAL);
+ kfree(nesmr);
+ goto reg_phys_err;
+ }
+
+ region_length += buffer_list[i].size;
+ if ((i != 0) && (single_page)) {
+ if ((buffer_list[i-1].addr+PAGE_SIZE) != buffer_list[i].addr)
+ single_page = 0;
+ }
+ vpbl.pbl_vbase[cur_pbl_index].pa_low = cpu_to_le32((u32)buffer_list[i].addr);
+ vpbl.pbl_vbase[cur_pbl_index++].pa_high =
+ cpu_to_le32((u32)((((u64)buffer_list[i].addr) >> 32)));
+ }
+
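+ /* Compose the STag: allocated index in the upper bits, driver key, and the random consumer key in the low byte */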
+ stag = stag_index << 8;
+ stag |= driver_key;
+ stag += (u32)stag_key;
+
+ nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%016lX,"
+ " length = 0x%016lX, index = 0x%08X\n",
+ stag, (unsigned long)*iova_start, (unsigned long)region_length, stag_index);
+
+ region_length -= (*iova_start)&PAGE_MASK;
+
+ /* Make the leaf PBL the root if only one PBL */
+ if (root_pbl_index == 1) {
+ root_vpbl.pbl_pbase = vpbl.pbl_pbase;
+ }
+
+ if (single_page) {
+ pbl_count = 0;
+ } else {
+ pbl_count = root_pbl_index;
+ }
+ ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl,
+ buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start);
+
+ if (ret == 0) {
+ nesmr->ibmr.rkey = stag;
+ nesmr->ibmr.lkey = stag;
+ nesmr->mode = IWNES_MEMREG_TYPE_MEM;
+ ibmr = &nesmr->ibmr;
+ nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
+ nesmr->pbls_used = pbl_count;
+ if (pbl_count > 1) {
+ nesmr->pbls_used++;
+ }
+ } else {
+ kfree(nesmr);
+ ibmr = ERR_PTR(-ENOMEM);
+ }
+
+ reg_phys_err:
+ /* free the resources */
+ if (root_pbl_index == 1) {
+ /* single PBL case */
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, vpbl.pbl_pbase);
+ } else {
+ for (i=0; i<root_pbl_index; i++) {
+ pci_free_consistent(nesdev->pcidev, 4096, root_vpbl.leaf_vpbl[i].pbl_vbase,
+ root_vpbl.leaf_vpbl[i].pbl_pbase);
+ }
+ kfree(root_vpbl.leaf_vpbl);
+ pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+ root_vpbl.pbl_pbase);
+ }
+
+ return ibmr;
+}
+
+
+/**
+ * nes_get_dma_mr
+ */
+static struct ib_mr *nes_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct ib_phys_buf bl;
+ u64 kva = 0;
+
+ nes_debug(NES_DBG_MR, "\n");
+
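+ /* Cover the full 40-bit address range with a single physical registration */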
+ bl.size = (u64)0xffffffffffULL;
+ bl.addr = 0;
+ return nes_reg_phys_mr(pd, &bl, 1, acc, &kva);
+}
+
+
+/**
+ * nes_reg_user_mr
+ */
+static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *udata)
+{
+ u64 iova_start;
+ __le64 *pbl;
+ u64 region_length;
+ dma_addr_t last_dma_addr = 0;
+ dma_addr_t first_dma_addr = 0;
+ struct nes_pd *nespd = to_nespd(pd);
+ struct nes_vnic *nesvnic = to_nesvnic(pd->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct ib_mr *ibmr = ERR_PTR(-EINVAL);
+ struct ib_umem_chunk *chunk;
+ struct nes_ucontext *nes_ucontext;
+ struct nes_pbl *nespbl;
+ struct nes_mr *nesmr;
+ struct ib_umem *region;
+ struct nes_mem_reg_req req;
+ struct nes_vpbl vpbl;
+ struct nes_root_vpbl root_vpbl;
+ int nmap_index, page_index;
+ int page_count = 0;
+ int err, pbl_depth = 0;
+ int chunk_pages;
+ int ret;
+ u32 stag;
+ u32 stag_index = 0;
+ u32 next_stag_index;
+ u32 driver_key;
+ u32 root_pbl_index = 0;
+ u32 cur_pbl_index = 0;
+ u32 skip_pages;
+ u16 pbl_count;
+ u8 single_page = 1;
+ u8 stag_key;
+
+ region = ib_umem_get(pd->uobject->context, start, length, acc);
+ if (IS_ERR(region)) {
+ return (struct ib_mr *)region;
+ }
+
+ nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
+ " offset = %u, page size = %u.\n",
+ (unsigned long int)start, (unsigned long int)virt, (u32)length,
+ region->offset, region->page_size);
+
+ skip_pages = ((u32)region->offset) >> 12;
+
+ if (ib_copy_from_udata(&req, udata, sizeof(req)))
+ return ERR_PTR(-EFAULT);
+ nes_debug(NES_DBG_MR, "Memory Registration type = %08X.\n", req.reg_type);
+
+ switch (req.reg_type) {
+ case IWNES_MEMREG_TYPE_MEM:
+ pbl_depth = 0;
+ region_length = 0;
+ vpbl.pbl_vbase = NULL;
+ root_vpbl.pbl_vbase = NULL;
+ root_vpbl.pbl_pbase = 0;
+
+ get_random_bytes(&next_stag_index, sizeof(next_stag_index));
+ stag_key = (u8)next_stag_index;
+
+ driver_key = next_stag_index & 0x70000000;
+
+ next_stag_index >>= 8;
+ next_stag_index %= nesadapter->max_mr;
+
+ err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
+ nesadapter->max_mr, &stag_index, &next_stag_index);
+ if (err) {
+ ib_umem_release(region);
+ return ERR_PTR(err);
+ }
+
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
+ if (!nesmr) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ return ERR_PTR(-ENOMEM);
+ }
+ nesmr->region = region;
+
+ list_for_each_entry(chunk, &region->chunk_list, list) {
+ nes_debug(NES_DBG_MR, "Chunk: nents = %u, nmap = %u .\n",
+ chunk->nents, chunk->nmap);
+ for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
+ if (sg_dma_address(&chunk->page_list[nmap_index]) & ~PAGE_MASK) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
+ (unsigned int) sg_dma_address(&chunk->page_list[nmap_index]));
+ ibmr = ERR_PTR(-EINVAL);
+ kfree(nesmr);
+ goto reg_user_mr_err;
+ }
+
+ if (!sg_dma_len(&chunk->page_list[nmap_index])) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
+ nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
+ ibmr = ERR_PTR(-EINVAL);
+ kfree(nesmr);
+ goto reg_user_mr_err;
+ }
+
+ region_length += sg_dma_len(&chunk->page_list[nmap_index]);
+ chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12;
+ region_length -= skip_pages << 12;
+ for (page_index=skip_pages; page_index < chunk_pages; page_index++) {
+ skip_pages = 0;
+ if ((page_count != 0) && (page_count << 12) - (region->offset & (4096 - 1)) >= region->length)
+ goto enough_pages;
+ if ((page_count&0x01FF) == 0) {
+ if (page_count>(1024*512)) {
+ ib_umem_release(region);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter,
+ nesadapter->allocated_mrs, stag_index);
+ kfree(nesmr);
+ ibmr = ERR_PTR(-E2BIG);
+ goto reg_user_mr_err;
+ }
+ if (root_pbl_index == 1) {
+ root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
+ 8192, &root_vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
+ root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
+ if (!root_vpbl.pbl_vbase) {
+ ib_umem_release(region);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
+ kfree(nesmr);
+ ibmr = ERR_PTR(-ENOMEM);
+ goto reg_user_mr_err;
+ }
+ root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
+ GFP_KERNEL);
+ if (!root_vpbl.leaf_vpbl) {
+ ib_umem_release(region);
+ pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+ root_vpbl.pbl_pbase);
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ stag_index);
+ kfree(nesmr);
+ ibmr = ERR_PTR(-ENOMEM);
+ goto reg_user_mr_err;
+ }
+ root_vpbl.pbl_vbase[0].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[0].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
+ root_vpbl.leaf_vpbl[0] = vpbl;
+ }
+ vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
+ &vpbl.pbl_pbase);
+ nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
+ vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
+ if (!vpbl.pbl_vbase) {
+ ib_umem_release(region);
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
+ ibmr = ERR_PTR(-ENOMEM);
+ kfree(nesmr);
+ goto reg_user_mr_err;
+ }
+ if (1 <= root_pbl_index) {
+ root_vpbl.pbl_vbase[root_pbl_index].pa_low =
+ cpu_to_le32((u32)vpbl.pbl_pbase);
+ root_vpbl.pbl_vbase[root_pbl_index].pa_high =
+ cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
+ root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
+ }
+ root_pbl_index++;
+ cur_pbl_index = 0;
+ }
+ if (single_page) {
+ if (page_count != 0) {
+ if ((last_dma_addr+4096) !=
+ (sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096)))
+ single_page = 0;
+ last_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096);
+ } else {
+ first_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096);
+ last_dma_addr = first_dma_addr;
+ }
+ }
+
+ vpbl.pbl_vbase[cur_pbl_index].pa_low =
+ cpu_to_le32((u32)(sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096)));
+ vpbl.pbl_vbase[cur_pbl_index].pa_high =
+ cpu_to_le32((u32)((((u64)(sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096))) >> 32)));
+ cur_pbl_index++;
+ page_count++;
+ }
+ }
+ }
+ enough_pages:
+ nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x,"
+ " stag_key=0x%08x\n",
+ stag_index, driver_key, stag_key);
+ stag = stag_index << 8;
+ stag |= driver_key;
+ stag += (u32)stag_key;
+ if (stag == 0) {
+ stag = 1;
+ }
+
+ iova_start = virt;
+ /* Make the leaf PBL the root if only one PBL */
+ if (root_pbl_index == 1) {
+ root_vpbl.pbl_pbase = vpbl.pbl_pbase;
+ }
+
+ if (single_page) {
+ pbl_count = 0;
+ } else {
+ pbl_count = root_pbl_index;
+ first_dma_addr = 0;
+ }
+ nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%08X, length = 0x%08X,"
+ " index = 0x%08X, region->length=0x%08llx, pbl_count = %u\n",
+ stag, (unsigned int)iova_start,
+ (unsigned int)region_length, stag_index,
+ (unsigned long long)region->length, pbl_count);
+ ret = nes_reg_mr( nesdev, nespd, stag, region->length, &root_vpbl,
+ first_dma_addr, pbl_count, (u16)cur_pbl_index, acc, &iova_start);
+
+ nes_debug(NES_DBG_MR, "ret=%d\n", ret);
+
+ if (ret == 0) {
+ nesmr->ibmr.rkey = stag;
+ nesmr->ibmr.lkey = stag;
+ nesmr->mode = IWNES_MEMREG_TYPE_MEM;
+ ibmr = &nesmr->ibmr;
+ nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
+ nesmr->pbls_used = pbl_count;
+ if (pbl_count > 1) {
+ nesmr->pbls_used++;
+ }
+ } else {
+ ib_umem_release(region);
+ kfree(nesmr);
+ ibmr = ERR_PTR(-ENOMEM);
+ }
+
+ reg_user_mr_err:
+ /* free the resources */
+ if (root_pbl_index == 1) {
+ pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
+ vpbl.pbl_pbase);
+ } else {
+ for (page_index=0; page_index<root_pbl_index; page_index++) {
+ pci_free_consistent(nesdev->pcidev, 4096,
+ root_vpbl.leaf_vpbl[page_index].pbl_vbase,
+ root_vpbl.leaf_vpbl[page_index].pbl_pbase);
+ }
+ kfree(root_vpbl.leaf_vpbl);
+ pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
+ root_vpbl.pbl_pbase);
+ }
+
+ nes_debug(NES_DBG_MR, "Leaving, ibmr=%p", ibmr);
+
+ return ibmr;
+ break;
+ case IWNES_MEMREG_TYPE_QP:
+ case IWNES_MEMREG_TYPE_CQ:
+ nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
+ if (!nespbl) {
+ nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
+ ib_umem_release(region);
+ return ERR_PTR(-ENOMEM);
+ }
+ nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
+ if (!nesmr) {
+ ib_umem_release(region);
+ kfree(nespbl);
+ nes_debug(NES_DBG_MR, "Unable to allocate nesmr\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ nesmr->region = region;
+ nes_ucontext = to_nesucontext(pd->uobject->context);
+ pbl_depth = region->length >> 12;
+ pbl_depth += (region->length & (4096-1)) ? 1 : 0;
+ nespbl->pbl_size = pbl_depth*sizeof(u64);
+ if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
+ nes_debug(NES_DBG_MR, "Attempting to allocate QP PBL memory");
+ } else {
+ nes_debug(NES_DBG_MR, "Attempting to allocate CP PBL memory");
+ }
+
+ nes_debug(NES_DBG_MR, " %u bytes, %u entries.\n",
+ nespbl->pbl_size, pbl_depth);
+ pbl = pci_alloc_consistent(nesdev->pcidev, nespbl->pbl_size,
+ &nespbl->pbl_pbase);
+ if (!pbl) {
+ ib_umem_release(region);
+ kfree(nesmr);
+ kfree(nespbl);
+ nes_debug(NES_DBG_MR, "Unable to allocate PBL memory\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ nespbl->pbl_vbase = (u64 *)pbl;
+ nespbl->user_base = start;
+ nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%p,"
+ " pbl_vbase=%p user_base=0x%lx\n",
+ nespbl->pbl_size, (void *)nespbl->pbl_pbase,
+ (void*)nespbl->pbl_vbase, nespbl->user_base);
+
+ list_for_each_entry(chunk, &region->chunk_list, list) {
+ for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
+ chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12;
+ chunk_pages += (sg_dma_len(&chunk->page_list[nmap_index]) & (4096-1)) ? 1 : 0;
+ nespbl->page = sg_page(&chunk->page_list[0]);
+ for (page_index=0; page_index<chunk_pages; page_index++) {
+ ((__le32 *)pbl)[0] = cpu_to_le32((u32)
+ (sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096)));
+ ((__le32 *)pbl)[1] = cpu_to_le32(((u64)
+ (sg_dma_address(&chunk->page_list[nmap_index])+
+ (page_index*4096)))>>32);
+ nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
+ (unsigned long long)*pbl,
+ le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
+ pbl++;
+ }
+ }
+ }
+ if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
+ list_add_tail(&nespbl->list, &nes_ucontext->qp_reg_mem_list);
+ } else {
+ list_add_tail(&nespbl->list, &nes_ucontext->cq_reg_mem_list);
+ }
+ nesmr->ibmr.rkey = -1;
+ nesmr->ibmr.lkey = -1;
+ nesmr->mode = req.reg_type;
+ return &nesmr->ibmr;
+ break;
+ }
+
+ return ERR_PTR(-ENOSYS);
+}
+
+
+/**
+ * nes_dereg_mr
+ */
+static int nes_dereg_mr(struct ib_mr *ib_mr)
+{
+ struct nes_mr *nesmr = to_nesmr(ib_mr);
+ struct nes_vnic *nesvnic = to_nesvnic(ib_mr->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ struct nes_cqp_request *cqp_request;
+ unsigned long flags;
+ int ret;
+ u16 major_code;
+ u16 minor_code;
+
+ if (nesmr->region) {
+ ib_umem_release(nesmr->region);
+ }
+ if (nesmr->mode != IWNES_MEMREG_TYPE_MEM) {
+ kfree(nesmr);
+ return 0;
+ }
+
+ /* Deallocate the region with the adapter */
+
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ cqp_request->waiting = 1;
+ cqp_wqe = &cqp_request->cqp_wqe;
+
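+ /* Return the PBL blocks this MR held to the adapter's free pools */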
+ spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+ if (nesmr->pbls_used != 0) {
+ if (nesmr->pbl_4k) {
+ nesadapter->free_4kpbl += nesmr->pbls_used;
+ if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
+ printk(KERN_ERR PFX "free 4KB PBLs(%u) has exceeded the max(%u)\n",
+ nesadapter->free_4kpbl, nesadapter->max_4kpbl);
+ }
+ } else {
+ nesadapter->free_256pbl += nesmr->pbls_used;
+ if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
+ printk(KERN_ERR PFX "free 256B PBLs(%u) has exceeded the max(%u)\n",
+ nesadapter->free_256pbl, nesadapter->max_256pbl);
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO |
+ NES_CQP_STAG_DEALLOC_PBLS | NES_CQP_STAG_MR);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ib_mr->rkey);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X completed\n", ib_mr->rkey);
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MR, "Deallocate STag 0x%08X completed, wait_event_timeout ret = %u,"
+ " CQP Major:Minor codes = 0x%04X:0x%04X\n",
+ ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code);
+
+ nes_free_resource(nesadapter, nesadapter->allocated_mrs,
+ (ib_mr->rkey & 0x0fffff00) >> 8);
+
+ kfree(nesmr);
+
+ major_code = cqp_request->major_code;
+ minor_code = cqp_request->minor_code;
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ if (!ret) {
+ nes_debug(NES_DBG_MR, "Timeout waiting to destroy STag,"
+ " ib_mr=%p, rkey = 0x%08X\n",
+ ib_mr, ib_mr->rkey);
+ return -ETIME;
+ } else if (major_code) {
+ nes_debug(NES_DBG_MR, "Error (0x%04X:0x%04X) while attempting"
+ " to destroy STag, ib_mr=%p, rkey = 0x%08X\n",
+ major_code, minor_code, ib_mr, ib_mr->rkey);
+ return -EIO;
+ } else
+ return 0;
+}
+
+
+/**
+ * show_rev
+ */
+static ssize_t show_rev(struct class_device *cdev, char *buf)
+{
+ struct nes_ib_device *nesibdev =
+ container_of(cdev, struct nes_ib_device, ibdev.class_dev);
+ struct nes_vnic *nesvnic = nesibdev->nesvnic;
+
+ nes_debug(NES_DBG_INIT, "\n");
+ return sprintf(buf, "%x\n", nesvnic->nesdev->nesadapter->hw_rev);
+}
+
+
+/**
+ * show_fw_ver
+ */
+static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
+{
+ struct nes_ib_device *nesibdev =
+ container_of(cdev, struct nes_ib_device, ibdev.class_dev);
+ struct nes_vnic *nesvnic = nesibdev->nesvnic;
+
+ nes_debug(NES_DBG_INIT, "\n");
+ return sprintf(buf, "%x.%x.%x\n",
+ (int)(nesvnic->nesdev->nesadapter->fw_ver >> 32),
+ (int)(nesvnic->nesdev->nesadapter->fw_ver >> 16) & 0xffff,
+ (int)(nesvnic->nesdev->nesadapter->fw_ver & 0xffff));
+}
+
+
+/**
+ * show_hca
+ */
+static ssize_t show_hca(struct class_device *cdev, char *buf)
+{
+ nes_debug(NES_DBG_INIT, "\n");
+ return sprintf(buf, "NES020\n");
+}
+
+
+/**
+ * show_board
+ */
+static ssize_t show_board(struct class_device *cdev, char *buf)
+{
+ nes_debug(NES_DBG_INIT, "\n");
+ return sprintf(buf, "%.*s\n", 32, "NES020 Board ID");
+}
+
+
+static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+
+static struct class_device_attribute *nes_class_attributes[] = {
+ &class_device_attr_hw_rev,
+ &class_device_attr_fw_ver,
+ &class_device_attr_hca_type,
+ &class_device_attr_board_id
+};
+
+
+/**
+ * nes_query_qp
+ */
+static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+
+ nes_debug(NES_DBG_QP, "\n");
+
+ attr->qp_access_flags = 0;
+ attr->cap.max_send_wr = nesqp->hwqp.sq_size;
+ attr->cap.max_recv_wr = nesqp->hwqp.rq_size;
+ attr->cap.max_recv_sge = 1;
+ if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
+ init_attr->cap.max_inline_data = 0;
+ } else {
+ init_attr->cap.max_inline_data = 64;
+ }
+
+ init_attr->event_handler = nesqp->ibqp.event_handler;
+ init_attr->qp_context = nesqp->ibqp.qp_context;
+ init_attr->send_cq = nesqp->ibqp.send_cq;
+ init_attr->recv_cq = nesqp->ibqp.recv_cq;
+ init_attr->srq = nesqp->ibqp.srq;
+ init_attr->cap = attr->cap;
+
+ return 0;
+}
+
+
+/**
+ * nes_hw_modify_qp
+ */
+int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
+ u32 next_iwarp_state, u32 wait_completion)
+{
+ struct nes_hw_cqp_wqe *cqp_wqe;
+ /* struct iw_cm_id *cm_id = nesqp->cm_id; */
+ /* struct iw_cm_event cm_event; */
+ struct nes_cqp_request *cqp_request;
+ unsigned long flags;
+ int ret;
+ u16 major_code;
+
+ nes_debug(NES_DBG_MOD_QP, "QP%u, refcount=%d\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
+
+ cqp_request = nes_get_cqp_request(nesdev);
+ if (cqp_request == NULL) {
+ nes_debug(NES_DBG_MOD_QP, "Failed to get a cqp_request.\n");
+ return -ENOMEM;
+ }
+ if (wait_completion) {
+ cqp_request->waiting = 1;
+ } else {
+ cqp_request->waiting = 0;
+ }
+ cqp_wqe = &cqp_request->cqp_wqe;
+
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
+ NES_CQP_MODIFY_QP | NES_CQP_QP_TYPE_IWARP | next_iwarp_state);
+ nes_debug(NES_DBG_MOD_QP, "using next_iwarp_state=%08x, wqe_words=%08x\n",
+ next_iwarp_state, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX]));
+ nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
+ set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
+ set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
+
+ atomic_set(&cqp_request->refcount, 2);
+ nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
+
+ /* Wait for CQP */
+ if (wait_completion) {
+ /* nes_debug(NES_DBG_MOD_QP, "Waiting for modify iWARP QP%u to complete.\n",
+ nesqp->hwqp.qp_id); */
+ ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
+ NES_EVENT_TIMEOUT);
+ nes_debug(NES_DBG_MOD_QP, "Modify iwarp QP%u completed, wait_event_timeout ret=%u, "
+ "CQP Major:Minor codes = 0x%04X:0x%04X.\n",
+ nesqp->hwqp.qp_id, ret, cqp_request->major_code, cqp_request->minor_code);
+ major_code = cqp_request->major_code;
+ if (major_code) {
+ nes_debug(NES_DBG_MOD_QP, "Modify iwarp QP%u failed"
+ "CQP Major:Minor codes = 0x%04X:0x%04X, intended next state = 0x%08X.\n",
+ nesqp->hwqp.qp_id, cqp_request->major_code,
+ cqp_request->minor_code, next_iwarp_state);
+ }
+ if (atomic_dec_and_test(&cqp_request->refcount)) {
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ spin_lock_irqsave(&nesdev->cqp.lock, flags);
+ list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
+ spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
+ }
+ }
+ if (!ret)
+ return -ETIME;
+ else if (major_code)
+ return -EIO;
+ else
+ return 0;
+ } else {
+ return 0;
+ }
+}
+
+
+/**
+ * nes_modify_qp
+ */
+int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+ struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ /* u32 cqp_head; */
+ /* u32 counter; */
+ u32 next_iwarp_state = 0;
+ int err;
+ unsigned long qplockflags;
+ int ret;
+ u16 original_last_aeq;
+ u8 issue_modify_qp = 0;
+ u8 issue_disconnect = 0;
+ u8 dont_wait = 0;
+
+ nes_debug(NES_DBG_MOD_QP, "QP%u: QP State=%u, cur QP State=%u,"
+ " iwarp_state=0x%X, refcount=%d\n",
+ nesqp->hwqp.qp_id, attr->qp_state, nesqp->ibqp_state,
+ nesqp->iwarp_state, atomic_read(&nesqp->refcount));
+
+ nes_add_ref(&nesqp->ibqp);
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+
+ nes_debug(NES_DBG_MOD_QP, "QP%u: hw_iwarp_state=0x%X, hw_tcp_state=0x%X,"
+ " QP Access Flags=0x%X, attr_mask = 0x%0x\n",
+ nesqp->hwqp.qp_id, nesqp->hw_iwarp_state,
+ nesqp->hw_tcp_state, attr->qp_access_flags, attr_mask);
+
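+ /* Translate the requested IB QP state into the matching iWARP/CQP state transition */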
+ if (attr_mask & IB_QP_STATE) {
+ switch (attr->qp_state) {
+ case IB_QPS_INIT:
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state = init\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_IDLE) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_RTR:
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state = rtr\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_IDLE) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_RTS:
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state = rts\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_RTS) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ if (nesqp->cm_id == NULL) {
+ nes_debug(NES_DBG_MOD_QP, "QP%u: Failing attempt to move QP to RTS without a CM_ID. \n",
+ nesqp->hwqp.qp_id );
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_RTS;
+ if (nesqp->iwarp_state != NES_CQP_QP_IWARP_STATE_RTS)
+ next_iwarp_state |= NES_CQP_QP_CONTEXT_VALID |
+ NES_CQP_QP_ARP_VALID | NES_CQP_QP_ORD_VALID;
+ issue_modify_qp = 1;
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_ESTABLISHED;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_RTS;
+ nesqp->hte_added = 1;
+ break;
+ case IB_QPS_SQD:
+ issue_modify_qp = 1;
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state=closing. SQ head=%u, SQ tail=%u\n",
+ nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail);
+ if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return 0;
+ } else {
+ if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
+ nes_debug(NES_DBG_MOD_QP, "QP%u: State change to closing"
+ " ignored due to current iWARP state\n",
+ nesqp->hwqp.qp_id);
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ if (nesqp->hw_iwarp_state != NES_AEQE_IWARP_STATE_RTS) {
+ nes_debug(NES_DBG_MOD_QP, "QP%u: State change to closing"
+ " already done based on hw state.\n",
+ nesqp->hwqp.qp_id);
+ issue_modify_qp = 0;
+ nesqp->in_disconnect = 0;
+ }
+ switch (nesqp->hw_iwarp_state) {
+ case NES_AEQE_IWARP_STATE_CLOSING:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+ break;
+ case NES_AEQE_IWARP_STATE_TERMINATE:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
+ break;
+ case NES_AEQE_IWARP_STATE_ERROR:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
+ break;
+ default:
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
+ nesqp->in_disconnect = 1;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+ break;
+ }
+ }
+ break;
+ case IB_QPS_SQE:
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state = terminate\n",
+ nesqp->hwqp.qp_id);
+ if (nesqp->iwarp_state >= (u32)NES_CQP_QP_IWARP_STATE_TERMINATE) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
+ issue_modify_qp = 1;
+ nesqp->in_disconnect = 1;
+ break;
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+ if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_ERROR) {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ }
+ nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
+ nesqp->hwqp.qp_id);
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
+ /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
+ if (nesqp->hte_added) {
+ nes_debug(NES_DBG_MOD_QP, "set CQP_QP_DEL_HTE\n");
+ next_iwarp_state |= NES_CQP_QP_DEL_HTE;
+ nesqp->hte_added = 0;
+ }
+ if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
+ (nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
+ next_iwarp_state |= NES_CQP_QP_RESET;
+ nesqp->in_disconnect = 1;
+ } else {
+ nes_debug(NES_DBG_MOD_QP, "QP%u NOT setting NES_CQP_QP_RESET since TCP state = %u\n",
+ nesqp->hwqp.qp_id, nesqp->hw_tcp_state);
+ dont_wait = 1;
+ }
+ issue_modify_qp = 1;
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
+ break;
+ default:
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_rem_ref(&nesqp->ibqp);
+ return -EINVAL;
+ break;
+ }
+
+ nesqp->ibqp_state = attr->qp_state;
+ if (((nesqp->iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) ==
+ (u32)NES_CQP_QP_IWARP_STATE_RTS) &&
+ ((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) >
+ (u32)NES_CQP_QP_IWARP_STATE_RTS)) {
+ nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
+ nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
+ nesqp->iwarp_state);
+ issue_disconnect = 1;
+ } else {
+ nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
+ nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
+ nesqp->iwarp_state);
+ }
+ }
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) {
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN |
+ NES_QPCONTEXT_MISC_RDMA_READ_EN);
+ issue_modify_qp = 1;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) {
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN);
+ issue_modify_qp = 1;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) {
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_READ_EN);
+ issue_modify_qp = 1;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_MW_BIND) {
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WBIND_EN);
+ issue_modify_qp = 1;
+ }
+
+ if (nesqp->user_mode) {
+ nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN |
+ NES_QPCONTEXT_MISC_RDMA_READ_EN);
+ issue_modify_qp = 1;
+ }
+ }
+
+ original_last_aeq = nesqp->last_aeq;
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+
+ nes_debug(NES_DBG_MOD_QP, "issue_modify_qp=%u\n", issue_modify_qp);
+
+ ret = 0;
+
+ if (issue_modify_qp) {
+ nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
+ ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1);
+ if (ret)
+ nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
+ " failed for QP%u.\n",
+ next_iwarp_state, nesqp->hwqp.qp_id);
+
+ }
+
+ if ((issue_modify_qp) && (nesqp->ibqp_state > IB_QPS_RTS)) {
+ nes_debug(NES_DBG_MOD_QP, "QP%u Issued ModifyQP refcount (%d),"
+ " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ if ((!ret) ||
+ ((original_last_aeq != NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) &&
+ (ret))) {
+ if (dont_wait) {
+ if (nesqp->cm_id && nesqp->hw_tcp_state != 0) {
+ nes_debug(NES_DBG_MOD_QP, "QP%u Queuing fake disconnect for QP refcount (%d),"
+ " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ /* this one is for the cm_disconnect thread */
+ nes_add_ref(&nesqp->ibqp);
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+ nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
+ nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_cm_disconn(nesqp);
+ } else {
+ nes_debug(NES_DBG_MOD_QP, "QP%u No fake disconnect, QP refcount=%d\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
+ nes_rem_ref(&nesqp->ibqp);
+ }
+ } else {
+ spin_lock_irqsave(&nesqp->lock, qplockflags);
+ if (nesqp->cm_id) {
+ /* These two are for the timer thread */
+ if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
+ nes_add_ref(&nesqp->ibqp);
+ nesqp->cm_id->add_ref(nesqp->cm_id);
+ nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d),"
+ " need ae to finish up, original_last_aeq = 0x%04X."
+ " last_aeq = 0x%04X, scheduling timer.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ schedule_nes_timer(nesqp->cm_node, (struct sk_buff *) nesqp, NES_TIMER_TYPE_CLOSE, 1, 0);
+ }
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ } else {
+ spin_unlock_irqrestore(&nesqp->lock, qplockflags);
+ nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d),"
+ " need ae to finish up, original_last_aeq = 0x%04X."
+ " last_aeq = 0x%04X.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ }
+ }
+ } else {
+ nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up,"
+ " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ nes_rem_ref(&nesqp->ibqp);
+ }
+ } else {
+ nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up,"
+ " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
+ original_last_aeq, nesqp->last_aeq);
+ nes_rem_ref(&nesqp->ibqp);
+ }
+
+ err = 0;
+
+ nes_debug(NES_DBG_MOD_QP, "QP%u Leaving, refcount=%d\n",
+ nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
+
+ return err;
+}
+
+
+/**
+ * nes_multicast_attach
+ */
+static int nes_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ nes_debug(NES_DBG_INIT, "\n");
+ return -ENOSYS;
+}
+
+
+/**
+ * nes_multicast_detach
+ */
+static int nes_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ nes_debug(NES_DBG_INIT, "\n");
+ return -ENOSYS;
+}
+
+
+/**
+ * nes_process_mad
+ */
+static int nes_process_mad(struct ib_device *ibdev, int mad_flags,
+ u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ nes_debug(NES_DBG_INIT, "\n");
+ return -ENOSYS;
+}
+
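+/*
+ * Copy each SGE's address, length and (when uselkey is set) lkey into the
+ * WQE fragment slots and record the summed payload length in the WQE.
+ */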
+static inline void
+fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselkey)
+{
+ int sge_index;
+ int total_payload_length = 0;
+ for (sge_index = 0; sge_index < ib_wr->num_sge; sge_index++) {
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX+(sge_index*4),
+ ib_wr->sg_list[sge_index].addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_LENGTH0_IDX + (sge_index*4),
+ ib_wr->sg_list[sge_index].length);
+ if (uselkey)
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4),
+ (ib_wr->sg_list[sge_index].lkey));
+ else
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4), 0);
+
+ total_payload_length += ib_wr->sg_list[sge_index].length;
+ }
+ nes_debug(NES_DBG_IW_TX, "UC UC UC, sending total_payload_length=%u \n",
+ total_payload_length);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+ total_payload_length);
+}
+
+/**
+ * nes_post_send
+ */
+static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
+ struct ib_send_wr **bad_wr)
+{
+ u64 u64temp;
+ unsigned long flags = 0;
+ struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+ struct nes_hw_qp_wqe *wqe;
+ int err;
+ u32 qsize = nesqp->hwqp.sq_size;
+ u32 head;
+ u32 wqe_misc;
+ u32 wqe_count;
+ u32 counter;
+ u32 total_payload_length;
+
+ err = 0;
+ wqe_misc = 0;
+ wqe_count = 0;
+ total_payload_length = 0;
+
+ if (nesqp->ibqp_state > IB_QPS_RTS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+
+ head = nesqp->hwqp.sq_head;
+
+ while (ib_wr) {
+ /* Check for SQ overflow */
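+ /* the ring is full when head is one slot behind tail (mod qsize) */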
+ if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
+ err = -EINVAL;
+ break;
+ }
+
+ wqe = &nesqp->hwqp.sq_vbase[head];
+ /* nes_debug(NES_DBG_IW_TX, "processing sq wqe for QP%u at %p, head = %u.\n",
+ nesqp->hwqp.qp_id, wqe, head); */
+ nes_fill_init_qp_wqe(wqe, nesqp, head);
+ u64temp = (u64)(ib_wr->wr_id);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
+ u64temp);
+ switch (ib_wr->opcode) {
+ case IB_WR_SEND:
+ if (ib_wr->send_flags & IB_SEND_SOLICITED) {
+ wqe_misc = NES_IWARP_SQ_OP_SENDSE;
+ } else {
+ wqe_misc = NES_IWARP_SQ_OP_SEND;
+ }
+ if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+ err = -EINVAL;
+ break;
+ }
+ if (ib_wr->send_flags & IB_SEND_FENCE) {
+ wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+ }
+ if ((ib_wr->send_flags & IB_SEND_INLINE) &&
+ ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+ (ib_wr->sg_list[0].length <= 64)) {
+ memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+ (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+ ib_wr->sg_list[0].length);
+ wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
+ } else {
+ fill_wqe_sg_send(wqe, ib_wr, 1);
+ }
+
+ break;
+ case IB_WR_RDMA_WRITE:
+ wqe_misc = NES_IWARP_SQ_OP_RDMAW;
+ if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+ nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
+ ib_wr->num_sge,
+ nesdev->nesadapter->max_sge);
+ err = -EINVAL;
+ break;
+ }
+ if (ib_wr->send_flags & IB_SEND_FENCE) {
+ wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
+ }
+
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
+ ib_wr->wr.rdma.rkey);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
+ ib_wr->wr.rdma.remote_addr);
+
+ if ((ib_wr->send_flags & IB_SEND_INLINE) &&
+ ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
+ (ib_wr->sg_list[0].length <= 64)) {
+ memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
+ (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
+ ib_wr->sg_list[0].length);
+ wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
+ } else {
+ fill_wqe_sg_send(wqe, ib_wr, 1);
+ }
+ wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
+ wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
+ break;
+ case IB_WR_RDMA_READ:
+ /* iWARP only supports 1 sge for RDMA reads */
+ if (ib_wr->num_sge > 1) {
+ nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
+ ib_wr->num_sge);
+ err = -EINVAL;
+ break;
+ }
+ wqe_misc = NES_IWARP_SQ_OP_RDMAR;
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
+ ib_wr->wr.rdma.remote_addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
+ ib_wr->wr.rdma.rkey);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
+ ib_wr->sg_list->length);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
+ ib_wr->sg_list->addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
+ ib_wr->sg_list->lkey);
+ break;
+ default:
+ /* error */
+ err = -EINVAL;
+ break;
+ }
+
+ if (ib_wr->send_flags & IB_SEND_SIGNALED) {
+ wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
+ }
+ wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(wqe_misc);
+
+ ib_wr = ib_wr->next;
+ head++;
+ wqe_count++;
+ if (head >= qsize)
+ head = 0;
+
+ }
+
+ nesqp->hwqp.sq_head = head;
+ barrier();
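+ /* ring the WQE allocate doorbell, posting at most 255 new WQEs per write */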
+ while (wqe_count) {
+ counter = min(wqe_count, ((u32)255));
+ wqe_count -= counter;
+ nes_write32(nesdev->regs + NES_WQE_ALLOC,
+ (counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);
+ }
+
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ if (err)
+ *bad_wr = ib_wr;
+ return err;
+}
+
+
+/**
+ * nes_post_recv
+ */
+static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
+ struct ib_recv_wr **bad_wr)
+{
+ u64 u64temp;
+ unsigned long flags = 0;
+ struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_qp *nesqp = to_nesqp(ibqp);
+ struct nes_hw_qp_wqe *wqe;
+ int err = 0;
+ int sge_index;
+ u32 qsize = nesqp->hwqp.rq_size;
+ u32 head;
+ u32 wqe_count = 0;
+ u32 counter;
+ u32 total_payload_length;
+
+ if (nesqp->ibqp_state > IB_QPS_RTS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&nesqp->lock, flags);
+
+ head = nesqp->hwqp.rq_head;
+
+ while (ib_wr) {
+ if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
+ err = -EINVAL;
+ break;
+ }
+ /* Check for RQ overflow */
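+ /* same ring-full test as the SQ: head one slot behind tail (mod qsize) */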
+ if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) {
+ err = -EINVAL;
+ break;
+ }
+
+ nes_debug(NES_DBG_IW_RX, "ibwr sge count = %u.\n", ib_wr->num_sge);
+ wqe = &nesqp->hwqp.rq_vbase[head];
+
+ /* nes_debug(NES_DBG_IW_RX, "QP%u:processing rq wqe at %p, head = %u.\n",
+ nesqp->hwqp.qp_id, wqe, head); */
+ nes_fill_init_qp_wqe(wqe, nesqp, head);
+ u64temp = (u64)(ib_wr->wr_id);
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
+ u64temp);
+ total_payload_length = 0;
+ for (sge_index = 0; sge_index < ib_wr->num_sge; sge_index++) {
+ set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_FRAG0_LOW_IDX+(sge_index*4),
+ ib_wr->sg_list[sge_index].addr);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_LENGTH0_IDX+(sge_index*4),
+ ib_wr->sg_list[sge_index].length);
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_STAG0_IDX+(sge_index*4),
+ ib_wr->sg_list[sge_index].lkey);
+
+ total_payload_length += ib_wr->sg_list[sge_index].length;
+ }
+ set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX,
+ total_payload_length);
+
+ ib_wr = ib_wr->next;
+ head++;
+ wqe_count++;
+ if (head >= qsize)
+ head = 0;
+ }
+
+ nesqp->hwqp.rq_head = head;
+ barrier();
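+ /* ring the RQ doorbell, posting at most 255 new WQEs per write */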
+ while (wqe_count) {
+ counter = min(wqe_count, ((u32)255));
+ wqe_count -= counter;
+ nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter<<24) | nesqp->hwqp.qp_id);
+ }
+
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+
+ if (err)
+ *bad_wr = ib_wr;
+ return err;
+}
+
+
+/**
+ * nes_poll_cq
+ */
+static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
+{
+ u64 u64temp;
+ u64 wrid;
+ /* u64 u64temp; */
+ unsigned long flags = 0;
+ struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_cq *nescq = to_nescq(ibcq);
+ struct nes_qp *nesqp;
+ struct nes_hw_cqe cqe;
+ u32 head;
+ u32 wq_tail;
+ u32 cq_size;
+ u32 cqe_count = 0;
+ u32 wqe_index;
+ u32 u32temp;
+ /* u32 counter; */
+
+ nes_debug(NES_DBG_CQ, "\n");
+
+ spin_lock_irqsave(&nescq->lock, flags);
+
+ head = nescq->hw_cq.cq_head;
+ cq_size = nescq->hw_cq.cq_size;
+
+ while (cqe_count < num_entries) {
+ if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
+ NES_CQE_VALID) {
+ cqe = nescq->hw_cq.cq_vbase[head];
+ nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
+ u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+ wqe_index = u32temp &
+ (nesdev->nesadapter->max_qp_wr - 1);
+ u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
+ /* parse CQE, get completion context from WQE (either rq or sq) */
+ u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
+ ((u64)u32temp);
+ nesqp = *((struct nes_qp **)&u64temp);
+ memset(entry, 0, sizeof *entry);
+ if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
+ entry->status = IB_WC_SUCCESS;
+ } else {
+ entry->status = IB_WC_WR_FLUSH_ERR;
+ }
+
+ entry->qp = &nesqp->ibqp;
+ entry->src_qp = nesqp->hwqp.qp_id;
+
+ if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
+ if (nesqp->skip_lsmm) {
+ nesqp->skip_lsmm = 0;
+ wq_tail = nesqp->hwqp.sq_tail++;
+ }
+
+ /* Working on a SQ Completion*/
+ wq_tail = wqe_index;
+ nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
+ wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
+ ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
+ entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
+
+ switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
+ case NES_IWARP_SQ_OP_RDMAW:
+ nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
+ entry->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case NES_IWARP_SQ_OP_RDMAR:
+ nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
+ entry->opcode = IB_WC_RDMA_READ;
+ entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+ wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
+ break;
+ case NES_IWARP_SQ_OP_SENDINV:
+ case NES_IWARP_SQ_OP_SENDSEINV:
+ case NES_IWARP_SQ_OP_SEND:
+ case NES_IWARP_SQ_OP_SENDSE:
+ nes_debug(NES_DBG_CQ, "Operation = Send.\n");
+ entry->opcode = IB_WC_SEND;
+ break;
+ }
+ } else {
+ /* Working on a RQ Completion*/
+ wq_tail = wqe_index;
+ nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
+ entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
+ wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
+ ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
+ entry->opcode = IB_WC_RECV;
+ }
+ entry->wr_id = wrid;
+
+ if (++head >= cq_size)
+ head = 0;
+ cqe_count++;
+ nescq->polled_completions++;
+ if ((nescq->polled_completions > (cq_size / 2)) ||
+ (nescq->polled_completions == 255)) {
+ nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
+ " are pending %u of %u.\n",
+ nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
+ nes_write32(nesdev->regs+NES_CQE_ALLOC,
+ nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
+ nescq->polled_completions = 0;
+ }
+ entry++;
+ } else
+ break;
+ }
+
+ if (nescq->polled_completions) {
+ nes_write32(nesdev->regs+NES_CQE_ALLOC,
+ nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
+ nescq->polled_completions = 0;
+ }
+
+ nescq->hw_cq.cq_head = head;
+ nes_debug(NES_DBG_CQ, "Reporting %u completions for CQ%u.\n",
+ cqe_count, nescq->hw_cq.cq_number);
+
+ spin_unlock_irqrestore(&nescq->lock, flags);
+
+ return cqe_count;
+}
+
+
+/**
+ * nes_req_notify_cq
+ */
+static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
+{
+ struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_cq *nescq = to_nescq(ibcq);
+ u32 cq_arm;
+
+ nes_debug(NES_DBG_CQ, "Requesting notification for CQ%u.\n",
+ nescq->hw_cq.cq_number);
+
+ cq_arm = nescq->hw_cq.cq_number;
+ if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
+ cq_arm |= NES_CQE_ALLOC_NOTIFY_NEXT;
+ else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
+ cq_arm |= NES_CQE_ALLOC_NOTIFY_SE;
+ else
+ return -EINVAL;
+
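+ /* arm the CQ; reading the register back flushes the posted write */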
+ nes_write32(nesdev->regs+NES_CQE_ALLOC, cq_arm);
+ nes_read32(nesdev->regs+NES_CQE_ALLOC);
+
+ return 0;
+}
+
+
+/**
+ * nes_init_ofa_device
+ */
+struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
+{
+ struct nes_ib_device *nesibdev;
+ struct nes_vnic *nesvnic = netdev_priv(netdev);
+ struct nes_device *nesdev = nesvnic->nesdev;
+
+ nesibdev = (struct nes_ib_device *)ib_alloc_device(sizeof(struct nes_ib_device));
+ if (nesibdev == NULL) {
+ return NULL;
+ }
+ strlcpy(nesibdev->ibdev.name, "nes%d", IB_DEVICE_NAME_MAX);
+ nesibdev->ibdev.owner = THIS_MODULE;
+
+ nesibdev->ibdev.node_type = RDMA_NODE_RNIC;
+ memset(&nesibdev->ibdev.node_guid, 0, sizeof(nesibdev->ibdev.node_guid));
+ memcpy(&nesibdev->ibdev.node_guid, netdev->dev_addr, 6);
+
+ nesibdev->ibdev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+ (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
+ (1ull << IB_USER_VERBS_CMD_BIND_MW) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_MW) |
+ (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+ (1ull << IB_USER_VERBS_CMD_POST_SEND);
+
+ nesibdev->ibdev.phys_port_cnt = 1;
+ nesibdev->ibdev.num_comp_vectors = 1;
+ nesibdev->ibdev.dma_device = &nesdev->pcidev->dev;
+ nesibdev->ibdev.class_dev.dev = &nesdev->pcidev->dev;
+ nesibdev->ibdev.query_device = nes_query_device;
+ nesibdev->ibdev.query_port = nes_query_port;
+ nesibdev->ibdev.modify_port = nes_modify_port;
+ nesibdev->ibdev.query_pkey = nes_query_pkey;
+ nesibdev->ibdev.query_gid = nes_query_gid;
+ nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext;
+ nesibdev->ibdev.dealloc_ucontext = nes_dealloc_ucontext;
+ nesibdev->ibdev.mmap = nes_mmap;
+ nesibdev->ibdev.alloc_pd = nes_alloc_pd;
+ nesibdev->ibdev.dealloc_pd = nes_dealloc_pd;
+ nesibdev->ibdev.create_ah = nes_create_ah;
+ nesibdev->ibdev.destroy_ah = nes_destroy_ah;
+ nesibdev->ibdev.create_qp = nes_create_qp;
+ nesibdev->ibdev.modify_qp = nes_modify_qp;
+ nesibdev->ibdev.query_qp = nes_query_qp;
+ nesibdev->ibdev.destroy_qp = nes_destroy_qp;
+ nesibdev->ibdev.create_cq = nes_create_cq;
+ nesibdev->ibdev.destroy_cq = nes_destroy_cq;
+ nesibdev->ibdev.poll_cq = nes_poll_cq;
+ nesibdev->ibdev.get_dma_mr = nes_get_dma_mr;
+ nesibdev->ibdev.reg_phys_mr = nes_reg_phys_mr;
+ nesibdev->ibdev.reg_user_mr = nes_reg_user_mr;
+ nesibdev->ibdev.dereg_mr = nes_dereg_mr;
+ nesibdev->ibdev.alloc_mw = nes_alloc_mw;
+ nesibdev->ibdev.dealloc_mw = nes_dealloc_mw;
+ nesibdev->ibdev.bind_mw = nes_bind_mw;
+
+ nesibdev->ibdev.alloc_fmr = nes_alloc_fmr;
+ nesibdev->ibdev.unmap_fmr = nes_unmap_fmr;
+ nesibdev->ibdev.dealloc_fmr = nes_dealloc_fmr;
+ nesibdev->ibdev.map_phys_fmr = nes_map_phys_fmr;
+
+ nesibdev->ibdev.attach_mcast = nes_multicast_attach;
+ nesibdev->ibdev.detach_mcast = nes_multicast_detach;
+ nesibdev->ibdev.process_mad = nes_process_mad;
+
+ nesibdev->ibdev.req_notify_cq = nes_req_notify_cq;
+ nesibdev->ibdev.post_send = nes_post_send;
+ nesibdev->ibdev.post_recv = nes_post_recv;
+
+ nesibdev->ibdev.iwcm = kzalloc(sizeof(*nesibdev->ibdev.iwcm), GFP_KERNEL);
+ if (nesibdev->ibdev.iwcm == NULL) {
+ ib_dealloc_device(&nesibdev->ibdev);
+ return NULL;
+ }
+ nesibdev->ibdev.iwcm->add_ref = nes_add_ref;
+ nesibdev->ibdev.iwcm->rem_ref = nes_rem_ref;
+ nesibdev->ibdev.iwcm->get_qp = nes_get_qp;
+ nesibdev->ibdev.iwcm->connect = nes_connect;
+ nesibdev->ibdev.iwcm->accept = nes_accept;
+ nesibdev->ibdev.iwcm->reject = nes_reject;
+ nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
+ nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
+
+ return nesibdev;
+}
+
+
+/**
+ * nes_destroy_ofa_device
+ */
+void nes_destroy_ofa_device(struct nes_ib_device *nesibdev)
+{
+ if (nesibdev == NULL)
+ return;
+
+ nes_unregister_ofa_device(nesibdev);
+
+ kfree(nesibdev->ibdev.iwcm);
+ ib_dealloc_device(&nesibdev->ibdev);
+}
+
+
+/**
+ * nes_register_ofa_device
+ */
+int nes_register_ofa_device(struct nes_ib_device *nesibdev)
+{
+ struct nes_vnic *nesvnic = nesibdev->nesvnic;
+ struct nes_device *nesdev = nesvnic->nesdev;
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ int i, ret;
+
+ ret = ib_register_device(&nesvnic->nesibdev->ibdev);
+ if (ret) {
+ return ret;
+ }
+
+ /* Get the resources allocated to this device */
+ nesibdev->max_cq = (nesadapter->max_cq-NES_FIRST_QPN) / nesadapter->port_count;
+ nesibdev->max_mr = nesadapter->max_mr / nesadapter->port_count;
+ nesibdev->max_qp = (nesadapter->max_qp-NES_FIRST_QPN) / nesadapter->port_count;
+ nesibdev->max_pd = nesadapter->max_pd / nesadapter->port_count;
+
+ for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
+ ret = class_device_create_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
+ if (ret) {
+ while (i > 0) {
+ i--;
+ class_device_remove_file(&nesibdev->ibdev.class_dev,
+ nes_class_attributes[i]);
+ }
+ ib_unregister_device(&nesibdev->ibdev);
+ return ret;
+ }
+ }
+
+ nesvnic->of_device_registered = 1;
+
+ return 0;
+}
+
+
+/**
+ * nes_unregister_ofa_device
+ */
+void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
+{
+ struct nes_vnic *nesvnic = nesibdev->nesvnic;
+ int i;
+
+ if (nesibdev == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
+ class_device_remove_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
+ }
+
+ if (nesvnic->of_device_registered) {
+ ib_unregister_device(&nesibdev->ibdev);
+ }
+
+ nesvnic->of_device_registered = 0;
+}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
new file mode 100644
index 000000000000..6c6b4da5184f
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef NES_VERBS_H
+#define NES_VERBS_H
+
+struct nes_device;
+
+#define NES_MAX_USER_DB_REGIONS 4096
+#define NES_MAX_USER_WQ_REGIONS 4096
+
+struct nes_ucontext {
+ struct ib_ucontext ibucontext;
+ struct nes_device *nesdev;
+ unsigned long mmap_wq_offset;
+ unsigned long mmap_cq_offset; /* to be removed */
+ int index; /* rnic index (minor) */
+ unsigned long allocated_doorbells[BITS_TO_LONGS(NES_MAX_USER_DB_REGIONS)];
+ u16 mmap_db_index[NES_MAX_USER_DB_REGIONS];
+ u16 first_free_db;
+ unsigned long allocated_wqs[BITS_TO_LONGS(NES_MAX_USER_WQ_REGIONS)];
+ struct nes_qp *mmap_nesqp[NES_MAX_USER_WQ_REGIONS];
+ u16 first_free_wq;
+ struct list_head cq_reg_mem_list;
+ struct list_head qp_reg_mem_list;
+ u32 mcrqf;
+ atomic_t usecnt;
+};
+
+struct nes_pd {
+ struct ib_pd ibpd;
+ u16 pd_id;
+ atomic_t sqp_count;
+ u16 mmap_db_index;
+};
+
+struct nes_mr {
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ struct ib_fmr ibfmr;
+ };
+ struct ib_umem *region;
+ u16 pbls_used;
+ u8 mode;
+ u8 pbl_4k;
+};
+
+struct nes_hw_pb {
+ __le32 pa_low;
+ __le32 pa_high;
+};
+
+struct nes_vpbl {
+ dma_addr_t pbl_pbase;
+ struct nes_hw_pb *pbl_vbase;
+};
+
+struct nes_root_vpbl {
+ dma_addr_t pbl_pbase;
+ struct nes_hw_pb *pbl_vbase;
+ struct nes_vpbl *leaf_vpbl;
+};
+
+struct nes_fmr {
+ struct nes_mr nesmr;
+ u32 leaf_pbl_cnt;
+ struct nes_root_vpbl root_vpbl;
+ struct ib_qp *ib_qp;
+ int access_rights;
+ struct ib_fmr_attr attr;
+};
+
+struct nes_av;
+
+struct nes_cq {
+ struct ib_cq ibcq;
+ struct nes_hw_cq hw_cq;
+ u32 polled_completions;
+ u32 cq_mem_size;
+ spinlock_t lock;
+ u8 virtual_cq;
+ u8 pad[3];
+};
+
+struct nes_wq {
+ spinlock_t lock;
+};
+
+struct iw_cm_id;
+struct ietf_mpa_frame;
+
+struct nes_qp {
+ struct ib_qp ibqp;
+ void *allocated_buffer;
+ struct iw_cm_id *cm_id;
+ struct workqueue_struct *wq;
+ struct work_struct disconn_work;
+ struct nes_cq *nesscq;
+ struct nes_cq *nesrcq;
+ struct nes_pd *nespd;
+ void *cm_node; /* handle of the node this QP is associated with */
+ struct ietf_mpa_frame *ietf_frame;
+ dma_addr_t ietf_frame_pbase;
+ wait_queue_head_t state_waitq;
+ unsigned long socket;
+ struct nes_hw_qp hwqp;
+ struct work_struct work;
+ struct work_struct ae_work;
+ enum ib_qp_state ibqp_state;
+ u32 iwarp_state;
+ u32 hte_index;
+ u32 last_aeq;
+ u32 qp_mem_size;
+ atomic_t refcount;
+ atomic_t close_timer_started;
+ u32 mmap_sq_db_index;
+ u32 mmap_rq_db_index;
+ spinlock_t lock;
+ struct nes_qp_context *nesqp_context;
+ dma_addr_t nesqp_context_pbase;
+ void *pbl_vbase;
+ dma_addr_t pbl_pbase;
+ struct page *page;
+ wait_queue_head_t kick_waitq;
+ u16 in_disconnect;
+ u16 private_data_len;
+ u8 active_conn;
+ u8 skip_lsmm;
+ u8 user_mode;
+ u8 hte_added;
+ u8 hw_iwarp_state;
+ u8 flush_issued;
+ u8 hw_tcp_state;
+ u8 disconn_pending;
+ u8 destroyed;
+};
+#endif /* NES_VERBS_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index a082466f4a83..09f5371137a1 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -680,12 +680,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
neigh = *to_ipoib_neigh(skb->dst->neighbour);
- if (ipoib_cm_get(neigh)) {
- if (ipoib_cm_up(neigh)) {
- ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
- goto out;
- }
- } else if (neigh->ah) {
+ if (neigh->ah)
if (unlikely((memcmp(&neigh->dgid.raw,
skb->dst->neighbour->ha + 4,
sizeof(union ib_gid))) ||
@@ -706,6 +701,12 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ if (ipoib_cm_get(neigh)) {
+ if (ipoib_cm_up(neigh)) {
+ ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
+ goto out;
+ }
+ } else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
goto out;
}
@@ -813,11 +814,9 @@ static void ipoib_neigh_cleanup(struct neighbour *n)
struct ipoib_ah *ah = NULL;
neigh = *to_ipoib_neigh(n);
- if (neigh) {
+ if (neigh)
priv = netdev_priv(neigh->dev);
- ipoib_dbg(priv, "neigh_destructor for bonding device: %s\n",
- n->dev->name);
- } else
+ else
return;
ipoib_dbg(priv,
"neigh_cleanup for %06x " IPOIB_GID_FMT "\n",
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 195ce7c12319..fd4a49fc4773 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -204,6 +204,22 @@ out:
return ret;
}
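+/*
+ * Create a fresh CM ID for the target, destroying any existing one, so that
+ * later connection attempts never reuse a CM ID the target considers stale.
+ */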
+static int srp_new_cm_id(struct srp_target_port *target)
+{
+ struct ib_cm_id *new_cm_id;
+
+ new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
+ srp_cm_handler, target);
+ if (IS_ERR(new_cm_id))
+ return PTR_ERR(new_cm_id);
+
+ if (target->cm_id)
+ ib_destroy_cm_id(target->cm_id);
+ target->cm_id = new_cm_id;
+
+ return 0;
+}
+
static int srp_create_target_ib(struct srp_target_port *target)
{
struct ib_qp_init_attr *init_attr;
@@ -436,6 +452,7 @@ static void srp_remove_work(struct work_struct *work)
static int srp_connect_target(struct srp_target_port *target)
{
+ int retries = 3;
int ret;
ret = srp_lookup_path(target);
@@ -468,6 +485,21 @@ static int srp_connect_target(struct srp_target_port *target)
case SRP_DLID_REDIRECT:
break;
+ case SRP_STALE_CONN:
+ /* Our current CM id was stale, and is now in timewait.
+ * Try to reconnect with a new one.
+ */
+ if (!retries-- || srp_new_cm_id(target)) {
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "giving up on stale connection\n");
+ target->status = -ECONNRESET;
+ return target->status;
+ }
+
+ shost_printk(KERN_ERR, target->scsi_host, PFX
+ "retrying stale connection\n");
+ break;
+
default:
return target->status;
}
@@ -507,7 +539,6 @@ static void srp_reset_req(struct srp_target_port *target, struct srp_request *re
static int srp_reconnect_target(struct srp_target_port *target)
{
- struct ib_cm_id *new_cm_id;
struct ib_qp_attr qp_attr;
struct srp_request *req, *tmp;
struct ib_wc wc;
@@ -526,14 +557,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
* Now get a new local CM ID so that we avoid confusing the
* target in case things are really fouled up.
*/
- new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
- srp_cm_handler, target);
- if (IS_ERR(new_cm_id)) {
- ret = PTR_ERR(new_cm_id);
+ ret = srp_new_cm_id(target);
+ if (ret)
goto err;
- }
- ib_destroy_cm_id(target->cm_id);
- target->cm_id = new_cm_id;
qp_attr.qp_state = IB_QPS_RESET;
ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
@@ -1171,6 +1197,11 @@ static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
target->status = -ECONNRESET;
break;
+ case IB_CM_REJ_STALE_CONN:
+ shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
+ target->status = SRP_STALE_CONN;
+ break;
+
default:
shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
event->param.rej_rcvd.reason);
@@ -1862,11 +1893,9 @@ static ssize_t srp_create_target(struct class_device *class_dev,
if (ret)
goto err;
- target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
- if (IS_ERR(target->cm_id)) {
- ret = PTR_ERR(target->cm_id);
+ ret = srp_new_cm_id(target);
+ if (ret)
goto err_free;
- }
target->qp_in_error = 0;
ret = srp_connect_target(target);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 4a3c1f37e4c2..cb6eb816024a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -54,6 +54,7 @@ enum {
SRP_PORT_REDIRECT = 1,
SRP_DLID_REDIRECT = 2,
+ SRP_STALE_CONN = 3,
SRP_MAX_LUN = 512,
SRP_DEF_SG_TABLESIZE = 12,
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 63512d906f02..9dea14db724c 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -137,6 +137,18 @@ config INPUT_EVBUG
To compile this driver as a module, choose M here: the
module will be called evbug.
+config INPUT_APMPOWER
+ tristate "Input Power Event -> APM Bridge" if EMBEDDED
+ depends on INPUT && APM_EMULATION
+ ---help---
+ Say Y here if you want suspend key events to trigger a user
+ requested suspend through APM. This is useful on embedded
+ systems where such behaviour is desired without userspace
+ interaction. If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called apm-power.
+
comment "Input Device Drivers"
source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 99af903bd3ce..2ae87b19caa8 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_INPUT_TABLET) += tablet/
obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
obj-$(CONFIG_INPUT_MISC) += misc/
+obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o
diff --git a/drivers/input/apm-power.c b/drivers/input/apm-power.c
new file mode 100644
index 000000000000..c36d110b349a
--- /dev/null
+++ b/drivers/input/apm-power.c
@@ -0,0 +1,131 @@
+/*
+ * Input Power Event -> APM Bridge
+ *
+ * Copyright (c) 2007 Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/apm-emulation.h>
+
+static void system_power_event(unsigned int keycode)
+{
+ switch (keycode) {
+ case KEY_SUSPEND:
+ apm_queue_event(APM_USER_SUSPEND);
+
+ printk(KERN_INFO "apm-power: Requesting system suspend...\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void apmpower_event(struct input_handle *handle, unsigned int type,
+ unsigned int code, int value)
+{
+ /* only react to key down events */
+ if (value != 1)
+ return;
+
+ switch (type) {
+ case EV_PWR:
+ system_power_event(code);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int apmpower_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int error;
+
+ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "apm-power";
+
+ handler->private = handle;
+
+ error = input_register_handle(handle);
+ if (error) {
+ printk(KERN_ERR
+ "apm-power: Failed to register input power handler, "
+ "error %d\n", error);
+ kfree(handle);
+ return error;
+ }
+
+ error = input_open_device(handle);
+ if (error) {
+ printk(KERN_ERR
+ "apm-power: Failed to open input power device, "
+ "error %d\n", error);
+ input_unregister_handle(handle);
+ kfree(handle);
+ return error;
+ }
+
+ return 0;
+}
+
+static void apmpower_disconnect(struct input_handle *handler)
+{
+ struct input_handle *handle = handler->private;
+
+ input_close_device(handle);
+ kfree(handle);
+}
+
+static const struct input_device_id apmpower_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_PWR) },
+ },
+ { },
+};
+
+MODULE_DEVICE_TABLE(input, apmpower_ids);
+
+static struct input_handler apmpower_handler = {
+ .event = apmpower_event,
+ .connect = apmpower_connect,
+ .disconnect = apmpower_disconnect,
+ .name = "apm-power",
+ .id_table = apmpower_ids,
+};
+
+static int __init apmpower_init(void)
+{
+ return input_register_handler(&apmpower_handler);
+}
+
+static void __exit apmpower_exit(void)
+{
+ input_unregister_handler(&apmpower_handler);
+}
+
+module_init(apmpower_init);
+module_exit(apmpower_exit);
+
+MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
+MODULE_DESCRIPTION("Input Power Event -> APM Bridge");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index e5b4e9bfbdc5..0727b0a12557 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -617,7 +617,7 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
if (get_user(t, ip))
return -EFAULT;
- error = dev->getkeycode(dev, t, &v);
+ error = input_get_keycode(dev, t, &v);
if (error)
return error;
@@ -630,7 +630,7 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
if (get_user(t, ip) || get_user(v, ip + 1))
return -EFAULT;
- return dev->setkeycode(dev, t, v);
+ return input_set_keycode(dev, t, v);
case EVIOCSFF:
if (copy_from_user(&effect, p, sizeof(effect)))
@@ -683,7 +683,7 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
case EV_FF: bits = dev->ffbit; len = FF_MAX; break;
case EV_SW: bits = dev->swbit; len = SW_MAX; break;
default: return -EINVAL;
- }
+ }
return bits_to_user(bits, len, _IOC_SIZE(cmd), p, compat_mode);
}
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index 92b359894e81..490918a5d192 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -60,17 +60,21 @@ static void input_polled_device_work(struct work_struct *work)
{
struct input_polled_dev *dev =
container_of(work, struct input_polled_dev, work.work);
+ unsigned long delay;
dev->poll(dev);
- queue_delayed_work(polldev_wq, &dev->work,
- msecs_to_jiffies(dev->poll_interval));
+
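+ /* for intervals of a second or more, round the delay so timer wakeups can be batched */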
+ delay = msecs_to_jiffies(dev->poll_interval);
+ if (delay >= HZ)
+ delay = round_jiffies_relative(delay);
+
+ queue_delayed_work(polldev_wq, &dev->work, delay);
}
static int input_open_polled_device(struct input_dev *input)
{
struct input_polled_dev *dev = input->private;
int error;
- unsigned long ticks;
error = input_polldev_start_workqueue();
if (error)
@@ -79,10 +83,8 @@ static int input_open_polled_device(struct input_dev *input)
if (dev->flush)
dev->flush(dev);
- ticks = msecs_to_jiffies(dev->poll_interval);
- if (ticks >= HZ)
- ticks = round_jiffies(ticks);
- queue_delayed_work(polldev_wq, &dev->work, ticks);
+ queue_delayed_work(polldev_wq, &dev->work,
+ msecs_to_jiffies(dev->poll_interval));
return 0;
}
@@ -91,7 +93,7 @@ static void input_close_polled_device(struct input_dev *input)
{
struct input_polled_dev *dev = input->private;
- cancel_rearming_delayed_workqueue(polldev_wq, &dev->work);
+ cancel_delayed_work_sync(&dev->work);
input_polldev_stop_workqueue();
}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index a0be978501ff..f02c242c3114 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -493,7 +493,7 @@ static void input_disconnect_device(struct input_dev *dev)
if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
for (code = 0; code <= KEY_MAX; code++) {
if (is_event_supported(code, dev->keybit, KEY_MAX) &&
- test_bit(code, dev->key)) {
+ __test_and_clear_bit(code, dev->key)) {
input_pass_event(dev, EV_KEY, code, 0);
}
}
@@ -526,7 +526,7 @@ static int input_default_getkeycode(struct input_dev *dev,
if (!dev->keycodesize)
return -EINVAL;
- if (scancode < 0 || scancode >= dev->keycodemax)
+ if (scancode >= dev->keycodemax)
return -EINVAL;
*keycode = input_fetch_keycode(dev, scancode);
@@ -540,10 +540,7 @@ static int input_default_setkeycode(struct input_dev *dev,
int old_keycode;
int i;
- if (scancode < 0 || scancode >= dev->keycodemax)
- return -EINVAL;
-
- if (keycode < 0 || keycode > KEY_MAX)
+ if (scancode >= dev->keycodemax)
return -EINVAL;
if (!dev->keycodesize)
@@ -586,6 +583,75 @@ static int input_default_setkeycode(struct input_dev *dev,
return 0;
}
+/**
+ * input_get_keycode - retrieve keycode currently mapped to a given scancode
+ * @dev: input device whose keymap is being queried
+ * @scancode: scancode (or its equivalent for device in question) for which
+ * keycode is needed
+ * @keycode: result
+ *
+ * This function should be called by anyone interested in retrieving current
+ * keymap. Presently keyboard and evdev handlers use it.
+ */
+int input_get_keycode(struct input_dev *dev, int scancode, int *keycode)
+{
+ if (scancode < 0)
+ return -EINVAL;
+
+ return dev->getkeycode(dev, scancode, keycode);
+}
+EXPORT_SYMBOL(input_get_keycode);
+
+/**
+ * input_set_keycode - assign new keycode to a given scancode
+ * @dev: input device whose keymap is being updated
+ * @scancode: scancode (or its equivalent for device in question)
+ * @keycode: new keycode to be assigned to the scancode
+ *
+ * This function should be called by anyone needing to update current
+ * keymap. Presently keyboard and evdev handlers use it.
+ */
+int input_set_keycode(struct input_dev *dev, int scancode, int keycode)
+{
+ unsigned long flags;
+ int old_keycode;
+ int retval;
+
+ if (scancode < 0)
+ return -EINVAL;
+
+ if (keycode < 0 || keycode > KEY_MAX)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+
+ retval = dev->getkeycode(dev, scancode, &old_keycode);
+ if (retval)
+ goto out;
+
+ retval = dev->setkeycode(dev, scancode, keycode);
+ if (retval)
+ goto out;
+
+ /*
+ * Simulate keyup event if keycode is not present
+ * in the keymap anymore
+ */
+ if (test_bit(EV_KEY, dev->evbit) &&
+ !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
+ __test_and_clear_bit(old_keycode, dev->key)) {
+
+ input_pass_event(dev, EV_KEY, old_keycode, 0);
+ if (dev->sync)
+ input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+ }
+
+ out:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return retval;
+}
+EXPORT_SYMBOL(input_set_keycode);
#define MATCH_BIT(bit, max) \
for (i = 0; i < BITS_TO_LONGS(max); i++) \
@@ -755,7 +821,7 @@ static int input_devices_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct seq_operations input_devices_seq_ops = {
+static const struct seq_operations input_devices_seq_ops = {
.start = input_devices_seq_start,
.next = input_devices_seq_next,
.stop = input_devices_seq_stop,
@@ -808,7 +874,7 @@ static int input_handlers_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct seq_operations input_handlers_seq_ops = {
+static const struct seq_operations input_handlers_seq_ops = {
.start = input_handlers_seq_start,
.next = input_handlers_seq_next,
.stop = input_handlers_seq_stop,
@@ -1329,9 +1395,6 @@ int input_register_device(struct input_dev *dev)
snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id),
"input%ld", (unsigned long) atomic_inc_return(&input_no) - 1);
- if (dev->cdev.dev)
- dev->dev.parent = dev->cdev.dev;
-
error = device_add(&dev->dev);
if (error)
return error;
diff --git a/drivers/input/joystick/amijoy.c b/drivers/input/joystick/amijoy.c
index 5cf9f3610e67..deb9f825f92c 100644
--- a/drivers/input/joystick/amijoy.c
+++ b/drivers/input/joystick/amijoy.c
@@ -32,7 +32,6 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 15739880afc6..f32e031dcb27 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -31,7 +31,6 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/init.h>
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index a6ca9d5e252f..960e501c60c8 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/parport.h>
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index df2a9d02ca6c..07a32aff5a31 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -33,7 +33,6 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/parport.h>
#include <linux/input.h>
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index 6f826b37d9aa..a2517fa72eb8 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -85,7 +85,7 @@ static struct iforce_device iforce_device[] = {
static int iforce_playback(struct input_dev *dev, int effect_id, int value)
{
- struct iforce* iforce = dev->private;
+ struct iforce *iforce = input_get_drvdata(dev);
struct iforce_core_effect *core_effect = &iforce->core_effects[effect_id];
if (value > 0)
@@ -99,7 +99,7 @@ static int iforce_playback(struct input_dev *dev, int effect_id, int value)
static void iforce_set_gain(struct input_dev *dev, u16 gain)
{
- struct iforce* iforce = dev->private;
+ struct iforce *iforce = input_get_drvdata(dev);
unsigned char data[3];
data[0] = gain >> 9;
@@ -108,7 +108,7 @@ static void iforce_set_gain(struct input_dev *dev, u16 gain)
static void iforce_set_autocenter(struct input_dev *dev, u16 magnitude)
{
- struct iforce* iforce = dev->private;
+ struct iforce *iforce = input_get_drvdata(dev);
unsigned char data[3];
data[0] = 0x03;
@@ -126,7 +126,7 @@ static void iforce_set_autocenter(struct input_dev *dev, u16 magnitude)
*/
static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old)
{
- struct iforce* iforce = dev->private;
+ struct iforce *iforce = input_get_drvdata(dev);
struct iforce_core_effect *core_effect = &iforce->core_effects[effect->id];
int ret;
@@ -173,7 +173,7 @@ static int iforce_upload_effect(struct input_dev *dev, struct ff_effect *effect,
*/
static int iforce_erase_effect(struct input_dev *dev, int effect_id)
{
- struct iforce *iforce = dev->private;
+ struct iforce *iforce = input_get_drvdata(dev);
struct iforce_core_effect *core_effect = &iforce->core_effects[effect_id];
int err = 0;
@@ -191,7 +191,7 @@ static int iforce_erase_effect(struct input_dev *dev, int effect_id)
static int iforce_open(struct input_dev *dev)
{
- struct iforce *iforce = dev->private;
+ struct iforce *iforce = input_get_drvdata(dev);
switch (iforce->bus) {
#ifdef CONFIG_JOYSTICK_IFORCE_USB
@@ -213,7 +213,7 @@ static int iforce_open(struct input_dev *dev)
static void iforce_release(struct input_dev *dev)
{
- struct iforce *iforce = dev->private;
+ struct iforce *iforce = input_get_drvdata(dev);
int i;
if (test_bit(EV_FF, dev->evbit)) {
@@ -298,7 +298,8 @@ int iforce_init_device(struct iforce *iforce)
#endif
}
- input_dev->private = iforce;
+ input_set_drvdata(input_dev, iforce);
+
input_dev->name = "Unknown I-Force device";
input_dev->open = iforce_open;
input_dev->close = iforce_release;
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index bbebd4e2ad7f..989483f53160 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -35,7 +35,6 @@
#include <linux/parport.h>
#include <linux/input.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/mutex.h>
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 6e9d75bd2b15..0380597249bb 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -75,7 +75,6 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/usb/input.h>
#define DRIVER_VERSION "v0.0.6"
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 086d58c0ccbe..8ea709be3306 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -154,6 +154,27 @@ config KEYBOARD_SPITZ
To compile this driver as a module, choose M here: the
module will be called spitzkbd.
+config KEYBOARD_TOSA
+ tristate "Tosa keyboard"
+ depends on MACH_TOSA
+ default y
+ help
+ Say Y here to enable the keyboard on the Sharp Zaurus SL-6000x (Tosa).
+
+ To compile this driver as a module, choose M here: the
+ module will be called tosakbd.
+
+config KEYBOARD_TOSA_USE_EXT_KEYCODES
+ bool "Tosa keyboard: use extended keycodes"
+ depends on KEYBOARD_TOSA
+ default n
+ help
+ Say Y here to enable the tosa keyboard driver to generate extended
+ (>= 127) keycodes. Be aware that they can't be correctly interpreted
+ by either the console keyboard driver or the Kdrive keyboard driver.
+
+ Say Y only if you know what you are doing!
+
config KEYBOARD_AMIGA
tristate "Amiga keyboard"
depends on AMIGA
@@ -239,13 +260,13 @@ config KEYBOARD_OMAP
module will be called omap-keypad.
config KEYBOARD_PXA27x
- tristate "PXA27x keyboard support"
- depends on PXA27x
+ tristate "PXA27x/PXA3xx keypad support"
+ depends on PXA27x || PXA3xx
help
- Enable support for PXA27x matrix keyboard controller
+ Enable support for PXA27x/PXA3xx keypad controller
To compile this driver as a module, choose M here: the
- module will be called pxa27x_keyboard.
+ module will be called pxa27x_keypad.
config KEYBOARD_AAED2000
tristate "AAED-2000 keyboard"
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index e97455fdcc83..e741f4031012 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -15,10 +15,11 @@ obj-$(CONFIG_KEYBOARD_NEWTON) += newtonkbd.o
obj-$(CONFIG_KEYBOARD_STOWAWAY) += stowaway.o
obj-$(CONFIG_KEYBOARD_CORGI) += corgikbd.o
obj-$(CONFIG_KEYBOARD_SPITZ) += spitzkbd.o
+obj-$(CONFIG_KEYBOARD_TOSA) += tosakbd.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o
-obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keyboard.o
+obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
obj-$(CONFIG_KEYBOARD_AAED2000) += aaed2000_kbd.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index b39c5b31e620..4a95adc4cc78 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -19,7 +19,6 @@
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -28,6 +27,7 @@
#include <linux/workqueue.h>
#include <linux/libps2.h>
#include <linux/mutex.h>
+#include <linux/dmi.h>
#define DRIVER_DESC "AT and PS/2 keyboard driver"
@@ -201,6 +201,7 @@ struct atkbd {
unsigned short id;
unsigned char keycode[512];
+ DECLARE_BITMAP(force_release_mask, 512);
unsigned char set;
unsigned char translated;
unsigned char extra;
@@ -225,6 +226,11 @@ struct atkbd {
unsigned long event_mask;
};
+/*
+ * System-specific keymap fixup routine
+ */
+static void (*atkbd_platform_fixup)(struct atkbd *);
+
static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
ssize_t (*handler)(struct atkbd *, char *));
static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
@@ -349,7 +355,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
struct atkbd *atkbd = serio_get_drvdata(serio);
struct input_dev *dev = atkbd->dev;
unsigned int code = data;
- int scroll = 0, hscroll = 0, click = -1, add_release_event = 0;
+ int scroll = 0, hscroll = 0, click = -1;
int value;
unsigned char keycode;
@@ -414,14 +420,6 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
"Some program might be trying access hardware directly.\n",
data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys);
goto out;
- case ATKBD_RET_HANGEUL:
- case ATKBD_RET_HANJA:
- /*
- * These keys do not report release and thus need to be
- * flagged properly
- */
- add_release_event = 1;
- break;
case ATKBD_RET_ERR:
atkbd->err_count++;
#ifdef ATKBD_DEBUG
@@ -491,7 +489,7 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
input_event(dev, EV_KEY, keycode, value);
input_sync(dev);
- if (value && add_release_event) {
+ if (value && test_bit(code, atkbd->force_release_mask)) {
input_report_key(dev, keycode, 0);
input_sync(dev);
}
@@ -824,7 +822,6 @@ static void atkbd_disconnect(struct serio *serio)
atkbd_disable(atkbd);
/* make sure we don't have a command in flight */
- synchronize_sched(); /* Allow atkbd_interrupt()s to complete. */
flush_scheduled_work();
sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
@@ -834,6 +831,22 @@ static void atkbd_disconnect(struct serio *serio)
kfree(atkbd);
}
+/*
+ * Most special keys (Fn+F?) on Dell Latitudes do not generate release
+ * events so we have to do it ourselves.
+ */
+static void atkbd_latitude_keymap_fixup(struct atkbd *atkbd)
+{
+ const unsigned int forced_release_keys[] = {
+ 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93,
+ };
+ int i;
+
+ if (atkbd->set == 2)
+ for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
+ __set_bit(forced_release_keys[i],
+ atkbd->force_release_mask);
+}
/*
* atkbd_set_keycode_table() initializes keyboard's keycode table
@@ -842,17 +855,20 @@ static void atkbd_disconnect(struct serio *serio)
static void atkbd_set_keycode_table(struct atkbd *atkbd)
{
+ unsigned int scancode;
int i, j;
memset(atkbd->keycode, 0, sizeof(atkbd->keycode));
+ bitmap_zero(atkbd->force_release_mask, 512);
if (atkbd->translated) {
for (i = 0; i < 128; i++) {
- atkbd->keycode[i] = atkbd_set2_keycode[atkbd_unxlate_table[i]];
- atkbd->keycode[i | 0x80] = atkbd_set2_keycode[atkbd_unxlate_table[i] | 0x80];
+ scancode = atkbd_unxlate_table[i];
+ atkbd->keycode[i] = atkbd_set2_keycode[scancode];
+ atkbd->keycode[i | 0x80] = atkbd_set2_keycode[scancode | 0x80];
if (atkbd->scroll)
for (j = 0; j < ARRAY_SIZE(atkbd_scroll_keys); j++)
- if ((atkbd_unxlate_table[i] | 0x80) == atkbd_scroll_keys[j].set2)
+ if ((scancode | 0x80) == atkbd_scroll_keys[j].set2)
atkbd->keycode[i | 0x80] = atkbd_scroll_keys[j].keycode;
}
} else if (atkbd->set == 3) {
@@ -861,12 +877,29 @@ static void atkbd_set_keycode_table(struct atkbd *atkbd)
memcpy(atkbd->keycode, atkbd_set2_keycode, sizeof(atkbd->keycode));
if (atkbd->scroll)
- for (i = 0; i < ARRAY_SIZE(atkbd_scroll_keys); i++)
- atkbd->keycode[atkbd_scroll_keys[i].set2] = atkbd_scroll_keys[i].keycode;
+ for (i = 0; i < ARRAY_SIZE(atkbd_scroll_keys); i++) {
+ scancode = atkbd_scroll_keys[i].set2;
+ atkbd->keycode[scancode] = atkbd_scroll_keys[i].keycode;
+ }
}
- atkbd->keycode[atkbd_compat_scancode(atkbd, ATKBD_RET_HANGEUL)] = KEY_HANGUEL;
- atkbd->keycode[atkbd_compat_scancode(atkbd, ATKBD_RET_HANJA)] = KEY_HANJA;
+/*
+ * HANGEUL and HANJA keys do not send release events so we need to
+ * generate such events ourselves
+ */
+ scancode = atkbd_compat_scancode(atkbd, ATKBD_RET_HANGEUL);
+ atkbd->keycode[scancode] = KEY_HANGEUL;
+ __set_bit(scancode, atkbd->force_release_mask);
+
+ scancode = atkbd_compat_scancode(atkbd, ATKBD_RET_HANJA);
+ atkbd->keycode[scancode] = KEY_HANJA;
+ __set_bit(scancode, atkbd->force_release_mask);
+
+/*
+ * Perform additional fixups
+ */
+ if (atkbd_platform_fixup)
+ atkbd_platform_fixup(atkbd);
}
/*
@@ -1401,9 +1434,29 @@ static ssize_t atkbd_show_err_count(struct atkbd *atkbd, char *buf)
return sprintf(buf, "%lu\n", atkbd->err_count);
}
+static int __init atkbd_setup_fixup(const struct dmi_system_id *id)
+{
+ atkbd_platform_fixup = id->driver_data;
+ return 0;
+}
+
+static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
+ {
+ .ident = "Dell Latitude series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude"),
+ },
+ .callback = atkbd_setup_fixup,
+ .driver_data = atkbd_latitude_keymap_fixup,
+ },
+ { }
+};
static int __init atkbd_init(void)
{
+ dmi_check_system(atkbd_dmi_quirk_table);
+
return serio_register_driver(&atkbd_drv);
}
diff --git a/drivers/input/keyboard/lkkbd.c b/drivers/input/keyboard/lkkbd.c
index 1b08f4e79dd2..32e2c2605d95 100644
--- a/drivers/input/keyboard/lkkbd.c
+++ b/drivers/input/keyboard/lkkbd.c
@@ -64,7 +64,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/input.h>
diff --git a/drivers/input/keyboard/pxa27x_keyboard.c b/drivers/input/keyboard/pxa27x_keyboard.c
deleted file mode 100644
index bdd64ee4c5c8..000000000000
--- a/drivers/input/keyboard/pxa27x_keyboard.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * linux/drivers/input/keyboard/pxa27x_keyboard.c
- *
- * Driver for the pxa27x matrix keyboard controller.
- *
- * Created: Feb 22, 2007
- * Author: Rodolfo Giometti <giometti@linux.it>
- *
- * Based on a previous implementations by Kevin O'Connor
- * <kevin_at_koconnor.net> and Alex Osborne <bobofdoom@gmail.com> and
- * on some suggestions by Nicolas Pitre <nico@cam.org>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#include <asm/arch/hardware.h>
-#include <asm/arch/pxa-regs.h>
-#include <asm/arch/irqs.h>
-#include <asm/arch/pxa27x_keyboard.h>
-
-#define DRIVER_NAME "pxa27x-keyboard"
-
-#define KPASMKP(col) (col/2 == 0 ? KPASMKP0 : \
- col/2 == 1 ? KPASMKP1 : \
- col/2 == 2 ? KPASMKP2 : KPASMKP3)
-#define KPASMKPx_MKC(row, col) (1 << (row + 16 * (col % 2)))
-
-static struct clk *pxakbd_clk;
-
-static irqreturn_t pxakbd_irq_handler(int irq, void *dev_id)
-{
- struct platform_device *pdev = dev_id;
- struct pxa27x_keyboard_platform_data *pdata = pdev->dev.platform_data;
- struct input_dev *input_dev = platform_get_drvdata(pdev);
- unsigned long kpc = KPC;
- int p, row, col, rel;
-
- if (kpc & KPC_DI) {
- unsigned long kpdk = KPDK;
-
- if (!(kpdk & KPDK_DKP)) {
- /* better luck next time */
- } else if (kpc & KPC_REE0) {
- unsigned long kprec = KPREC;
- KPREC = 0x7f;
-
- if (kprec & KPREC_OF0)
- rel = (kprec & 0xff) + 0x7f;
- else if (kprec & KPREC_UF0)
- rel = (kprec & 0xff) - 0x7f - 0xff;
- else
- rel = (kprec & 0xff) - 0x7f;
-
- if (rel) {
- input_report_rel(input_dev, REL_WHEEL, rel);
- input_sync(input_dev);
- }
- }
- }
-
- if (kpc & KPC_MI) {
- /* report the status of every button */
- for (row = 0; row < pdata->nr_rows; row++) {
- for (col = 0; col < pdata->nr_cols; col++) {
- p = KPASMKP(col) & KPASMKPx_MKC(row, col) ?
- 1 : 0;
- pr_debug("keycode %x - pressed %x\n",
- pdata->keycodes[row][col], p);
- input_report_key(input_dev,
- pdata->keycodes[row][col], p);
- }
- }
- input_sync(input_dev);
- }
-
- return IRQ_HANDLED;
-}
-
-static int pxakbd_open(struct input_dev *dev)
-{
- /* Set keypad control register */
- KPC |= (KPC_ASACT |
- KPC_MS_ALL |
- (2 << 6) | KPC_REE0 | KPC_DK_DEB_SEL |
- KPC_ME | KPC_MIE | KPC_DE | KPC_DIE);
-
- KPC &= ~KPC_AS; /* disable automatic scan */
- KPC &= ~KPC_IMKP; /* do not ignore multiple keypresses */
-
- /* Set rotary count to mid-point value */
- KPREC = 0x7F;
-
- /* Enable unit clock */
- clk_enable(pxakbd_clk);
-
- return 0;
-}
-
-static void pxakbd_close(struct input_dev *dev)
-{
- /* Disable clock unit */
- clk_disable(pxakbd_clk);
-}
-
-#ifdef CONFIG_PM
-static int pxakbd_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct pxa27x_keyboard_platform_data *pdata = pdev->dev.platform_data;
-
- /* Save controller status */
- pdata->reg_kpc = KPC;
- pdata->reg_kprec = KPREC;
-
- return 0;
-}
-
-static int pxakbd_resume(struct platform_device *pdev)
-{
- struct pxa27x_keyboard_platform_data *pdata = pdev->dev.platform_data;
- struct input_dev *input_dev = platform_get_drvdata(pdev);
-
- mutex_lock(&input_dev->mutex);
-
- if (input_dev->users) {
- /* Restore controller status */
- KPC = pdata->reg_kpc;
- KPREC = pdata->reg_kprec;
-
- /* Enable unit clock */
- clk_disable(pxakbd_clk);
- clk_enable(pxakbd_clk);
- }
-
- mutex_unlock(&input_dev->mutex);
-
- return 0;
-}
-#else
-#define pxakbd_suspend NULL
-#define pxakbd_resume NULL
-#endif
-
-static int __devinit pxakbd_probe(struct platform_device *pdev)
-{
- struct pxa27x_keyboard_platform_data *pdata = pdev->dev.platform_data;
- struct input_dev *input_dev;
- int i, row, col, error;
-
- pxakbd_clk = clk_get(&pdev->dev, "KBDCLK");
- if (IS_ERR(pxakbd_clk)) {
- error = PTR_ERR(pxakbd_clk);
- goto err_clk;
- }
-
- /* Create and register the input driver. */
- input_dev = input_allocate_device();
- if (!input_dev) {
- printk(KERN_ERR "Cannot request keypad device\n");
- error = -ENOMEM;
- goto err_alloc;
- }
-
- input_dev->name = DRIVER_NAME;
- input_dev->id.bustype = BUS_HOST;
- input_dev->open = pxakbd_open;
- input_dev->close = pxakbd_close;
- input_dev->dev.parent = &pdev->dev;
-
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
- BIT_MASK(EV_REL);
- input_dev->relbit[BIT_WORD(REL_WHEEL)] = BIT_MASK(REL_WHEEL);
- for (row = 0; row < pdata->nr_rows; row++) {
- for (col = 0; col < pdata->nr_cols; col++) {
- int code = pdata->keycodes[row][col];
- if (code > 0)
- set_bit(code, input_dev->keybit);
- }
- }
-
- error = request_irq(IRQ_KEYPAD, pxakbd_irq_handler, IRQF_DISABLED,
- DRIVER_NAME, pdev);
- if (error) {
- printk(KERN_ERR "Cannot request keypad IRQ\n");
- goto err_free_dev;
- }
-
- platform_set_drvdata(pdev, input_dev);
-
- /* Register the input device */
- error = input_register_device(input_dev);
- if (error)
- goto err_free_irq;
-
- /* Setup GPIOs. */
- for (i = 0; i < pdata->nr_rows + pdata->nr_cols; i++)
- pxa_gpio_mode(pdata->gpio_modes[i]);
-
- /*
- * Store rows/cols info into keyboard registers.
- */
-
- KPC |= (pdata->nr_rows - 1) << 26;
- KPC |= (pdata->nr_cols - 1) << 23;
-
- for (col = 0; col < pdata->nr_cols; col++)
- KPC |= KPC_MS0 << col;
-
- return 0;
-
- err_free_irq:
- platform_set_drvdata(pdev, NULL);
- free_irq(IRQ_KEYPAD, pdev);
- err_free_dev:
- input_free_device(input_dev);
- err_alloc:
- clk_put(pxakbd_clk);
- err_clk:
- return error;
-}
-
-static int __devexit pxakbd_remove(struct platform_device *pdev)
-{
- struct input_dev *input_dev = platform_get_drvdata(pdev);
-
- input_unregister_device(input_dev);
- free_irq(IRQ_KEYPAD, pdev);
- clk_put(pxakbd_clk);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
-static struct platform_driver pxakbd_driver = {
- .probe = pxakbd_probe,
- .remove = __devexit_p(pxakbd_remove),
- .suspend = pxakbd_suspend,
- .resume = pxakbd_resume,
- .driver = {
- .name = DRIVER_NAME,
- },
-};
-
-static int __init pxakbd_init(void)
-{
- return platform_driver_register(&pxakbd_driver);
-}
-
-static void __exit pxakbd_exit(void)
-{
- platform_driver_unregister(&pxakbd_driver);
-}
-
-module_init(pxakbd_init);
-module_exit(pxakbd_exit);
-
-MODULE_DESCRIPTION("PXA27x Matrix Keyboard Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
new file mode 100644
index 000000000000..6224c2fb3b65
--- /dev/null
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -0,0 +1,572 @@
+/*
+ * linux/drivers/input/keyboard/pxa27x_keypad.c
+ *
+ * Driver for the pxa27x matrix keyboard controller.
+ *
+ * Created: Feb 22, 2007
+ * Author: Rodolfo Giometti <giometti@linux.it>
+ *
+ * Based on previous implementations by Kevin O'Connor
+ * <kevin_at_koconnor.net> and Alex Osborne <bobofdoom@gmail.com> and
+ * on some suggestions by Nicolas Pitre <nico@cam.org>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/pxa27x_keypad.h>
+
+/*
+ * Keypad Controller registers
+ */
+#define KPC 0x0000 /* Keypad Control register */
+#define KPDK 0x0008 /* Keypad Direct Key register */
+#define KPREC 0x0010 /* Keypad Rotary Encoder register */
+#define KPMK 0x0018 /* Keypad Matrix Key register */
+#define KPAS 0x0020 /* Keypad Automatic Scan register */
+
+/* Keypad Automatic Scan Multiple Key Press registers 0-3 */
+#define KPASMKP0 0x0028
+#define KPASMKP1 0x0030
+#define KPASMKP2 0x0038
+#define KPASMKP3 0x0040
+#define KPKDI 0x0048
+
+/* bit definitions */
+#define KPC_MKRN(n) ((((n) & 0x7) - 1) << 26) /* matrix key row number */
+#define KPC_MKCN(n) ((((n) & 0x7) - 1) << 23) /* matrix key column number */
+#define KPC_DKN(n) ((((n) & 0x7) - 1) << 6) /* direct key number */
+
+#define KPC_AS (0x1 << 30) /* Automatic Scan bit */
+#define KPC_ASACT (0x1 << 29) /* Automatic Scan on Activity */
+#define KPC_MI (0x1 << 22) /* Matrix interrupt bit */
+#define KPC_IMKP (0x1 << 21) /* Ignore Multiple Key Press */
+
+#define KPC_MS(n) (0x1 << (13 + (n))) /* Matrix scan line 'n' */
+#define KPC_MS_ALL (0xff << 13)
+
+#define KPC_ME (0x1 << 12) /* Matrix Keypad Enable */
+#define KPC_MIE (0x1 << 11) /* Matrix Interrupt Enable */
+#define KPC_DK_DEB_SEL (0x1 << 9) /* Direct Keypad Debounce Select */
+#define KPC_DI (0x1 << 5) /* Direct key interrupt bit */
+#define KPC_RE_ZERO_DEB (0x1 << 4) /* Rotary Encoder Zero Debounce */
+#define KPC_REE1 (0x1 << 3) /* Rotary Encoder1 Enable */
+#define KPC_REE0 (0x1 << 2) /* Rotary Encoder0 Enable */
+#define KPC_DE (0x1 << 1) /* Direct Keypad Enable */
+#define KPC_DIE (0x1 << 0) /* Direct Keypad interrupt Enable */
+
+#define KPDK_DKP (0x1 << 31)
+#define KPDK_DK(n) ((n) & 0xff)
+
+#define KPREC_OF1 (0x1 << 31)
+#define KPREC_UF1 (0x1 << 30)
+#define KPREC_OF0 (0x1 << 15)
+#define KPREC_UF0 (0x1 << 14)
+
+#define KPREC_RECOUNT0(n) ((n) & 0xff)
+#define KPREC_RECOUNT1(n) (((n) >> 16) & 0xff)
+
+#define KPMK_MKP (0x1 << 31)
+#define KPAS_SO (0x1 << 31)
+#define KPASMKPx_SO (0x1 << 31)
+
+#define KPAS_MUKP(n) (((n) >> 26) & 0x1f)
+#define KPAS_RP(n) (((n) >> 4) & 0xf)
+#define KPAS_CP(n) ((n) & 0xf)
+
+#define KPASMKP_MKC_MASK (0xff)
+
+#define keypad_readl(off) __raw_readl(keypad->mmio_base + (off))
+#define keypad_writel(off, v) __raw_writel((v), keypad->mmio_base + (off))
+
+#define MAX_MATRIX_KEY_NUM (8 * 8)
+
+struct pxa27x_keypad {
+ struct pxa27x_keypad_platform_data *pdata;
+
+ struct clk *clk;
+ struct input_dev *input_dev;
+ void __iomem *mmio_base;
+
+ /* matrix key code map */
+ unsigned int matrix_keycodes[MAX_MATRIX_KEY_NUM];
+
+ /* state row bits of each column scan */
+ uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
+ uint32_t direct_key_state;
+
+ unsigned int direct_key_mask;
+
+ int rotary_rel_code[2];
+ int rotary_up_key[2];
+ int rotary_down_key[2];
+};
+
+static void pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad)
+{
+ struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
+ struct input_dev *input_dev = keypad->input_dev;
+ unsigned int *key;
+ int i;
+
+ key = &pdata->matrix_key_map[0];
+ for (i = 0; i < pdata->matrix_key_map_size; i++, key++) {
+ int row = ((*key) >> 28) & 0xf;
+ int col = ((*key) >> 24) & 0xf;
+ int code = (*key) & 0xffffff;
+
+ keypad->matrix_keycodes[(row << 3) + col] = code;
+ set_bit(code, input_dev->keybit);
+ }
+
+ keypad->rotary_up_key[0] = pdata->rotary0_up_key;
+ keypad->rotary_up_key[1] = pdata->rotary1_up_key;
+ keypad->rotary_down_key[0] = pdata->rotary0_down_key;
+ keypad->rotary_down_key[1] = pdata->rotary1_down_key;
+ keypad->rotary_rel_code[0] = pdata->rotary0_rel_code;
+ keypad->rotary_rel_code[1] = pdata->rotary1_rel_code;
+
+ if (pdata->rotary0_up_key && pdata->rotary0_down_key) {
+ set_bit(pdata->rotary0_up_key, input_dev->keybit);
+ set_bit(pdata->rotary0_down_key, input_dev->keybit);
+ } else
+ set_bit(pdata->rotary0_rel_code, input_dev->relbit);
+
+ if (pdata->rotary1_up_key && pdata->rotary1_down_key) {
+ set_bit(pdata->rotary1_up_key, input_dev->keybit);
+ set_bit(pdata->rotary1_down_key, input_dev->keybit);
+ } else
+ set_bit(pdata->rotary1_rel_code, input_dev->relbit);
+}
+
+static inline unsigned int lookup_matrix_keycode(
+ struct pxa27x_keypad *keypad, int row, int col)
+{
+ return keypad->matrix_keycodes[(row << 3) + col];
+}
+
+static void pxa27x_keypad_scan_matrix(struct pxa27x_keypad *keypad)
+{
+ struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
+ int row, col, num_keys_pressed = 0;
+ uint32_t new_state[MAX_MATRIX_KEY_COLS];
+ uint32_t kpas = keypad_readl(KPAS);
+
+ num_keys_pressed = KPAS_MUKP(kpas);
+
+ memset(new_state, 0, sizeof(new_state));
+
+ if (num_keys_pressed == 0)
+ goto scan;
+
+ if (num_keys_pressed == 1) {
+ col = KPAS_CP(kpas);
+ row = KPAS_RP(kpas);
+
+ /* if invalid row/col, treat as no key pressed */
+ if (col >= pdata->matrix_key_cols ||
+ row >= pdata->matrix_key_rows)
+ goto scan;
+
+ new_state[col] = (1 << row);
+ goto scan;
+ }
+
+ if (num_keys_pressed > 1) {
+ uint32_t kpasmkp0 = keypad_readl(KPASMKP0);
+ uint32_t kpasmkp1 = keypad_readl(KPASMKP1);
+ uint32_t kpasmkp2 = keypad_readl(KPASMKP2);
+ uint32_t kpasmkp3 = keypad_readl(KPASMKP3);
+
+ new_state[0] = kpasmkp0 & KPASMKP_MKC_MASK;
+ new_state[1] = (kpasmkp0 >> 16) & KPASMKP_MKC_MASK;
+ new_state[2] = kpasmkp1 & KPASMKP_MKC_MASK;
+ new_state[3] = (kpasmkp1 >> 16) & KPASMKP_MKC_MASK;
+ new_state[4] = kpasmkp2 & KPASMKP_MKC_MASK;
+ new_state[5] = (kpasmkp2 >> 16) & KPASMKP_MKC_MASK;
+ new_state[6] = kpasmkp3 & KPASMKP_MKC_MASK;
+ new_state[7] = (kpasmkp3 >> 16) & KPASMKP_MKC_MASK;
+ }
+scan:
+ for (col = 0; col < pdata->matrix_key_cols; col++) {
+ uint32_t bits_changed;
+
+ bits_changed = keypad->matrix_key_state[col] ^ new_state[col];
+ if (bits_changed == 0)
+ continue;
+
+ for (row = 0; row < pdata->matrix_key_rows; row++) {
+ if ((bits_changed & (1 << row)) == 0)
+ continue;
+
+ input_report_key(keypad->input_dev,
+ lookup_matrix_keycode(keypad, row, col),
+ new_state[col] & (1 << row));
+ }
+ }
+ input_sync(keypad->input_dev);
+ memcpy(keypad->matrix_key_state, new_state, sizeof(new_state));
+}
+
+#define DEFAULT_KPREC (0x007f007f)
+
+static inline int rotary_delta(uint32_t kprec)
+{
+ if (kprec & KPREC_OF0)
+ return (kprec & 0xff) + 0x7f;
+ else if (kprec & KPREC_UF0)
+ return (kprec & 0xff) - 0x7f - 0xff;
+ else
+ return (kprec & 0xff) - 0x7f;
+}
+
+static void report_rotary_event(struct pxa27x_keypad *keypad, int r, int delta)
+{
+ struct input_dev *dev = keypad->input_dev;
+
+ if (delta == 0)
+ return;
+
+ if (keypad->rotary_up_key[r] && keypad->rotary_down_key[r]) {
+ int keycode = (delta > 0) ? keypad->rotary_up_key[r] :
+ keypad->rotary_down_key[r];
+
+ /* simulate a press-n-release */
+ input_report_key(dev, keycode, 1);
+ input_sync(dev);
+ input_report_key(dev, keycode, 0);
+ input_sync(dev);
+ } else {
+ input_report_rel(dev, keypad->rotary_rel_code[r], delta);
+ input_sync(dev);
+ }
+}
+
+static void pxa27x_keypad_scan_rotary(struct pxa27x_keypad *keypad)
+{
+ struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
+ uint32_t kprec;
+
+ /* read and reset to default count value */
+ kprec = keypad_readl(KPREC);
+ keypad_writel(KPREC, DEFAULT_KPREC);
+
+ if (pdata->enable_rotary0)
+ report_rotary_event(keypad, 0, rotary_delta(kprec));
+
+ if (pdata->enable_rotary1)
+ report_rotary_event(keypad, 1, rotary_delta(kprec >> 16));
+}
+
+static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
+{
+ struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
+ unsigned int new_state;
+ uint32_t kpdk, bits_changed;
+ int i;
+
+ kpdk = keypad_readl(KPDK);
+
+ if (pdata->enable_rotary0 || pdata->enable_rotary1)
+ pxa27x_keypad_scan_rotary(keypad);
+
+ if (pdata->direct_key_map == NULL)
+ return;
+
+ new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
+ bits_changed = keypad->direct_key_state ^ new_state;
+
+ if (bits_changed == 0)
+ return;
+
+ for (i = 0; i < pdata->direct_key_num; i++) {
+ if (bits_changed & (1 << i))
+ input_report_key(keypad->input_dev,
+ pdata->direct_key_map[i],
+ (new_state & (1 << i)));
+ }
+ input_sync(keypad->input_dev);
+ keypad->direct_key_state = new_state;
+}
+
+static irqreturn_t pxa27x_keypad_irq_handler(int irq, void *dev_id)
+{
+ struct pxa27x_keypad *keypad = dev_id;
+ unsigned long kpc = keypad_readl(KPC);
+
+ if (kpc & KPC_DI)
+ pxa27x_keypad_scan_direct(keypad);
+
+ if (kpc & KPC_MI)
+ pxa27x_keypad_scan_matrix(keypad);
+
+ return IRQ_HANDLED;
+}
+
+static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
+{
+ struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
+ unsigned int mask = 0, direct_key_num = 0;
+ unsigned long kpc = 0;
+
+ /* enable matrix keys with automatic scan */
+ if (pdata->matrix_key_rows && pdata->matrix_key_cols) {
+ kpc |= KPC_ASACT | KPC_MIE | KPC_ME | KPC_MS_ALL;
+ kpc |= KPC_MKRN(pdata->matrix_key_rows) |
+ KPC_MKCN(pdata->matrix_key_cols);
+ }
+
+ /* enable rotary key, debounce interval same as direct keys */
+ if (pdata->enable_rotary0) {
+ mask |= 0x03;
+ direct_key_num = 2;
+ kpc |= KPC_REE0;
+ }
+
+ if (pdata->enable_rotary1) {
+ mask |= 0x0c;
+ direct_key_num = 4;
+ kpc |= KPC_REE1;
+ }
+
+ if (pdata->direct_key_num > direct_key_num)
+ direct_key_num = pdata->direct_key_num;
+
+ keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask;
+
+ /* enable direct key */
+ if (direct_key_num)
+ kpc |= KPC_DE | KPC_DIE | KPC_DKN(direct_key_num);
+
+ keypad_writel(KPC, kpc | KPC_RE_ZERO_DEB);
+ keypad_writel(KPREC, DEFAULT_KPREC);
+ keypad_writel(KPKDI, pdata->debounce_interval);
+}
+
+static int pxa27x_keypad_open(struct input_dev *dev)
+{
+ struct pxa27x_keypad *keypad = input_get_drvdata(dev);
+
+ /* Enable unit clock */
+ clk_enable(keypad->clk);
+ pxa27x_keypad_config(keypad);
+
+ return 0;
+}
+
+static void pxa27x_keypad_close(struct input_dev *dev)
+{
+ struct pxa27x_keypad *keypad = input_get_drvdata(dev);
+
+ /* Disable clock unit */
+ clk_disable(keypad->clk);
+}
+
+#ifdef CONFIG_PM
+static int pxa27x_keypad_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
+
+ clk_disable(keypad->clk);
+ return 0;
+}
+
+static int pxa27x_keypad_resume(struct platform_device *pdev)
+{
+ struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
+ struct input_dev *input_dev = keypad->input_dev;
+
+ mutex_lock(&input_dev->mutex);
+
+ if (input_dev->users) {
+ /* Enable unit clock */
+ clk_enable(keypad->clk);
+ pxa27x_keypad_config(keypad);
+ }
+
+ mutex_unlock(&input_dev->mutex);
+
+ return 0;
+}
+#else
+#define pxa27x_keypad_suspend NULL
+#define pxa27x_keypad_resume NULL
+#endif
+
+#define res_size(res) ((res)->end - (res)->start + 1)
+
+static int __devinit pxa27x_keypad_probe(struct platform_device *pdev)
+{
+ struct pxa27x_keypad *keypad;
+ struct input_dev *input_dev;
+ struct resource *res;
+ int irq, error;
+
+ keypad = kzalloc(sizeof(struct pxa27x_keypad), GFP_KERNEL);
+ if (keypad == NULL) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ keypad->pdata = pdev->dev.platform_data;
+ if (keypad->pdata == NULL) {
+ dev_err(&pdev->dev, "no platform data defined\n");
+ error = -EINVAL;
+ goto failed_free;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get keypad irq\n");
+ error = -ENXIO;
+ goto failed_free;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get I/O memory\n");
+ error = -ENXIO;
+ goto failed_free;
+ }
+
+ res = request_mem_region(res->start, res_size(res), pdev->name);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to request I/O memory\n");
+ error = -EBUSY;
+ goto failed_free;
+ }
+
+ keypad->mmio_base = ioremap(res->start, res_size(res));
+ if (keypad->mmio_base == NULL) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ error = -ENXIO;
+ goto failed_free_mem;
+ }
+
+ keypad->clk = clk_get(&pdev->dev, "KBDCLK");
+ if (IS_ERR(keypad->clk)) {
+ dev_err(&pdev->dev, "failed to get keypad clock\n");
+ error = PTR_ERR(keypad->clk);
+ goto failed_free_io;
+ }
+
+ /* Create and register the input driver. */
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ error = -ENOMEM;
+ goto failed_put_clk;
+ }
+
+ input_dev->name = pdev->name;
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->open = pxa27x_keypad_open;
+ input_dev->close = pxa27x_keypad_close;
+ input_dev->dev.parent = &pdev->dev;
+
+ keypad->input_dev = input_dev;
+ input_set_drvdata(input_dev, keypad);
+
+ input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP) |
+ BIT_MASK(EV_REL);
+
+ pxa27x_keypad_build_keycode(keypad);
+ platform_set_drvdata(pdev, keypad);
+
+ error = request_irq(irq, pxa27x_keypad_irq_handler, IRQF_DISABLED,
+ pdev->name, keypad);
+ if (error) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto failed_free_dev;
+ }
+
+ /* Register the input device */
+ error = input_register_device(input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device\n");
+ goto failed_free_irq;
+ }
+
+ return 0;
+
+failed_free_irq:
+ free_irq(irq, keypad);
+ platform_set_drvdata(pdev, NULL);
+failed_free_dev:
+ input_free_device(input_dev);
+failed_put_clk:
+ clk_put(keypad->clk);
+failed_free_io:
+ iounmap(keypad->mmio_base);
+failed_free_mem:
+ release_mem_region(res->start, res_size(res));
+failed_free:
+ kfree(keypad);
+ return error;
+}
+
+static int __devexit pxa27x_keypad_remove(struct platform_device *pdev)
+{
+ struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
+ struct resource *res;
+
+ free_irq(platform_get_irq(pdev, 0), keypad);
+
+ clk_disable(keypad->clk);
+ clk_put(keypad->clk);
+
+ input_unregister_device(keypad->input_dev);
+
+ iounmap(keypad->mmio_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, res_size(res));
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(keypad);
+ return 0;
+}
+
+static struct platform_driver pxa27x_keypad_driver = {
+ .probe = pxa27x_keypad_probe,
+ .remove = __devexit_p(pxa27x_keypad_remove),
+ .suspend = pxa27x_keypad_suspend,
+ .resume = pxa27x_keypad_resume,
+ .driver = {
+ .name = "pxa27x-keypad",
+ },
+};
+
+static int __init pxa27x_keypad_init(void)
+{
+ return platform_driver_register(&pxa27x_keypad_driver);
+}
+
+static void __exit pxa27x_keypad_exit(void)
+{
+ platform_driver_unregister(&pxa27x_keypad_driver);
+}
+
+module_init(pxa27x_keypad_init);
+module_exit(pxa27x_keypad_exit);
+
+MODULE_DESCRIPTION("PXA27x Keypad Controller Driver");
+MODULE_LICENSE("GPL");
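
The new driver expects board code to describe the key matrix through pxa27x_keypad_platform_data: pxa27x_keypad_build_keycode() above decodes each matrix_key_map entry as row in bits 31-28, column in bits 27-24 and keycode in the low 24 bits. The sketch below shows platform data in that format; the KEYP() helper, the key assignments and the debounce value are made up for illustration (the real platform header is expected to provide an equivalent packing macro).

    /* Illustrative sketch of hypothetical board support code. */
    #define KEYP(row, col, code) \
            (((row) << 28) | ((col) << 24) | ((code) & 0xffffff))

    static unsigned int example_matrix_keys[] = {
            KEYP(0, 0, KEY_A),      KEYP(0, 1, KEY_B),
            KEYP(1, 0, KEY_ENTER),  KEYP(1, 1, KEY_BACKSPACE),
    };

    static struct pxa27x_keypad_platform_data example_keypad_data = {
            .matrix_key_rows        = 2,
            .matrix_key_cols        = 2,
            .matrix_key_map         = example_matrix_keys,
            .matrix_key_map_size    = ARRAY_SIZE(example_matrix_keys),
            .debounce_interval      = 30,   /* hypothetical KPKDI value */
    };
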
diff --git a/drivers/input/keyboard/tosakbd.c b/drivers/input/keyboard/tosakbd.c
new file mode 100644
index 000000000000..3884d1e3f070
--- /dev/null
+++ b/drivers/input/keyboard/tosakbd.c
@@ -0,0 +1,415 @@
+/*
+ * Keyboard driver for Sharp Tosa models (SL-6000x)
+ *
+ * Copyright (c) 2005 Dirk Opfer
+ * Copyright (c) 2007 Dmitry Baryshkov
+ *
+ * Based on xtkbd.c/locomkbd.c/corgikbd.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/tosa.h>
+
+#define KB_ROWMASK(r) (1 << (r))
+#define SCANCODE(r, c) (((r)<<4) + (c) + 1)
+#define NR_SCANCODES (SCANCODE(TOSA_KEY_SENSE_NUM - 1, TOSA_KEY_STROBE_NUM - 1) + 1)
+
+#define SCAN_INTERVAL (HZ/10)
+
+#define KB_DISCHARGE_DELAY 10
+#define KB_ACTIVATE_DELAY 10
+
+static unsigned int tosakbd_keycode[NR_SCANCODES] = {
+0,
+0, KEY_W, 0, 0, 0, KEY_K, KEY_BACKSPACE, KEY_P,
+0, 0, 0, 0, 0, 0, 0, 0,
+KEY_Q, KEY_E, KEY_T, KEY_Y, 0, KEY_O, KEY_I, KEY_COMMA,
+0, 0, 0, 0, 0, 0, 0, 0,
+KEY_A, KEY_D, KEY_G, KEY_U, 0, KEY_L, KEY_ENTER, KEY_DOT,
+0, 0, 0, 0, 0, 0, 0, 0,
+KEY_Z, KEY_C, KEY_V, KEY_J, TOSA_KEY_ADDRESSBOOK, TOSA_KEY_CANCEL, TOSA_KEY_CENTER, TOSA_KEY_OK,
+KEY_LEFTSHIFT, 0, 0, 0, 0, 0, 0, 0,
+KEY_S, KEY_R, KEY_B, KEY_N, TOSA_KEY_CALENDAR, TOSA_KEY_HOMEPAGE, KEY_LEFTCTRL, TOSA_KEY_LIGHT,
+0, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0,
+KEY_TAB, KEY_SLASH, KEY_H, KEY_M, TOSA_KEY_MENU, 0, KEY_UP, 0,
+0, 0, TOSA_KEY_FN, 0, 0, 0, 0, 0,
+KEY_X, KEY_F, KEY_SPACE, KEY_APOSTROPHE, TOSA_KEY_MAIL, KEY_LEFT, KEY_DOWN, KEY_RIGHT,
+0, 0, 0,
+};
+
+struct tosakbd {
+ unsigned int keycode[ARRAY_SIZE(tosakbd_keycode)];
+ struct input_dev *input;
+
+ spinlock_t lock; /* protect kbd scanning */
+ struct timer_list timer;
+};
+
+
+/* Helper functions for reading the keyboard matrix
+ * Note: We should really be using pxa_gpio_mode to alter GPDR but it
+ * requires a function call per GPIO bit which is excessive
+ * when we need to access 12 bits at once, multiple times.
+ * These functions must be called within local_irq_save()/local_irq_restore()
+ * or similar.
+ */
+#define GET_ROWS_STATUS(c) ((GPLR2 & TOSA_GPIO_ALL_SENSE_BIT) >> TOSA_GPIO_ALL_SENSE_RSHIFT)
+
+static inline void tosakbd_discharge_all(void)
+{
+ /* STROBE All HiZ */
+ GPCR1 = TOSA_GPIO_HIGH_STROBE_BIT;
+ GPDR1 &= ~TOSA_GPIO_HIGH_STROBE_BIT;
+ GPCR2 = TOSA_GPIO_LOW_STROBE_BIT;
+ GPDR2 &= ~TOSA_GPIO_LOW_STROBE_BIT;
+}
+
+static inline void tosakbd_activate_all(void)
+{
+ /* STROBE ALL -> High */
+ GPSR1 = TOSA_GPIO_HIGH_STROBE_BIT;
+ GPDR1 |= TOSA_GPIO_HIGH_STROBE_BIT;
+ GPSR2 = TOSA_GPIO_LOW_STROBE_BIT;
+ GPDR2 |= TOSA_GPIO_LOW_STROBE_BIT;
+
+ udelay(KB_DISCHARGE_DELAY);
+
+ /* STATE CLEAR */
+ GEDR2 |= TOSA_GPIO_ALL_SENSE_BIT;
+}
+
+static inline void tosakbd_activate_col(int col)
+{
+ if (col <= 5) {
+ /* STROBE col -> High, not col -> HiZ */
+ GPSR1 = TOSA_GPIO_STROBE_BIT(col);
+ GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
+ } else {
+ /* STROBE col -> High, not col -> HiZ */
+ GPSR2 = TOSA_GPIO_STROBE_BIT(col);
+ GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
+ }
+}
+
+static inline void tosakbd_reset_col(int col)
+{
+ if (col <= 5) {
+ /* STROBE col -> Low */
+ GPCR1 = TOSA_GPIO_STROBE_BIT(col);
+ /* STROBE col -> out, not col -> HiZ */
+ GPDR1 = (GPDR1 & ~TOSA_GPIO_HIGH_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
+ } else {
+ /* STROBE col -> Low */
+ GPCR2 = TOSA_GPIO_STROBE_BIT(col);
+ /* STROBE col -> out, not col -> HiZ */
+ GPDR2 = (GPDR2 & ~TOSA_GPIO_LOW_STROBE_BIT) | TOSA_GPIO_STROBE_BIT(col);
+ }
+}
+/*
+ * The tosa keyboard only generates interrupts when a key is pressed.
+ * So when a key is pressed, we enable a timer. This timer scans the
+ * keyboard, and this is how we detect when the key is released.
+ */
+
+/* Scan the hardware keyboard and push any changes up through the input layer */
+static void tosakbd_scankeyboard(struct platform_device *dev)
+{
+ struct tosakbd *tosakbd = platform_get_drvdata(dev);
+ unsigned int row, col, rowd;
+ unsigned long flags;
+ unsigned int num_pressed = 0;
+
+ spin_lock_irqsave(&tosakbd->lock, flags);
+
+ for (col = 0; col < TOSA_KEY_STROBE_NUM; col++) {
+ /*
+ * Discharge the output driver capacitance
+ * in the keyboard matrix (yes, it is significant).
+ */
+ tosakbd_discharge_all();
+ udelay(KB_DISCHARGE_DELAY);
+
+ tosakbd_activate_col(col);
+ udelay(KB_ACTIVATE_DELAY);
+
+ rowd = GET_ROWS_STATUS(col);
+
+ for (row = 0; row < TOSA_KEY_SENSE_NUM; row++) {
+ unsigned int scancode, pressed;
+ scancode = SCANCODE(row, col);
+ pressed = rowd & KB_ROWMASK(row);
+
+ if (pressed && !tosakbd->keycode[scancode])
+ dev_warn(&dev->dev,
+ "unhandled scancode: 0x%02x\n",
+ scancode);
+
+ input_report_key(tosakbd->input,
+ tosakbd->keycode[scancode],
+ pressed);
+ if (pressed)
+ num_pressed++;
+ }
+
+ tosakbd_reset_col(col);
+ }
+
+ tosakbd_activate_all();
+
+ input_sync(tosakbd->input);
+
+ /* if any keys are pressed, enable the timer */
+ if (num_pressed)
+ mod_timer(&tosakbd->timer, jiffies + SCAN_INTERVAL);
+
+ spin_unlock_irqrestore(&tosakbd->lock, flags);
+}
+
+/*
+ * tosa keyboard interrupt handler.
+ */
+static irqreturn_t tosakbd_interrupt(int irq, void *__dev)
+{
+ struct platform_device *dev = __dev;
+ struct tosakbd *tosakbd = platform_get_drvdata(dev);
+
+ if (!timer_pending(&tosakbd->timer)) {
+ /* wait out the key-chatter settling delay */
+ udelay(20);
+ tosakbd_scankeyboard(dev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * tosa timer checking for released keys
+ */
+static void tosakbd_timer_callback(unsigned long __dev)
+{
+ struct platform_device *dev = (struct platform_device *)__dev;
+ tosakbd_scankeyboard(dev);
+}
+
+#ifdef CONFIG_PM
+static int tosakbd_suspend(struct platform_device *dev, pm_message_t state)
+{
+ struct tosakbd *tosakbd = platform_get_drvdata(dev);
+
+ del_timer_sync(&tosakbd->timer);
+
+ return 0;
+}
+
+static int tosakbd_resume(struct platform_device *dev)
+{
+ tosakbd_scankeyboard(dev);
+
+ return 0;
+}
+#else
+#define tosakbd_suspend NULL
+#define tosakbd_resume NULL
+#endif
+
+static int __devinit tosakbd_probe(struct platform_device *pdev)
+{
+ int i;
+ struct tosakbd *tosakbd;
+ struct input_dev *input_dev;
+ int error;
+
+ tosakbd = kzalloc(sizeof(struct tosakbd), GFP_KERNEL);
+ if (!tosakbd)
+ return -ENOMEM;
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ kfree(tosakbd);
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, tosakbd);
+
+ spin_lock_init(&tosakbd->lock);
+
+ /* Init Keyboard rescan timer */
+ init_timer(&tosakbd->timer);
+ tosakbd->timer.function = tosakbd_timer_callback;
+ tosakbd->timer.data = (unsigned long) pdev;
+
+ tosakbd->input = input_dev;
+
+ input_set_drvdata(input_dev, tosakbd);
+ input_dev->name = "Tosa Keyboard";
+ input_dev->phys = "tosakbd/input0";
+ input_dev->dev.parent = &pdev->dev;
+
+ input_dev->id.bustype = BUS_HOST;
+ input_dev->id.vendor = 0x0001;
+ input_dev->id.product = 0x0001;
+ input_dev->id.version = 0x0100;
+
+ input_dev->evbit[0] = BIT(EV_KEY) | BIT(EV_REP);
+ input_dev->keycode = tosakbd->keycode;
+ input_dev->keycodesize = sizeof(unsigned int);
+ input_dev->keycodemax = ARRAY_SIZE(tosakbd_keycode);
+
+ memcpy(tosakbd->keycode, tosakbd_keycode, sizeof(tosakbd_keycode));
+
+ for (i = 0; i < ARRAY_SIZE(tosakbd_keycode); i++)
+ __set_bit(tosakbd->keycode[i], input_dev->keybit);
+ clear_bit(0, input_dev->keybit);
+
+ /* Setup sense interrupts - RisingEdge Detect, sense lines as inputs */
+ for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
+ int gpio = TOSA_GPIO_KEY_SENSE(i);
+ int irq;
+ error = gpio_request(gpio, "tosakbd");
+ if (error < 0) {
+ printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
+ " error %d\n", gpio, error);
+ goto fail;
+ }
+
+ error = gpio_direction_input(TOSA_GPIO_KEY_SENSE(i));
+ if (error < 0) {
+ printk(KERN_ERR "tosakbd: failed to configure input"
+ " direction for GPIO %d, error %d\n",
+ gpio, error);
+ gpio_free(gpio);
+ goto fail;
+ }
+
+ irq = gpio_to_irq(gpio);
+ if (irq < 0) {
+ error = irq;
+ printk(KERN_ERR "gpio-keys: Unable to get irq number"
+ " for GPIO %d, error %d\n",
+ gpio, error);
+ gpio_free(gpio);
+ goto fail;
+ }
+
+ error = request_irq(irq, tosakbd_interrupt,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING,
+ "tosakbd", pdev);
+
+ if (error) {
+ printk("tosakbd: Can't get IRQ: %d: error %d!\n",
+ irq, error);
+ gpio_free(gpio);
+ goto fail;
+ }
+ }
+
+ /* Set Strobe lines as outputs - set high */
+ for (i = 0; i < TOSA_KEY_STROBE_NUM; i++) {
+ int gpio = TOSA_GPIO_KEY_STROBE(i);
+ error = gpio_request(gpio, "tosakbd");
+ if (error < 0) {
+ printk(KERN_ERR "tosakbd: failed to request GPIO %d, "
+ " error %d\n", gpio, error);
+ goto fail2;
+ }
+
+ error = gpio_direction_output(gpio, 1);
+ if (error < 0) {
+ printk(KERN_ERR "tosakbd: failed to configure input"
+ " direction for GPIO %d, error %d\n",
+ gpio, error);
+ gpio_free(gpio);
+ goto fail2;
+ }
+
+ }
+
+ error = input_register_device(input_dev);
+ if (error) {
+ printk(KERN_ERR "tosakbd: Unable to register input device, "
+ "error: %d\n", error);
+ goto fail2;
+ }
+
+ printk(KERN_INFO "input: Tosa Keyboard Registered\n");
+
+ return 0;
+
+fail2:
+ while (--i >= 0)
+ gpio_free(TOSA_GPIO_KEY_STROBE(i));
+
+ i = TOSA_KEY_SENSE_NUM;
+fail:
+ while (--i >= 0) {
+ free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), pdev);
+ gpio_free(TOSA_GPIO_KEY_SENSE(i));
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ input_free_device(input_dev);
+ kfree(tosakbd);
+
+ return error;
+}
+
+static int __devexit tosakbd_remove(struct platform_device *dev)
+{
+ int i;
+ struct tosakbd *tosakbd = platform_get_drvdata(dev);
+
+ for (i = 0; i < TOSA_KEY_STROBE_NUM; i++)
+ gpio_free(TOSA_GPIO_KEY_STROBE(i));
+
+ for (i = 0; i < TOSA_KEY_SENSE_NUM; i++) {
+ free_irq(gpio_to_irq(TOSA_GPIO_KEY_SENSE(i)), dev);
+ gpio_free(TOSA_GPIO_KEY_SENSE(i));
+ }
+
+ del_timer_sync(&tosakbd->timer);
+
+ input_unregister_device(tosakbd->input);
+
+ kfree(tosakbd);
+
+ return 0;
+}
+
+static struct platform_driver tosakbd_driver = {
+ .probe = tosakbd_probe,
+ .remove = __devexit_p(tosakbd_remove),
+ .suspend = tosakbd_suspend,
+ .resume = tosakbd_resume,
+ .driver = {
+ .name = "tosa-keyboard",
+ },
+};
+
+static int __init tosakbd_init(void)
+{
+ return platform_driver_register(&tosakbd_driver);
+}
+
+static void __exit tosakbd_exit(void)
+{
+ platform_driver_unregister(&tosakbd_driver);
+}
+
+module_init(tosakbd_init);
+module_exit(tosakbd_exit);
+
+MODULE_AUTHOR("Dirk Opfer <Dirk@Opfer-Online.de>");
+MODULE_DESCRIPTION("Tosa Keyboard Driver");
+MODULE_LICENSE("GPL v2");
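
In the driver above, SCANCODE(r, c) packs a sense/strobe position into an index for the flat tosakbd_keycode[] table: (r << 4) + c + 1, so each sense row occupies a 16-entry stride and index 0 stays unused. A minimal sketch of the lookup (the helper name is hypothetical):

    /* Illustrative sketch mirroring the SCANCODE() packing above. */
    #define EXAMPLE_SCANCODE(r, c)  ((((r) << 4) + (c)) + 1)

    static unsigned int example_lookup(const unsigned int *keycode,
                                       int row, int col)
    {
            /* row 1, col 0 gives index 17, which is KEY_Q in tosakbd_keycode[] */
            return keycode[EXAMPLE_SCANCODE(row, col)];
    }
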
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 8f5c7b90187d..8b10d9f23bef 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -40,6 +40,20 @@ config INPUT_M68K_BEEP
tristate "M68k Beeper support"
depends on M68K
+config INPUT_APANEL
+ tristate "Fujitsu Lifebook Application Panel buttons"
+ depends on X86
+ select I2C_I801
+ select INPUT_POLLDEV
+ select CHECK_SIGNATURE
+ help
+ Say Y here to enable support for the Application Panel buttons found
+ on Fujitsu Lifebook laptops. The buttons are attached to the mainboard
+ through an SMBus interface managed by the Intel ICH (i801) I2C driver.
+
+ To compile this driver as a module, choose M here: the module will
+ be called apanel.
+
config INPUT_IXP4XX_BEEPER
tristate "IXP4XX Beeper support"
depends on ARCH_IXP4XX
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 3585b5038418..ebd39f291d25 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_INPUT_POWERMATE) += powermate.o
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_UINPUT) += uinput.o
+obj-$(CONFIG_INPUT_APANEL) += apanel.o
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
new file mode 100644
index 000000000000..9531d8c7444f
--- /dev/null
+++ b/drivers/input/misc/apanel.c
@@ -0,0 +1,378 @@
+/*
+ * Fujitsu Lifebook Application Panel button driver
+ *
+ * Copyright (C) 2007 Stephen Hemminger <shemminger@linux-foundation.org>
+ * Copyright (C) 2001-2003 Jochen Eisinger <jochen@penguin-breeder.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Many Fujitsu Lifebook laptops have a small panel of buttons that are
+ * accessible via the i2c/smbus interface. This driver polls those
+ * buttons and generates input events.
+ *
+ * For more details see:
+ * http://apanel.sourceforge.net/tech.php
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/input-polldev.h>
+#include <linux/i2c.h>
+#include <linux/workqueue.h>
+#include <linux/leds.h>
+
+#define APANEL_NAME "Fujitsu Application Panel"
+#define APANEL_VERSION "1.3.1"
+#define APANEL "apanel"
+
+/* How often we poll keys - msecs */
+#define POLL_INTERVAL_DEFAULT 1000
+
+/* Magic constants in BIOS that tell about buttons */
+enum apanel_devid {
+ APANEL_DEV_NONE = 0,
+ APANEL_DEV_APPBTN = 1,
+ APANEL_DEV_CDBTN = 2,
+ APANEL_DEV_LCD = 3,
+ APANEL_DEV_LED = 4,
+
+ APANEL_DEV_MAX,
+};
+
+enum apanel_chip {
+ CHIP_NONE = 0,
+ CHIP_OZ992C = 1,
+ CHIP_OZ163T = 2,
+ CHIP_OZ711M3 = 4,
+};
+
+/* Result of BIOS snooping/probing -- what features are supported */
+static enum apanel_chip device_chip[APANEL_DEV_MAX];
+
+#define MAX_PANEL_KEYS 12
+
+struct apanel {
+ struct input_polled_dev *ipdev;
+ struct i2c_client client;
+ unsigned short keymap[MAX_PANEL_KEYS];
+ u16 nkeys;
+ u16 led_bits;
+ struct work_struct led_work;
+ struct led_classdev mail_led;
+};
+
+
+static int apanel_probe(struct i2c_adapter *, int, int);
+
+/* for now, we only support one address */
+static unsigned short normal_i2c[] = {0, I2C_CLIENT_END};
+static unsigned short ignore = I2C_CLIENT_END;
+static struct i2c_client_address_data addr_data = {
+ .normal_i2c = normal_i2c,
+ .probe = &ignore,
+ .ignore = &ignore,
+};
+
+static void report_key(struct input_dev *input, unsigned keycode)
+{
+ pr_debug(APANEL ": report key %#x\n", keycode);
+ input_report_key(input, keycode, 1);
+ input_sync(input);
+
+ input_report_key(input, keycode, 0);
+ input_sync(input);
+}
+
+/* Poll for key changes
+ *
+ * Read Application keys via SMI
+ * A (0x4), B (0x8), Internet (0x2), Email (0x1).
+ *
+ * CD keys:
+ * Forward (0x100), Rewind (0x200), Stop (0x400), Pause (0x800)
+ */
+static void apanel_poll(struct input_polled_dev *ipdev)
+{
+ struct apanel *ap = ipdev->private;
+ struct input_dev *idev = ipdev->input;
+ u8 cmd = device_chip[APANEL_DEV_APPBTN] == CHIP_OZ992C ? 0 : 8;
+ s32 data;
+ int i;
+
+ data = i2c_smbus_read_word_data(&ap->client, cmd);
+ if (data < 0)
+ return; /* ignore errors (due to ACPI??) */
+
+ /* write back to clear latch */
+ i2c_smbus_write_word_data(&ap->client, cmd, 0);
+
+ if (!data)
+ return;
+
+ dev_dbg(&idev->dev, APANEL ": data %#x\n", data);
+ for (i = 0; i < idev->keycodemax; i++)
+ if ((1u << i) & data)
+ report_key(idev, ap->keymap[i]);
+}
+
+/* Track state changes of LED */
+static void led_update(struct work_struct *work)
+{
+ struct apanel *ap = container_of(work, struct apanel, led_work);
+
+ i2c_smbus_write_word_data(&ap->client, 0x10, ap->led_bits);
+}
+
+static void mail_led_set(struct led_classdev *led,
+ enum led_brightness value)
+{
+ struct apanel *ap = container_of(led, struct apanel, mail_led);
+
+ if (value != LED_OFF)
+ ap->led_bits |= 0x8000;
+ else
+ ap->led_bits &= ~0x8000;
+
+ schedule_work(&ap->led_work);
+}
+
+static int apanel_detach_client(struct i2c_client *client)
+{
+ struct apanel *ap = i2c_get_clientdata(client);
+
+ if (device_chip[APANEL_DEV_LED] != CHIP_NONE)
+ led_classdev_unregister(&ap->mail_led);
+
+ input_unregister_polled_device(ap->ipdev);
+ i2c_detach_client(&ap->client);
+ input_free_polled_device(ap->ipdev);
+
+ return 0;
+}
+
+/* Function is invoked for every i2c adapter. */
+static int apanel_attach_adapter(struct i2c_adapter *adap)
+{
+ dev_dbg(&adap->dev, APANEL ": attach adapter id=%d\n", adap->id);
+
+ /* Our device is connected only to i801 on laptop */
+ if (adap->id != I2C_HW_SMBUS_I801)
+ return -ENODEV;
+
+ return i2c_probe(adap, &addr_data, apanel_probe);
+}
+
+static void apanel_shutdown(struct i2c_client *client)
+{
+ apanel_detach_client(client);
+}
+
+static struct i2c_driver apanel_driver = {
+ .driver = {
+ .name = APANEL,
+ },
+ .attach_adapter = &apanel_attach_adapter,
+ .detach_client = &apanel_detach_client,
+ .shutdown = &apanel_shutdown,
+};
+
+static struct apanel apanel = {
+ .client = {
+ .driver = &apanel_driver,
+ .name = APANEL,
+ },
+ .keymap = {
+ [0] = KEY_MAIL,
+ [1] = KEY_WWW,
+ [2] = KEY_PROG2,
+ [3] = KEY_PROG1,
+
+ [8] = KEY_FORWARD,
+ [9] = KEY_REWIND,
+ [10] = KEY_STOPCD,
+ [11] = KEY_PLAYPAUSE,
+
+ },
+ .mail_led = {
+ .name = "mail:blue",
+ .brightness_set = mail_led_set,
+ },
+};
+
+/* NB: Only one panel on the i2c. */
+static int apanel_probe(struct i2c_adapter *bus, int address, int kind)
+{
+ struct apanel *ap;
+ struct input_polled_dev *ipdev;
+ struct input_dev *idev;
+ u8 cmd = device_chip[APANEL_DEV_APPBTN] == CHIP_OZ992C ? 0 : 8;
+ int i, err = -ENOMEM;
+
+ dev_dbg(&bus->dev, APANEL ": probe adapter %p addr %d kind %d\n",
+ bus, address, kind);
+
+ ap = &apanel;
+
+ ipdev = input_allocate_polled_device();
+ if (!ipdev)
+ goto out1;
+
+ ap->ipdev = ipdev;
+ ap->client.adapter = bus;
+ ap->client.addr = address;
+
+ i2c_set_clientdata(&ap->client, ap);
+
+ err = i2c_attach_client(&ap->client);
+ if (err)
+ goto out2;
+
+ err = i2c_smbus_write_word_data(&ap->client, cmd, 0);
+ if (err) {
+ dev_warn(&ap->client.dev, APANEL ": smbus write error %d\n",
+ err);
+ goto out3;
+ }
+
+ ipdev->poll = apanel_poll;
+ ipdev->poll_interval = POLL_INTERVAL_DEFAULT;
+ ipdev->private = ap;
+
+ idev = ipdev->input;
+ idev->name = APANEL_NAME " buttons";
+ idev->phys = "apanel/input0";
+ idev->id.bustype = BUS_HOST;
+ idev->dev.parent = &ap->client.dev;
+
+ set_bit(EV_KEY, idev->evbit);
+
+ idev->keycode = ap->keymap;
+ idev->keycodesize = sizeof(ap->keymap[0]);
+ idev->keycodemax = (device_chip[APANEL_DEV_CDBTN] != CHIP_NONE) ? 12 : 4;
+
+ for (i = 0; i < idev->keycodemax; i++)
+ if (ap->keymap[i])
+ set_bit(ap->keymap[i], idev->keybit);
+
+ err = input_register_polled_device(ipdev);
+ if (err)
+ goto out3;
+
+ INIT_WORK(&ap->led_work, led_update);
+ if (device_chip[APANEL_DEV_LED] != CHIP_NONE) {
+ err = led_classdev_register(&ap->client.dev, &ap->mail_led);
+ if (err)
+ goto out4;
+ }
+
+ return 0;
+out4:
+ input_unregister_polled_device(ipdev);
+out3:
+ i2c_detach_client(&ap->client);
+out2:
+ input_free_polled_device(ipdev);
+out1:
+ return err;
+}
+
+/* Scan the system ROM for the signature "FJKEYINF" */
+static __init const void __iomem *bios_signature(const void __iomem *bios)
+{
+ ssize_t offset;
+ const unsigned char signature[] = "FJKEYINF";
+
+ for (offset = 0; offset < 0x10000; offset += 0x10) {
+ if (check_signature(bios + offset, signature,
+ sizeof(signature)-1))
+ return bios + offset;
+ }
+ pr_notice(APANEL ": Fujitsu BIOS signature '%s' not found...\n",
+ signature);
+ return NULL;
+}
+
+static int __init apanel_init(void)
+{
+ void __iomem *bios;
+ const void __iomem *p;
+ u8 devno;
+ int found = 0;
+
+ bios = ioremap(0xF0000, 0x10000); /* Can't fail */
+
+ p = bios_signature(bios);
+ if (!p) {
+ iounmap(bios);
+ return -ENODEV;
+ }
+
+ /* just use the first address */
+ p += 8;
+ normal_i2c[0] = readb(p+3) >> 1;
+
+ for ( ; (devno = readb(p)) & 0x7f; p += 4) {
+ unsigned char method, slave, chip;
+
+ method = readb(p + 1);
+ chip = readb(p + 2);
+ slave = readb(p + 3) >> 1;
+
+ if (slave != normal_i2c[0]) {
+ pr_notice(APANEL ": only one SMBus slave "
+ "address supported, skiping device...\n");
+ continue;
+ }
+
+ /* translate alternative device numbers */
+ switch (devno) {
+ case 6:
+ devno = APANEL_DEV_APPBTN;
+ break;
+ case 7:
+ devno = APANEL_DEV_LED;
+ break;
+ }
+
+ if (devno >= APANEL_DEV_MAX)
+ pr_notice(APANEL ": unknown device %u found\n", devno);
+ else if (device_chip[devno] != CHIP_NONE)
+ pr_warning(APANEL ": duplicate entry for devno %u\n", devno);
+ else if (method != 1 && method != 2 && method != 4) {
+ pr_notice(APANEL ": unknown method %u for devno %u\n",
+ method, devno);
+ } else {
+ device_chip[devno] = (enum apanel_chip) chip;
+ ++found;
+ }
+ }
+ iounmap(bios);
+
+ if (found == 0) {
+ pr_info(APANEL ": no input devices reported by BIOS\n");
+ return -EIO;
+ }
+
+ return i2c_add_driver(&apanel_driver);
+}
+module_init(apanel_init);
+
+static void __exit apanel_cleanup(void)
+{
+ i2c_del_driver(&apanel_driver);
+}
+module_exit(apanel_cleanup);
+
+MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
+MODULE_DESCRIPTION(APANEL_NAME " driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(APANEL_VERSION);
+
+MODULE_ALIAS("dmi:*:svnFUJITSU:pnLifeBook*:pvr*:rvnFUJITSU:*");
+MODULE_ALIAS("dmi:*:svnFUJITSU:pnLifebook*:pvr*:rvnFUJITSU:*");
diff --git a/drivers/input/misc/ati_remote.c b/drivers/input/misc/ati_remote.c
index 3a7937481ad8..f3b86c2b0797 100644
--- a/drivers/input/misc/ati_remote.c
+++ b/drivers/input/misc/ati_remote.c
@@ -90,7 +90,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/usb/input.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c
index 4e3ad657ed80..1b871917340a 100644
--- a/drivers/input/misc/atlas_btns.c
+++ b/drivers/input/misc/atlas_btns.c
@@ -29,9 +29,10 @@
#include <asm/uaccess.h>
#include <acpi/acpi_drivers.h>
-#define ACPI_ATLAS_NAME "Atlas ACPI"
-#define ACPI_ATLAS_CLASS "Atlas"
+#define ACPI_ATLAS_NAME "Atlas ACPI"
+#define ACPI_ATLAS_CLASS "Atlas"
+static unsigned short atlas_keymap[16];
static struct input_dev *input_dev;
/* button handling code */
@@ -50,12 +51,15 @@ static acpi_status acpi_atlas_button_handler(u32 function,
void *handler_context, void *region_context)
{
acpi_status status;
- int keycode;
if (function == ACPI_WRITE) {
- keycode = KEY_F1 + (address & 0x0F);
- input_report_key(input_dev, keycode, !(address & 0x10));
+ int code = address & 0x0f;
+ int key_down = !(address & 0x10);
+
+ input_event(input_dev, EV_MSC, MSC_SCAN, code);
+ input_report_key(input_dev, atlas_keymap[code], key_down);
input_sync(input_dev);
+
status = 0;
} else {
printk(KERN_WARNING "atlas: shrugged on unexpected function"
@@ -70,6 +74,7 @@ static acpi_status acpi_atlas_button_handler(u32 function,
static int atlas_acpi_button_add(struct acpi_device *device)
{
acpi_status status;
+ int i;
int err;
input_dev = input_allocate_device();
@@ -81,17 +86,19 @@ static int atlas_acpi_button_add(struct acpi_device *device)
input_dev->name = "Atlas ACPI button driver";
input_dev->phys = "ASIM0000/atlas/input0";
input_dev->id.bustype = BUS_HOST;
- input_dev->evbit[BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY);
-
- set_bit(KEY_F1, input_dev->keybit);
- set_bit(KEY_F2, input_dev->keybit);
- set_bit(KEY_F3, input_dev->keybit);
- set_bit(KEY_F4, input_dev->keybit);
- set_bit(KEY_F5, input_dev->keybit);
- set_bit(KEY_F6, input_dev->keybit);
- set_bit(KEY_F7, input_dev->keybit);
- set_bit(KEY_F8, input_dev->keybit);
- set_bit(KEY_F9, input_dev->keybit);
+ input_dev->keycode = atlas_keymap;
+ input_dev->keycodesize = sizeof(unsigned short);
+ input_dev->keycodemax = ARRAY_SIZE(atlas_keymap);
+
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ __set_bit(EV_KEY, input_dev->evbit);
+ for (i = 0; i < ARRAY_SIZE(atlas_keymap); i++) {
+ if (i < 9) {
+ atlas_keymap[i] = KEY_F1 + i;
+ __set_bit(KEY_F1 + i, input_dev->keybit);
+ } else
+ atlas_keymap[i] = KEY_RESERVED;
+ }
err = input_register_device(input_dev);
if (err) {
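
This conversion is part of a series that gives these drivers real keycode tables: the raw button number is now reported as EV_MSC/MSC_SCAN and translated through input_dev->keycode, so individual buttons can be retargeted from userspace with the EVIOCSKEYCODE ioctl instead of being fixed to KEY_F1..KEY_F9. A minimal user-space sketch (the event node path is hypothetical):

    /* Illustrative user-space sketch: remap scan code 3 to KEY_WWW. */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/input.h>

    int main(void)
    {
            unsigned int codes[2] = { 3, KEY_WWW };         /* { scancode, keycode } */
            int fd = open("/dev/input/event5", O_RDWR);     /* hypothetical node */

            if (fd < 0)
                    return 1;
            return ioctl(fd, EVIOCSKEYCODE, codes) ? 1 : 0;
    }
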
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index 1aef97ed5e84..4833b1a82623 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -27,55 +27,48 @@
#define BUTTONS_COUNT_THRESHOLD 3
#define BUTTONS_STATUS_MASK 0xfe000000
+static const unsigned short cobalt_map[] = {
+ KEY_RESERVED,
+ KEY_RESTART,
+ KEY_LEFT,
+ KEY_UP,
+ KEY_DOWN,
+ KEY_RIGHT,
+ KEY_ENTER,
+ KEY_SELECT
+};
+
struct buttons_dev {
struct input_polled_dev *poll_dev;
+ unsigned short keymap[ARRAY_SIZE(cobalt_map)];
+ int count[ARRAY_SIZE(cobalt_map)];
void __iomem *reg;
};
-struct buttons_map {
- uint32_t mask;
- int keycode;
- int count;
-};
-
-static struct buttons_map buttons_map[] = {
- { 0x02000000, KEY_RESTART, },
- { 0x04000000, KEY_LEFT, },
- { 0x08000000, KEY_UP, },
- { 0x10000000, KEY_DOWN, },
- { 0x20000000, KEY_RIGHT, },
- { 0x40000000, KEY_ENTER, },
- { 0x80000000, KEY_SELECT, },
-};
-
static void handle_buttons(struct input_polled_dev *dev)
{
- struct buttons_map *button = buttons_map;
struct buttons_dev *bdev = dev->private;
struct input_dev *input = dev->input;
uint32_t status;
int i;
- status = readl(bdev->reg);
- status = ~status & BUTTONS_STATUS_MASK;
+ status = ~readl(bdev->reg) >> 24;
- for (i = 0; i < ARRAY_SIZE(buttons_map); i++) {
- if (status & button->mask) {
- button->count++;
+ for (i = 0; i < ARRAY_SIZE(bdev->keymap); i++) {
+ if (status & (1UL << i)) {
+ if (++bdev->count[i] == BUTTONS_COUNT_THRESHOLD) {
+ input_event(input, EV_MSC, MSC_SCAN, i);
+ input_report_key(input, bdev->keymap[i], 1);
+ input_sync(input);
+ }
} else {
- if (button->count >= BUTTONS_COUNT_THRESHOLD) {
- input_report_key(input, button->keycode, 0);
+ if (bdev->count[i] >= BUTTONS_COUNT_THRESHOLD) {
+ input_event(input, EV_MSC, MSC_SCAN, i);
+ input_report_key(input, bdev->keymap[i], 0);
input_sync(input);
}
- button->count = 0;
- }
-
- if (button->count == BUTTONS_COUNT_THRESHOLD) {
- input_report_key(input, button->keycode, 1);
- input_sync(input);
+ bdev->count[i] = 0;
}
-
- button++;
}
}
@@ -94,6 +87,8 @@ static int __devinit cobalt_buttons_probe(struct platform_device *pdev)
goto err_free_mem;
}
+ memcpy(bdev->keymap, cobalt_map, sizeof(bdev->keymap));
+
poll_dev->private = bdev;
poll_dev->poll = handle_buttons;
poll_dev->poll_interval = BUTTONS_POLL_INTERVAL;
@@ -104,11 +99,15 @@ static int __devinit cobalt_buttons_probe(struct platform_device *pdev)
input->id.bustype = BUS_HOST;
input->cdev.dev = &pdev->dev;
- input->evbit[0] = BIT_MASK(EV_KEY);
- for (i = 0; i < ARRAY_SIZE(buttons_map); i++) {
- set_bit(buttons_map[i].keycode, input->keybit);
- buttons_map[i].count = 0;
- }
+ input->keycode = bdev->keymap;
+ input->keycodemax = ARRAY_SIZE(bdev->keymap);
+ input->keycodesize = sizeof(unsigned short);
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+ __set_bit(EV_KEY, input->evbit);
+ for (i = 0; i < ARRAY_SIZE(cobalt_map); i++)
+ __set_bit(bdev->keymap[i], input->keybit);
+ __clear_bit(KEY_RESERVED, input->keybit);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
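
handle_buttons() above debounces by counting: a press is reported only when a button has been seen down for BUTTONS_COUNT_THRESHOLD consecutive polls (and exactly once, when the counter reaches the threshold), while a release is reported only if that threshold had been reached before the button went up. The same pattern in isolation, with hypothetical names:

    /* Illustrative sketch: count-based debounce for one polled input. */
    #define EXAMPLE_THRESHOLD 3

    static void example_debounce(int *count, int down, void (*report)(int pressed))
    {
            if (down) {
                    if (++(*count) == EXAMPLE_THRESHOLD)
                            report(1);              /* fires once per press */
            } else {
                    if (*count >= EXAMPLE_THRESHOLD)
                            report(0);
                    *count = 0;
            }
    }
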
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index fd74347047dd..952938a8e991 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -16,7 +16,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/usb/input.h>
#define DRIVER_VERSION "v0.1"
@@ -46,53 +45,12 @@ MODULE_PARM_DESC(debug, "Enable extra debug messages and information");
#define RECV_SIZE 8 /* The UIA-11 type have a 8 byte limit. */
-/* table of devices that work with this driver */
-static struct usb_device_id keyspan_table[] = {
- { USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) },
- { } /* Terminating entry */
-};
-
-/* Structure to store all the real stuff that a remote sends to us. */
-struct keyspan_message {
- u16 system;
- u8 button;
- u8 toggle;
-};
-
-/* Structure used for all the bit testing magic needed to be done. */
-struct bit_tester {
- u32 tester;
- int len;
- int pos;
- int bits_left;
- u8 buffer[32];
-};
-
-/* Structure to hold all of our driver specific stuff */
-struct usb_keyspan {
- char name[128];
- char phys[64];
- struct usb_device* udev;
- struct input_dev *input;
- struct usb_interface* interface;
- struct usb_endpoint_descriptor* in_endpoint;
- struct urb* irq_urb;
- int open;
- dma_addr_t in_dma;
- unsigned char* in_buffer;
-
- /* variables used to parse messages from remote. */
- struct bit_tester data;
- int stage;
- int toggle;
-};
-
/*
* Table that maps the 31 possible keycodes to input keys.
* Currently there are 15 and 17 button models so RESERVED codes
* are blank areas in the mapping.
*/
-static const int keyspan_key_table[] = {
+static const unsigned short keyspan_key_table[] = {
KEY_RESERVED, /* 0 is just a place holder. */
KEY_RESERVED,
KEY_STOP,
@@ -127,6 +85,48 @@ static const int keyspan_key_table[] = {
KEY_MENU
};
+/* table of devices that work with this driver */
+static struct usb_device_id keyspan_table[] = {
+ { USB_DEVICE(USB_KEYSPAN_VENDOR_ID, USB_KEYSPAN_PRODUCT_UIA11) },
+ { } /* Terminating entry */
+};
+
+/* Structure to store all the real stuff that a remote sends to us. */
+struct keyspan_message {
+ u16 system;
+ u8 button;
+ u8 toggle;
+};
+
+/* Structure used for all the bit testing magic needed to be done. */
+struct bit_tester {
+ u32 tester;
+ int len;
+ int pos;
+ int bits_left;
+ u8 buffer[32];
+};
+
+/* Structure to hold all of our driver specific stuff */
+struct usb_keyspan {
+ char name[128];
+ char phys[64];
+ unsigned short keymap[ARRAY_SIZE(keyspan_key_table)];
+ struct usb_device *udev;
+ struct input_dev *input;
+ struct usb_interface *interface;
+ struct usb_endpoint_descriptor *in_endpoint;
+ struct urb *irq_urb;
+ int open;
+ dma_addr_t in_dma;
+ unsigned char *in_buffer;
+
+ /* variables used to parse messages from remote. */
+ struct bit_tester data;
+ int stage;
+ int toggle;
+};
+
static struct usb_driver keyspan_driver;
/*
@@ -173,6 +173,15 @@ static int keyspan_load_tester(struct usb_keyspan* dev, int bits_needed)
return 0;
}
+static void keyspan_report_button(struct usb_keyspan *remote, int button, int press)
+{
+ struct input_dev *input = remote->input;
+
+ input_event(input, EV_MSC, MSC_SCAN, button);
+ input_report_key(input, remote->keymap[button], press);
+ input_sync(input);
+}
+
/*
* Routine that handles all the logic needed to parse out the message from the remote.
*/
@@ -311,9 +320,8 @@ static void keyspan_check_data(struct usb_keyspan *remote)
__FUNCTION__, message.system, message.button, message.toggle);
if (message.toggle != remote->toggle) {
- input_report_key(remote->input, keyspan_key_table[message.button], 1);
- input_report_key(remote->input, keyspan_key_table[message.button], 0);
- input_sync(remote->input);
+ keyspan_report_button(remote, message.button, 1);
+ keyspan_report_button(remote, message.button, 0);
remote->toggle = message.toggle;
}
@@ -491,16 +499,21 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
usb_make_path(udev, remote->phys, sizeof(remote->phys));
strlcat(remote->phys, "/input0", sizeof(remote->phys));
+ memcpy(remote->keymap, keyspan_key_table, sizeof(remote->keymap));
input_dev->name = remote->name;
input_dev->phys = remote->phys;
usb_to_input_id(udev, &input_dev->id);
input_dev->dev.parent = &interface->dev;
+ input_dev->keycode = remote->keymap;
+ input_dev->keycodesize = sizeof(unsigned short);
+ input_dev->keycodemax = ARRAY_SIZE(remote->keymap);
- input_dev->evbit[0] = BIT_MASK(EV_KEY); /* We will only report KEY events. */
+ input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ __set_bit(EV_KEY, input_dev->evbit);
for (i = 0; i < ARRAY_SIZE(keyspan_key_table); i++)
- if (keyspan_key_table[i] != KEY_RESERVED)
- set_bit(keyspan_key_table[i], input_dev->keybit);
+ __set_bit(keyspan_key_table[i], input_dev->keybit);
+ __clear_bit(KEY_RESERVED, input_dev->keybit);
input_set_drvdata(input_dev, remote);
@@ -508,12 +521,14 @@ static int keyspan_probe(struct usb_interface *interface, const struct usb_devic
input_dev->close = keyspan_close;
/*
- * Initialize the URB to access the device. The urb gets sent to the device in keyspan_open()
+ * Initialize the URB to access the device.
+ * The urb gets sent to the device in keyspan_open()
*/
usb_fill_int_urb(remote->irq_urb,
- remote->udev, usb_rcvintpipe(remote->udev, remote->in_endpoint->bEndpointAddress),
+ remote->udev,
+ usb_rcvintpipe(remote->udev, endpoint->bEndpointAddress),
remote->in_buffer, RECV_SIZE, keyspan_irq_recv, remote,
- remote->in_endpoint->bInterval);
+ endpoint->bInterval);
remote->irq_urb->transfer_dma = remote->in_dma;
remote->irq_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index b438d998625c..72176f3d49cb 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -998,12 +998,12 @@ static void wistron_wifi_led_set(struct led_classdev *led_cdev,
}
static struct led_classdev wistron_mail_led = {
- .name = "mail:green",
+ .name = "wistron:green:mail",
.brightness_set = wistron_mail_led_set,
};
static struct led_classdev wistron_wifi_led = {
- .name = "wifi:red",
+ .name = "wistron:red:wifi",
.brightness_set = wistron_wifi_led_set,
};
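
The new names follow the LED class "devicename:colour:function" convention, so the mail LED should now appear as /sys/class/leds/wistron:green:mail/ (the path is inferred from the name above; the brightness handling itself is unchanged). A trivial user-space sketch turns it on:

    /* Illustrative user-space sketch: switch the renamed mail LED on. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/leds/wistron:green:mail/brightness", "w");

            if (!f)
                    return 1;
            fputs("255\n", f);
            return fclose(f) ? 1 : 0;
    }
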
diff --git a/drivers/input/mouse/inport.c b/drivers/input/mouse/inport.c
index 26ec09529b51..06c35fc553c0 100644
--- a/drivers/input/mouse/inport.c
+++ b/drivers/input/mouse/inport.c
@@ -35,7 +35,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/mouse/logibm.c b/drivers/input/mouse/logibm.c
index 37e7c75b43bd..9ea895593b27 100644
--- a/drivers/input/mouse/logibm.c
+++ b/drivers/input/mouse/logibm.c
@@ -36,7 +36,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/init.h>
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index b8628252e10c..f5a6be1d3c46 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -13,7 +13,6 @@
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/input.h>
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 9ab5b5ea809d..26b845fc186a 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/serio.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/input.h>
#include <linux/libps2.h>
#include <linux/proc_fs.h>
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index be83516c776c..bbbe5e81adc1 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -16,7 +16,6 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/random.h>
@@ -1033,7 +1032,7 @@ static const struct input_device_id mousedev_ids[] = {
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT |
INPUT_DEVICE_ID_MATCH_ABSBIT,
- .evbit = { BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_SYN) },
+ .evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
.keybit = { [BIT_WORD(BTN_LEFT)] = BIT_MASK(BTN_LEFT) },
.absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
}, /* Mouse-like device with absolute X and Y but ordinary
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index c5e68dcd88ac..662e84482c26 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -63,7 +63,7 @@ static inline void i8042_write_command(int val)
outb(val, I8042_COMMAND_REG);
}
-#if defined(__i386__)
+#if defined(__i386__) || defined(__x86_64__)
#include <linux/dmi.h>
@@ -186,6 +186,13 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
},
},
{
+ .ident = "Fujitsu-Siemens Amilo Pro 2010",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
+ },
+ },
+ {
/*
* No data is coming from the touchscreen unless KBC
* is in legacy mode.
@@ -277,6 +284,57 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
#endif
+#ifdef CONFIG_X86
+
+#include <linux/dmi.h>
+
+/*
+ * Some Wistron based laptops need us to explicitly enable the 'Dritek
+ * keyboard extension' to make their extra keys start generating scancodes.
+ * Originally this was confined to older laptops, but a few Acer laptops
+ * released in 2007 have turned out to need it as well.
+ */
+static struct dmi_system_id __initdata i8042_dmi_dritek_table[] = {
+ {
+ .ident = "Acer Aspire 5630",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
+ },
+ },
+ {
+ .ident = "Acer Aspire 5650",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
+ },
+ },
+ {
+ .ident = "Acer Aspire 5680",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
+ },
+ },
+ {
+ .ident = "Acer Aspire 9110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
+ },
+ },
+ {
+ .ident = "Acer TravelMate 2490",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
+ },
+ },
+ { }
+};
+
+#endif /* CONFIG_X86 */
+
#ifdef CONFIG_PNP
#include <linux/pnp.h>
@@ -512,7 +570,7 @@ static int __init i8042_platform_init(void)
i8042_reset = 1;
#endif
-#if defined(__i386__)
+#if defined(__i386__) || defined(__x86_64__)
if (dmi_check_system(i8042_dmi_noloop_table))
i8042_noloop = 1;
@@ -520,6 +578,11 @@ static int __init i8042_platform_init(void)
i8042_nomux = 1;
#endif
+#ifdef CONFIG_X86
+ if (dmi_check_system(i8042_dmi_dritek_table))
+ i8042_dritek = 1;
+#endif /* CONFIG_X86 */
+
return retval;
}
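The Dritek hunk above follows the standard DMI quirk recipe: declare an __initdata dmi_system_id table for the affected machines and gate a driver flag on dmi_check_system() during platform init. Reduced to a standalone sketch, where all example_* identifiers are hypothetical:

	#include <linux/dmi.h>
	#include <linux/init.h>

	static int example_quirk;

	static struct dmi_system_id __initdata example_quirk_table[] = {
		{
			.ident = "Example Vendor Example Model",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
			},
		},
		{ }	/* terminating empty entry */
	};

	static int __init example_platform_init(void)
	{
		/* nonzero if the running machine matches any table entry */
		if (dmi_check_system(example_quirk_table))
			example_quirk = 1;
		return 0;
	}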
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 1a0cea3c5294..2763394869d2 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -64,6 +63,12 @@ static unsigned int i8042_blink_frequency = 500;
module_param_named(panicblink, i8042_blink_frequency, uint, 0600);
MODULE_PARM_DESC(panicblink, "Frequency with which keyboard LEDs should blink when kernel panics");
+#ifdef CONFIG_X86
+static unsigned int i8042_dritek;
+module_param_named(dritek, i8042_dritek, bool, 0);
+MODULE_PARM_DESC(dritek, "Force enable the Dritek keyboard extension");
+#endif
+
#ifdef CONFIG_PNP
static int i8042_nopnp;
module_param_named(nopnp, i8042_nopnp, bool, 0);
@@ -280,7 +285,14 @@ static void i8042_stop(struct serio *serio)
struct i8042_port *port = serio->port_data;
port->exists = 0;
- synchronize_sched();
+
+ /*
+ * We synchronize with both AUX and KBD IRQs because there is
+ * a (very unlikely) chance that AUX IRQ is raised for KBD port
+ * and vice versa.
+ */
+ synchronize_irq(I8042_AUX_IRQ);
+ synchronize_irq(I8042_KBD_IRQ);
port->serio = NULL;
}
@@ -1139,6 +1151,7 @@ static int __devinit i8042_setup_kbd(void)
static int __devinit i8042_probe(struct platform_device *dev)
{
int error;
+ char param;
error = i8042_controller_selftest();
if (error)
@@ -1159,7 +1172,14 @@ static int __devinit i8042_probe(struct platform_device *dev)
if (error)
goto out_fail;
}
-
+#ifdef CONFIG_X86
+ if (i8042_dritek) {
+ param = 0x90;
+ error = i8042_command(&param, 0x1059);
+ if (error)
+ goto out_fail;
+ }
+#endif
/*
* Ok, everything is ready, let's register all serio ports
*/
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 10d9d74ae43a..b819239d74dc 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -13,7 +13,6 @@
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/input.h>
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index fd9c5d51870a..58934a40f5ce 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -116,6 +116,7 @@ struct ads7846 {
// FIXME remove "irq_disabled"
unsigned irq_disabled:1; /* P: lock */
unsigned disabled:1;
+ unsigned is_suspended:1;
int (*filter)(void *data, int data_idx, int *val);
void *filter_data;
@@ -203,7 +204,7 @@ static void ads7846_disable(struct ads7846 *ts);
static int device_suspended(struct device *dev)
{
struct ads7846 *ts = dev_get_drvdata(dev);
- return dev->power.power_state.event != PM_EVENT_ON || ts->disabled;
+ return ts->is_suspended || ts->disabled;
}
static int ads7846_read12_ser(struct device *dev, unsigned command)
@@ -794,7 +795,7 @@ static int ads7846_suspend(struct spi_device *spi, pm_message_t message)
spin_lock_irq(&ts->lock);
- spi->dev.power.power_state = message;
+ ts->is_suspended = 1;
ads7846_disable(ts);
spin_unlock_irq(&ts->lock);
@@ -809,7 +810,7 @@ static int ads7846_resume(struct spi_device *spi)
spin_lock_irq(&ts->lock);
- spi->dev.power.power_state = PMSG_ON;
+ ts->is_suspended = 0;
ads7846_enable(ts);
spin_unlock_irq(&ts->lock);
@@ -871,7 +872,6 @@ static int __devinit ads7846_probe(struct spi_device *spi)
}
dev_set_drvdata(&spi->dev, ts);
- spi->dev.power.power_state = PMSG_ON;
ts->spi = spi;
ts->input = input_dev;
diff --git a/drivers/input/touchscreen/h3600_ts_input.c b/drivers/input/touchscreen/h3600_ts_input.c
index 2ae6c6016a86..28ae15ed12c5 100644
--- a/drivers/input/touchscreen/h3600_ts_input.c
+++ b/drivers/input/touchscreen/h3600_ts_input.c
@@ -109,7 +109,7 @@ struct h3600_dev {
static irqreturn_t action_button_handler(int irq, void *dev_id)
{
int down = (GPLR & GPIO_BITSY_ACTION_BUTTON) ? 0 : 1;
- struct input_dev *dev = (struct input_dev *) dev_id;
+ struct input_dev *dev = dev_id;
input_report_key(dev, KEY_ENTER, down);
input_sync(dev);
@@ -120,7 +120,7 @@ static irqreturn_t action_button_handler(int irq, void *dev_id)
static irqreturn_t npower_button_handler(int irq, void *dev_id)
{
int down = (GPLR & GPIO_BITSY_NPOWER_BUTTON) ? 0 : 1;
- struct input_dev *dev = (struct input_dev *) dev_id;
+ struct input_dev *dev = dev_id;
/*
* This interrupt is only called when we release the key. So we have
diff --git a/drivers/input/touchscreen/mk712.c b/drivers/input/touchscreen/mk712.c
index 80a658868706..efd3aebaba5f 100644
--- a/drivers/input/touchscreen/mk712.c
+++ b/drivers/input/touchscreen/mk712.c
@@ -36,7 +36,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 986a8365e37f..607f9933aa1f 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -15,7 +15,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/delay.h>
diff --git a/drivers/isdn/act2000/module.c b/drivers/isdn/act2000/module.c
index ee2b0b9f8f46..8325022e2bed 100644
--- a/drivers/isdn/act2000/module.c
+++ b/drivers/isdn/act2000/module.c
@@ -310,7 +310,7 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
}
break;
case ISDN_CMD_DIAL:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
@@ -339,7 +339,7 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
}
return ret;
case ISDN_CMD_ACCEPTD:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
@@ -347,11 +347,11 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
actcapi_select_b2_protocol_req(card, chan);
return 0;
case ISDN_CMD_ACCEPTB:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return 0;
case ISDN_CMD_HANGUP:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
@@ -366,7 +366,7 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
}
return 0;
case ISDN_CMD_SETEAZ:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
@@ -386,7 +386,7 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
actcapi_listen_req(card);
return 0;
case ISDN_CMD_CLREAZ:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
@@ -394,14 +394,14 @@ act2000_command(act2000_card * card, isdn_ctrl * c)
actcapi_listen_req(card);
return 0;
case ISDN_CMD_SETL2:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if (!(chan = find_channel(card, c->arg & 0x0f)))
break;
chan->l2prot = (c->arg >> 8);
return 0;
case ISDN_CMD_SETL3:
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
if ((c->arg >> 8) != ISDN_PROTO_L3_TRANS) {
printk(KERN_WARNING "L3 protocol unknown\n");
@@ -524,7 +524,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
act2000_card *card = act2000_findcard(id);
if (card) {
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (len);
}
@@ -539,7 +539,7 @@ if_readstatus(u_char __user * buf, int len, int id, int channel)
act2000_card *card = act2000_findcard(id);
if (card) {
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (act2000_readstatus(buf, len, card));
}
@@ -554,7 +554,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
act2000_card *card = act2000_findcard(id);
if (card) {
- if (!card->flags & ACT2000_FLAGS_RUNNING)
+ if (!(card->flags & ACT2000_FLAGS_RUNNING))
return -ENODEV;
return (act2000_sendbuf(card, channel, ack, skb));
}
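The act2000 changes above are all instances of the same precedence fix: in C, logical NOT binds tighter than bitwise AND, so `!card->flags & ACT2000_FLAGS_RUNNING` parses as `(!card->flags) & ACT2000_FLAGS_RUNNING`, applying the AND to the boolean 0/1 result of the negation instead of masking the flag bit as intended. A tiny userspace demonstration, using a hypothetical flag value:

	#include <stdio.h>

	#define FLAGS_RUNNING 0x04	/* hypothetical flag bit */

	static void check(unsigned flags)
	{
		/* buggy: parsed as (!flags) & FLAGS_RUNNING */
		int buggy = !flags & FLAGS_RUNNING;
		/* fixed: tests whether the RUNNING bit is clear */
		int fixed = !(flags & FLAGS_RUNNING);

		printf("flags=0x%02x  buggy=%d  fixed=%d\n", flags, buggy, fixed);
	}

	int main(void)
	{
		check(0x00);		/* not running: buggy=0, fixed=1 */
		check(FLAGS_RUNNING);	/* running:     buggy=0, fixed=0 */
		return 0;
	}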
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index 00a3be5b862b..091deb9d1c47 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -350,8 +350,8 @@ void gigaset_m10x_input(struct inbuf_t *inbuf)
unsigned char *src, c;
int procbytes;
- head = atomic_read(&inbuf->head);
- tail = atomic_read(&inbuf->tail);
+ head = inbuf->head;
+ tail = inbuf->tail;
gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
if (head != tail) {
@@ -361,7 +361,7 @@ void gigaset_m10x_input(struct inbuf_t *inbuf)
gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
while (numbytes) {
- if (atomic_read(&cs->mstate) == MS_LOCKED) {
+ if (cs->mstate == MS_LOCKED) {
procbytes = lock_loop(src, numbytes, inbuf);
src += procbytes;
numbytes -= procbytes;
@@ -436,7 +436,7 @@ nextbyte:
}
gig_dbg(DEBUG_INTR, "setting head to %u", head);
- atomic_set(&inbuf->head, head);
+ inbuf->head = head;
}
}
EXPORT_SYMBOL_GPL(gigaset_m10x_input);
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index af7648274b38..5255b5e20e13 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -73,6 +73,14 @@ static int gigaset_probe(struct usb_interface *interface,
/* Function will be called if the device is unplugged */
static void gigaset_disconnect(struct usb_interface *interface);
+/* functions called before/after suspend */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
+static int gigaset_resume(struct usb_interface *intf);
+
+/* functions called before/after device reset */
+static int gigaset_pre_reset(struct usb_interface *intf);
+static int gigaset_post_reset(struct usb_interface *intf);
+
static int atread_submit(struct cardstate *, int);
static void stopurbs(struct bas_bc_state *);
static int req_submit(struct bc_state *, int, int, int);
@@ -105,8 +113,9 @@ struct bas_cardstate {
unsigned char int_in_buf[3];
spinlock_t lock; /* locks all following */
- atomic_t basstate; /* bitmap (BS_*) */
+ int basstate; /* bitmap (BS_*) */
int pending; /* uncompleted base request */
+ wait_queue_head_t waitqueue;
int rcvbuf_size; /* size of AT receive buffer */
/* 0: no receive in progress */
int retry_cmd_in; /* receive req retry count */
@@ -121,10 +130,10 @@ struct bas_cardstate {
#define BS_ATTIMER 0x020 /* waiting for HD_READY_SEND_ATDATA */
#define BS_ATRDPEND 0x040 /* urb_cmd_in in use */
#define BS_ATWRPEND 0x080 /* urb_cmd_out in use */
+#define BS_SUSPEND 0x100 /* USB port suspended */
static struct gigaset_driver *driver = NULL;
-static struct cardstate *cardstate = NULL;
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver gigaset_usb_driver = {
@@ -132,6 +141,11 @@ static struct usb_driver gigaset_usb_driver = {
.probe = gigaset_probe,
.disconnect = gigaset_disconnect,
.id_table = gigaset_table,
+ .suspend = gigaset_suspend,
+ .resume = gigaset_resume,
+ .reset_resume = gigaset_post_reset,
+ .pre_reset = gigaset_pre_reset,
+ .post_reset = gigaset_post_reset,
};
/* get message text for usb_submit_urb return code
@@ -248,12 +262,12 @@ static inline void dump_urb(enum debuglevel level, const char *tag,
if (urb) {
gig_dbg(level,
" dev=0x%08lx, pipe=%s:EP%d/DV%d:%s, "
- "status=%d, hcpriv=0x%08lx, transfer_flags=0x%x,",
+ "hcpriv=0x%08lx, transfer_flags=0x%x,",
(unsigned long) urb->dev,
usb_pipetype_str(urb->pipe),
usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
- urb->status, (unsigned long) urb->hcpriv,
+ (unsigned long) urb->hcpriv,
urb->transfer_flags);
gig_dbg(level,
" transfer_buffer=0x%08lx[%d], actual_length=%d, "
@@ -355,27 +369,27 @@ static void check_pending(struct bas_cardstate *ucs)
case 0:
break;
case HD_OPEN_ATCHANNEL:
- if (atomic_read(&ucs->basstate) & BS_ATOPEN)
+ if (ucs->basstate & BS_ATOPEN)
ucs->pending = 0;
break;
case HD_OPEN_B1CHANNEL:
- if (atomic_read(&ucs->basstate) & BS_B1OPEN)
+ if (ucs->basstate & BS_B1OPEN)
ucs->pending = 0;
break;
case HD_OPEN_B2CHANNEL:
- if (atomic_read(&ucs->basstate) & BS_B2OPEN)
+ if (ucs->basstate & BS_B2OPEN)
ucs->pending = 0;
break;
case HD_CLOSE_ATCHANNEL:
- if (!(atomic_read(&ucs->basstate) & BS_ATOPEN))
+ if (!(ucs->basstate & BS_ATOPEN))
ucs->pending = 0;
break;
case HD_CLOSE_B1CHANNEL:
- if (!(atomic_read(&ucs->basstate) & BS_B1OPEN))
+ if (!(ucs->basstate & BS_B1OPEN))
ucs->pending = 0;
break;
case HD_CLOSE_B2CHANNEL:
- if (!(atomic_read(&ucs->basstate) & BS_B2OPEN))
+ if (!(ucs->basstate & BS_B2OPEN))
ucs->pending = 0;
break;
case HD_DEVICE_INIT_ACK: /* no reply expected */
@@ -441,8 +455,8 @@ inline static int update_basstate(struct bas_cardstate *ucs,
int state;
spin_lock_irqsave(&ucs->lock, flags);
- state = atomic_read(&ucs->basstate);
- atomic_set(&ucs->basstate, (state & ~clear) | set);
+ state = ucs->basstate;
+ ucs->basstate = (state & ~clear) | set;
spin_unlock_irqrestore(&ucs->lock, flags);
return state;
}
@@ -459,11 +473,13 @@ static void read_ctrl_callback(struct urb *urb)
struct inbuf_t *inbuf = urb->context;
struct cardstate *cs = inbuf->cs;
struct bas_cardstate *ucs = cs->hw.bas;
+ int status = urb->status;
int have_data = 0;
unsigned numbytes;
int rc;
update_basstate(ucs, 0, BS_ATRDPEND);
+ wake_up(&ucs->waitqueue);
if (!ucs->rcvbuf_size) {
dev_warn(cs->dev, "%s: no receive in progress\n", __func__);
@@ -472,7 +488,7 @@ static void read_ctrl_callback(struct urb *urb)
del_timer(&ucs->timer_cmd_in);
- switch (urb->status) {
+ switch (status) {
case 0: /* normal completion */
numbytes = urb->actual_length;
if (unlikely(numbytes != ucs->rcvbuf_size)) {
@@ -506,12 +522,12 @@ static void read_ctrl_callback(struct urb *urb)
case -ESHUTDOWN: /* device shut down */
/* no action necessary */
gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
break;
default: /* severe trouble */
dev_warn(cs->dev, "control read: %s\n",
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
if (ucs->retry_cmd_in++ < BAS_RETRY) {
dev_notice(cs->dev, "control read: retry %d\n",
ucs->retry_cmd_in);
@@ -550,17 +566,28 @@ static void read_ctrl_callback(struct urb *urb)
static int atread_submit(struct cardstate *cs, int timeout)
{
struct bas_cardstate *ucs = cs->hw.bas;
+ int basstate;
int ret;
gig_dbg(DEBUG_USBREQ, "-------> HD_READ_ATMESSAGE (%d)",
ucs->rcvbuf_size);
- if (update_basstate(ucs, BS_ATRDPEND, 0) & BS_ATRDPEND) {
+ basstate = update_basstate(ucs, BS_ATRDPEND, 0);
+ if (basstate & BS_ATRDPEND) {
dev_err(cs->dev,
"could not submit HD_READ_ATMESSAGE: URB busy\n");
return -EBUSY;
}
+ if (basstate & BS_SUSPEND) {
+ dev_notice(cs->dev,
+ "HD_READ_ATMESSAGE not submitted, "
+ "suspend in progress\n");
+ update_basstate(ucs, 0, BS_ATRDPEND);
+ /* treat like disconnect */
+ return -ENODEV;
+ }
+
ucs->dr_cmd_in.bRequestType = IN_VENDOR_REQ;
ucs->dr_cmd_in.bRequest = HD_READ_ATMESSAGE;
ucs->dr_cmd_in.wValue = 0;
@@ -601,12 +628,13 @@ static void read_int_callback(struct urb *urb)
struct cardstate *cs = urb->context;
struct bas_cardstate *ucs = cs->hw.bas;
struct bc_state *bcs;
+ int status = urb->status;
unsigned long flags;
int rc;
unsigned l;
int channel;
- switch (urb->status) {
+ switch (status) {
case 0: /* success */
break;
case -ENOENT: /* cancelled */
@@ -614,7 +642,7 @@ static void read_int_callback(struct urb *urb)
case -EINPROGRESS: /* pending */
/* ignore silently */
gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
return;
case -ENODEV: /* device removed */
case -ESHUTDOWN: /* device shut down */
@@ -623,7 +651,7 @@ static void read_int_callback(struct urb *urb)
return;
default: /* severe trouble */
dev_warn(cs->dev, "interrupt read: %s\n",
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
//FIXME corrective action? resubmission always ok?
goto resubmit;
}
@@ -745,6 +773,7 @@ static void read_int_callback(struct urb *urb)
}
check_pending(ucs);
+ wake_up(&ucs->waitqueue);
resubmit:
rc = usb_submit_urb(urb, GFP_ATOMIC);
@@ -766,17 +795,18 @@ static void read_iso_callback(struct urb *urb)
{
struct bc_state *bcs;
struct bas_bc_state *ubc;
+ int status = urb->status;
unsigned long flags;
int i, rc;
/* status codes not worth bothering the tasklet with */
- if (unlikely(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -EINPROGRESS ||
- urb->status == -ENODEV ||
- urb->status == -ESHUTDOWN)) {
+ if (unlikely(status == -ENOENT ||
+ status == -ECONNRESET ||
+ status == -EINPROGRESS ||
+ status == -ENODEV ||
+ status == -ESHUTDOWN)) {
gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
return;
}
@@ -787,10 +817,11 @@ static void read_iso_callback(struct urb *urb)
if (likely(ubc->isoindone == NULL)) {
/* pass URB to tasklet */
ubc->isoindone = urb;
+ ubc->isoinstatus = status;
tasklet_schedule(&ubc->rcvd_tasklet);
} else {
/* tasklet still busy, drop data and resubmit URB */
- ubc->loststatus = urb->status;
+ ubc->loststatus = status;
for (i = 0; i < BAS_NUMFRAMES; i++) {
ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
if (unlikely(urb->iso_frame_desc[i].status != 0 &&
@@ -800,7 +831,7 @@ static void read_iso_callback(struct urb *urb)
urb->iso_frame_desc[i].status = 0;
urb->iso_frame_desc[i].actual_length = 0;
}
- if (likely(atomic_read(&ubc->running))) {
+ if (likely(ubc->running)) {
/* urb->dev is clobbered by USB subsystem */
urb->dev = bcs->cs->hw.bas->udev;
urb->transfer_flags = URB_ISO_ASAP;
@@ -831,22 +862,24 @@ static void write_iso_callback(struct urb *urb)
{
struct isow_urbctx_t *ucx;
struct bas_bc_state *ubc;
+ int status = urb->status;
unsigned long flags;
/* status codes not worth bothering the tasklet with */
- if (unlikely(urb->status == -ENOENT ||
- urb->status == -ECONNRESET ||
- urb->status == -EINPROGRESS ||
- urb->status == -ENODEV ||
- urb->status == -ESHUTDOWN)) {
+ if (unlikely(status == -ENOENT ||
+ status == -ECONNRESET ||
+ status == -EINPROGRESS ||
+ status == -ENODEV ||
+ status == -ESHUTDOWN)) {
gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
return;
}
/* pass URB context to tasklet */
ucx = urb->context;
ubc = ucx->bcs->hw.bas;
+ ucx->status = status;
spin_lock_irqsave(&ubc->isooutlock, flags);
ubc->isooutovfl = ubc->isooutdone;
@@ -875,7 +908,7 @@ static int starturbs(struct bc_state *bcs)
bcs->inputstate |= INS_flag_hunt;
/* submit all isochronous input URBs */
- atomic_set(&ubc->running, 1);
+ ubc->running = 1;
for (k = 0; k < BAS_INURBS; k++) {
urb = ubc->isoinurbs[k];
if (!urb) {
@@ -932,15 +965,15 @@ static int starturbs(struct bc_state *bcs)
ubc->isoouturbs[k].limit = -1;
}
- /* submit two URBs, keep third one */
- for (k = 0; k < 2; ++k) {
+ /* keep one URB free, submit the others */
+ for (k = 0; k < BAS_OUTURBS-1; ++k) {
dump_urb(DEBUG_ISO, "Initial isoc write", urb);
rc = usb_submit_urb(ubc->isoouturbs[k].urb, GFP_ATOMIC);
if (rc != 0)
goto error;
}
dump_urb(DEBUG_ISO, "Initial isoc write (free)", urb);
- ubc->isooutfree = &ubc->isoouturbs[2];
+ ubc->isooutfree = &ubc->isoouturbs[BAS_OUTURBS-1];
ubc->isooutdone = ubc->isooutovfl = NULL;
return 0;
error:
@@ -958,7 +991,7 @@ static void stopurbs(struct bas_bc_state *ubc)
{
int k, rc;
- atomic_set(&ubc->running, 0);
+ ubc->running = 0;
for (k = 0; k < BAS_INURBS; ++k) {
rc = usb_unlink_urb(ubc->isoinurbs[k]);
@@ -1034,7 +1067,7 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
}
break;
}
- ucx->limit = atomic_read(&ubc->isooutbuf->nextread);
+ ucx->limit = ubc->isooutbuf->nextread;
ifd->status = 0;
ifd->actual_length = 0;
}
@@ -1070,6 +1103,7 @@ static void write_iso_tasklet(unsigned long data)
struct cardstate *cs = bcs->cs;
struct isow_urbctx_t *done, *next, *ovfl;
struct urb *urb;
+ int status;
struct usb_iso_packet_descriptor *ifd;
int offset;
unsigned long flags;
@@ -1080,7 +1114,7 @@ static void write_iso_tasklet(unsigned long data)
/* loop while completed URBs arrive in time */
for (;;) {
- if (unlikely(!(atomic_read(&ubc->running)))) {
+ if (unlikely(!(ubc->running))) {
gig_dbg(DEBUG_ISO, "%s: not running", __func__);
return;
}
@@ -1126,7 +1160,8 @@ static void write_iso_tasklet(unsigned long data)
/* process completed URB */
urb = done->urb;
- switch (urb->status) {
+ status = done->status;
+ switch (status) {
case -EXDEV: /* partial completion */
gig_dbg(DEBUG_ISO, "%s: URB partially completed",
__func__);
@@ -1179,12 +1214,12 @@ static void write_iso_tasklet(unsigned long data)
break;
default: /* severe trouble */
dev_warn(cs->dev, "isochronous write: %s\n",
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
}
/* mark the write buffer area covered by this URB as free */
if (done->limit >= 0)
- atomic_set(&ubc->isooutbuf->read, done->limit);
+ ubc->isooutbuf->read = done->limit;
/* mark URB as free */
spin_lock_irqsave(&ubc->isooutlock, flags);
@@ -1233,6 +1268,7 @@ static void read_iso_tasklet(unsigned long data)
struct bas_bc_state *ubc = bcs->hw.bas;
struct cardstate *cs = bcs->cs;
struct urb *urb;
+ int status;
char *rcvbuf;
unsigned long flags;
int totleft, numbytes, offset, frame, rc;
@@ -1245,6 +1281,7 @@ static void read_iso_tasklet(unsigned long data)
spin_unlock_irqrestore(&ubc->isoinlock, flags);
return;
}
+ status = ubc->isoinstatus;
ubc->isoindone = NULL;
if (unlikely(ubc->loststatus != -EINPROGRESS)) {
dev_warn(cs->dev,
@@ -1256,15 +1293,15 @@ static void read_iso_tasklet(unsigned long data)
}
spin_unlock_irqrestore(&ubc->isoinlock, flags);
- if (unlikely(!(atomic_read(&ubc->running)))) {
+ if (unlikely(!(ubc->running))) {
gig_dbg(DEBUG_ISO,
"%s: channel not running, "
"dropped URB with status: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
return;
}
- switch (urb->status) {
+ switch (status) {
case 0: /* normal completion */
break;
case -EXDEV: /* inspect individual frames
@@ -1276,7 +1313,7 @@ static void read_iso_tasklet(unsigned long data)
case -ECONNRESET:
case -EINPROGRESS:
gig_dbg(DEBUG_ISO, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
continue; /* -> skip */
case -EPIPE:
dev_err(cs->dev, "isochronous read stalled\n");
@@ -1284,7 +1321,7 @@ static void read_iso_tasklet(unsigned long data)
continue; /* -> skip */
default: /* severe trouble */
dev_warn(cs->dev, "isochronous read: %s\n",
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
goto error;
}
@@ -1406,6 +1443,8 @@ static void req_timeout(unsigned long data)
dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n",
pending);
}
+
+ wake_up(&ucs->waitqueue);
}
/* write_ctrl_callback
@@ -1418,11 +1457,12 @@ static void req_timeout(unsigned long data)
static void write_ctrl_callback(struct urb *urb)
{
struct bas_cardstate *ucs = urb->context;
+ int status = urb->status;
int rc;
unsigned long flags;
/* check status */
- switch (urb->status) {
+ switch (status) {
case 0: /* normal completion */
spin_lock_irqsave(&ucs->lock, flags);
switch (ucs->pending) {
@@ -1441,20 +1481,22 @@ static void write_ctrl_callback(struct urb *urb)
case -ESHUTDOWN: /* device shut down */
/* ignore silently */
gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
break;
default: /* any failure */
- if (++ucs->retry_ctrl > BAS_RETRY) {
+ /* don't retry if suspend requested */
+ if (++ucs->retry_ctrl > BAS_RETRY ||
+ (ucs->basstate & BS_SUSPEND)) {
dev_err(&ucs->interface->dev,
"control request 0x%02x failed: %s\n",
ucs->dr_ctrl.bRequest,
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
break; /* give up */
}
dev_notice(&ucs->interface->dev,
"control request 0x%02x: %s, retry %d\n",
- ucs->dr_ctrl.bRequest, get_usb_statmsg(urb->status),
+ ucs->dr_ctrl.bRequest, get_usb_statmsg(status),
ucs->retry_ctrl);
/* urb->dev is clobbered by USB subsystem */
urb->dev = ucs->udev;
@@ -1474,6 +1516,7 @@ static void write_ctrl_callback(struct urb *urb)
del_timer(&ucs->timer_ctrl);
ucs->pending = 0;
spin_unlock_irqrestore(&ucs->lock, flags);
+ wake_up(&ucs->waitqueue);
}
/* req_submit
@@ -1548,37 +1591,46 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
*/
static int gigaset_init_bchannel(struct bc_state *bcs)
{
+ struct cardstate *cs = bcs->cs;
int req, ret;
unsigned long flags;
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (unlikely(!bcs->cs->connected)) {
+ spin_lock_irqsave(&cs->lock, flags);
+ if (unlikely(!cs->connected)) {
gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
return -ENODEV;
}
+ if (cs->hw.bas->basstate & BS_SUSPEND) {
+ dev_notice(cs->dev,
+ "not starting isochronous I/O, "
+ "suspend in progress\n");
+ spin_unlock_irqrestore(&cs->lock, flags);
+ return -EHOSTUNREACH;
+ }
+
if ((ret = starturbs(bcs)) < 0) {
- dev_err(bcs->cs->dev,
+ dev_err(cs->dev,
"could not start isochronous I/O for channel B%d: %s\n",
bcs->channel + 1,
ret == -EFAULT ? "null URB" : get_usb_rcmsg(ret));
if (ret != -ENODEV)
error_hangup(bcs);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
return ret;
}
req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) {
- dev_err(bcs->cs->dev, "could not open channel B%d\n",
+ dev_err(cs->dev, "could not open channel B%d\n",
bcs->channel + 1);
stopurbs(bcs->hw.bas);
if (ret != -ENODEV)
error_hangup(bcs);
}
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
return ret;
}
@@ -1594,20 +1646,20 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
*/
static int gigaset_close_bchannel(struct bc_state *bcs)
{
+ struct cardstate *cs = bcs->cs;
int req, ret;
unsigned long flags;
- spin_lock_irqsave(&bcs->cs->lock, flags);
- if (unlikely(!bcs->cs->connected)) {
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_lock_irqsave(&cs->lock, flags);
+ if (unlikely(!cs->connected)) {
+ spin_unlock_irqrestore(&cs->lock, flags);
gig_dbg(DEBUG_USBREQ, "%s: not connected", __func__);
return -ENODEV;
}
- if (!(atomic_read(&bcs->cs->hw.bas->basstate) &
- (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) {
+ if (!(cs->hw.bas->basstate & (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) {
/* channel not running: just signal common.c */
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
gigaset_bchannel_down(bcs);
return 0;
}
@@ -1615,10 +1667,10 @@ static int gigaset_close_bchannel(struct bc_state *bcs)
/* channel running: tell device to close it */
req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0)
- dev_err(bcs->cs->dev, "closing channel B%d failed\n",
+ dev_err(cs->dev, "closing channel B%d failed\n",
bcs->channel + 1);
- spin_unlock_irqrestore(&bcs->cs->lock, flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
return ret;
}
@@ -1665,12 +1717,14 @@ static void write_command_callback(struct urb *urb)
{
struct cardstate *cs = urb->context;
struct bas_cardstate *ucs = cs->hw.bas;
+ int status = urb->status;
unsigned long flags;
update_basstate(ucs, 0, BS_ATWRPEND);
+ wake_up(&ucs->waitqueue);
/* check status */
- switch (urb->status) {
+ switch (status) {
case 0: /* normal completion */
break;
case -ENOENT: /* cancelled */
@@ -1680,26 +1734,33 @@ static void write_command_callback(struct urb *urb)
case -ESHUTDOWN: /* device shut down */
/* ignore silently */
gig_dbg(DEBUG_USBREQ, "%s: %s",
- __func__, get_usb_statmsg(urb->status));
+ __func__, get_usb_statmsg(status));
return;
default: /* any failure */
if (++ucs->retry_cmd_out > BAS_RETRY) {
dev_warn(cs->dev,
"command write: %s, "
"giving up after %d retries\n",
- get_usb_statmsg(urb->status),
+ get_usb_statmsg(status),
ucs->retry_cmd_out);
break;
}
+ if (ucs->basstate & BS_SUSPEND) {
+ dev_warn(cs->dev,
+ "command write: %s, "
+ "won't retry - suspend requested\n",
+ get_usb_statmsg(status));
+ break;
+ }
if (cs->cmdbuf == NULL) {
dev_warn(cs->dev,
"command write: %s, "
"cannot retry - cmdbuf gone\n",
- get_usb_statmsg(urb->status));
+ get_usb_statmsg(status));
break;
}
dev_notice(cs->dev, "command write: %s, retry %d\n",
- get_usb_statmsg(urb->status), ucs->retry_cmd_out);
+ get_usb_statmsg(status), ucs->retry_cmd_out);
if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0)
/* resubmitted - bypass regular exit block */
return;
@@ -1799,8 +1860,14 @@ static int start_cbsend(struct cardstate *cs)
int rc;
int retval = 0;
+ /* check if suspend requested */
+ if (ucs->basstate & BS_SUSPEND) {
+ gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD, "suspending");
+ return -EHOSTUNREACH;
+ }
+
/* check if AT channel is open */
- if (!(atomic_read(&ucs->basstate) & BS_ATOPEN)) {
+ if (!(ucs->basstate & BS_ATOPEN)) {
gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD, "AT channel not open");
rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT);
if (rc < 0) {
@@ -1816,8 +1883,7 @@ static int start_cbsend(struct cardstate *cs)
/* try to send first command in queue */
spin_lock_irqsave(&cs->cmdlock, flags);
- while ((cb = cs->cmdbuf) != NULL &&
- atomic_read(&ucs->basstate) & BS_ATREADY) {
+ while ((cb = cs->cmdbuf) != NULL && (ucs->basstate & BS_ATREADY)) {
ucs->retry_cmd_out = 0;
rc = atwrite_submit(cs, cb->buf, cb->len);
if (unlikely(rc)) {
@@ -1855,7 +1921,7 @@ static int gigaset_write_cmd(struct cardstate *cs,
unsigned long flags;
int rc;
- gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
+ gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
DEBUG_TRANSCMD : DEBUG_LOCKCMD,
"CMD Transmit", len, buf);
@@ -1970,7 +2036,7 @@ static int gigaset_freebcshw(struct bc_state *bcs)
return 0;
/* kill URBs and tasklets before freeing - better safe than sorry */
- atomic_set(&ubc->running, 0);
+ ubc->running = 0;
gig_dbg(DEBUG_INIT, "%s: killing iso URBs", __func__);
for (i = 0; i < BAS_OUTURBS; ++i) {
usb_kill_urb(ubc->isoouturbs[i].urb);
@@ -2005,7 +2071,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
return 0;
}
- atomic_set(&ubc->running, 0);
+ ubc->running = 0;
atomic_set(&ubc->corrbytes, 0);
spin_lock_init(&ubc->isooutlock);
for (i = 0; i < BAS_OUTURBS; ++i) {
@@ -2050,7 +2116,7 @@ static void gigaset_reinitbcshw(struct bc_state *bcs)
{
struct bas_bc_state *ubc = bcs->hw.bas;
- atomic_set(&bcs->hw.bas->running, 0);
+ bcs->hw.bas->running = 0;
atomic_set(&bcs->hw.bas->corrbytes, 0);
bcs->hw.bas->numsub = 0;
spin_lock_init(&ubc->isooutlock);
@@ -2081,10 +2147,11 @@ static int gigaset_initcshw(struct cardstate *cs)
spin_lock_init(&ucs->lock);
ucs->pending = 0;
- atomic_set(&ucs->basstate, 0);
+ ucs->basstate = 0;
init_timer(&ucs->timer_ctrl);
init_timer(&ucs->timer_atrdy);
init_timer(&ucs->timer_cmd_in);
+ init_waitqueue_head(&ucs->waitqueue);
return 1;
}
@@ -2102,7 +2169,7 @@ static void freeurbs(struct cardstate *cs)
int i, j;
gig_dbg(DEBUG_INIT, "%s: killing URBs", __func__);
- for (j = 0; j < 2; ++j) {
+ for (j = 0; j < BAS_CHANNELS; ++j) {
ubc = cs->bcs[j].hw.bas;
for (i = 0; i < BAS_OUTURBS; ++i) {
usb_kill_urb(ubc->isoouturbs[i].urb);
@@ -2179,11 +2246,11 @@ static int gigaset_probe(struct usb_interface *interface,
__func__, le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
- cs = gigaset_getunassignedcs(driver);
- if (!cs) {
- dev_err(&udev->dev, "no free cardstate\n");
+ /* allocate memory for our device state and initialize it */
+ cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode,
+ GIGASET_MODULENAME);
+ if (!cs)
return -ENODEV;
- }
ucs = cs->hw.bas;
/* save off device structure ptrs for later use */
@@ -2203,7 +2270,7 @@ static int gigaset_probe(struct usb_interface *interface,
!(ucs->urb_ctrl = usb_alloc_urb(0, GFP_KERNEL)))
goto allocerr;
- for (j = 0; j < 2; ++j) {
+ for (j = 0; j < BAS_CHANNELS; ++j) {
ubc = cs->bcs[j].hw.bas;
for (i = 0; i < BAS_OUTURBS; ++i)
if (!(ubc->isoouturbs[i].urb =
@@ -2237,7 +2304,7 @@ static int gigaset_probe(struct usb_interface *interface,
/* tell common part that the device is ready */
if (startmode == SM_LOCKED)
- atomic_set(&cs->mstate, MS_LOCKED);
+ cs->mstate = MS_LOCKED;
/* save address of controller structure */
usb_set_intfdata(interface, cs);
@@ -2252,7 +2319,7 @@ allocerr:
error:
freeurbs(cs);
usb_set_intfdata(interface, NULL);
- gigaset_unassign(cs);
+ gigaset_freecs(cs);
return -ENODEV;
}
@@ -2272,11 +2339,10 @@ static void gigaset_disconnect(struct usb_interface *interface)
dev_info(cs->dev, "disconnecting Gigaset base\n");
/* mark base as not ready, all channels disconnected */
- atomic_set(&ucs->basstate, 0);
+ ucs->basstate = 0;
/* tell LL all channels are down */
- //FIXME shouldn't gigaset_stop() do this?
- for (j = 0; j < 2; ++j)
+ for (j = 0; j < BAS_CHANNELS; ++j)
gigaset_bchannel_down(cs->bcs + j);
/* stop driver (common part) */
@@ -2295,9 +2361,113 @@ static void gigaset_disconnect(struct usb_interface *interface)
ucs->interface = NULL;
ucs->udev = NULL;
cs->dev = NULL;
- gigaset_unassign(cs);
+ gigaset_freecs(cs);
}
+/* gigaset_suspend
+ * This function is called before the USB connection is suspended.
+ */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int rc;
+
+ /* set suspend flag; this stops AT command/response traffic */
+ if (update_basstate(ucs, BS_SUSPEND, 0) & BS_SUSPEND) {
+ gig_dbg(DEBUG_SUSPEND, "already suspended");
+ return 0;
+ }
+
+ /* wait a bit for blocking conditions to go away */
+ rc = wait_event_timeout(ucs->waitqueue,
+ !(ucs->basstate &
+ (BS_B1OPEN|BS_B2OPEN|BS_ATRDPEND|BS_ATWRPEND)),
+ BAS_TIMEOUT*HZ/10);
+ gig_dbg(DEBUG_SUSPEND, "wait_event_timeout() -> %d", rc);
+
+ /* check for conditions preventing suspend */
+ if (ucs->basstate & (BS_B1OPEN|BS_B2OPEN|BS_ATRDPEND|BS_ATWRPEND)) {
+ dev_warn(cs->dev, "cannot suspend:\n");
+ if (ucs->basstate & BS_B1OPEN)
+ dev_warn(cs->dev, " B channel 1 open\n");
+ if (ucs->basstate & BS_B2OPEN)
+ dev_warn(cs->dev, " B channel 2 open\n");
+ if (ucs->basstate & BS_ATRDPEND)
+ dev_warn(cs->dev, " receiving AT reply\n");
+ if (ucs->basstate & BS_ATWRPEND)
+ dev_warn(cs->dev, " sending AT command\n");
+ update_basstate(ucs, 0, BS_SUSPEND);
+ return -EBUSY;
+ }
+
+ /* close AT channel if open */
+ if (ucs->basstate & BS_ATOPEN) {
+ gig_dbg(DEBUG_SUSPEND, "closing AT channel");
+ rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, 0);
+ if (rc) {
+ update_basstate(ucs, 0, BS_SUSPEND);
+ return rc;
+ }
+ wait_event_timeout(ucs->waitqueue, !ucs->pending,
+ BAS_TIMEOUT*HZ/10);
+ /* in case of timeout, proceed anyway */
+ }
+
+ /* kill all URBs and timers that might still be pending */
+ usb_kill_urb(ucs->urb_ctrl);
+ usb_kill_urb(ucs->urb_int_in);
+ del_timer_sync(&ucs->timer_ctrl);
+
+ gig_dbg(DEBUG_SUSPEND, "suspend complete");
+ return 0;
+}
+
+/* gigaset_resume
+ * This function is called after the USB connection has been resumed.
+ */
+static int gigaset_resume(struct usb_interface *intf)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+ struct bas_cardstate *ucs = cs->hw.bas;
+ int rc;
+
+ /* resubmit interrupt URB for spontaneous messages from base */
+ rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
+ if (rc) {
+ dev_err(cs->dev, "could not resubmit interrupt URB: %s\n",
+ get_usb_rcmsg(rc));
+ return rc;
+ }
+
+ /* clear suspend flag to reallow activity */
+ update_basstate(ucs, 0, BS_SUSPEND);
+
+ gig_dbg(DEBUG_SUSPEND, "resume complete");
+ return 0;
+}
+
+/* gigaset_pre_reset
+ * This function is called before the USB connection is reset.
+ */
+static int gigaset_pre_reset(struct usb_interface *intf)
+{
+ /* handle just like suspend */
+ return gigaset_suspend(intf, PMSG_ON);
+}
+
+/* gigaset_post_reset
+ * This function is called after the USB connection has been reset.
+ */
+static int gigaset_post_reset(struct usb_interface *intf)
+{
+ /* FIXME: send HD_DEVICE_INIT_ACK? */
+
+ /* resume operations */
+ return gigaset_resume(intf);
+}
+
+
static const struct gigaset_ops gigops = {
gigaset_write_cmd,
gigaset_write_room,
@@ -2330,12 +2500,6 @@ static int __init bas_gigaset_init(void)
&gigops, THIS_MODULE)) == NULL)
goto error;
- /* allocate memory for our device state and intialize it */
- cardstate = gigaset_initcs(driver, 2, 0, 0, cidmode,
- GIGASET_MODULENAME);
- if (!cardstate)
- goto error;
-
/* register this driver with the USB subsystem */
result = usb_register(&gigaset_usb_driver);
if (result < 0) {
@@ -2347,9 +2511,7 @@ static int __init bas_gigaset_init(void)
info(DRIVER_DESC);
return 0;
-error: if (cardstate)
- gigaset_freecs(cardstate);
- cardstate = NULL;
+error:
if (driver)
gigaset_freedriver(driver);
driver = NULL;
@@ -2361,43 +2523,50 @@ error: if (cardstate)
*/
static void __exit bas_gigaset_exit(void)
{
- struct bas_cardstate *ucs = cardstate->hw.bas;
+ struct bas_cardstate *ucs;
+ int i;
gigaset_blockdriver(driver); /* => probe will fail
* => no gigaset_start any more
*/
- gigaset_shutdown(cardstate);
- /* from now on, no isdn callback should be possible */
-
- /* close all still open channels */
- if (atomic_read(&ucs->basstate) & BS_B1OPEN) {
- gig_dbg(DEBUG_INIT, "closing B1 channel");
- usb_control_msg(ucs->udev, usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_B1CHANNEL, OUT_VENDOR_REQ, 0, 0,
- NULL, 0, BAS_TIMEOUT);
- }
- if (atomic_read(&ucs->basstate) & BS_B2OPEN) {
- gig_dbg(DEBUG_INIT, "closing B2 channel");
- usb_control_msg(ucs->udev, usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_B2CHANNEL, OUT_VENDOR_REQ, 0, 0,
- NULL, 0, BAS_TIMEOUT);
- }
- if (atomic_read(&ucs->basstate) & BS_ATOPEN) {
- gig_dbg(DEBUG_INIT, "closing AT channel");
- usb_control_msg(ucs->udev, usb_sndctrlpipe(ucs->udev, 0),
- HD_CLOSE_ATCHANNEL, OUT_VENDOR_REQ, 0, 0,
- NULL, 0, BAS_TIMEOUT);
+ /* stop all connected devices */
+ for (i = 0; i < driver->minors; i++) {
+ if (gigaset_shutdown(driver->cs + i) < 0)
+ continue; /* no device */
+ /* from now on, no isdn callback should be possible */
+
+ /* close all still open channels */
+ ucs = driver->cs[i].hw.bas;
+ if (ucs->basstate & BS_B1OPEN) {
+ gig_dbg(DEBUG_INIT, "closing B1 channel");
+ usb_control_msg(ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ HD_CLOSE_B1CHANNEL, OUT_VENDOR_REQ,
+ 0, 0, NULL, 0, BAS_TIMEOUT);
+ }
+ if (ucs->basstate & BS_B2OPEN) {
+ gig_dbg(DEBUG_INIT, "closing B2 channel");
+ usb_control_msg(ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ HD_CLOSE_B2CHANNEL, OUT_VENDOR_REQ,
+ 0, 0, NULL, 0, BAS_TIMEOUT);
+ }
+ if (ucs->basstate & BS_ATOPEN) {
+ gig_dbg(DEBUG_INIT, "closing AT channel");
+ usb_control_msg(ucs->udev,
+ usb_sndctrlpipe(ucs->udev, 0),
+ HD_CLOSE_ATCHANNEL, OUT_VENDOR_REQ,
+ 0, 0, NULL, 0, BAS_TIMEOUT);
+ }
+ ucs->basstate = 0;
}
- atomic_set(&ucs->basstate, 0);
/* deregister this driver with the USB subsystem */
usb_deregister(&gigaset_usb_driver);
/* this will call the disconnect-callback */
/* from now on, no disconnect/probe callback should be running */
- gigaset_freecs(cardstate);
- cardstate = NULL;
gigaset_freedriver(driver);
driver = NULL;
}
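The suspend support added to bas-gigaset.c above relies on a simple quiescing pattern: completion handlers wake a waitqueue, and gigaset_suspend() sets a suspend bit to block new traffic, then waits with a timeout for the pending-I/O bits to clear, backing out if they do not. The same idea in isolation, with hypothetical demo_* / DS_* names:

	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	#define DS_BUSY		0x01	/* some request still in flight */
	#define DS_SUSPEND	0x02	/* suspend requested, refuse new I/O */

	struct demo_state {
		spinlock_t lock;
		int state;			/* bitmap of DS_* flags */
		wait_queue_head_t waitqueue;
	};

	/* completion paths clear their busy bit and wake the waiter */
	static void demo_io_done(struct demo_state *ds)
	{
		unsigned long flags;

		spin_lock_irqsave(&ds->lock, flags);
		ds->state &= ~DS_BUSY;
		spin_unlock_irqrestore(&ds->lock, flags);
		wake_up(&ds->waitqueue);
	}

	static int demo_suspend(struct demo_state *ds)
	{
		unsigned long flags;

		/* block new submissions first, then let old ones drain */
		spin_lock_irqsave(&ds->lock, flags);
		ds->state |= DS_SUSPEND;
		spin_unlock_irqrestore(&ds->lock, flags);

		if (!wait_event_timeout(ds->waitqueue,
					!(ds->state & DS_BUSY), HZ)) {
			/* still busy after the timeout: back out */
			spin_lock_irqsave(&ds->lock, flags);
			ds->state &= ~DS_SUSPEND;
			spin_unlock_irqrestore(&ds->lock, flags);
			return -EBUSY;
		}
		return 0;
	}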
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index acd417197d03..aacedec4986f 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -31,7 +31,6 @@ MODULE_PARM_DESC(debug, "debug level");
/* driver state flags */
#define VALID_MINOR 0x01
#define VALID_ID 0x02
-#define ASSIGNED 0x04
void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
size_t len, const unsigned char *buf)
@@ -178,7 +177,7 @@ int gigaset_get_channel(struct bc_state *bcs)
unsigned long flags;
spin_lock_irqsave(&bcs->cs->lock, flags);
- if (bcs->use_count) {
+ if (bcs->use_count || !try_module_get(bcs->cs->driver->owner)) {
gig_dbg(DEBUG_ANY, "could not allocate channel %d",
bcs->channel);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
@@ -203,6 +202,7 @@ void gigaset_free_channel(struct bc_state *bcs)
}
--bcs->use_count;
bcs->busy = 0;
+ module_put(bcs->cs->driver->owner);
gig_dbg(DEBUG_ANY, "freed channel %d", bcs->channel);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
}
@@ -356,31 +356,28 @@ static struct cardstate *alloc_cs(struct gigaset_driver *drv)
{
unsigned long flags;
unsigned i;
+ struct cardstate *cs;
struct cardstate *ret = NULL;
spin_lock_irqsave(&drv->lock, flags);
+ if (drv->blocked)
+ goto exit;
for (i = 0; i < drv->minors; ++i) {
- if (!(drv->flags[i] & VALID_MINOR)) {
- if (try_module_get(drv->owner)) {
- drv->flags[i] = VALID_MINOR;
- ret = drv->cs + i;
- }
+ cs = drv->cs + i;
+ if (!(cs->flags & VALID_MINOR)) {
+ cs->flags = VALID_MINOR;
+ ret = cs;
break;
}
}
+exit:
spin_unlock_irqrestore(&drv->lock, flags);
return ret;
}
static void free_cs(struct cardstate *cs)
{
- unsigned long flags;
- struct gigaset_driver *drv = cs->driver;
- spin_lock_irqsave(&drv->lock, flags);
- if (drv->flags[cs->minor_index] & VALID_MINOR)
- module_put(drv->owner);
- drv->flags[cs->minor_index] = 0;
- spin_unlock_irqrestore(&drv->lock, flags);
+ cs->flags = 0;
}
static void make_valid(struct cardstate *cs, unsigned mask)
@@ -388,7 +385,7 @@ static void make_valid(struct cardstate *cs, unsigned mask)
unsigned long flags;
struct gigaset_driver *drv = cs->driver;
spin_lock_irqsave(&drv->lock, flags);
- drv->flags[cs->minor_index] |= mask;
+ cs->flags |= mask;
spin_unlock_irqrestore(&drv->lock, flags);
}
@@ -397,7 +394,7 @@ static void make_invalid(struct cardstate *cs, unsigned mask)
unsigned long flags;
struct gigaset_driver *drv = cs->driver;
spin_lock_irqsave(&drv->lock, flags);
- drv->flags[cs->minor_index] &= ~mask;
+ cs->flags &= ~mask;
spin_unlock_irqrestore(&drv->lock, flags);
}
@@ -501,11 +498,11 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs,
struct cardstate *cs, int inputstate)
/* inbuf->read must be allocated before! */
{
- atomic_set(&inbuf->head, 0);
- atomic_set(&inbuf->tail, 0);
+ inbuf->head = 0;
+ inbuf->tail = 0;
inbuf->cs = cs;
inbuf->bcs = bcs; /*base driver: NULL*/
- inbuf->rcvbuf = NULL; //FIXME
+ inbuf->rcvbuf = NULL;
inbuf->inputstate = inputstate;
}
@@ -521,8 +518,8 @@ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
return 0;
bytesleft = numbytes;
- tail = atomic_read(&inbuf->tail);
- head = atomic_read(&inbuf->head);
+ tail = inbuf->tail;
+ head = inbuf->head;
gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
while (bytesleft) {
@@ -546,7 +543,7 @@ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
src += n;
}
gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
- atomic_set(&inbuf->tail, tail);
+ inbuf->tail = tail;
return numbytes != bytesleft;
}
EXPORT_SYMBOL_GPL(gigaset_fill_inbuf);
@@ -668,7 +665,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
tasklet_init(&cs->event_tasklet, &gigaset_handle_event,
(unsigned long) cs);
- atomic_set(&cs->commands_pending, 0);
+ cs->commands_pending = 0;
cs->cur_at_seq = 0;
cs->gotfwver = -1;
cs->open_count = 0;
@@ -688,8 +685,8 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
init_waitqueue_head(&cs->waitqueue);
cs->waiting = 0;
- atomic_set(&cs->mode, M_UNKNOWN);
- atomic_set(&cs->mstate, MS_UNINITIALIZED);
+ cs->mode = M_UNKNOWN;
+ cs->mstate = MS_UNINITIALIZED;
for (i = 0; i < channels; ++i) {
gig_dbg(DEBUG_INIT, "setting up bcs[%d].read", i);
@@ -806,8 +803,8 @@ static void cleanup_cs(struct cardstate *cs)
spin_lock_irqsave(&cs->lock, flags);
- atomic_set(&cs->mode, M_UNKNOWN);
- atomic_set(&cs->mstate, MS_UNINITIALIZED);
+ cs->mode = M_UNKNOWN;
+ cs->mstate = MS_UNINITIALIZED;
clear_at_state(&cs->at_state);
dealloc_at_states(cs);
@@ -817,8 +814,8 @@ static void cleanup_cs(struct cardstate *cs)
kfree(cs->inbuf->rcvbuf);
cs->inbuf->rcvbuf = NULL;
cs->inbuf->inputstate = INS_command;
- atomic_set(&cs->inbuf->head, 0);
- atomic_set(&cs->inbuf->tail, 0);
+ cs->inbuf->head = 0;
+ cs->inbuf->tail = 0;
cb = cs->cmdbuf;
while (cb) {
@@ -832,7 +829,7 @@ static void cleanup_cs(struct cardstate *cs)
cs->gotfwver = -1;
cs->dle = 0;
cs->cur_at_seq = 0;
- atomic_set(&cs->commands_pending, 0);
+ cs->commands_pending = 0;
cs->cbytes = 0;
spin_unlock_irqrestore(&cs->lock, flags);
@@ -862,7 +859,7 @@ int gigaset_start(struct cardstate *cs)
cs->connected = 1;
spin_unlock_irqrestore(&cs->lock, flags);
- if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ if (cs->mstate != MS_LOCKED) {
cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS);
cs->ops->baud_rate(cs, B115200);
cs->ops->set_line_ctrl(cs, CS8);
@@ -893,10 +890,17 @@ error:
}
EXPORT_SYMBOL_GPL(gigaset_start);
-void gigaset_shutdown(struct cardstate *cs)
+/* gigaset_shutdown
+ * check if a device is associated with the cardstate structure and stop it
+ * return value: 0 if ok, -1 if no device was associated
+ */
+int gigaset_shutdown(struct cardstate *cs)
{
mutex_lock(&cs->mutex);
+ if (!(cs->flags & VALID_MINOR))
+ return -1;
+
cs->waiting = 1;
if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) {
@@ -913,6 +917,7 @@ void gigaset_shutdown(struct cardstate *cs)
exit:
mutex_unlock(&cs->mutex);
+ return 0;
}
EXPORT_SYMBOL_GPL(gigaset_shutdown);
@@ -954,13 +959,11 @@ struct cardstate *gigaset_get_cs_by_id(int id)
list_for_each_entry(drv, &drivers, list) {
spin_lock(&drv->lock);
for (i = 0; i < drv->minors; ++i) {
- if (drv->flags[i] & VALID_ID) {
- cs = drv->cs + i;
- if (cs->myid == id)
- ret = cs;
- }
- if (ret)
+ cs = drv->cs + i;
+ if ((cs->flags & VALID_ID) && cs->myid == id) {
+ ret = cs;
break;
+ }
}
spin_unlock(&drv->lock);
if (ret)
@@ -983,10 +986,9 @@ void gigaset_debugdrivers(void)
spin_lock(&drv->lock);
for (i = 0; i < drv->minors; ++i) {
gig_dbg(DEBUG_DRIVER, " index %u", i);
- gig_dbg(DEBUG_DRIVER, " flags 0x%02x",
- drv->flags[i]);
cs = drv->cs + i;
gig_dbg(DEBUG_DRIVER, " cardstate %p", cs);
+ gig_dbg(DEBUG_DRIVER, " flags 0x%02x", cs->flags);
gig_dbg(DEBUG_DRIVER, " minor_index %u",
cs->minor_index);
gig_dbg(DEBUG_DRIVER, " driver %p", cs->driver);
@@ -1010,7 +1012,7 @@ static struct cardstate *gigaset_get_cs_by_minor(unsigned minor)
continue;
index = minor - drv->minor;
spin_lock(&drv->lock);
- if (drv->flags[index] & VALID_MINOR)
+ if (drv->cs[index].flags & VALID_MINOR)
ret = drv->cs + index;
spin_unlock(&drv->lock);
if (ret)
@@ -1038,7 +1040,6 @@ void gigaset_freedriver(struct gigaset_driver *drv)
gigaset_if_freedriver(drv);
kfree(drv->cs);
- kfree(drv->flags);
kfree(drv);
}
EXPORT_SYMBOL_GPL(gigaset_freedriver);
@@ -1080,12 +1081,8 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors,
if (!drv->cs)
goto error;
- drv->flags = kmalloc(minors * sizeof *drv->flags, GFP_KERNEL);
- if (!drv->flags)
- goto error;
-
for (i = 0; i < minors; ++i) {
- drv->flags[i] = 0;
+ drv->cs[i].flags = 0;
drv->cs[i].driver = drv;
drv->cs[i].ops = drv->ops;
drv->cs[i].minor_index = i;
@@ -1106,53 +1103,9 @@ error:
}
EXPORT_SYMBOL_GPL(gigaset_initdriver);
-/* For drivers without fixed assignment device<->cardstate (usb) */
-struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv)
-{
- unsigned long flags;
- struct cardstate *cs = NULL;
- unsigned i;
-
- spin_lock_irqsave(&drv->lock, flags);
- if (drv->blocked)
- goto exit;
- for (i = 0; i < drv->minors; ++i) {
- if ((drv->flags[i] & VALID_MINOR) &&
- !(drv->flags[i] & ASSIGNED)) {
- drv->flags[i] |= ASSIGNED;
- cs = drv->cs + i;
- break;
- }
- }
-exit:
- spin_unlock_irqrestore(&drv->lock, flags);
- return cs;
-}
-EXPORT_SYMBOL_GPL(gigaset_getunassignedcs);
-
-void gigaset_unassign(struct cardstate *cs)
-{
- unsigned long flags;
- unsigned *minor_flags;
- struct gigaset_driver *drv;
-
- if (!cs)
- return;
- drv = cs->driver;
- spin_lock_irqsave(&drv->lock, flags);
- minor_flags = drv->flags + cs->minor_index;
- if (*minor_flags & VALID_MINOR)
- *minor_flags &= ~ASSIGNED;
- spin_unlock_irqrestore(&drv->lock, flags);
-}
-EXPORT_SYMBOL_GPL(gigaset_unassign);
-
void gigaset_blockdriver(struct gigaset_driver *drv)
{
- unsigned long flags;
- spin_lock_irqsave(&drv->lock, flags);
drv->blocked = 1;
- spin_unlock_irqrestore(&drv->lock, flags);
}
EXPORT_SYMBOL_GPL(gigaset_blockdriver);
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index cec1ef342fcc..5cbf64d850ee 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -735,7 +735,7 @@ static void disconnect(struct at_state_t **at_state_p)
/* revert to selected idle mode */
if (!cs->cidmode) {
cs->at_state.pending_commands |= PC_UMMODE;
- atomic_set(&cs->commands_pending, 1); //FIXME
+ cs->commands_pending = 1;
gig_dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
}
spin_unlock_irqrestore(&cs->lock, flags);
@@ -793,15 +793,15 @@ static void init_failed(struct cardstate *cs, int mode)
struct at_state_t *at_state;
cs->at_state.pending_commands &= ~PC_INIT;
- atomic_set(&cs->mode, mode);
- atomic_set(&cs->mstate, MS_UNINITIALIZED);
+ cs->mode = mode;
+ cs->mstate = MS_UNINITIALIZED;
gigaset_free_channels(cs);
for (i = 0; i < cs->channels; ++i) {
at_state = &cs->bcs[i].at_state;
if (at_state->pending_commands & PC_CID) {
at_state->pending_commands &= ~PC_CID;
at_state->pending_commands |= PC_NOCID;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
}
}
}
@@ -812,11 +812,11 @@ static void schedule_init(struct cardstate *cs, int state)
gig_dbg(DEBUG_CMD, "not scheduling PC_INIT again");
return;
}
- atomic_set(&cs->mstate, state);
- atomic_set(&cs->mode, M_UNKNOWN);
+ cs->mstate = state;
+ cs->mode = M_UNKNOWN;
gigaset_block_channels(cs);
cs->at_state.pending_commands |= PC_INIT;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
gig_dbg(DEBUG_CMD, "Scheduling PC_INIT");
}
@@ -953,13 +953,13 @@ static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_ind
at_state->pending_commands |= PC_CID;
gig_dbg(DEBUG_CMD, "Scheduling PC_CID");
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
return;
error:
at_state->pending_commands |= PC_NOCID;
gig_dbg(DEBUG_CMD, "Scheduling PC_NOCID");
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
return;
}
@@ -973,12 +973,12 @@ static void start_accept(struct at_state_t *at_state)
if (retval == 0) {
at_state->pending_commands |= PC_ACCEPT;
gig_dbg(DEBUG_CMD, "Scheduling PC_ACCEPT");
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
} else {
- //FIXME
+ /* error reset */
at_state->pending_commands |= PC_HUP;
gig_dbg(DEBUG_CMD, "Scheduling PC_HUP");
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
}
}
@@ -986,7 +986,7 @@ static void do_start(struct cardstate *cs)
{
gigaset_free_channels(cs);
- if (atomic_read(&cs->mstate) != MS_LOCKED)
+ if (cs->mstate != MS_LOCKED)
schedule_init(cs, MS_INIT);
cs->isdn_up = 1;
@@ -1000,9 +1000,9 @@ static void do_start(struct cardstate *cs)
static void finish_shutdown(struct cardstate *cs)
{
- if (atomic_read(&cs->mstate) != MS_LOCKED) {
- atomic_set(&cs->mstate, MS_UNINITIALIZED);
- atomic_set(&cs->mode, M_UNKNOWN);
+ if (cs->mstate != MS_LOCKED) {
+ cs->mstate = MS_UNINITIALIZED;
+ cs->mode = M_UNKNOWN;
}
/* Tell the LL that the device is not available .. */
@@ -1022,10 +1022,10 @@ static void do_shutdown(struct cardstate *cs)
{
gigaset_block_channels(cs);
- if (atomic_read(&cs->mstate) == MS_READY) {
- atomic_set(&cs->mstate, MS_SHUTDOWN);
+ if (cs->mstate == MS_READY) {
+ cs->mstate = MS_SHUTDOWN;
cs->at_state.pending_commands |= PC_SHUTDOWN;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
gig_dbg(DEBUG_CMD, "Scheduling PC_SHUTDOWN");
} else
finish_shutdown(cs);
@@ -1120,7 +1120,7 @@ static void handle_icall(struct cardstate *cs, struct bc_state *bcs,
* In fact it doesn't.
*/
at_state->pending_commands |= PC_HUP;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
}
}
@@ -1130,7 +1130,7 @@ static int do_lock(struct cardstate *cs)
int mode;
int i;
- switch (atomic_read(&cs->mstate)) {
+ switch (cs->mstate) {
case MS_UNINITIALIZED:
case MS_READY:
if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) ||
@@ -1152,20 +1152,20 @@ static int do_lock(struct cardstate *cs)
return -EBUSY;
}
- mode = atomic_read(&cs->mode);
- atomic_set(&cs->mstate, MS_LOCKED);
- atomic_set(&cs->mode, M_UNKNOWN);
+ mode = cs->mode;
+ cs->mstate = MS_LOCKED;
+ cs->mode = M_UNKNOWN;
return mode;
}
static int do_unlock(struct cardstate *cs)
{
- if (atomic_read(&cs->mstate) != MS_LOCKED)
+ if (cs->mstate != MS_LOCKED)
return -EINVAL;
- atomic_set(&cs->mstate, MS_UNINITIALIZED);
- atomic_set(&cs->mode, M_UNKNOWN);
+ cs->mstate = MS_UNINITIALIZED;
+ cs->mode = M_UNKNOWN;
gigaset_free_channels(cs);
if (cs->connected)
schedule_init(cs, MS_INIT);
@@ -1198,17 +1198,17 @@ static void do_action(int action, struct cardstate *cs,
case ACT_INIT:
cs->at_state.pending_commands &= ~PC_INIT;
cs->cur_at_seq = SEQ_NONE;
- atomic_set(&cs->mode, M_UNIMODEM);
+ cs->mode = M_UNIMODEM;
spin_lock_irqsave(&cs->lock, flags);
if (!cs->cidmode) {
spin_unlock_irqrestore(&cs->lock, flags);
gigaset_free_channels(cs);
- atomic_set(&cs->mstate, MS_READY);
+ cs->mstate = MS_READY;
break;
}
spin_unlock_irqrestore(&cs->lock, flags);
cs->at_state.pending_commands |= PC_CIDMODE;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
gig_dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
break;
case ACT_FAILINIT:
@@ -1234,22 +1234,20 @@ static void do_action(int action, struct cardstate *cs,
| INS_command;
break;
case ACT_CMODESET:
- if (atomic_read(&cs->mstate) == MS_INIT ||
- atomic_read(&cs->mstate) == MS_RECOVER) {
+ if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
gigaset_free_channels(cs);
- atomic_set(&cs->mstate, MS_READY);
+ cs->mstate = MS_READY;
}
- atomic_set(&cs->mode, M_CID);
+ cs->mode = M_CID;
cs->cur_at_seq = SEQ_NONE;
break;
case ACT_UMODESET:
- atomic_set(&cs->mode, M_UNIMODEM);
+ cs->mode = M_UNIMODEM;
cs->cur_at_seq = SEQ_NONE;
break;
case ACT_FAILCMODE:
cs->cur_at_seq = SEQ_NONE;
- if (atomic_read(&cs->mstate) == MS_INIT ||
- atomic_read(&cs->mstate) == MS_RECOVER) {
+ if (cs->mstate == MS_INIT || cs->mstate == MS_RECOVER) {
init_failed(cs, M_UNKNOWN);
break;
}
@@ -1307,7 +1305,7 @@ static void do_action(int action, struct cardstate *cs,
case ACT_CONNECT:
if (cs->onechannel) {
at_state->pending_commands |= PC_DLE1;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
}
bcs->chstate |= CHS_D_UP;
@@ -1333,7 +1331,7 @@ static void do_action(int action, struct cardstate *cs,
* DLE only used for M10x with one B channel.
*/
at_state->pending_commands |= PC_DLE0;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
} else
disconnect(p_at_state);
break;
@@ -1369,7 +1367,7 @@ static void do_action(int action, struct cardstate *cs,
"Could not enter DLE mode. Trying to hang up.\n");
channel = cs->curchannel;
cs->bcs[channel].at_state.pending_commands |= PC_HUP;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
case ACT_CID: /* got cid; start dialing */
@@ -1379,7 +1377,7 @@ static void do_action(int action, struct cardstate *cs,
cs->bcs[channel].at_state.cid = ev->parameter;
cs->bcs[channel].at_state.pending_commands |=
PC_DIAL;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
}
/* fall through */
@@ -1411,14 +1409,14 @@ static void do_action(int action, struct cardstate *cs,
case ACT_ABORTDIAL: /* error/timeout during dial preparation */
cs->cur_at_seq = SEQ_NONE;
at_state->pending_commands |= PC_HUP;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
case ACT_REMOTEREJECT: /* DISCONNECT_IND after dialling */
case ACT_CONNTIMEOUT: /* timeout waiting for ZSAU=ACTIVE */
case ACT_REMOTEHUP: /* DISCONNECT_IND with established connection */
at_state->pending_commands |= PC_HUP;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
break;
case ACT_GETSTRING: /* warning: RING, ZDLE, ...
are not handled properly anymore */
@@ -1515,7 +1513,7 @@ static void do_action(int action, struct cardstate *cs,
break;
case ACT_HUP:
at_state->pending_commands |= PC_HUP;
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
gig_dbg(DEBUG_CMD, "Scheduling PC_HUP");
break;
@@ -1558,7 +1556,7 @@ static void do_action(int action, struct cardstate *cs,
cs->at_state.pending_commands |= PC_UMMODE;
gig_dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
}
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
}
spin_unlock_irqrestore(&cs->lock, flags);
cs->waiting = 0;
@@ -1741,7 +1739,7 @@ static void process_command_flags(struct cardstate *cs)
int sequence;
unsigned long flags;
- atomic_set(&cs->commands_pending, 0);
+ cs->commands_pending = 0;
if (cs->cur_at_seq) {
gig_dbg(DEBUG_CMD, "not searching scheduled commands: busy");
@@ -1779,7 +1777,7 @@ static void process_command_flags(struct cardstate *cs)
~(PC_DLE1 | PC_ACCEPT | PC_DIAL);
if (at_state->cid > 0)
at_state->pending_commands |= PC_HUP;
- if (atomic_read(&cs->mstate) == MS_RECOVER) {
+ if (cs->mstate == MS_RECOVER) {
if (at_state->pending_commands & PC_CID) {
at_state->pending_commands |= PC_NOCID;
at_state->pending_commands &= ~PC_CID;
@@ -1793,7 +1791,7 @@ static void process_command_flags(struct cardstate *cs)
if (cs->at_state.pending_commands == PC_UMMODE
&& !cs->cidmode
&& list_empty(&cs->temp_at_states)
- && atomic_read(&cs->mode) == M_CID) {
+ && cs->mode == M_CID) {
sequence = SEQ_UMMODE;
at_state = &cs->at_state;
for (i = 0; i < cs->channels; ++i) {
@@ -1860,7 +1858,7 @@ static void process_command_flags(struct cardstate *cs)
}
if (cs->at_state.pending_commands & PC_CIDMODE) {
cs->at_state.pending_commands &= ~PC_CIDMODE;
- if (atomic_read(&cs->mode) == M_UNIMODEM) {
+ if (cs->mode == M_UNIMODEM) {
cs->retry_count = 1;
schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE);
return;
@@ -1886,11 +1884,11 @@ static void process_command_flags(struct cardstate *cs)
return;
}
if (bcs->at_state.pending_commands & PC_CID) {
- switch (atomic_read(&cs->mode)) {
+ switch (cs->mode) {
case M_UNIMODEM:
cs->at_state.pending_commands |= PC_CIDMODE;
gig_dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
- atomic_set(&cs->commands_pending, 1);
+ cs->commands_pending = 1;
return;
#ifdef GIG_MAYINITONDIAL
case M_UNKNOWN:
@@ -1926,7 +1924,7 @@ static void process_events(struct cardstate *cs)
for (i = 0; i < 2 * MAX_EVENTS; ++i) {
tail = cs->ev_tail;
if (tail == head) {
- if (!check_flags && !atomic_read(&cs->commands_pending))
+ if (!check_flags && !cs->commands_pending)
break;
check_flags = 0;
spin_unlock_irqrestore(&cs->ev_lock, flags);
@@ -1934,7 +1932,7 @@ static void process_events(struct cardstate *cs)
spin_lock_irqsave(&cs->ev_lock, flags);
tail = cs->ev_tail;
if (tail == head) {
- if (!atomic_read(&cs->commands_pending))
+ if (!cs->commands_pending)
break;
continue;
}
@@ -1971,7 +1969,7 @@ void gigaset_handle_event(unsigned long data)
struct cardstate *cs = (struct cardstate *) data;
/* handle incoming data on control/common channel */
- if (atomic_read(&cs->inbuf->head) != atomic_read(&cs->inbuf->tail)) {
+ if (cs->inbuf->head != cs->inbuf->tail) {
gig_dbg(DEBUG_INTR, "processing new data");
cs->ops->handle_input(cs->inbuf);
}
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 02bdaf22d7ea..f365993161fc 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -70,22 +70,13 @@
extern int gigaset_debuglevel; /* "needs" cast to (enum debuglevel) */
-/* any combination of these can be given with the 'debug=' parameter to insmod,
- * e.g. 'insmod usb_gigaset.o debug=0x2c' will set DEBUG_OPEN, DEBUG_CMD and
- * DEBUG_INTR.
- */
+/* debug flags, combine by adding/bitwise OR */
enum debuglevel {
- DEBUG_REG = 0x0002, /* serial port I/O register operations */
- DEBUG_OPEN = 0x0004, /* open/close serial port */
- DEBUG_INTR = 0x0008, /* interrupt processing */
- DEBUG_INTR_DUMP = 0x0010, /* Activating hexdump debug output on
- interrupt requests, not available as
- run-time option */
+ DEBUG_INTR = 0x00008, /* interrupt processing */
DEBUG_CMD = 0x00020, /* sent/received LL commands */
DEBUG_STREAM = 0x00040, /* application data stream I/O events */
DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */
DEBUG_LLDATA = 0x00100, /* sent/received LL data */
- DEBUG_INTR_0 = 0x00200, /* serial port interrupt processing */
DEBUG_DRIVER = 0x00400, /* driver structure */
DEBUG_HDLC = 0x00800, /* M10x HDLC processing */
DEBUG_WRITE = 0x01000, /* M105 data write */
@@ -93,7 +84,7 @@ enum debuglevel {
DEBUG_MCMD = 0x04000, /* COMMANDS THAT ARE SENT VERY OFTEN */
DEBUG_INIT = 0x08000, /* (de)allocation+initialization of data
structures */
- DEBUG_LOCK = 0x10000, /* semaphore operations */
+ DEBUG_SUSPEND = 0x10000, /* suspend/resume processing */
DEBUG_OUTPUT = 0x20000, /* output to device */
DEBUG_ISO = 0x40000, /* isochronous transfers */
DEBUG_IF = 0x80000, /* character device operations */
@@ -191,6 +182,9 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
#define HD_OPEN_ATCHANNEL (0x28) // 3070
#define HD_CLOSE_ATCHANNEL (0x29) // 3070
+/* number of B channels supported by base driver */
+#define BAS_CHANNELS 2
+
/* USB frames for isochronous transfer */
#define BAS_FRAMETIME 1 /* number of milliseconds between frames */
#define BAS_NUMFRAMES 8 /* number of frames per URB */
@@ -313,7 +307,7 @@ struct inbuf_t {
struct bc_state *bcs;
struct cardstate *cs;
int inputstate;
- atomic_t head, tail;
+ int head, tail;
unsigned char data[RBUFSIZE];
};
@@ -335,9 +329,9 @@ struct inbuf_t {
* are also filled with that value
*/
struct isowbuf_t {
- atomic_t read;
- atomic_t nextread;
- atomic_t write;
+ int read;
+ int nextread;
+ int write;
atomic_t writesem;
int wbits;
unsigned char data[BAS_OUTBUFSIZE + BAS_OUTBUFPAD];
@@ -350,11 +344,13 @@ struct isowbuf_t {
* - urb: pointer to the URB itself
* - bcs: pointer to the B Channel control structure
* - limit: end of write buffer area covered by this URB
+ * - status: URB completion status
*/
struct isow_urbctx_t {
struct urb *urb;
struct bc_state *bcs;
int limit;
+ int status;
};
/* AT state structure
@@ -439,14 +435,15 @@ struct cardstate {
unsigned minor_index;
struct device *dev;
struct device *tty_dev;
+ unsigned flags;
const struct gigaset_ops *ops;
/* Stuff to handle communication */
wait_queue_head_t waitqueue;
int waiting;
- atomic_t mode; /* see M_XXXX */
- atomic_t mstate; /* Modem state: see MS_XXXX */
+ int mode; /* see M_XXXX */
+ int mstate; /* Modem state: see MS_XXXX */
/* only changed by the event layer */
int cmd_result;
@@ -503,7 +500,7 @@ struct cardstate {
processed */
int curchannel; /* channel those commands are meant
for */
- atomic_t commands_pending; /* flag(s) in xxx.commands_pending have
+ int commands_pending; /* flag(s) in xxx.commands_pending have
been set */
struct tasklet_struct event_tasklet;
/* tasklet for serializing AT commands.
@@ -543,7 +540,6 @@ struct gigaset_driver {
unsigned minor;
unsigned minors;
struct cardstate *cs;
- unsigned *flags;
int blocked;
const struct gigaset_ops *ops;
@@ -559,7 +555,7 @@ struct cmdbuf_t {
struct bas_bc_state {
/* isochronous output state */
- atomic_t running;
+ int running;
atomic_t corrbytes;
spinlock_t isooutlock;
struct isow_urbctx_t isoouturbs[BAS_OUTURBS];
@@ -574,6 +570,7 @@ struct bas_bc_state {
struct urb *isoinurbs[BAS_INURBS];
unsigned char isoinbuf[BAS_INBUFSIZE * BAS_INURBS];
struct urb *isoindone; /* completed isoc read URB */
+ int isoinstatus; /* status of completed URB */
int loststatus; /* status of dropped URB */
unsigned isoinlost; /* number of bytes lost */
/* state of bit unstuffing algorithm
@@ -770,10 +767,6 @@ void gigaset_freedriver(struct gigaset_driver *drv);
void gigaset_debugdrivers(void);
struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty);
struct cardstate *gigaset_get_cs_by_id(int id);
-
-/* For drivers without fixed assignment device<->cardstate (usb) */
-struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv);
-void gigaset_unassign(struct cardstate *cs);
void gigaset_blockdriver(struct gigaset_driver *drv);
/* Allocate and initialize card state. Calls hardware dependent
@@ -792,7 +785,7 @@ int gigaset_start(struct cardstate *cs);
void gigaset_stop(struct cardstate *cs);
/* Tell common.c that the driver is being unloaded. */
-void gigaset_shutdown(struct cardstate *cs);
+int gigaset_shutdown(struct cardstate *cs);
/* Tell common.c that an skb has been sent. */
void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index eb50f3dab5f7..af195b07c191 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -28,12 +28,11 @@ static int if_lock(struct cardstate *cs, int *arg)
return -EINVAL;
if (cmd < 0) {
- *arg = atomic_read(&cs->mstate) == MS_LOCKED; //FIXME remove?
+ *arg = cs->mstate == MS_LOCKED;
return 0;
}
- if (!cmd && atomic_read(&cs->mstate) == MS_LOCKED
- && cs->connected) {
+ if (!cmd && cs->mstate == MS_LOCKED && cs->connected) {
cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS);
cs->ops->baud_rate(cs, B115200);
cs->ops->set_line_ctrl(cs, CS8);
@@ -104,7 +103,7 @@ static int if_config(struct cardstate *cs, int *arg)
if (*arg != 1)
return -EINVAL;
- if (atomic_read(&cs->mstate) != MS_LOCKED)
+ if (cs->mstate != MS_LOCKED)
return -EBUSY;
if (!cs->connected) {
@@ -162,7 +161,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
tty->driver_data = NULL;
cs = gigaset_get_cs_by_tty(tty);
- if (!cs)
+ if (!cs || !try_module_get(cs->driver->owner))
return -ENODEV;
if (mutex_lock_interruptible(&cs->mutex))
@@ -208,6 +207,8 @@ static void if_close(struct tty_struct *tty, struct file *filp)
}
mutex_unlock(&cs->mutex);
+
+ module_put(cs->driver->owner);
}
static int if_ioctl(struct tty_struct *tty, struct file *file,
@@ -364,7 +365,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
if (!cs->open_count)
warn("%s: device not opened", __func__);
- else if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ else if (cs->mstate != MS_LOCKED) {
warn("can't write to unlocked device");
retval = -EBUSY;
} else if (!cs->connected) {
@@ -398,9 +399,9 @@ static int if_write_room(struct tty_struct *tty)
if (!cs->open_count)
warn("%s: device not opened", __func__);
- else if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ else if (cs->mstate != MS_LOCKED) {
warn("can't write to unlocked device");
- retval = -EBUSY; //FIXME
+ retval = -EBUSY;
} else if (!cs->connected) {
gig_dbg(DEBUG_ANY, "can't write to unplugged device");
retval = -EBUSY; //FIXME
@@ -430,7 +431,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
if (!cs->open_count)
warn("%s: device not opened", __func__);
- else if (atomic_read(&cs->mstate) != MS_LOCKED) {
+ else if (cs->mstate != MS_LOCKED) {
warn("can't write to unlocked device");
retval = -EBUSY;
} else if (!cs->connected) {
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index e0505f238807..e30a7773f93c 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -23,9 +23,9 @@
*/
void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle)
{
- atomic_set(&iwb->read, 0);
- atomic_set(&iwb->nextread, 0);
- atomic_set(&iwb->write, 0);
+ iwb->read = 0;
+ iwb->nextread = 0;
+ iwb->write = 0;
atomic_set(&iwb->writesem, 1);
iwb->wbits = 0;
iwb->idle = idle;
@@ -39,8 +39,8 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
{
int read, write, freebytes;
- read = atomic_read(&iwb->read);
- write = atomic_read(&iwb->write);
+ read = iwb->read;
+ write = iwb->write;
if ((freebytes = read - write) > 0) {
/* no wraparound: need padding space within regular area */
return freebytes - BAS_OUTBUFPAD;
@@ -62,7 +62,7 @@ static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b)
int read;
if (a == b)
return 0;
- read = atomic_read(&iwb->read);
+ read = iwb->read;
if (a < b) {
if (a < read && read <= b)
return +1;
@@ -91,18 +91,18 @@ static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
#ifdef CONFIG_GIGASET_DEBUG
gig_dbg(DEBUG_ISO,
"%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
- __func__, iwb->data[atomic_read(&iwb->write)], iwb->wbits);
+ __func__, iwb->data[iwb->write], iwb->wbits);
#endif
return 1;
}
/* finish writing
- * release the write semaphore and update the maximum buffer fill level
+ * release the write semaphore
* returns the current write position
*/
static inline int isowbuf_donewrite(struct isowbuf_t *iwb)
{
- int write = atomic_read(&iwb->write);
+ int write = iwb->write;
atomic_inc(&iwb->writesem);
return write;
}
@@ -116,7 +116,7 @@ static inline int isowbuf_donewrite(struct isowbuf_t *iwb)
*/
static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits)
{
- int write = atomic_read(&iwb->write);
+ int write = iwb->write;
data <<= iwb->wbits;
data |= iwb->data[write];
nbits += iwb->wbits;
@@ -128,7 +128,7 @@ static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits)
}
iwb->wbits = nbits;
iwb->data[write] = data & 0xff;
- atomic_set(&iwb->write, write);
+ iwb->write = write;
}
/* put final flag on HDLC bitstream
@@ -142,7 +142,7 @@ static inline void isowbuf_putflag(struct isowbuf_t *iwb)
/* add two flags, thus reliably covering one byte */
isowbuf_putbits(iwb, 0x7e7e, 8);
/* recover the idle flag byte */
- write = atomic_read(&iwb->write);
+ write = iwb->write;
iwb->idle = iwb->data[write];
gig_dbg(DEBUG_ISO, "idle fill byte %02x", iwb->idle);
/* mask extraneous bits in buffer */
@@ -160,8 +160,8 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
int read, write, limit, src, dst;
unsigned char pbyte;
- read = atomic_read(&iwb->nextread);
- write = atomic_read(&iwb->write);
+ read = iwb->nextread;
+ write = iwb->write;
if (likely(read == write)) {
/* return idle frame */
return read < BAS_OUTBUFPAD ?
@@ -176,7 +176,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
err("invalid size %d", size);
return -EINVAL;
}
- src = atomic_read(&iwb->read);
+ src = iwb->read;
if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD ||
(read < src && limit >= src))) {
err("isoc write buffer frame reservation violated");
@@ -191,7 +191,8 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
if (!isowbuf_startwrite(iwb))
return -EBUSY;
/* write position could have changed */
- if (limit >= (write = atomic_read(&iwb->write))) {
+ write = iwb->write;
+ if (limit >= write) {
pbyte = iwb->data[write]; /* save
partial byte */
limit = write + BAS_OUTBUFPAD;
@@ -213,7 +214,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
__func__, pbyte, limit);
iwb->data[limit] = pbyte; /* restore
partial byte */
- atomic_set(&iwb->write, limit);
+ iwb->write = limit;
}
isowbuf_donewrite(iwb);
}
@@ -233,7 +234,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
limit = src;
}
}
- atomic_set(&iwb->nextread, limit);
+ iwb->nextread = limit;
return read;
}
@@ -477,7 +478,7 @@ static inline int trans_buildframe(struct isowbuf_t *iwb,
unsigned char c;
if (unlikely(count <= 0))
- return atomic_read(&iwb->write); /* better ideas? */
+ return iwb->write;
if (isowbuf_freebytes(iwb) < count ||
!isowbuf_startwrite(iwb)) {
@@ -486,13 +487,13 @@ static inline int trans_buildframe(struct isowbuf_t *iwb,
}
gig_dbg(DEBUG_STREAM, "put %d bytes", count);
- write = atomic_read(&iwb->write);
+ write = iwb->write;
do {
c = bitrev8(*in++);
iwb->data[write++] = c;
write %= BAS_OUTBUFSIZE;
} while (--count > 0);
- atomic_set(&iwb->write, write);
+ iwb->write = write;
iwb->idle = c;
return isowbuf_donewrite(iwb);
@@ -947,8 +948,8 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
unsigned tail, head, numbytes;
unsigned char *src;
- head = atomic_read(&inbuf->head);
- while (head != (tail = atomic_read(&inbuf->tail))) {
+ head = inbuf->head;
+ while (head != (tail = inbuf->tail)) {
gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
if (head > tail)
tail = RBUFSIZE;
@@ -956,7 +957,7 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
numbytes = tail - head;
gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
- if (atomic_read(&cs->mstate) == MS_LOCKED) {
+ if (cs->mstate == MS_LOCKED) {
gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
numbytes, src);
gigaset_if_receive(inbuf->cs, src, numbytes);
@@ -970,7 +971,7 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
if (head == RBUFSIZE)
head = 0;
gig_dbg(DEBUG_INTR, "setting head to %u", head);
- atomic_set(&inbuf->head, head);
+ inbuf->head = head;
}
}
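
The isowbuf_putbits() hunk above shifts freshly encoded bits into a partial byte kept at the current write position. For readers who want that idea in isolation, here is a hedged stand-alone C sketch of the bit-accumulation pattern; it is illustrative only (the names bitacc, put_bits and emit_byte are invented, and the driver's ring-buffer wraparound and locking are deliberately left out; it assumes at most 24 bits per call so the shift fits in 32 bits).

#include <stdint.h>
#include <stdio.h>

struct bitacc {
        uint32_t pending;       /* bits not yet emitted, LSB first */
        int nbits;              /* number of valid bits in 'pending' */
};

static void put_bits(struct bitacc *acc, uint32_t data, int nbits,
                     void (*emit)(uint8_t))
{
        acc->pending |= data << acc->nbits;
        acc->nbits += nbits;
        while (acc->nbits >= 8) {       /* emit every completed byte */
                emit(acc->pending & 0xff);
                acc->pending >>= 8;
                acc->nbits -= 8;
        }
}

static void emit_byte(uint8_t b)
{
        printf("%02x ", b);
}

int main(void)
{
        struct bitacc acc = { 0, 0 };

        put_bits(&acc, 0x7e, 8, emit_byte);     /* full byte: emitted at once */
        put_bits(&acc, 0x3, 2, emit_byte);      /* partial byte stays pending */
        put_bits(&acc, 0x3f, 6, emit_byte);     /* completes the byte: 0xff */
        putchar('\n');
        return 0;
}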
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index ea44302e6e7e..fceeb1d57682 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/poll.h>
+#include <linux/completion.h>
/* Version Information */
#define DRIVER_AUTHOR "Tilman Schmidt"
@@ -48,7 +49,7 @@ struct ser_cardstate {
struct platform_device dev;
struct tty_struct *tty;
atomic_t refcnt;
- struct mutex dead_mutex;
+ struct completion dead_cmp;
};
static struct platform_driver device_driver = {
@@ -240,7 +241,7 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
struct cmdbuf_t *cb;
unsigned long flags;
- gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
+ gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
DEBUG_TRANSCMD : DEBUG_LOCKCMD,
"CMD Transmit", len, buf);
@@ -498,7 +499,7 @@ static struct cardstate *cs_get(struct tty_struct *tty)
static void cs_put(struct cardstate *cs)
{
if (atomic_dec_and_test(&cs->hw.ser->refcnt))
- mutex_unlock(&cs->hw.ser->dead_mutex);
+ complete(&cs->hw.ser->dead_cmp);
}
/*
@@ -527,8 +528,8 @@ gigaset_tty_open(struct tty_struct *tty)
cs->dev = &cs->hw.ser->dev.dev;
cs->hw.ser->tty = tty;
- mutex_init(&cs->hw.ser->dead_mutex);
atomic_set(&cs->hw.ser->refcnt, 1);
+ init_completion(&cs->hw.ser->dead_cmp);
tty->disc_data = cs;
@@ -536,14 +537,13 @@ gigaset_tty_open(struct tty_struct *tty)
* startup system and notify the LL that we are ready to run
*/
if (startmode == SM_LOCKED)
- atomic_set(&cs->mstate, MS_LOCKED);
+ cs->mstate = MS_LOCKED;
if (!gigaset_start(cs)) {
tasklet_kill(&cs->write_tasklet);
goto error;
}
gig_dbg(DEBUG_INIT, "Startup of HLL done");
- mutex_lock(&cs->hw.ser->dead_mutex);
return 0;
error:
@@ -577,7 +577,7 @@ gigaset_tty_close(struct tty_struct *tty)
else {
/* wait for running methods to finish */
if (!atomic_dec_and_test(&cs->hw.ser->refcnt))
- mutex_lock(&cs->hw.ser->dead_mutex);
+ wait_for_completion(&cs->hw.ser->dead_cmp);
}
/* stop operations */
@@ -714,8 +714,8 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
return;
}
- tail = atomic_read(&inbuf->tail);
- head = atomic_read(&inbuf->head);
+ tail = inbuf->tail;
+ head = inbuf->head;
gig_dbg(DEBUG_INTR, "buffer state: %u -> %u, receive %u bytes",
head, tail, count);
@@ -742,7 +742,7 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
}
gig_dbg(DEBUG_INTR, "setting tail to %u", tail);
- atomic_set(&inbuf->tail, tail);
+ inbuf->tail = tail;
/* Everything was received .. Push data into handler */
gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
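
Several hunks above (struct inbuf_t, gigaset_isoc_input, gigaset_tty_receive) turn the head/tail buffer indices from atomic_t into plain int: each index is written by exactly one side, so no atomic read-modify-write is needed. Below is a hedged, generic userspace sketch of that single-producer/single-consumer pattern. It is not the driver's code (rbuf, rbuf_put and rbuf_get are invented names) and it ignores SMP memory-ordering details, which the driver covers with its own locking and tasklet serialization.

#include <stddef.h>
#include <stdio.h>

#define RBUF_SIZE 256

struct rbuf {
        int head;               /* next byte to consume; written only by the consumer */
        int tail;               /* next free slot; written only by the producer */
        unsigned char data[RBUF_SIZE];
};

/* producer side: store up to n bytes, return how many fit */
static size_t rbuf_put(struct rbuf *rb, const unsigned char *src, size_t n)
{
        size_t done = 0;

        while (done < n) {
                int next = (rb->tail + 1) % RBUF_SIZE;

                if (next == rb->head)   /* full: one slot is kept free */
                        break;
                rb->data[rb->tail] = src[done++];
                rb->tail = next;        /* publish after the byte is stored */
        }
        return done;
}

/* consumer side: fetch up to n bytes, return how many were available */
static size_t rbuf_get(struct rbuf *rb, unsigned char *dst, size_t n)
{
        size_t done = 0;

        while (done < n && rb->head != rb->tail) {
                dst[done++] = rb->data[rb->head];
                rb->head = (rb->head + 1) % RBUF_SIZE;
        }
        return done;
}

int main(void)
{
        struct rbuf rb = { 0, 0, { 0 } };
        unsigned char out[16];
        size_t n;

        rbuf_put(&rb, (const unsigned char *)"ATZ\r", 4);
        n = rbuf_get(&rb, out, sizeof(out));
        printf("read %zu bytes\n", n);
        return 0;
}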
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index ca4bee173cfb..77d20ab0cd4d 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -104,12 +104,17 @@ MODULE_DEVICE_TABLE(usb, gigaset_table);
* flags per packet.
*/
+/* functions called if a device of this driver is connected/disconnected */
static int gigaset_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void gigaset_disconnect(struct usb_interface *interface);
+/* functions called before/after suspend */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
+static int gigaset_resume(struct usb_interface *intf);
+static int gigaset_pre_reset(struct usb_interface *intf);
+
static struct gigaset_driver *driver = NULL;
-static struct cardstate *cardstate = NULL;
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver gigaset_usb_driver = {
@@ -117,12 +122,17 @@ static struct usb_driver gigaset_usb_driver = {
.probe = gigaset_probe,
.disconnect = gigaset_disconnect,
.id_table = gigaset_table,
+ .suspend = gigaset_suspend,
+ .resume = gigaset_resume,
+ .reset_resume = gigaset_resume,
+ .pre_reset = gigaset_pre_reset,
+ .post_reset = gigaset_resume,
};
struct usb_cardstate {
struct usb_device *udev; /* usb device pointer */
struct usb_interface *interface; /* interface for this device */
- atomic_t busy; /* bulk output in progress */
+ int busy; /* bulk output in progress */
/* Output buffer */
unsigned char *bulk_out_buffer;
@@ -314,7 +324,7 @@ static void gigaset_modem_fill(unsigned long data)
gig_dbg(DEBUG_OUTPUT, "modem_fill");
- if (atomic_read(&cs->hw.usb->busy)) {
+ if (cs->hw.usb->busy) {
gig_dbg(DEBUG_OUTPUT, "modem_fill: busy");
return;
}
@@ -361,18 +371,13 @@ static void gigaset_read_int_callback(struct urb *urb)
{
struct inbuf_t *inbuf = urb->context;
struct cardstate *cs = inbuf->cs;
- int resubmit = 0;
+ int status = urb->status;
int r;
unsigned numbytes;
unsigned char *src;
unsigned long flags;
- if (!urb->status) {
- if (!cs->connected) {
- err("%s: disconnected", __func__); /* should never happen */
- return;
- }
-
+ if (!status) {
numbytes = urb->actual_length;
if (numbytes) {
@@ -389,28 +394,26 @@ static void gigaset_read_int_callback(struct urb *urb)
}
} else
gig_dbg(DEBUG_INTR, "Received zero block length");
- resubmit = 1;
} else {
/* The urb might have been killed. */
- gig_dbg(DEBUG_ANY, "%s - nonzero read bulk status received: %d",
- __func__, urb->status);
- if (urb->status != -ENOENT) { /* not killed */
- if (!cs->connected) {
- err("%s: disconnected", __func__); /* should never happen */
- return;
- }
- resubmit = 1;
- }
+ gig_dbg(DEBUG_ANY, "%s - nonzero status received: %d",
+ __func__, status);
+ if (status == -ENOENT || status == -ESHUTDOWN)
+ /* killed or endpoint shutdown: don't resubmit */
+ return;
}
- if (resubmit) {
- spin_lock_irqsave(&cs->lock, flags);
- r = cs->connected ? usb_submit_urb(urb, GFP_ATOMIC) : -ENODEV;
+ /* resubmit URB */
+ spin_lock_irqsave(&cs->lock, flags);
+ if (!cs->connected) {
spin_unlock_irqrestore(&cs->lock, flags);
- if (r)
- dev_err(cs->dev, "error %d when resubmitting urb.\n",
- -r);
+ err("%s: disconnected", __func__);
+ return;
}
+ r = usb_submit_urb(urb, GFP_ATOMIC);
+ spin_unlock_irqrestore(&cs->lock, flags);
+ if (r)
+ dev_err(cs->dev, "error %d resubmitting URB\n", -r);
}
@@ -418,19 +421,28 @@ static void gigaset_read_int_callback(struct urb *urb)
static void gigaset_write_bulk_callback(struct urb *urb)
{
struct cardstate *cs = urb->context;
+ int status = urb->status;
unsigned long flags;
- if (urb->status)
+ switch (status) {
+ case 0: /* normal completion */
+ break;
+ case -ENOENT: /* killed */
+ gig_dbg(DEBUG_ANY, "%s: killed", __func__);
+ cs->hw.usb->busy = 0;
+ return;
+ default:
dev_err(cs->dev, "bulk transfer failed (status %d)\n",
- -urb->status);
+ -status);
/* That's all we can do. Communication problems
are handled by timeouts or network protocols. */
+ }
spin_lock_irqsave(&cs->lock, flags);
if (!cs->connected) {
err("%s: not connected", __func__);
} else {
- atomic_set(&cs->hw.usb->busy, 0);
+ cs->hw.usb->busy = 0;
tasklet_schedule(&cs->write_tasklet);
}
spin_unlock_irqrestore(&cs->lock, flags);
@@ -478,14 +490,14 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
cb->offset += count;
cb->len -= count;
- atomic_set(&ucs->busy, 1);
+ ucs->busy = 1;
spin_lock_irqsave(&cs->lock, flags);
status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV;
spin_unlock_irqrestore(&cs->lock, flags);
if (status) {
- atomic_set(&ucs->busy, 0);
+ ucs->busy = 0;
err("could not submit urb (error %d)\n",
-status);
cb->len = 0; /* skip urb => remove cb+wakeup
@@ -504,7 +516,7 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
struct cmdbuf_t *cb;
unsigned long flags;
- gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ?
+ gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
DEBUG_TRANSCMD : DEBUG_LOCKCMD,
"CMD Transmit", len, buf);
@@ -641,7 +653,7 @@ static int write_modem(struct cardstate *cs)
count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
skb_pull(bcs->tx_skb, count);
- atomic_set(&ucs->busy, 1);
+ ucs->busy = 1;
gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count);
spin_lock_irqsave(&cs->lock, flags);
@@ -659,7 +671,7 @@ static int write_modem(struct cardstate *cs)
if (ret) {
err("could not submit urb (error %d)\n", -ret);
- atomic_set(&ucs->busy, 0);
+ ucs->busy = 0;
}
if (!bcs->tx_skb->len) {
@@ -680,53 +692,44 @@ static int gigaset_probe(struct usb_interface *interface,
{
int retval;
struct usb_device *udev = interface_to_usbdev(interface);
- unsigned int ifnum;
- struct usb_host_interface *hostif;
+ struct usb_host_interface *hostif = interface->cur_altsetting;
struct cardstate *cs = NULL;
struct usb_cardstate *ucs = NULL;
struct usb_endpoint_descriptor *endpoint;
int buffer_size;
- int alt;
- gig_dbg(DEBUG_ANY,
- "%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
- __func__, le16_to_cpu(udev->descriptor.idVendor),
- le16_to_cpu(udev->descriptor.idProduct));
-
- retval = -ENODEV; //FIXME
+ gig_dbg(DEBUG_ANY, "%s: Check if device matches ...", __func__);
/* See if the device offered us matches what we can accept */
if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) ||
- (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID))
+ (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)) {
+ gig_dbg(DEBUG_ANY, "device ID (0x%x, 0x%x) not for me - skip",
+ le16_to_cpu(udev->descriptor.idVendor),
+ le16_to_cpu(udev->descriptor.idProduct));
return -ENODEV;
-
- /* this starts to become ascii art... */
- hostif = interface->cur_altsetting;
- alt = hostif->desc.bAlternateSetting;
- ifnum = hostif->desc.bInterfaceNumber; // FIXME ?
-
- if (alt != 0 || ifnum != 0) {
- dev_warn(&udev->dev, "ifnum %d, alt %d\n", ifnum, alt);
+ }
+ if (hostif->desc.bInterfaceNumber != 0) {
+ gig_dbg(DEBUG_ANY, "interface %d not for me - skip",
+ hostif->desc.bInterfaceNumber);
+ return -ENODEV;
+ }
+ if (hostif->desc.bAlternateSetting != 0) {
+ dev_notice(&udev->dev, "unsupported altsetting %d - skip",
+ hostif->desc.bAlternateSetting);
return -ENODEV;
}
-
- /* Reject application specific intefaces
- *
- */
if (hostif->desc.bInterfaceClass != 255) {
- dev_info(&udev->dev,
- "%s: Device matched but iface_desc[%d]->bInterfaceClass==%d!\n",
- __func__, ifnum, hostif->desc.bInterfaceClass);
+ dev_notice(&udev->dev, "unsupported interface class %d - skip",
+ hostif->desc.bInterfaceClass);
return -ENODEV;
}
dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
- cs = gigaset_getunassignedcs(driver);
- if (!cs) {
- dev_warn(&udev->dev, "no free cardstate\n");
+ /* allocate memory for our device state and initialize it */

+ cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
+ if (!cs)
return -ENODEV;
- }
ucs = cs->hw.usb;
/* save off device structure ptrs for later use */
@@ -759,7 +762,7 @@ static int gigaset_probe(struct usb_interface *interface,
endpoint = &hostif->endpoint[1].desc;
- atomic_set(&ucs->busy, 0);
+ ucs->busy = 0;
ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ucs->read_urb) {
@@ -792,7 +795,7 @@ static int gigaset_probe(struct usb_interface *interface,
/* tell common part that the device is ready */
if (startmode == SM_LOCKED)
- atomic_set(&cs->mstate, MS_LOCKED);
+ cs->mstate = MS_LOCKED;
if (!gigaset_start(cs)) {
tasklet_kill(&cs->write_tasklet);
@@ -813,7 +816,7 @@ error:
usb_put_dev(ucs->udev);
ucs->udev = NULL;
ucs->interface = NULL;
- gigaset_unassign(cs);
+ gigaset_freecs(cs);
return retval;
}
@@ -824,6 +827,9 @@ static void gigaset_disconnect(struct usb_interface *interface)
cs = usb_get_intfdata(interface);
ucs = cs->hw.usb;
+
+ dev_info(cs->dev, "disconnecting Gigaset USB adapter\n");
+
usb_kill_urb(ucs->read_urb);
gigaset_stop(cs);
@@ -831,7 +837,7 @@ static void gigaset_disconnect(struct usb_interface *interface)
usb_set_intfdata(interface, NULL);
tasklet_kill(&cs->write_tasklet);
- usb_kill_urb(ucs->bulk_out_urb); /* FIXME: only if active? */
+ usb_kill_urb(ucs->bulk_out_urb);
kfree(ucs->bulk_out_buffer);
usb_free_urb(ucs->bulk_out_urb);
@@ -844,7 +850,53 @@ static void gigaset_disconnect(struct usb_interface *interface)
ucs->interface = NULL;
ucs->udev = NULL;
cs->dev = NULL;
- gigaset_unassign(cs);
+ gigaset_freecs(cs);
+}
+
+/* gigaset_suspend
+ * This function is called before the USB connection is suspended or reset.
+ */
+static int gigaset_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+
+ /* stop activity */
+ cs->connected = 0; /* prevent rescheduling */
+ usb_kill_urb(cs->hw.usb->read_urb);
+ tasklet_kill(&cs->write_tasklet);
+ usb_kill_urb(cs->hw.usb->bulk_out_urb);
+
+ gig_dbg(DEBUG_SUSPEND, "suspend complete");
+ return 0;
+}
+
+/* gigaset_resume
+ * This function is called after the USB connection has been resumed or reset.
+ */
+static int gigaset_resume(struct usb_interface *intf)
+{
+ struct cardstate *cs = usb_get_intfdata(intf);
+ int rc;
+
+ /* resubmit interrupt URB */
+ cs->connected = 1;
+ rc = usb_submit_urb(cs->hw.usb->read_urb, GFP_KERNEL);
+ if (rc) {
+ dev_err(cs->dev, "Could not submit read URB (error %d)\n", -rc);
+ return rc;
+ }
+
+ gig_dbg(DEBUG_SUSPEND, "resume complete");
+ return 0;
+}
+
+/* gigaset_pre_reset
+ * This function is called before the USB connection is reset.
+ */
+static int gigaset_pre_reset(struct usb_interface *intf)
+{
+ /* same as suspend */
+ return gigaset_suspend(intf, PMSG_ON);
}
static const struct gigaset_ops ops = {
@@ -880,11 +932,6 @@ static int __init usb_gigaset_init(void)
&ops, THIS_MODULE)) == NULL)
goto error;
- /* allocate memory for our device state and intialize it */
- cardstate = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
- if (!cardstate)
- goto error;
-
/* register this driver with the USB subsystem */
result = usb_register(&gigaset_usb_driver);
if (result < 0) {
@@ -897,9 +944,7 @@ static int __init usb_gigaset_init(void)
info(DRIVER_DESC);
return 0;
-error: if (cardstate)
- gigaset_freecs(cardstate);
- cardstate = NULL;
+error:
if (driver)
gigaset_freedriver(driver);
driver = NULL;
@@ -913,11 +958,16 @@ error: if (cardstate)
*/
static void __exit usb_gigaset_exit(void)
{
+ int i;
+
gigaset_blockdriver(driver); /* => probe will fail
* => no gigaset_start any more
*/
- gigaset_shutdown(cardstate);
+ /* stop all connected devices */
+ for (i = 0; i < driver->minors; i++)
+ gigaset_shutdown(driver->cs + i);
+
/* from now on, no isdn callback should be possible */
/* deregister this driver with the USB subsystem */
@@ -925,8 +975,6 @@ static void __exit usb_gigaset_exit(void)
/* this will call the disconnect-callback */
/* from now on, no disconnect/probe callback should be running */
- gigaset_freecs(cardstate);
- cardstate = NULL;
gigaset_freedriver(driver);
driver = NULL;
}
diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c
index 0db9cc661e28..84318ec8d13e 100644
--- a/drivers/isdn/hardware/eicon/debug.c
+++ b/drivers/isdn/hardware/eicon/debug.c
@@ -1188,7 +1188,7 @@ int SuperTraceASSIGN (void* AdapterHandle, byte* data) {
if ((features[0] & DIVA_XDI_EXTENDED_FEATURES_VALID) &&
(features[0] & DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA)) {
- dword rx_dma_magic;
+ dword uninitialized_var(rx_dma_magic);
if ((pC->dma_handle = diva_get_dma_descriptor (pC->request, &rx_dma_magic)) >= 0) {
pC->xbuffer[0] = LLI;
pC->xbuffer[1] = 8;
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
index ffa2afa77c2f..1403a5458e68 100644
--- a/drivers/isdn/hardware/eicon/diva.c
+++ b/drivers/isdn/hardware/eicon/diva.c
@@ -515,12 +515,11 @@ diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
irqreturn_t diva_os_irq_wrapper(int irq, void *context)
{
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) context;
+ diva_os_xdi_adapter_t *a = context;
diva_xdi_clear_interrupts_proc_t clear_int_proc;
- if (!a || !a->xdi_adapter.diva_isr_handler) {
+ if (!a || !a->xdi_adapter.diva_isr_handler)
return IRQ_NONE;
- }
if ((clear_int_proc = a->clear_interrupts_proc)) {
(*clear_int_proc) (a);
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index b9177ca4369a..1ff98e7eb794 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -9027,7 +9027,7 @@ static byte AddInfo(byte **add_i,
/* facility is a nested structure */
/* FTY can be more than once */
- if(esc_chi[0] && !(esc_chi[esc_chi[0]])&0x7f )
+ if (esc_chi[0] && !(esc_chi[esc_chi[0]] & 0x7f))
{
add_i[0] = (byte *)"\x02\x02\x00"; /* use neither b nor d channel */
}
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index 035d158779df..0f1db1f669b2 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -263,11 +263,7 @@ hdlc_empty_fifo(struct BCState *bcs, int count)
outl(idx, cs->hw.avm.cfg_reg + 4);
while (cnt < count) {
#ifdef __powerpc__
-#ifdef CONFIG_APUS
- *ptr++ = in_le32((unsigned *)(cs->hw.avm.isac +_IO_BASE));
-#else
*ptr++ = in_be32((unsigned *)(cs->hw.avm.isac +_IO_BASE));
-#endif /* CONFIG_APUS */
#else
*ptr++ = inl(cs->hw.avm.isac);
#endif /* __powerpc__ */
@@ -328,11 +324,7 @@ hdlc_fill_fifo(struct BCState *bcs)
if (cs->subtyp == AVM_FRITZ_PCI) {
while (cnt<count) {
#ifdef __powerpc__
-#ifdef CONFIG_APUS
- out_le32((unsigned *)(cs->hw.avm.isac +_IO_BASE), *ptr++);
-#else
out_be32((unsigned *)(cs->hw.avm.isac +_IO_BASE), *ptr++);
-#endif /* CONFIG_APUS */
#else
outl(*ptr++, cs->hw.avm.isac);
#endif /* __powerpc__ */
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 9cb6e5021adb..133eb18e65cc 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1917,7 +1917,6 @@ isdn_tty_modem_init(void)
info->owner = THIS_MODULE;
#endif
spin_lock_init(&info->readlock);
- init_MUTEX(&info->write_sem);
sprintf(info->last_cause, "0000");
sprintf(info->last_num, "none");
info->last_dir = 0;
diff --git a/drivers/isdn/i4l/isdn_ttyfax.c b/drivers/isdn/i4l/isdn_ttyfax.c
index a943d078bacc..f93de4a30355 100644
--- a/drivers/isdn/i4l/isdn_ttyfax.c
+++ b/drivers/isdn/i4l/isdn_ttyfax.c
@@ -834,7 +834,7 @@ isdn_tty_cmd_FCLASS2(char **p, modem_info * info)
char *rp = &f->resolution;
p[0] += 2;
- if (!info->faxonline & 1) /* not outgoing connection */
+ if (!(info->faxonline & 1)) /* not outgoing connection */
PARSE_ERROR1;
for (i = 0; (((*p[0] >= '0') && (*p[0] <= '9')) || (*p[0] == ',')) && (i < 4); i++) {
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 82d957bde299..bf7997abc4ac 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1302,7 +1302,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_DIAL:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
@@ -1328,7 +1328,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_ACCEPTD:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ICN_BCH) {
a = c->arg + 1;
@@ -1348,7 +1348,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_ACCEPTB:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ICN_BCH) {
a = c->arg + 1;
@@ -1366,7 +1366,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_HANGUP:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ICN_BCH) {
a = c->arg + 1;
@@ -1375,7 +1375,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_SETEAZ:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
@@ -1391,7 +1391,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_CLREAZ:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
@@ -1405,7 +1405,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_SETL2:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
if ((c->arg & 255) < ICN_BCH) {
a = c->arg;
@@ -1424,7 +1424,7 @@ icn_command(isdn_ctrl * c, icn_card * card)
}
break;
case ISDN_CMD_SETL3:
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
return 0;
default:
@@ -1471,7 +1471,7 @@ if_writecmd(const u_char __user *buf, int len, int id, int channel)
icn_card *card = icn_findcard(id);
if (card) {
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
return (icn_writecmd(buf, len, 1, card));
}
@@ -1486,7 +1486,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
icn_card *card = icn_findcard(id);
if (card) {
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
return (icn_readstatus(buf, len, card));
}
@@ -1501,7 +1501,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
icn_card *card = icn_findcard(id);
if (card) {
- if (!card->flags & ICN_FLAGS_RUNNING)
+ if (!(card->flags & ICN_FLAGS_RUNNING))
return -ENODEV;
return (icn_sendbuf(channel, ack, skb, card));
}
diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
index bb92e3cd9334..655ef9a3f4df 100644
--- a/drivers/isdn/isdnloop/isdnloop.c
+++ b/drivers/isdn/isdnloop/isdnloop.c
@@ -1184,7 +1184,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
}
break;
case ISDN_CMD_DIAL:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
@@ -1210,7 +1210,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
}
break;
case ISDN_CMD_ACCEPTD:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ISDNLOOP_BCH) {
a = c->arg + 1;
@@ -1238,7 +1238,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
}
break;
case ISDN_CMD_ACCEPTB:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ISDNLOOP_BCH) {
a = c->arg + 1;
@@ -1264,7 +1264,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
i = isdnloop_writecmd(cbuf, strlen(cbuf), 0, card);
break;
case ISDN_CMD_HANGUP:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (c->arg < ISDNLOOP_BCH) {
a = c->arg + 1;
@@ -1273,7 +1273,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
}
break;
case ISDN_CMD_SETEAZ:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if (card->leased)
break;
@@ -1303,7 +1303,7 @@ isdnloop_command(isdn_ctrl * c, isdnloop_card * card)
}
break;
case ISDN_CMD_SETL2:
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
if ((c->arg & 255) < ISDNLOOP_BCH) {
a = c->arg;
@@ -1395,7 +1395,7 @@ if_readstatus(u_char __user *buf, int len, int id, int channel)
isdnloop_card *card = isdnloop_findcard(id);
if (card) {
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
return (isdnloop_readstatus(buf, len, card));
}
@@ -1410,7 +1410,7 @@ if_sendbuf(int id, int channel, int ack, struct sk_buff *skb)
isdnloop_card *card = isdnloop_findcard(id);
if (card) {
- if (!card->flags & ISDNLOOP_FLAGS_RUNNING)
+ if (!(card->flags & ISDNLOOP_FLAGS_RUNNING))
return -ENODEV;
/* ack request stored in skb scratch area */
*(skb->head) = ack;
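
The icn, isdnloop, isdn_ttyfax and eicon hunks above all fix the same C pitfall: the ! operator binds tighter than &, so an expression like "!card->flags & ICN_FLAGS_RUNNING" negates the whole flags word first and only then applies the mask, which rarely tests what was intended. A tiny stand-alone demonstration follows; the flag value is invented for illustration and merely stands in for ICN_FLAGS_RUNNING.

#include <stdio.h>

#define FLAG_RUNNING 0x04       /* invented value, not the real ICN_FLAGS_RUNNING */

int main(void)
{
        unsigned int flags = 0;         /* the card is NOT running */

        /* buggy form: !flags is 1, 1 & 0x04 is 0 -> check never fires */
        printf("buggy check fires: %d\n", !flags & FLAG_RUNNING);

        /* fixed form: flags & 0x04 is 0, negated -> 1 -> check fires as intended */
        printf("fixed check fires: %d\n", !(flags & FLAG_RUNNING));
        return 0;
}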
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ec568fa1c6cc..851a3b01781e 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -39,15 +39,6 @@ config LEDS_SPITZ
This option enables support for the LEDs on Sharp Zaurus
SL-Cxx00 series (C1000, C3000, C3100).
-config LEDS_IXP4XX
- tristate "LED Support for GPIO connected LEDs on IXP4XX processors"
- depends on LEDS_CLASS && ARCH_IXP4XX
- help
- This option enables support for the LEDs connected to GPIO
- outputs of the Intel IXP4XX processors. To be useful the
- particular board must have LEDs and they must be connected
- to the GPIO lines. If unsure, say Y.
-
config LEDS_TOSA
tristate "LED Support for the Sharp SL-6000 series"
depends on LEDS_CLASS && PXA_SHARPSL
@@ -100,6 +91,13 @@ config LEDS_COBALT_RAQ
help
This option enables support for the Cobalt Raq series LEDs.
+config LEDS_HP6XX
+ tristate "LED Support for the HP Jornada 6xx"
+ depends on LEDS_CLASS && SH_HP6XX
+ help
+ This option enables LED support for the handheld
+ HP Jornada 620/660/680/690.
+
config LEDS_GPIO
tristate "LED Support for GPIO connected LEDs"
depends on LEDS_CLASS && GENERIC_GPIO
@@ -114,6 +112,32 @@ config LEDS_CM_X270
help
This option enables support for the CM-X270 LEDs.
+config LEDS_CLEVO_MAIL
+ tristate "Mail LED on Clevo notebook (EXPERIMENTAL)"
+ depends on LEDS_CLASS && X86 && SERIO_I8042 && DMI && EXPERIMENTAL
+ help
+ This driver makes the mail LED accessible from userspace
+ programs through the leds subsystem. This LED has three
+ known modes: off, blink at 0.5Hz and blink at 1Hz.
+
+ The driver supports two kinds of interfaces: using ledtrig-timer
+ or through /sys/class/leds/clevo::mail/brightness. As this LED
+ cannot change its brightness, it blinks instead. The brightness
+ value 0 means off, 1..127 means blink at 0.5Hz and 128..255 means
+ blink at 1Hz.
+
+ This module can drive the mail LED for the following notebooks:
+
+ Clevo D410J
+ Clevo D410V
+ Clevo D400V/D470V (not tested, but might work)
+ Clevo M540N
+ Clevo M5x0N (not tested, but might work)
+ Positivo Mobile (Clevo M5x0V)
+
+ To compile this driver as a module, choose M here: the
+ module will be called leds-clevo-mail.
+
comment "LED Triggers"
config LEDS_TRIGGERS
@@ -128,7 +152,11 @@ config LEDS_TRIGGER_TIMER
depends on LEDS_TRIGGERS
help
This allows LEDs to be controlled by a programmable timer
- via sysfs. If unsure, say Y.
+ via sysfs. Some LED hardware can be programmed to start
+ blinking the LED without any further software interaction.
+ For more details read Documentation/leds-class.txt.
+
+ If unsure, say Y.
config LEDS_TRIGGER_IDE_DISK
bool "LED IDE Disk Trigger"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index a60de1b46c2c..bc6afc8dcb27 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
obj-$(CONFIG_LEDS_CORGI) += leds-corgi.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_SPITZ) += leds-spitz.o
-obj-$(CONFIG_LEDS_IXP4XX) += leds-ixp4xx-gpio.o
obj-$(CONFIG_LEDS_TOSA) += leds-tosa.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_AMS_DELTA) += leds-ams-delta.o
@@ -19,6 +18,8 @@ obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_CM_X270) += leds-cm-x270.o
+obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
+obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
# LED Triggers
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 64c66b3769c9..4a938780dfc3 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -137,12 +137,14 @@ err_out:
EXPORT_SYMBOL_GPL(led_classdev_register);
/**
- * led_classdev_unregister - unregisters a object of led_properties class.
+ * __led_classdev_unregister - unregisters a object of led_properties class.
* @led_cdev: the led device to unregister
+ * @suspended: indicates whether system-wide suspend or resume is in progress
*
* Unregisters a previously registered via led_classdev_register object.
*/
-void led_classdev_unregister(struct led_classdev *led_cdev)
+void __led_classdev_unregister(struct led_classdev *led_cdev,
+ bool suspended)
{
device_remove_file(led_cdev->dev, &dev_attr_brightness);
#ifdef CONFIG_LEDS_TRIGGERS
@@ -153,13 +155,16 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
up_write(&led_cdev->trigger_lock);
#endif
- device_unregister(led_cdev->dev);
+ if (suspended)
+ device_pm_schedule_removal(led_cdev->dev);
+ else
+ device_unregister(led_cdev->dev);
down_write(&leds_list_lock);
list_del(&led_cdev->node);
up_write(&leds_list_lock);
}
-EXPORT_SYMBOL_GPL(led_classdev_unregister);
+EXPORT_SYMBOL_GPL(__led_classdev_unregister);
static int __init leds_init(void)
{
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index 599878c8e714..9e3077463d84 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -37,42 +37,42 @@ static void ams_delta_led_set(struct led_classdev *led_cdev,
static struct ams_delta_led ams_delta_leds[] = {
{
.cdev = {
- .name = "ams-delta:camera",
+ .name = "ams-delta::camera",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_CAMERA,
},
{
.cdev = {
- .name = "ams-delta:advert",
+ .name = "ams-delta::advert",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_ADVERT,
},
{
.cdev = {
- .name = "ams-delta:email",
+ .name = "ams-delta::email",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_EMAIL,
},
{
.cdev = {
- .name = "ams-delta:handsfree",
+ .name = "ams-delta::handsfree",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_HANDSFREE,
},
{
.cdev = {
- .name = "ams-delta:voicemail",
+ .name = "ams-delta::voicemail",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_VOICEMAIL,
},
{
.cdev = {
- .name = "ams-delta:voice",
+ .name = "ams-delta::voice",
.brightness_set = ams_delta_led_set,
},
.bitmask = AMS_DELTA_LATCH1_LED_VOICE,
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
new file mode 100644
index 000000000000..6c3d33b8e383
--- /dev/null
+++ b/drivers/leds/leds-clevo-mail.c
@@ -0,0 +1,219 @@
+
+#include <linux/module.h>
+
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/leds.h>
+
+#include <linux/io.h>
+#include <linux/dmi.h>
+
+#include <linux/i8042.h>
+
+#define CLEVO_MAIL_LED_OFF 0x0084
+#define CLEVO_MAIL_LED_BLINK_1HZ 0x008A
+#define CLEVO_MAIL_LED_BLINK_0_5HZ 0x0083
+
+MODULE_AUTHOR("Márton Németh <nm127@freemail.hu>");
+MODULE_DESCRIPTION("Clevo mail LED driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int __initdata nodetect;
+module_param_named(nodetect, nodetect, bool, 0);
+MODULE_PARM_DESC(nodetect, "Skip DMI hardware detection");
+
+static struct platform_device *pdev;
+
+static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
+{
+ printk(KERN_INFO KBUILD_MODNAME ": '%s' found\n", id->ident);
+ return 1;
+}
+
+/*
+ * struct mail_led_whitelist - List of known good models
+ *
+ * Contains the known good models this driver is compatible with.
+ * When adding a new model try to be as strict as possible. This
+ * makes it possible to keep the false positives (the model is
+ * detected as working, but in reality it is not) as low as
+ * possible.
+ */
+static struct dmi_system_id __initdata mail_led_whitelist[] = {
+ {
+ .callback = clevo_mail_led_dmi_callback,
+ .ident = "Clevo D410J",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "VIA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K8N800"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VT8204B")
+ }
+ },
+ {
+ .callback = clevo_mail_led_dmi_callback,
+ .ident = "Clevo M5x0N",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "M5x0N")
+ }
+ },
+ {
+ .callback = clevo_mail_led_dmi_callback,
+ .ident = "Positivo Mobile",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "CLEVO Co. "),
+ DMI_MATCH(DMI_BOARD_NAME, "M5X0V "),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Positivo Mobile"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "VT6198")
+ }
+ },
+ {
+ .callback = clevo_mail_led_dmi_callback,
+ .ident = "Clevo D410V",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Clevo, Co."),
+ DMI_MATCH(DMI_BOARD_NAME, "D400V/D470V"),
+ DMI_MATCH(DMI_BOARD_VERSION, "SS78B"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Rev. A1")
+ }
+ },
+ { }
+};
+
+static void clevo_mail_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value == LED_OFF)
+ i8042_command(NULL, CLEVO_MAIL_LED_OFF);
+ else if (value <= LED_HALF)
+ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_0_5HZ);
+ else
+ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_1HZ);
+
+}
+
+static int clevo_mail_led_blink(struct led_classdev *led_cdev,
+ unsigned long* delay_on,
+ unsigned long* delay_off)
+{
+ int status = -EINVAL;
+
+ if (*delay_on == 0 /* ms */ && *delay_off == 0 /* ms */) {
+ /* Special case: the leds subsystem requested us to
+ * choose one user-friendly blink rate for the LED and
+ * start it. Let's blink the LED slowly (0.5Hz).
+ */
+ *delay_on = 1000; /* ms */
+ *delay_off = 1000; /* ms */
+ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_0_5HZ);
+ status = 0;
+
+ } else if (*delay_on == 500 /* ms */ && *delay_off == 500 /* ms */) {
+ /* blink the led with 1Hz */
+ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_1HZ);
+ status = 0;
+
+ } else if (*delay_on == 1000 /* ms */ && *delay_off == 1000 /* ms */) {
+ /* blink the led with 0.5Hz */
+ i8042_command(NULL, CLEVO_MAIL_LED_BLINK_0_5HZ);
+ status = 0;
+
+ } else {
+ printk(KERN_DEBUG KBUILD_MODNAME
+ ": clevo_mail_led_blink(..., %lu, %lu),"
+ " returning -EINVAL (unsupported)\n",
+ *delay_on, *delay_off);
+ }
+
+ return status;
+}
+
+static struct led_classdev clevo_mail_led = {
+ .name = "clevo::mail",
+ .brightness_set = clevo_mail_led_set,
+ .blink_set = clevo_mail_led_blink,
+};
+
+static int __init clevo_mail_led_probe(struct platform_device *pdev)
+{
+ return led_classdev_register(&pdev->dev, &clevo_mail_led);
+}
+
+static int clevo_mail_led_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&clevo_mail_led);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int clevo_mail_led_suspend(struct platform_device *dev,
+ pm_message_t state)
+{
+ led_classdev_suspend(&clevo_mail_led);
+ return 0;
+}
+
+static int clevo_mail_led_resume(struct platform_device *dev)
+{
+ led_classdev_resume(&clevo_mail_led);
+ return 0;
+}
+#else
+#define clevo_mail_led_suspend NULL
+#define clevo_mail_led_resume NULL
+#endif
+
+static struct platform_driver clevo_mail_led_driver = {
+ .probe = clevo_mail_led_probe,
+ .remove = clevo_mail_led_remove,
+ .suspend = clevo_mail_led_suspend,
+ .resume = clevo_mail_led_resume,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+static int __init clevo_mail_led_init(void)
+{
+ int error = 0;
+ int count = 0;
+
+ /* Check with the help of DMI if we are running on supported hardware */
+ if (!nodetect) {
+ count = dmi_check_system(mail_led_whitelist);
+ } else {
+ count = 1;
+ printk(KERN_ERR KBUILD_MODNAME ": Skipping DMI detection. "
+ "If the driver works on your hardware please "
+ "report model and the output of dmidecode in tracker "
+ "at http://sourceforge.net/projects/clevo-mailled/\n");
+ }
+
+ if (!count)
+ return -ENODEV;
+
+ pdev = platform_device_register_simple(KBUILD_MODNAME, -1, NULL, 0);
+ if (!IS_ERR(pdev)) {
+ error = platform_driver_probe(&clevo_mail_led_driver,
+ clevo_mail_led_probe);
+ if (error) {
+ printk(KERN_ERR KBUILD_MODNAME
+ ": Can't probe platform driver\n");
+ platform_device_unregister(pdev);
+ }
+ } else
+ error = PTR_ERR(pdev);
+
+ return error;
+}
+
+static void __exit clevo_mail_led_exit(void)
+{
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&clevo_mail_led_driver);
+
+ clevo_mail_led_set(NULL, LED_OFF);
+}
+
+module_init(clevo_mail_led_init);
+module_exit(clevo_mail_led_exit);
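
As described in the Kconfig help and implemented in clevo_mail_led_set() above, the brightness value written to this LED selects a blink mode rather than an intensity. Below is a hedged userspace sketch of driving it through the standard leds-class sysfs file; it assumes the default /sys/class/leds location and the clevo::mail name registered above, and is an illustration rather than part of the driver. The ledtrig-timer interface handled by clevo_mail_led_blink() is the alternative way to pick a blink rate.

#include <stdio.h>

/* brightness -> blink mapping from the help text above:
 * 0 = off, 1..127 = blink at 0.5Hz, 128..255 = blink at 1Hz */
static int set_mail_led(int brightness)
{
        FILE *f = fopen("/sys/class/leds/clevo::mail/brightness", "w");

        if (!f) {
                perror("clevo::mail");
                return -1;
        }
        fprintf(f, "%d\n", brightness);
        return fclose(f);
}

int main(void)
{
        set_mail_led(64);       /* slow blink (0.5Hz) */
        set_mail_led(200);      /* fast blink (1Hz) */
        set_mail_led(0);        /* off */
        return 0;
}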
diff --git a/drivers/leds/leds-corgi.c b/drivers/leds/leds-corgi.c
index cf1dcd719a28..e45f6c4b59ba 100644
--- a/drivers/leds/leds-corgi.c
+++ b/drivers/leds/leds-corgi.c
@@ -38,13 +38,13 @@ static void corgiled_green_set(struct led_classdev *led_cdev, enum led_brightnes
}
static struct led_classdev corgi_amber_led = {
- .name = "corgi:amber",
+ .name = "corgi:amber:charge",
.default_trigger = "sharpsl-charge",
.brightness_set = corgiled_amber_set,
};
static struct led_classdev corgi_green_led = {
- .name = "corgi:green",
+ .name = "corgi:green:mail",
.default_trigger = "nand-disk",
.brightness_set = corgiled_green_set,
};
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 99bc50059d35..6c0a9c4761ee 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -85,7 +85,7 @@ static int gpio_led_probe(struct platform_device *pdev)
led_dat->can_sleep = gpio_cansleep(cur_led->gpio);
led_dat->active_low = cur_led->active_low;
led_dat->cdev.brightness_set = gpio_led_set;
- led_dat->cdev.brightness = cur_led->active_low ? LED_FULL : LED_OFF;
+ led_dat->cdev.brightness = LED_OFF;
ret = gpio_request(led_dat->gpio, led_dat->cdev.name);
if (ret < 0)
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
new file mode 100644
index 000000000000..82d4ec384797
--- /dev/null
+++ b/drivers/leds/leds-hp6xx.c
@@ -0,0 +1,120 @@
+/*
+ * LED driver
+ * For the HP Jornada 620/660/680/690 handhelds
+ *
+ * Copyright 2008 Kristoffer Ericson <kristoffer.ericson@gmail.com>
+ * this driver is based on leds-spitz.c by Richard Purdie.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <asm/hd64461.h>
+#include <asm/hp6xx.h>
+
+static void hp6xxled_green_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ u8 v8;
+
+ v8 = inb(PKDR);
+ if (value)
+ outb(v8 & (~PKDR_LED_GREEN), PKDR);
+ else
+ outb(v8 | PKDR_LED_GREEN, PKDR);
+}
+
+static void hp6xxled_red_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ u16 v16;
+
+ v16 = inw(HD64461_GPBDR);
+ if (value)
+ outw(v16 & (~HD64461_GPBDR_LED_RED), HD64461_GPBDR);
+ else
+ outw(v16 | HD64461_GPBDR_LED_RED, HD64461_GPBDR);
+}
+
+static struct led_classdev hp6xx_red_led = {
+ .name = "hp6xx:red",
+ .default_trigger = "hp6xx-charge",
+ .brightness_set = hp6xxled_red_set,
+};
+
+static struct led_classdev hp6xx_green_led = {
+ .name = "hp6xx:green",
+ .default_trigger = "ide-disk",
+ .brightness_set = hp6xxled_green_set,
+};
+
+#ifdef CONFIG_PM
+static int hp6xxled_suspend(struct platform_device *dev, pm_message_t state)
+{
+ led_classdev_suspend(&hp6xx_red_led);
+ led_classdev_suspend(&hp6xx_green_led);
+ return 0;
+}
+
+static int hp6xxled_resume(struct platform_device *dev)
+{
+ led_classdev_resume(&hp6xx_red_led);
+ led_classdev_resume(&hp6xx_green_led);
+ return 0;
+}
+#endif
+
+static int hp6xxled_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = led_classdev_register(&pdev->dev, &hp6xx_red_led);
+ if (ret < 0)
+ return ret;
+
+ ret = led_classdev_register(&pdev->dev, &hp6xx_green_led);
+ if (ret < 0)
+ led_classdev_unregister(&hp6xx_red_led);
+
+ return ret;
+}
+
+static int hp6xxled_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&hp6xx_red_led);
+ led_classdev_unregister(&hp6xx_green_led);
+
+ return 0;
+}
+
+static struct platform_driver hp6xxled_driver = {
+ .probe = hp6xxled_probe,
+ .remove = hp6xxled_remove,
+#ifdef CONFIG_PM
+ .suspend = hp6xxled_suspend,
+ .resume = hp6xxled_resume,
+#endif
+ .driver = {
+ .name = "hp6xx-led",
+ },
+};
+
+static int __init hp6xxled_init(void)
+{
+ return platform_driver_register(&hp6xxled_driver);
+}
+
+static void __exit hp6xxled_exit(void)
+{
+ platform_driver_unregister(&hp6xxled_driver);
+}
+
+module_init(hp6xxled_init);
+module_exit(hp6xxled_exit);
+
+MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
+MODULE_DESCRIPTION("HP Jornada 6xx LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-ixp4xx-gpio.c b/drivers/leds/leds-ixp4xx-gpio.c
deleted file mode 100644
index 7dcf0b92c460..000000000000
--- a/drivers/leds/leds-ixp4xx-gpio.c
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * IXP4XX GPIO driver LED driver
- *
- * Author: John Bowler <jbowler@acm.org>
- *
- * Copyright (c) 2006 John Bowler
- *
- * Permission is hereby granted, free of charge, to any
- * person obtaining a copy of this software and associated
- * documentation files (the "Software"), to deal in the
- * Software without restriction, including without
- * limitation the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of
- * the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the
- * following conditions:
- *
- * The above copyright notice and this permission notice
- * shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
- * ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
- * TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
- * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
- * SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/leds.h>
-#include <asm/arch/hardware.h>
-
-extern spinlock_t gpio_lock;
-
-/* Up to 16 gpio lines are possible. */
-#define GPIO_MAX 16
-static struct ixp4xxgpioled_device {
- struct led_classdev ancestor;
- int flags;
-} ixp4xxgpioled_devices[GPIO_MAX];
-
-void ixp4xxgpioled_brightness_set(struct led_classdev *pled,
- enum led_brightness value)
-{
- const struct ixp4xxgpioled_device *const ixp4xx_dev =
- container_of(pled, struct ixp4xxgpioled_device, ancestor);
- const u32 gpio_pin = ixp4xx_dev - ixp4xxgpioled_devices;
-
- if (gpio_pin < GPIO_MAX && ixp4xx_dev->ancestor.name != 0) {
- /* Set or clear the 'gpio_pin' bit according to the style
- * and the required setting (value > 0 == on)
- */
- const int gpio_value =
- (value > 0) == (ixp4xx_dev->flags != IXP4XX_GPIO_LOW) ?
- IXP4XX_GPIO_HIGH : IXP4XX_GPIO_LOW;
-
- {
- unsigned long flags;
- spin_lock_irqsave(&gpio_lock, flags);
- gpio_line_set(gpio_pin, gpio_value);
- spin_unlock_irqrestore(&gpio_lock, flags);
- }
- }
-}
-
-/* LEDs are described in resources, the following iterates over the valid
- * LED resources.
- */
-#define for_all_leds(i, pdev) \
- for (i=0; i<pdev->num_resources; ++i) \
- if (pdev->resource[i].start < GPIO_MAX && \
- pdev->resource[i].name != 0)
-
-/* The following applies 'operation' to each LED from the given platform,
- * the function always returns 0 to allow tail call elimination.
- */
-static int apply_to_all_leds(struct platform_device *pdev,
- void (*operation)(struct led_classdev *pled))
-{
- int i;
-
- for_all_leds(i, pdev)
- operation(&ixp4xxgpioled_devices[pdev->resource[i].start].ancestor);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ixp4xxgpioled_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- return apply_to_all_leds(pdev, led_classdev_suspend);
-}
-
-static int ixp4xxgpioled_resume(struct platform_device *pdev)
-{
- return apply_to_all_leds(pdev, led_classdev_resume);
-}
-#endif
-
-static void ixp4xxgpioled_remove_one_led(struct led_classdev *pled)
-{
- led_classdev_unregister(pled);
- pled->name = 0;
-}
-
-static int ixp4xxgpioled_remove(struct platform_device *pdev)
-{
- return apply_to_all_leds(pdev, ixp4xxgpioled_remove_one_led);
-}
-
-static int ixp4xxgpioled_probe(struct platform_device *pdev)
-{
- /* The board level has to tell the driver where the
- * LEDs are connected - there is no way to find out
- * electrically. It must also say whether the GPIO
- * lines are active high or active low.
- *
- * To do this read the num_resources (the number of
- * LEDs) and the struct resource (the data for each
- * LED). The name comes from the resource, and it
- * isn't copied.
- */
- int i;
-
- for_all_leds(i, pdev) {
- const u8 gpio_pin = pdev->resource[i].start;
- int rc;
-
- if (ixp4xxgpioled_devices[gpio_pin].ancestor.name == 0) {
- unsigned long flags;
-
- spin_lock_irqsave(&gpio_lock, flags);
- gpio_line_config(gpio_pin, IXP4XX_GPIO_OUT);
- /* The config can, apparently, reset the state,
- * I suspect the gpio line may be an input and
- * the config may cause the line to be latched,
- * so the setting depends on how the LED is
- * connected to the line (which affects how it
- * floats if not driven).
- */
- gpio_line_set(gpio_pin, IXP4XX_GPIO_HIGH);
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- ixp4xxgpioled_devices[gpio_pin].flags =
- pdev->resource[i].flags & IORESOURCE_BITS;
-
- ixp4xxgpioled_devices[gpio_pin].ancestor.name =
- pdev->resource[i].name;
-
- /* This is how a board manufacturer makes the LED
- * come on on reset - the GPIO line will be high, so
- * make the LED light when the line is low...
- */
- if (ixp4xxgpioled_devices[gpio_pin].flags != IXP4XX_GPIO_LOW)
- ixp4xxgpioled_devices[gpio_pin].ancestor.brightness = 100;
- else
- ixp4xxgpioled_devices[gpio_pin].ancestor.brightness = 0;
-
- ixp4xxgpioled_devices[gpio_pin].ancestor.flags = 0;
-
- ixp4xxgpioled_devices[gpio_pin].ancestor.brightness_set =
- ixp4xxgpioled_brightness_set;
-
- ixp4xxgpioled_devices[gpio_pin].ancestor.default_trigger = 0;
- }
-
- rc = led_classdev_register(&pdev->dev,
- &ixp4xxgpioled_devices[gpio_pin].ancestor);
- if (rc < 0) {
- ixp4xxgpioled_devices[gpio_pin].ancestor.name = 0;
- ixp4xxgpioled_remove(pdev);
- return rc;
- }
- }
-
- return 0;
-}
-
-static struct platform_driver ixp4xxgpioled_driver = {
- .probe = ixp4xxgpioled_probe,
- .remove = ixp4xxgpioled_remove,
-#ifdef CONFIG_PM
- .suspend = ixp4xxgpioled_suspend,
- .resume = ixp4xxgpioled_resume,
-#endif
- .driver = {
- .name = "IXP4XX-GPIO-LED",
- },
-};
-
-static int __init ixp4xxgpioled_init(void)
-{
- return platform_driver_register(&ixp4xxgpioled_driver);
-}
-
-static void __exit ixp4xxgpioled_exit(void)
-{
- platform_driver_unregister(&ixp4xxgpioled_driver);
-}
-
-module_init(ixp4xxgpioled_init);
-module_exit(ixp4xxgpioled_exit);
-
-MODULE_AUTHOR("John Bowler <jbowler@acm.org>");
-MODULE_DESCRIPTION("IXP4XX GPIO LED driver");
-MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/leds/leds-locomo.c b/drivers/leds/leds-locomo.c
index 2207335e9212..7295f7f52185 100644
--- a/drivers/leds/leds-locomo.c
+++ b/drivers/leds/leds-locomo.c
@@ -43,13 +43,13 @@ static void locomoled_brightness_set1(struct led_classdev *led_cdev,
}
static struct led_classdev locomo_led0 = {
- .name = "locomo:amber",
+ .name = "locomo:amber:charge",
.default_trigger = "sharpsl-charge",
.brightness_set = locomoled_brightness_set0,
};
static struct led_classdev locomo_led1 = {
- .name = "locomo:green",
+ .name = "locomo:green:mail",
.default_trigger = "nand-disk",
.brightness_set = locomoled_brightness_set1,
};
diff --git a/drivers/leds/leds-net48xx.c b/drivers/leds/leds-net48xx.c
index 45ba3d45bcb8..054360473c94 100644
--- a/drivers/leds/leds-net48xx.c
+++ b/drivers/leds/leds-net48xx.c
@@ -31,7 +31,7 @@ static void net48xx_error_led_set(struct led_classdev *led_cdev,
}
static struct led_classdev net48xx_error_led = {
- .name = "net48xx:error",
+ .name = "net48xx::error",
.brightness_set = net48xx_error_led_set,
};
diff --git a/drivers/leds/leds-spitz.c b/drivers/leds/leds-spitz.c
index 126d09cc96ec..93e1012b17e6 100644
--- a/drivers/leds/leds-spitz.c
+++ b/drivers/leds/leds-spitz.c
@@ -38,13 +38,13 @@ static void spitzled_green_set(struct led_classdev *led_cdev, enum led_brightnes
}
static struct led_classdev spitz_amber_led = {
- .name = "spitz:amber",
+ .name = "spitz:amber:charge",
.default_trigger = "sharpsl-charge",
.brightness_set = spitzled_amber_set,
};
static struct led_classdev spitz_green_led = {
- .name = "spitz:green",
+ .name = "spitz:green:hddactivity",
.default_trigger = "ide-disk",
.brightness_set = spitzled_green_set,
};
@@ -72,8 +72,10 @@ static int spitzled_probe(struct platform_device *pdev)
{
int ret;
- if (machine_is_akita())
+ if (machine_is_akita()) {
+ spitz_green_led.name = "spitz:green:mail";
spitz_green_led.default_trigger = "nand-disk";
+ }
ret = led_classdev_register(&pdev->dev, &spitz_amber_led);
if (ret < 0)
diff --git a/drivers/leds/leds-tosa.c b/drivers/leds/leds-tosa.c
index fb2416a38303..9e0a188fbb0a 100644
--- a/drivers/leds/leds-tosa.c
+++ b/drivers/leds/leds-tosa.c
@@ -45,13 +45,13 @@ static void tosaled_green_set(struct led_classdev *led_cdev,
}
static struct led_classdev tosa_amber_led = {
- .name = "tosa:amber",
+ .name = "tosa:amber:charge",
.default_trigger = "sharpsl-charge",
.brightness_set = tosaled_amber_set,
};
static struct led_classdev tosa_green_led = {
- .name = "tosa:green",
+ .name = "tosa:green:mail",
.default_trigger = "nand-disk",
.brightness_set = tosaled_green_set,
};
diff --git a/drivers/leds/leds-wrap.c b/drivers/leds/leds-wrap.c
index 27fb2d8e991f..7ac61a7b56ad 100644
--- a/drivers/leds/leds-wrap.c
+++ b/drivers/leds/leds-wrap.c
@@ -19,11 +19,21 @@
#include <linux/scx200_gpio.h>
#define DRVNAME "wrap-led"
+#define WRAP_POWER_LED_GPIO 2
#define WRAP_ERROR_LED_GPIO 3
-#define WRAP_EXTRA_LED_GPIO 18
+#define WRAP_EXTRA_LED_GPIO 18
static struct platform_device *pdev;
+static void wrap_power_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (value)
+ scx200_gpio_set_low(WRAP_POWER_LED_GPIO);
+ else
+ scx200_gpio_set_high(WRAP_POWER_LED_GPIO);
+}
+
static void wrap_error_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
@@ -42,13 +52,18 @@ static void wrap_extra_led_set(struct led_classdev *led_cdev,
scx200_gpio_set_high(WRAP_EXTRA_LED_GPIO);
}
+static struct led_classdev wrap_power_led = {
+ .name = "wrap::power",
+ .brightness_set = wrap_power_led_set,
+};
+
static struct led_classdev wrap_error_led = {
- .name = "wrap:error",
+ .name = "wrap::error",
.brightness_set = wrap_error_led_set,
};
static struct led_classdev wrap_extra_led = {
- .name = "wrap:extra",
+ .name = "wrap::extra",
.brightness_set = wrap_extra_led_set,
};
@@ -56,6 +71,7 @@ static struct led_classdev wrap_extra_led = {
static int wrap_led_suspend(struct platform_device *dev,
pm_message_t state)
{
+ led_classdev_suspend(&wrap_power_led);
led_classdev_suspend(&wrap_error_led);
led_classdev_suspend(&wrap_extra_led);
return 0;
@@ -63,6 +79,7 @@ static int wrap_led_suspend(struct platform_device *dev,
static int wrap_led_resume(struct platform_device *dev)
{
+ led_classdev_resume(&wrap_power_led);
led_classdev_resume(&wrap_error_led);
led_classdev_resume(&wrap_extra_led);
return 0;
@@ -76,17 +93,31 @@ static int wrap_led_probe(struct platform_device *pdev)
{
int ret;
+ ret = led_classdev_register(&pdev->dev, &wrap_power_led);
+ if (ret < 0)
+ return ret;
+
ret = led_classdev_register(&pdev->dev, &wrap_error_led);
- if (ret == 0) {
- ret = led_classdev_register(&pdev->dev, &wrap_extra_led);
- if (ret < 0)
- led_classdev_unregister(&wrap_error_led);
- }
+ if (ret < 0)
+ goto err1;
+
+ ret = led_classdev_register(&pdev->dev, &wrap_extra_led);
+ if (ret < 0)
+ goto err2;
+
+ return ret;
+
+err2:
+ led_classdev_unregister(&wrap_error_led);
+err1:
+ led_classdev_unregister(&wrap_power_led);
+
return ret;
}
static int wrap_led_remove(struct platform_device *pdev)
{
+ led_classdev_unregister(&wrap_power_led);
led_classdev_unregister(&wrap_error_led);
led_classdev_unregister(&wrap_extra_led);
return 0;
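The rewritten wrap_led_probe() above replaces nested ifs with the usual goto-based unwind: each registration that succeeded gets undone, in reverse order, if a later one fails. A standalone sketch of the pattern (plain C; acquire/release are hypothetical stand-ins for led_classdev_register/unregister):

#include <stdio.h>
#include <string.h>

static int acquire(const char *name)
{
	if (strcmp(name, "extra") == 0)	/* pretend the third registration fails */
		return -1;
	printf("acquired %s\n", name);
	return 0;
}

static void release(const char *name)
{
	printf("released %s\n", name);
}

static int probe(void)
{
	int ret;

	ret = acquire("power");
	if (ret < 0)
		return ret;

	ret = acquire("error");
	if (ret < 0)
		goto err_power;		/* undo only what already succeeded */

	ret = acquire("extra");
	if (ret < 0)
		goto err_error;

	return 0;

err_error:
	release("error");
err_power:
	release("power");
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}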
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index ed9ff02c77ea..82c55d6e4902 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -77,8 +77,21 @@ static ssize_t led_delay_on_store(struct device *dev,
count++;
if (count == size) {
- timer_data->delay_on = state;
- mod_timer(&timer_data->timer, jiffies + 1);
+ if (timer_data->delay_on != state) {
+ /* the new value differs from the previous */
+ timer_data->delay_on = state;
+
+ /* deactivate previous settings */
+ del_timer_sync(&timer_data->timer);
+
+ /* try to activate hardware acceleration, if any */
+ if (!led_cdev->blink_set ||
+ led_cdev->blink_set(led_cdev,
+ &timer_data->delay_on, &timer_data->delay_off)) {
+ /* no hardware acceleration, blink via timer */
+ mod_timer(&timer_data->timer, jiffies + 1);
+ }
+ }
ret = count;
}
@@ -110,8 +123,21 @@ static ssize_t led_delay_off_store(struct device *dev,
count++;
if (count == size) {
- timer_data->delay_off = state;
- mod_timer(&timer_data->timer, jiffies + 1);
+ if (timer_data->delay_off != state) {
+ /* the new value differs from the previous */
+ timer_data->delay_off = state;
+
+ /* deactivate previous settings */
+ del_timer_sync(&timer_data->timer);
+
+ /* try to activate hardware acceleration, if any */
+ if (!led_cdev->blink_set ||
+ led_cdev->blink_set(led_cdev,
+ &timer_data->delay_on, &timer_data->delay_off)) {
+ /* no hardware acceleration, blink via timer */
+ mod_timer(&timer_data->timer, jiffies + 1);
+ }
+ }
ret = count;
}
@@ -143,6 +169,13 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
if (rc)
goto err_out_delayon;
+ /* If there is hardware support for blinking, start with a
+ * user-friendly blink rate chosen by the driver.
+ */
+ if (led_cdev->blink_set)
+ led_cdev->blink_set(led_cdev,
+ &timer_data->delay_on, &timer_data->delay_off);
+
return;
err_out_delayon:
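The reworked delay_on/delay_off handlers above only fall back to the software timer when the LED has no blink_set hook or the hook rejects the requested periods. A standalone sketch of that decision (plain C; blink_set_fn, start_soft_timer and fake_hw_blink are stand-ins, not the LED class API):

#include <stdio.h>
#include <stddef.h>

/* Optional hardware hook: returns 0 if the hardware accepted the
 * (possibly adjusted) on/off periods, non-zero otherwise. */
typedef int (*blink_set_fn)(unsigned long *delay_on, unsigned long *delay_off);

static void start_soft_timer(unsigned long delay_on, unsigned long delay_off)
{
	printf("software blink: %lums on / %lums off\n", delay_on, delay_off);
}

static void apply_blink(blink_set_fn blink_set,
			unsigned long *delay_on, unsigned long *delay_off)
{
	/* Try hardware acceleration first; fall back to the timer. */
	if (!blink_set || blink_set(delay_on, delay_off))
		start_soft_timer(*delay_on, *delay_off);
	else
		printf("hardware blink: %lums on / %lums off\n",
		       *delay_on, *delay_off);
}

static int fake_hw_blink(unsigned long *on, unsigned long *off)
{
	return (*on == *off) ? 0 : -1;	/* toy hardware: symmetric blink only */
}

int main(void)
{
	unsigned long on = 500, off = 500;

	apply_blink(fake_hw_blink, &on, &off);	/* accepted by the "hardware" */
	off = 100;
	apply_blink(fake_hw_blink, &on, &off);	/* rejected, falls back to timer */
	apply_blink(NULL, &on, &off);		/* no hook at all */
	return 0;
}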
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index de9ebbfbf122..936788272a5f 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -78,12 +78,14 @@ struct media_bay_info {
int cached_gpio;
int sleeping;
struct semaphore lock;
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
void __iomem *cd_base;
- int cd_index;
int cd_irq;
int cd_retry;
#endif
+#if defined(CONFIG_BLK_DEV_IDE_PMAC) || defined(CONFIG_MAC_FLOPPY)
+ int cd_index;
+#endif
};
#define MAX_BAYS 2
@@ -91,7 +93,7 @@ struct media_bay_info {
static struct media_bay_info media_bays[MAX_BAYS];
int media_bay_count = 0;
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
/* check the busy bit in the media-bay ide interface
(assumes the media-bay contains an ide device) */
#define MB_IDE_READY(i) ((readb(media_bays[i].cd_base + 0x70) & 0x80) == 0)
@@ -401,7 +403,7 @@ static void poll_media_bay(struct media_bay_info* bay)
set_mb_power(bay, id != MB_NO);
bay->content_id = id;
if (id == MB_NO) {
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
bay->cd_retry = 0;
#endif
printk(KERN_INFO "media bay %d is empty\n", bay->index);
@@ -414,9 +416,9 @@ static void poll_media_bay(struct media_bay_info* bay)
}
}
+#ifdef CONFIG_MAC_FLOPPY
int check_media_bay(struct device_node *which_bay, int what)
{
-#ifdef CONFIG_BLK_DEV_IDE
int i;
for (i=0; i<media_bay_count; i++)
@@ -426,14 +428,14 @@ int check_media_bay(struct device_node *which_bay, int what)
media_bays[i].cd_index = -1;
return -EINVAL;
}
-#endif /* CONFIG_BLK_DEV_IDE */
return -ENODEV;
}
EXPORT_SYMBOL(check_media_bay);
+#endif /* CONFIG_MAC_FLOPPY */
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
int check_media_bay_by_base(unsigned long base, int what)
{
-#ifdef CONFIG_BLK_DEV_IDE
int i;
for (i=0; i<media_bay_count; i++)
@@ -443,15 +445,13 @@ int check_media_bay_by_base(unsigned long base, int what)
media_bays[i].cd_index = -1;
return -EINVAL;
}
-#endif
-
+
return -ENODEV;
}
int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
- int irq, int index)
+ int irq, int index)
{
-#ifdef CONFIG_BLK_DEV_IDE
int i;
for (i=0; i<media_bay_count; i++) {
@@ -483,10 +483,10 @@ int media_bay_set_ide_infos(struct device_node* which_bay, unsigned long base,
return -ENODEV;
}
}
-#endif /* CONFIG_BLK_DEV_IDE */
-
+
return -ENODEV;
}
+#endif /* CONFIG_BLK_DEV_IDE_PMAC */
static void media_bay_step(int i)
{
@@ -521,14 +521,13 @@ static void media_bay_step(int i)
bay->state = mb_resetting;
MBDBG("mediabay%d: waiting reset (kind:%d)\n", i, bay->content_id);
break;
-
case mb_resetting:
if (bay->content_id != MB_CD) {
MBDBG("mediabay%d: bay is up (kind:%d)\n", i, bay->content_id);
bay->state = mb_up;
break;
}
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
MBDBG("mediabay%d: waiting IDE reset (kind:%d)\n", i, bay->content_id);
bay->ops->un_reset_ide(bay);
bay->timer = msecs_to_jiffies(MB_IDE_WAIT);
@@ -536,16 +535,14 @@ static void media_bay_step(int i)
#else
printk(KERN_DEBUG "media-bay %d is ide (not compiled in kernel)\n", i);
set_mb_power(bay, 0);
-#endif /* CONFIG_BLK_DEV_IDE */
+#endif /* CONFIG_BLK_DEV_IDE_PMAC */
break;
-
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
case mb_ide_resetting:
bay->timer = msecs_to_jiffies(MB_IDE_TIMEOUT);
bay->state = mb_ide_waiting;
MBDBG("mediabay%d: waiting IDE ready (kind:%d)\n", i, bay->content_id);
break;
-
case mb_ide_waiting:
if (bay->cd_base == NULL) {
bay->timer = 0;
@@ -587,11 +584,10 @@ static void media_bay_step(int i)
bay->timer = 0;
}
break;
-#endif /* CONFIG_BLK_DEV_IDE */
-
+#endif /* CONFIG_BLK_DEV_IDE_PMAC */
case mb_powering_down:
bay->state = mb_empty;
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
if (bay->cd_index >= 0) {
printk(KERN_DEBUG "Unregistering mb %d ide, index:%d\n", i,
bay->cd_index);
@@ -607,7 +603,7 @@ static void media_bay_step(int i)
bay->content_id = MB_NO;
}
}
-#endif /* CONFIG_BLK_DEV_IDE */
+#endif /* CONFIG_BLK_DEV_IDE_PMAC */
MBDBG("mediabay%d: end of power down\n", i);
break;
}
@@ -739,7 +735,7 @@ static int media_bay_resume(struct macio_dev *mdev)
bay->last_value = bay->content_id;
bay->value_count = msecs_to_jiffies(MB_STABLE_DELAY);
bay->timer = msecs_to_jiffies(MB_POWER_DELAY);
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
bay->cd_retry = 0;
#endif
do {
@@ -829,7 +825,7 @@ static int __init media_bay_init(void)
for (i=0; i<MAX_BAYS; i++) {
memset((char *)&media_bays[i], 0, sizeof(struct media_bay_info));
media_bays[i].content_id = -1;
-#ifdef CONFIG_BLK_DEV_IDE
+#ifdef CONFIG_BLK_DEV_IDE_PMAC
media_bays[i].cd_index = -1;
#endif
}
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index 01b8eca7ccd5..6e6dd17ab572 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -111,7 +111,7 @@ static enum macii_state {
static struct adb_request *current_req; /* first request struct in the queue */
static struct adb_request *last_req; /* last request struct in the queue */
static unsigned char reply_buf[16]; /* storage for autopolled replies */
-static unsigned char *reply_ptr; /* next byte in req->data or reply_buf */
+static unsigned char *reply_ptr; /* next byte in reply_buf or req->reply */
static int reading_reply; /* store reply in reply_buf else req->reply */
static int data_index; /* index of the next byte to send from req->data */
static int reply_len; /* number of bytes received in reply_buf or req->reply */
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 1b1ef3130e6e..a0585fb6da94 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -237,7 +237,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde
if (!page)
return ERR_PTR(-ENOMEM);
- ITERATE_RDEV(mddev, rdev, tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (! test_bit(In_sync, &rdev->flags)
|| test_bit(Faulty, &rdev->flags))
continue;
@@ -261,7 +261,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
struct list_head *tmp;
mddev_t *mddev = bitmap->mddev;
- ITERATE_RDEV(mddev, rdev, tmp)
+ rdev_for_each(rdev, tmp, mddev)
if (test_bit(In_sync, &rdev->flags)
&& !test_bit(Faulty, &rdev->flags)) {
int size = PAGE_SIZE;
@@ -1348,14 +1348,38 @@ void bitmap_close_sync(struct bitmap *bitmap)
*/
sector_t sector = 0;
int blocks;
- if (!bitmap) return;
+ if (!bitmap)
+ return;
while (sector < bitmap->mddev->resync_max_sectors) {
bitmap_end_sync(bitmap, sector, &blocks, 0);
-/*
- if (sector < 500) printk("bitmap_close_sync: sec %llu blks %d\n",
- (unsigned long long)sector, blocks);
-*/ sector += blocks;
+ sector += blocks;
+ }
+}
+
+void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector)
+{
+ sector_t s = 0;
+ int blocks;
+
+ if (!bitmap)
+ return;
+ if (sector == 0) {
+ bitmap->last_end_sync = jiffies;
+ return;
+ }
+ if (time_before(jiffies, (bitmap->last_end_sync
+ + bitmap->daemon_sleep * HZ)))
+ return;
+ wait_event(bitmap->mddev->recovery_wait,
+ atomic_read(&bitmap->mddev->recovery_active) == 0);
+
+ sector &= ~((1ULL << CHUNK_BLOCK_SHIFT(bitmap)) - 1);
+ s = 0;
+ while (s < sector && s < bitmap->mddev->resync_max_sectors) {
+ bitmap_end_sync(bitmap, s, &blocks, 0);
+ s += blocks;
}
+ bitmap->last_end_sync = jiffies;
}
static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
@@ -1565,3 +1589,4 @@ EXPORT_SYMBOL(bitmap_start_sync);
EXPORT_SYMBOL(bitmap_end_sync);
EXPORT_SYMBOL(bitmap_unplug);
EXPORT_SYMBOL(bitmap_close_sync);
+EXPORT_SYMBOL(bitmap_cond_end_sync);
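bitmap_cond_end_sync() above flushes completed-resync bits at most once per daemon_sleep interval and only up to a chunk-aligned sector. A standalone model of that rate limiting with a simulated clock (period, last_flush and chunk_sectors are illustrative names, not the bitmap fields):

#include <stdio.h>

static unsigned long now;		/* simulated jiffies */
static unsigned long last_flush;
static const unsigned long period = 5;	/* models daemon_sleep * HZ */

/* Flush bits only up to a chunk-aligned sector, at most once per period. */
static void cond_end_sync(unsigned long long sector, unsigned long long chunk_sectors)
{
	unsigned long long aligned;

	if (sector == 0) {		/* sync (re)started: just reset the clock */
		last_flush = now;
		return;
	}
	if (now - last_flush < period)	/* too soon, skip the expensive flush */
		return;

	aligned = sector & ~(chunk_sectors - 1);	/* chunk_sectors is a power of two */
	printf("t=%lu: flushing in-sync bits up to sector %llu\n", now, aligned);
	last_flush = now;
}

int main(void)
{
	unsigned long long s;

	cond_end_sync(0, 128);
	for (s = 100; s < 2000; s += 300) {
		now += 2;
		cond_end_sync(s, 128);	/* only some of these calls actually flush */
	}
	return 0;
}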
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index cf2ddce34118..d107ddceefcd 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -294,7 +294,7 @@ static int run(mddev_t *mddev)
}
conf->nfaults = 0;
- ITERATE_RDEV(mddev, rdev, tmp)
+ rdev_for_each(rdev, tmp, mddev)
conf->rdev = rdev;
mddev->array_size = mddev->size;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 3dac1cfb8189..0b8511776b3e 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -122,7 +122,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
cnt = 0;
conf->array_size = 0;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
int j = rdev->raid_disk;
dev_info_t *disk = conf->disks + j;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index c28a120b4161..5fc326d3970e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -195,7 +195,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
*/
-#define ITERATE_MDDEV(mddev,tmp) \
+#define for_each_mddev(mddev,tmp) \
\
for (({ spin_lock(&all_mddevs_lock); \
tmp = all_mddevs.next; \
@@ -275,6 +275,7 @@ static mddev_t * mddev_find(dev_t unit)
spin_lock_init(&new->write_lock);
init_waitqueue_head(&new->sb_wait);
new->reshape_position = MaxSector;
+ new->resync_max = MaxSector;
new->queue = blk_alloc_queue(GFP_KERNEL);
if (!new->queue) {
@@ -310,7 +311,7 @@ static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
mdk_rdev_t * rdev;
struct list_head *tmp;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (rdev->desc_nr == nr)
return rdev;
}
@@ -322,7 +323,7 @@ static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
struct list_head *tmp;
mdk_rdev_t *rdev;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (rdev->bdev->bd_dev == dev)
return rdev;
}
@@ -773,12 +774,16 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
__u64 ev1 = md_event(sb);
rdev->raid_disk = -1;
- rdev->flags = 0;
+ clear_bit(Faulty, &rdev->flags);
+ clear_bit(In_sync, &rdev->flags);
+ clear_bit(WriteMostly, &rdev->flags);
+ clear_bit(BarriersNotsupp, &rdev->flags);
+
if (mddev->raid_disks == 0) {
mddev->major_version = 0;
mddev->minor_version = sb->minor_version;
mddev->patch_version = sb->patch_version;
- mddev->persistent = ! sb->not_persistent;
+ mddev->external = 0;
mddev->chunk_size = sb->chunk_size;
mddev->ctime = sb->ctime;
mddev->utime = sb->utime;
@@ -904,7 +909,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->size = mddev->size;
sb->raid_disks = mddev->raid_disks;
sb->md_minor = mddev->md_minor;
- sb->not_persistent = !mddev->persistent;
+ sb->not_persistent = 0;
sb->utime = mddev->utime;
sb->state = 0;
sb->events_hi = (mddev->events>>32);
@@ -938,7 +943,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->state |= (1<<MD_SB_BITMAP_PRESENT);
sb->disks[0].state = (1<<MD_DISK_REMOVED);
- ITERATE_RDEV(mddev,rdev2,tmp) {
+ rdev_for_each(rdev2, tmp, mddev) {
mdp_disk_t *d;
int desc_nr;
if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
@@ -1153,11 +1158,15 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
__u64 ev1 = le64_to_cpu(sb->events);
rdev->raid_disk = -1;
- rdev->flags = 0;
+ clear_bit(Faulty, &rdev->flags);
+ clear_bit(In_sync, &rdev->flags);
+ clear_bit(WriteMostly, &rdev->flags);
+ clear_bit(BarriersNotsupp, &rdev->flags);
+
if (mddev->raid_disks == 0) {
mddev->major_version = 1;
mddev->patch_version = 0;
- mddev->persistent = 1;
+ mddev->external = 0;
mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
@@ -1286,7 +1295,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
}
max_dev = 0;
- ITERATE_RDEV(mddev,rdev2,tmp)
+ rdev_for_each(rdev2, tmp, mddev)
if (rdev2->desc_nr+1 > max_dev)
max_dev = rdev2->desc_nr+1;
@@ -1295,7 +1304,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
for (i=0; i<max_dev;i++)
sb->dev_roles[i] = cpu_to_le16(0xfffe);
- ITERATE_RDEV(mddev,rdev2,tmp) {
+ rdev_for_each(rdev2, tmp, mddev) {
i = rdev2->desc_nr;
if (test_bit(Faulty, &rdev2->flags))
sb->dev_roles[i] = cpu_to_le16(0xfffe);
@@ -1333,8 +1342,8 @@ static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
struct list_head *tmp, *tmp2;
mdk_rdev_t *rdev, *rdev2;
- ITERATE_RDEV(mddev1,rdev,tmp)
- ITERATE_RDEV(mddev2, rdev2, tmp2)
+ rdev_for_each(rdev, tmp, mddev1)
+ rdev_for_each(rdev2, tmp2, mddev2)
if (rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains)
return 1;
@@ -1401,7 +1410,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
goto fail;
}
list_add(&rdev->same_set, &mddev->disks);
- bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
+ bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
return 0;
fail:
@@ -1410,10 +1419,11 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
return err;
}
-static void delayed_delete(struct work_struct *ws)
+static void md_delayed_delete(struct work_struct *ws)
{
mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
kobject_del(&rdev->kobj);
+ kobject_put(&rdev->kobj);
}
static void unbind_rdev_from_array(mdk_rdev_t * rdev)
@@ -1432,7 +1442,8 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
/* We need to delay this, otherwise we can deadlock when
 * writing 'remove' to "dev/state"
*/
- INIT_WORK(&rdev->del_work, delayed_delete);
+ INIT_WORK(&rdev->del_work, md_delayed_delete);
+ kobject_get(&rdev->kobj);
schedule_work(&rdev->del_work);
}
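Besides the rename, the hunk above takes an extra kobject reference before scheduling md_delayed_delete() and drops it inside the work function, so the rdev cannot go away while the deferred kobject_del() is still queued. A standalone sketch of that pin-defer-release pattern (plain C; the refcount and the 'pending' slot are stand-ins for kobjects and the kernel workqueue):

#include <stdio.h>

struct obj {
	int refcount;
	const char *name;
};

static void obj_get(struct obj *o)
{
	o->refcount++;
}

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		printf("%s: last reference dropped, freeing\n", o->name);
}

/* Deferred work: runs later, from "workqueue" context. */
static void delayed_delete(struct obj *o)
{
	printf("%s: removing sysfs entry\n", o->name);
	obj_put(o);		/* drop the reference taken when the work was queued */
}

static struct obj *pending;	/* trivial stand-in for schedule_work() */

static void unbind(struct obj *o)
{
	printf("%s: unbinding, deferring removal\n", o->name);
	obj_get(o);		/* pin the object until the deferred work has run */
	pending = o;		/* "schedule" the work */
}

int main(void)
{
	struct obj rdev = { .refcount = 1, .name = "rdev0" };

	unbind(&rdev);
	obj_put(&rdev);		/* the caller drops its own reference... */
	delayed_delete(pending);/* ...but the object survives until the work runs */
	return 0;
}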
@@ -1441,7 +1452,7 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
* otherwise reused by a RAID array (or any other kernel
* subsystem), by bd_claiming the device.
*/
-static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
+static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
int err = 0;
struct block_device *bdev;
@@ -1453,13 +1464,15 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev)
__bdevname(dev, b));
return PTR_ERR(bdev);
}
- err = bd_claim(bdev, rdev);
+ err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
if (err) {
printk(KERN_ERR "md: could not bd_claim %s.\n",
bdevname(bdev, b));
blkdev_put(bdev);
return err;
}
+ if (!shared)
+ set_bit(AllReserved, &rdev->flags);
rdev->bdev = bdev;
return err;
}
@@ -1503,7 +1516,7 @@ static void export_array(mddev_t *mddev)
struct list_head *tmp;
mdk_rdev_t *rdev;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (!rdev->mddev) {
MD_BUG();
continue;
@@ -1581,17 +1594,17 @@ static void md_print_devices(void)
printk("md: **********************************\n");
printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
printk("md: **********************************\n");
- ITERATE_MDDEV(mddev,tmp) {
+ for_each_mddev(mddev, tmp) {
if (mddev->bitmap)
bitmap_print_sb(mddev->bitmap);
else
printk("%s: ", mdname(mddev));
- ITERATE_RDEV(mddev,rdev,tmp2)
+ rdev_for_each(rdev, tmp2, mddev)
printk("<%s>", bdevname(rdev->bdev,b));
printk("\n");
- ITERATE_RDEV(mddev,rdev,tmp2)
+ rdev_for_each(rdev, tmp2, mddev)
print_rdev(rdev);
}
printk("md: **********************************\n");
@@ -1610,7 +1623,7 @@ static void sync_sbs(mddev_t * mddev, int nospares)
mdk_rdev_t *rdev;
struct list_head *tmp;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (rdev->sb_events == mddev->events ||
(nospares &&
rdev->raid_disk < 0 &&
@@ -1696,18 +1709,20 @@ repeat:
MD_BUG();
mddev->events --;
}
- sync_sbs(mddev, nospares);
/*
* do not write anything to disk if using
* nonpersistent superblocks
*/
if (!mddev->persistent) {
- clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+ if (!mddev->external)
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
+
spin_unlock_irq(&mddev->write_lock);
wake_up(&mddev->sb_wait);
return;
}
+ sync_sbs(mddev, nospares);
spin_unlock_irq(&mddev->write_lock);
dprintk(KERN_INFO
@@ -1715,7 +1730,7 @@ repeat:
mdname(mddev),mddev->in_sync);
bitmap_update_sb(mddev->bitmap);
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
char b[BDEVNAME_SIZE];
dprintk(KERN_INFO "md: ");
if (rdev->sb_loaded != 1)
@@ -1785,7 +1800,7 @@ static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
char *sep = "";
- int len=0;
+ size_t len = 0;
if (test_bit(Faulty, &rdev->flags)) {
len+= sprintf(page+len, "%sfaulty",sep);
@@ -1887,20 +1902,45 @@ static ssize_t
slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
char *e;
+ int err;
+ char nm[20];
int slot = simple_strtoul(buf, &e, 10);
if (strncmp(buf, "none", 4)==0)
slot = -1;
else if (e==buf || (*e && *e!= '\n'))
return -EINVAL;
- if (rdev->mddev->pers)
- /* Cannot set slot in active array (yet) */
- return -EBUSY;
- if (slot >= rdev->mddev->raid_disks)
- return -ENOSPC;
- rdev->raid_disk = slot;
- /* assume it is working */
- rdev->flags = 0;
- set_bit(In_sync, &rdev->flags);
+ if (rdev->mddev->pers) {
+ /* Setting 'slot' on an active array also requires
+ * updating the 'rd%d' link and communicating
+ * with the personality via ->hot_*_disk.
+ * For now we only support removing
+ * failed/spare devices. This normally happens automatically,
+ * but not when the metadata is externally managed.
+ */
+ if (slot != -1)
+ return -EBUSY;
+ if (rdev->raid_disk == -1)
+ return -EEXIST;
+ /* personality does all needed checks */
+ if (rdev->mddev->pers->hot_add_disk == NULL)
+ return -EINVAL;
+ err = rdev->mddev->pers->
+ hot_remove_disk(rdev->mddev, rdev->raid_disk);
+ if (err)
+ return err;
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&rdev->mddev->kobj, nm);
+ set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
+ md_wakeup_thread(rdev->mddev->thread);
+ } else {
+ if (slot >= rdev->mddev->raid_disks)
+ return -ENOSPC;
+ rdev->raid_disk = slot;
+ /* assume it is working */
+ clear_bit(Faulty, &rdev->flags);
+ clear_bit(WriteMostly, &rdev->flags);
+ set_bit(In_sync, &rdev->flags);
+ }
return len;
}
@@ -1923,6 +1963,10 @@ offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
return -EINVAL;
if (rdev->mddev->pers)
return -EBUSY;
+ if (rdev->size && rdev->mddev->external)
+ /* Must set offset before size, so overlap checks
+ * can be sane */
+ return -EBUSY;
rdev->data_offset = offset;
return len;
}
@@ -1936,16 +1980,69 @@ rdev_size_show(mdk_rdev_t *rdev, char *page)
return sprintf(page, "%llu\n", (unsigned long long)rdev->size);
}
+static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
+{
+ /* check if two start/length pairs overlap */
+ if (s1+l1 <= s2)
+ return 0;
+ if (s2+l2 <= s1)
+ return 0;
+ return 1;
+}
+
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
char *e;
unsigned long long size = simple_strtoull(buf, &e, 10);
+ unsigned long long oldsize = rdev->size;
if (e==buf || (*e && *e != '\n'))
return -EINVAL;
if (rdev->mddev->pers)
return -EBUSY;
rdev->size = size;
+ if (size > oldsize && rdev->mddev->external) {
+ /* need to check that all other rdevs with the same ->bdev
+ * do not overlap. We need to unlock the mddev to avoid
+ * a deadlock. We have already changed rdev->size, and if
+ * we have to change it back, we will have the lock again.
+ */
+ mddev_t *mddev;
+ int overlap = 0;
+ struct list_head *tmp, *tmp2;
+
+ mddev_unlock(rdev->mddev);
+ for_each_mddev(mddev, tmp) {
+ mdk_rdev_t *rdev2;
+
+ mddev_lock(mddev);
+ rdev_for_each(rdev2, tmp2, mddev)
+ if (test_bit(AllReserved, &rdev2->flags) ||
+ (rdev->bdev == rdev2->bdev &&
+ rdev != rdev2 &&
+ overlaps(rdev->data_offset, rdev->size,
+ rdev2->data_offset, rdev2->size))) {
+ overlap = 1;
+ break;
+ }
+ mddev_unlock(mddev);
+ if (overlap) {
+ mddev_put(mddev);
+ break;
+ }
+ }
+ mddev_lock(rdev->mddev);
+ if (overlap) {
+ /* Someone else could have slipped in a size
+ * change here, but doing so is just silly.
+ * We put oldsize back because we *know* it is
+ * safe, and trust userspace not to race with
+ * itself
+ */
+ rdev->size = oldsize;
+ return -EBUSY;
+ }
+ }
if (size < rdev->mddev->size || rdev->mddev->size == 0)
rdev->mddev->size = size;
return len;
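The overlaps() helper introduced above treats each rdev as a half-open [start, start+length) range on its block device and reports a collision unless one range ends at or before the start of the other. A standalone check of the same predicate, with plain integers instead of sector_t:

#include <assert.h>
#include <stdio.h>

/* Two half-open ranges [s1, s1+l1) and [s2, s2+l2) overlap unless
 * one of them ends at or before the start of the other. */
static int overlaps(unsigned long long s1, unsigned long long l1,
		    unsigned long long s2, unsigned long long l2)
{
	if (s1 + l1 <= s2)
		return 0;
	if (s2 + l2 <= s1)
		return 0;
	return 1;
}

int main(void)
{
	assert(!overlaps(0, 100, 100, 50));	/* adjacent, no overlap */
	assert( overlaps(0, 101, 100, 50));	/* one sector shared */
	assert( overlaps(200, 10, 195, 20));	/* one range inside the other */
	assert(!overlaps(500, 10, 0, 100));	/* order does not matter */
	printf("overlap checks passed\n");
	return 0;
}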
@@ -1980,12 +2077,18 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
{
struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
+ int rv;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- return entry->store(rdev, page, length);
+ rv = mddev_lock(rdev->mddev);
+ if (!rv) {
+ rv = entry->store(rdev, page, length);
+ mddev_unlock(rdev->mddev);
+ }
+ return rv;
}
static void rdev_free(struct kobject *ko)
@@ -2029,7 +2132,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
if ((err = alloc_disk_sb(rdev)))
goto abort_free;
- err = lock_rdev(rdev, newdev);
+ err = lock_rdev(rdev, newdev, super_format == -2);
if (err)
goto abort_free;
@@ -2099,7 +2202,7 @@ static void analyze_sbs(mddev_t * mddev)
char b[BDEVNAME_SIZE];
freshest = NULL;
- ITERATE_RDEV(mddev,rdev,tmp)
+ rdev_for_each(rdev, tmp, mddev)
switch (super_types[mddev->major_version].
load_super(rdev, freshest, mddev->minor_version)) {
case 1:
@@ -2120,7 +2223,7 @@ static void analyze_sbs(mddev_t * mddev)
validate_super(mddev, freshest);
i = 0;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (rdev != freshest)
if (super_types[mddev->major_version].
validate_super(mddev, rdev)) {
@@ -2215,7 +2318,7 @@ level_show(mddev_t *mddev, char *page)
static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
- int rv = len;
+ ssize_t rv = len;
if (mddev->pers)
return -EBUSY;
if (len == 0)
@@ -2425,6 +2528,8 @@ array_state_show(mddev_t *mddev, char *page)
case 0:
if (mddev->in_sync)
st = clean;
+ else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
+ st = write_pending;
else if (mddev->safemode)
st = active_idle;
else
@@ -2455,11 +2560,9 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
break;
case clear:
/* stopping an active array */
- if (mddev->pers) {
- if (atomic_read(&mddev->active) > 1)
- return -EBUSY;
- err = do_md_stop(mddev, 0);
- }
+ if (atomic_read(&mddev->active) > 1)
+ return -EBUSY;
+ err = do_md_stop(mddev, 0);
break;
case inactive:
/* stopping an active array */
@@ -2467,7 +2570,8 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
if (atomic_read(&mddev->active) > 1)
return -EBUSY;
err = do_md_stop(mddev, 2);
- }
+ } else
+ err = 0; /* already inactive */
break;
case suspended:
break; /* not supported yet */
@@ -2495,9 +2599,15 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
restart_array(mddev);
spin_lock_irq(&mddev->write_lock);
if (atomic_read(&mddev->writes_pending) == 0) {
- mddev->in_sync = 1;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
- }
+ if (mddev->in_sync == 0) {
+ mddev->in_sync = 1;
+ if (mddev->persistent)
+ set_bit(MD_CHANGE_CLEAN,
+ &mddev->flags);
+ }
+ err = 0;
+ } else
+ err = -EBUSY;
spin_unlock_irq(&mddev->write_lock);
} else {
mddev->ro = 0;
@@ -2508,7 +2618,8 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
case active:
if (mddev->pers) {
restart_array(mddev);
- clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ if (mddev->external)
+ clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
wake_up(&mddev->sb_wait);
err = 0;
} else {
@@ -2574,7 +2685,9 @@ new_dev_store(mddev_t *mddev, const char *buf, size_t len)
if (err < 0)
goto out;
}
- } else
+ } else if (mddev->external)
+ rdev = md_import_device(dev, -2, -1);
+ else
rdev = md_import_device(dev, -1, -1);
if (IS_ERR(rdev))
@@ -2659,7 +2772,9 @@ __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
 /* Metadata version.
- * This is either 'none' for arrays with externally managed metadata,
+ * This is one of
+ * 'none' for arrays with no metadata (good luck...)
+ * 'external' for arrays with externally managed metadata,
* or N.M for internally known formats
*/
static ssize_t
@@ -2668,6 +2783,8 @@ metadata_show(mddev_t *mddev, char *page)
if (mddev->persistent)
return sprintf(page, "%d.%d\n",
mddev->major_version, mddev->minor_version);
+ else if (mddev->external)
+ return sprintf(page, "external:%s\n", mddev->metadata_type);
else
return sprintf(page, "none\n");
}
@@ -2682,6 +2799,21 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
if (cmd_match(buf, "none")) {
mddev->persistent = 0;
+ mddev->external = 0;
+ mddev->major_version = 0;
+ mddev->minor_version = 90;
+ return len;
+ }
+ if (strncmp(buf, "external:", 9) == 0) {
+ size_t namelen = len-9;
+ if (namelen >= sizeof(mddev->metadata_type))
+ namelen = sizeof(mddev->metadata_type)-1;
+ strncpy(mddev->metadata_type, buf+9, namelen);
+ mddev->metadata_type[namelen] = 0;
+ if (namelen && mddev->metadata_type[namelen-1] == '\n')
+ mddev->metadata_type[--namelen] = 0;
+ mddev->persistent = 0;
+ mddev->external = 1;
mddev->major_version = 0;
mddev->minor_version = 90;
return len;
@@ -2698,6 +2830,7 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
mddev->major_version = major;
mddev->minor_version = minor;
mddev->persistent = 1;
+ mddev->external = 0;
return len;
}
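metadata_store() above accepts an "external:NAME" string, copies at most sizeof(metadata_type)-1 bytes of the name and strips the trailing newline that a sysfs write from echo usually carries. A standalone model of that bounded, newline-stripping parse (parse_external and the fixed-size buffer are illustrative, not the kernel structures):

#include <stdio.h>
#include <string.h>

static char metadata_type[17];	/* same idea as the fixed-size field in mddev */

/* Accepts "external:NAME[\n]"; returns 0 on success, -1 otherwise. */
static int parse_external(const char *buf, size_t len)
{
	size_t namelen;

	if (len < 9 || strncmp(buf, "external:", 9) != 0)
		return -1;

	namelen = len - 9;
	if (namelen >= sizeof(metadata_type))
		namelen = sizeof(metadata_type) - 1;	/* truncate, keep room for NUL */

	strncpy(metadata_type, buf + 9, namelen);
	metadata_type[namelen] = '\0';
	if (namelen && metadata_type[namelen - 1] == '\n')
		metadata_type[--namelen] = '\0';	/* drop the newline from echo */

	return 0;
}

int main(void)
{
	const char *input = "external:imsm\n";

	if (parse_external(input, strlen(input)) == 0)
		printf("metadata type: '%s'\n", metadata_type);
	return 0;
}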
@@ -2865,6 +2998,43 @@ sync_completed_show(mddev_t *mddev, char *page)
static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed);
static ssize_t
+max_sync_show(mddev_t *mddev, char *page)
+{
+ if (mddev->resync_max == MaxSector)
+ return sprintf(page, "max\n");
+ else
+ return sprintf(page, "%llu\n",
+ (unsigned long long)mddev->resync_max);
+}
+static ssize_t
+max_sync_store(mddev_t *mddev, const char *buf, size_t len)
+{
+ if (strncmp(buf, "max", 3) == 0)
+ mddev->resync_max = MaxSector;
+ else {
+ char *ep;
+ unsigned long long max = simple_strtoull(buf, &ep, 10);
+ if (ep == buf || (*ep != 0 && *ep != '\n'))
+ return -EINVAL;
+ if (max < mddev->resync_max &&
+ test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ return -EBUSY;
+
+ /* Must be a multiple of chunk_size */
+ if (mddev->chunk_size) {
+ if (max & (sector_t)((mddev->chunk_size>>9)-1))
+ return -EINVAL;
+ }
+ mddev->resync_max = max;
+ }
+ wake_up(&mddev->recovery_wait);
+ return len;
+}
+
+static struct md_sysfs_entry md_max_sync =
+__ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store);
+
+static ssize_t
suspend_lo_show(mddev_t *mddev, char *page)
{
return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo);
@@ -2974,6 +3144,7 @@ static struct attribute *md_redundancy_attrs[] = {
&md_sync_max.attr,
&md_sync_speed.attr,
&md_sync_completed.attr,
+ &md_max_sync.attr,
&md_suspend_lo.attr,
&md_suspend_hi.attr,
&md_bitmap.attr,
@@ -3118,8 +3289,11 @@ static int do_md_run(mddev_t * mddev)
/*
* Analyze all RAID superblock(s)
*/
- if (!mddev->raid_disks)
+ if (!mddev->raid_disks) {
+ if (!mddev->persistent)
+ return -EINVAL;
analyze_sbs(mddev);
+ }
chunk_size = mddev->chunk_size;
@@ -3143,7 +3317,7 @@ static int do_md_run(mddev_t * mddev)
}
/* devices must have minimum size of one chunk */
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (test_bit(Faulty, &rdev->flags))
continue;
if (rdev->size < chunk_size / 1024) {
@@ -3170,7 +3344,7 @@ static int do_md_run(mddev_t * mddev)
* the only valid external interface is through the md
* device.
*/
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
if (test_bit(Faulty, &rdev->flags))
continue;
sync_blockdev(rdev->bdev);
@@ -3236,8 +3410,8 @@ static int do_md_run(mddev_t * mddev)
mdk_rdev_t *rdev2;
struct list_head *tmp2;
int warned = 0;
- ITERATE_RDEV(mddev, rdev, tmp) {
- ITERATE_RDEV(mddev, rdev2, tmp2) {
+ rdev_for_each(rdev, tmp, mddev) {
+ rdev_for_each(rdev2, tmp2, mddev) {
if (rdev < rdev2 &&
rdev->bdev->bd_contains ==
rdev2->bdev->bd_contains) {
@@ -3297,7 +3471,7 @@ static int do_md_run(mddev_t * mddev)
mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */
mddev->in_sync = 1;
- ITERATE_RDEV(mddev,rdev,tmp)
+ rdev_for_each(rdev, tmp, mddev)
if (rdev->raid_disk >= 0) {
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
@@ -3330,7 +3504,7 @@ static int do_md_run(mddev_t * mddev)
if (mddev->degraded && !mddev->sync_thread) {
struct list_head *rtmp;
int spares = 0;
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(In_sync, &rdev->flags) &&
!test_bit(Faulty, &rdev->flags))
@@ -3507,14 +3681,14 @@ static int do_md_stop(mddev_t * mddev, int mode)
}
mddev->bitmap_offset = 0;
- ITERATE_RDEV(mddev,rdev,tmp)
+ rdev_for_each(rdev, tmp, mddev)
if (rdev->raid_disk >= 0) {
char nm[20];
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);
}
- /* make sure all delayed_delete calls have finished */
+ /* make sure all md_delayed_delete calls have finished */
flush_scheduled_work();
export_array(mddev);
@@ -3523,7 +3697,10 @@ static int do_md_stop(mddev_t * mddev, int mode)
mddev->size = 0;
mddev->raid_disks = 0;
mddev->recovery_cp = 0;
+ mddev->resync_max = MaxSector;
mddev->reshape_position = MaxSector;
+ mddev->external = 0;
+ mddev->persistent = 0;
} else if (mddev->pers)
printk(KERN_INFO "md: %s switched to read-only mode.\n",
@@ -3546,7 +3723,7 @@ static void autorun_array(mddev_t *mddev)
printk(KERN_INFO "md: running: ");
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
char b[BDEVNAME_SIZE];
printk("<%s>", bdevname(rdev->bdev,b));
}
@@ -3589,7 +3766,7 @@ static void autorun_devices(int part)
printk(KERN_INFO "md: considering %s ...\n",
bdevname(rdev0->bdev,b));
INIT_LIST_HEAD(&candidates);
- ITERATE_RDEV_PENDING(rdev,tmp)
+ rdev_for_each_list(rdev, tmp, pending_raid_disks)
if (super_90_load(rdev, rdev0, 0) >= 0) {
printk(KERN_INFO "md: adding %s ...\n",
bdevname(rdev->bdev,b));
@@ -3632,7 +3809,8 @@ static void autorun_devices(int part)
mddev_unlock(mddev);
} else {
printk(KERN_INFO "md: created %s\n", mdname(mddev));
- ITERATE_RDEV_GENERIC(candidates,rdev,tmp) {
+ mddev->persistent = 1;
+ rdev_for_each_list(rdev, tmp, candidates) {
list_del_init(&rdev->same_set);
if (bind_rdev_to_array(rdev, mddev))
export_rdev(rdev);
@@ -3643,7 +3821,7 @@ static void autorun_devices(int part)
/* on success, candidates will be empty, on error
* it won't...
*/
- ITERATE_RDEV_GENERIC(candidates,rdev,tmp)
+ rdev_for_each_list(rdev, tmp, candidates)
export_rdev(rdev);
mddev_put(mddev);
}
@@ -3673,7 +3851,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
struct list_head *tmp;
nr=working=active=failed=spare=0;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
nr++;
if (test_bit(Faulty, &rdev->flags))
failed++;
@@ -3919,8 +4097,6 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
else
rdev->raid_disk = -1;
- rdev->flags = 0;
-
if (rdev->raid_disk < mddev->raid_disks)
if (info->state & (1<<MD_DISK_SYNC))
set_bit(In_sync, &rdev->flags);
@@ -4165,13 +4341,15 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
else
mddev->recovery_cp = 0;
mddev->persistent = ! info->not_persistent;
+ mddev->external = 0;
mddev->layout = info->layout;
mddev->chunk_size = info->chunk_size;
mddev->max_disks = MD_SB_DISKS;
- mddev->flags = 0;
+ if (mddev->persistent)
+ mddev->flags = 0;
set_bit(MD_CHANGE_DEVS, &mddev->flags);
mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
@@ -4213,7 +4391,7 @@ static int update_size(mddev_t *mddev, unsigned long size)
*/
if (mddev->sync_thread)
return -EBUSY;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
sector_t avail;
avail = rdev->size * 2;
@@ -4471,9 +4649,10 @@ static int md_ioctl(struct inode *inode, struct file *file,
*/
/* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY,
* RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */
- if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
- && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
- && cmd != GET_BITMAP_FILE) {
+ if ((!mddev->raid_disks && !mddev->external)
+ && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY
+ && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE
+ && cmd != GET_BITMAP_FILE) {
err = -ENODEV;
goto abort_unlock;
}
@@ -4757,7 +4936,7 @@ static void status_unused(struct seq_file *seq)
seq_printf(seq, "unused devices: ");
- ITERATE_RDEV_PENDING(rdev,tmp) {
+ rdev_for_each_list(rdev, tmp, pending_raid_disks) {
char b[BDEVNAME_SIZE];
i++;
seq_printf(seq, "%s ",
@@ -4953,7 +5132,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
}
size = 0;
- ITERATE_RDEV(mddev,rdev,tmp2) {
+ rdev_for_each(rdev, tmp2, mddev) {
char b[BDEVNAME_SIZE];
seq_printf(seq, " %s[%d]",
bdevname(rdev->bdev,b), rdev->desc_nr);
@@ -4982,7 +5161,10 @@ static int md_seq_show(struct seq_file *seq, void *v)
mddev->major_version,
mddev->minor_version);
}
- } else
+ } else if (mddev->external)
+ seq_printf(seq, " super external:%s",
+ mddev->metadata_type);
+ else
seq_printf(seq, " super non-persistent");
if (mddev->pers) {
@@ -5106,7 +5288,7 @@ static int is_mddev_idle(mddev_t *mddev)
long curr_events;
idle = 1;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
curr_events = disk_stat_read(disk, sectors[0]) +
disk_stat_read(disk, sectors[1]) -
@@ -5283,7 +5465,7 @@ void md_do_sync(mddev_t *mddev)
set_bit(MD_RECOVERY_INTR, &mddev->recovery);
goto skip;
}
- ITERATE_MDDEV(mddev2,tmp) {
+ for_each_mddev(mddev2, tmp) {
if (mddev2 == mddev)
continue;
if (mddev2->curr_resync &&
@@ -5333,7 +5515,7 @@ void md_do_sync(mddev_t *mddev)
/* recovery follows the physical size of devices */
max_sectors = mddev->size << 1;
j = MaxSector;
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
@@ -5381,8 +5563,16 @@ void md_do_sync(mddev_t *mddev)
sector_t sectors;
skipped = 0;
+ if (j >= mddev->resync_max) {
+ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
+ wait_event(mddev->recovery_wait,
+ mddev->resync_max > j
+ || kthread_should_stop());
+ }
+ if (kthread_should_stop())
+ goto interrupted;
sectors = mddev->pers->sync_request(mddev, j, &skipped,
- currspeed < speed_min(mddev));
+ currspeed < speed_min(mddev));
if (sectors == 0) {
set_bit(MD_RECOVERY_ERR, &mddev->recovery);
goto out;
@@ -5424,15 +5614,9 @@ void md_do_sync(mddev_t *mddev)
}
- if (kthread_should_stop()) {
- /*
- * got a signal, exit.
- */
- printk(KERN_INFO
- "md: md_do_sync() got signal ... exiting\n");
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- goto out;
- }
+ if (kthread_should_stop())
+ goto interrupted;
+
/*
* this loop exits only if either when we are slower than
@@ -5484,7 +5668,7 @@ void md_do_sync(mddev_t *mddev)
} else {
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
mddev->curr_resync = MaxSector;
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk >= 0 &&
!test_bit(Faulty, &rdev->flags) &&
!test_bit(In_sync, &rdev->flags) &&
@@ -5496,9 +5680,22 @@ void md_do_sync(mddev_t *mddev)
skip:
mddev->curr_resync = 0;
+ mddev->resync_max = MaxSector;
+ sysfs_notify(&mddev->kobj, NULL, "sync_completed");
wake_up(&resync_wait);
set_bit(MD_RECOVERY_DONE, &mddev->recovery);
md_wakeup_thread(mddev->thread);
+ return;
+
+ interrupted:
+ /*
+ * got a signal, exit.
+ */
+ printk(KERN_INFO
+ "md: md_do_sync() got signal ... exiting\n");
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+ goto out;
+
}
EXPORT_SYMBOL_GPL(md_do_sync);
@@ -5509,8 +5706,9 @@ static int remove_and_add_spares(mddev_t *mddev)
struct list_head *rtmp;
int spares = 0;
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk >= 0 &&
+ !mddev->external &&
(test_bit(Faulty, &rdev->flags) ||
! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) {
@@ -5524,7 +5722,7 @@ static int remove_and_add_spares(mddev_t *mddev)
}
if (mddev->degraded) {
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk < 0
&& !test_bit(Faulty, &rdev->flags)) {
rdev->recovery_offset = 0;
@@ -5589,7 +5787,7 @@ void md_check_recovery(mddev_t *mddev)
}
if ( ! (
- mddev->flags ||
+ (mddev->flags && !mddev->external) ||
test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
(mddev->safemode == 1) ||
@@ -5605,7 +5803,8 @@ void md_check_recovery(mddev_t *mddev)
if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
!mddev->in_sync && mddev->recovery_cp == MaxSector) {
mddev->in_sync = 1;
- set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ if (mddev->persistent)
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
}
if (mddev->safemode == 1)
mddev->safemode = 0;
@@ -5637,7 +5836,7 @@ void md_check_recovery(mddev_t *mddev)
* information must be scrapped
*/
if (!mddev->degraded)
- ITERATE_RDEV(mddev,rdev,rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
rdev->saved_raid_disk = -1;
mddev->recovery = 0;
@@ -5714,7 +5913,7 @@ static int md_notify_reboot(struct notifier_block *this,
printk(KERN_INFO "md: stopping all md devices.\n");
- ITERATE_MDDEV(mddev,tmp)
+ for_each_mddev(mddev, tmp)
if (mddev_trylock(mddev)) {
do_md_stop (mddev, 1);
mddev_unlock(mddev);
@@ -5848,7 +6047,7 @@ static __exit void md_exit(void)
unregister_reboot_notifier(&md_notifier);
unregister_sysctl_table(raid_table_header);
remove_proc_entry("mdstat", NULL);
- ITERATE_MDDEV(mddev,tmp) {
+ for_each_mddev(mddev, tmp) {
struct gendisk *disk = mddev->gendisk;
if (!disk)
continue;
diff --git a/drivers/md/mktables.c b/drivers/md/mktables.c
index adef299908cf..b61d5767aae7 100644
--- a/drivers/md/mktables.c
+++ b/drivers/md/mktables.c
@@ -1,13 +1,10 @@
-#ident "$Id: mktables.c,v 1.2 2002/12/12 22:41:27 hpa Exp $"
-/* ----------------------------------------------------------------------- *
+/* -*- linux-c -*- ------------------------------------------------------- *
*
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
+ * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Bostom MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2 or (at your
+ * option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
@@ -26,100 +23,98 @@
static uint8_t gfmul(uint8_t a, uint8_t b)
{
- uint8_t v = 0;
-
- while ( b ) {
- if ( b & 1 ) v ^= a;
- a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
- b >>= 1;
- }
- return v;
+ uint8_t v = 0;
+
+ while (b) {
+ if (b & 1)
+ v ^= a;
+ a = (a << 1) ^ (a & 0x80 ? 0x1d : 0);
+ b >>= 1;
+ }
+
+ return v;
}
static uint8_t gfpow(uint8_t a, int b)
{
- uint8_t v = 1;
-
- b %= 255;
- if ( b < 0 )
- b += 255;
-
- while ( b ) {
- if ( b & 1 ) v = gfmul(v,a);
- a = gfmul(a,a);
- b >>= 1;
- }
- return v;
+ uint8_t v = 1;
+
+ b %= 255;
+ if (b < 0)
+ b += 255;
+
+ while (b) {
+ if (b & 1)
+ v = gfmul(v, a);
+ a = gfmul(a, a);
+ b >>= 1;
+ }
+
+ return v;
}
int main(int argc, char *argv[])
{
- int i, j, k;
- uint8_t v;
- uint8_t exptbl[256], invtbl[256];
-
- printf("#include \"raid6.h\"\n");
-
- /* Compute multiplication table */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfmul[256][256] =\n"
- "{\n");
- for ( i = 0 ; i < 256 ; i++ ) {
- printf("\t{\n");
- for ( j = 0 ; j < 256 ; j += 8 ) {
- printf("\t\t");
- for ( k = 0 ; k < 8 ; k++ ) {
- printf("0x%02x, ", gfmul(i,j+k));
- }
- printf("\n");
- }
- printf("\t},\n");
- }
- printf("};\n");
-
- /* Compute power-of-2 table (exponent) */
- v = 1;
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfexp[256] =\n"
- "{\n");
- for ( i = 0 ; i < 256 ; i += 8 ) {
- printf("\t");
- for ( j = 0 ; j < 8 ; j++ ) {
- exptbl[i+j] = v;
- printf("0x%02x, ", v);
- v = gfmul(v,2);
- if ( v == 1 ) v = 0; /* For entry 255, not a real entry */
- }
- printf("\n");
- }
- printf("};\n");
-
- /* Compute inverse table x^-1 == x^254 */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfinv[256] =\n"
- "{\n");
- for ( i = 0 ; i < 256 ; i += 8 ) {
- printf("\t");
- for ( j = 0 ; j < 8 ; j++ ) {
- invtbl[i+j] = v = gfpow(i+j,254);
- printf("0x%02x, ", v);
- }
- printf("\n");
- }
- printf("};\n");
-
- /* Compute inv(2^x + 1) (exponent-xor-inverse) table */
- printf("\nconst u8 __attribute__((aligned(256)))\n"
- "raid6_gfexi[256] =\n"
- "{\n");
- for ( i = 0 ; i < 256 ; i += 8 ) {
- printf("\t");
- for ( j = 0 ; j < 8 ; j++ ) {
- printf("0x%02x, ", invtbl[exptbl[i+j]^1]);
- }
- printf("\n");
- }
- printf("};\n\n");
-
- return 0;
+ int i, j, k;
+ uint8_t v;
+ uint8_t exptbl[256], invtbl[256];
+
+ printf("#include \"raid6.h\"\n");
+
+ /* Compute multiplication table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfmul[256][256] =\n"
+ "{\n");
+ for (i = 0; i < 256; i++) {
+ printf("\t{\n");
+ for (j = 0; j < 256; j += 8) {
+ printf("\t\t");
+ for (k = 0; k < 8; k++)
+ printf("0x%02x,%c", gfmul(i, j + k),
+ (k == 7) ? '\n' : ' ');
+ }
+ printf("\t},\n");
+ }
+ printf("};\n");
+
+ /* Compute power-of-2 table (exponent) */
+ v = 1;
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfexp[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++) {
+ exptbl[i + j] = v;
+ printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+ v = gfmul(v, 2);
+ if (v == 1)
+ v = 0; /* For entry 255, not a real entry */
+ }
+ }
+ printf("};\n");
+
+ /* Compute inverse table x^-1 == x^254 */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfinv[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++) {
+ invtbl[i + j] = v = gfpow(i + j, 254);
+ printf("0x%02x,%c", v, (j == 7) ? '\n' : ' ');
+ }
+ }
+ printf("};\n");
+
+ /* Compute inv(2^x + 1) (exponent-xor-inverse) table */
+ printf("\nconst u8 __attribute__((aligned(256)))\n"
+ "raid6_gfexi[256] =\n" "{\n");
+ for (i = 0; i < 256; i += 8) {
+ printf("\t");
+ for (j = 0; j < 8; j++)
+ printf("0x%02x,%c", invtbl[exptbl[i + j] ^ 1],
+ (j == 7) ? '\n' : ' ');
+ }
+ printf("};\n");
+
+ return 0;
}
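
A brief note on the arithmetic the generator above relies on: gfmul() multiplies two elements of GF(2^8) reduced by the RAID-6 field polynomial x^8 + x^4 + x^3 + x^2 + 1 (hence the 0x1d XOR whenever the high bit is shifted out), and the raid6_gfinv table uses the fact that the non-zero elements form a multiplicative group of order 255, so x^-1 = x^254. The following sketch is a hypothetical userspace check, not part of the kernel build; it reuses gfmul() and gfpow() exactly as written above and only adds a main() that verifies the inverse property:

#include <stdint.h>
#include <stdio.h>

static uint8_t gfmul(uint8_t a, uint8_t b)
{
	uint8_t v = 0;

	while (b) {
		if (b & 1)
			v ^= a;
		a = (a << 1) ^ (a & 0x80 ? 0x1d : 0); /* reduce mod x^8+x^4+x^3+x^2+1 */
		b >>= 1;
	}

	return v;
}

static uint8_t gfpow(uint8_t a, int b)
{
	uint8_t v = 1;

	b %= 255;
	if (b < 0)
		b += 255;

	while (b) {
		if (b & 1)
			v = gfmul(v, a);
		a = gfmul(a, a);
		b >>= 1;
	}

	return v;
}

int main(void)
{
	int a;

	/* a * a^254 == a^255 == 1 must hold for every non-zero element */
	for (a = 1; a < 256; a++) {
		if (gfmul(a, gfpow(a, 254)) != 1) {
			printf("inverse check failed for 0x%02x\n", a);
			return 1;
		}
	}

	printf("all 255 non-zero elements of GF(2^8) verified\n");
	return 0;
}
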
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index eb631ebed686..3f299d835a2b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -436,7 +436,7 @@ static int multipath_run (mddev_t *mddev)
}
conf->working_disks = 0;
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
disk_idx = rdev->raid_disk;
if (disk_idx < 0 ||
disk_idx >= mddev->raid_disks)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f8e591708d1f..818b48284096 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -72,11 +72,11 @@ static int create_strip_zones (mddev_t *mddev)
*/
conf->nr_strip_zones = 0;
- ITERATE_RDEV(mddev,rdev1,tmp1) {
+ rdev_for_each(rdev1, tmp1, mddev) {
printk("raid0: looking at %s\n",
bdevname(rdev1->bdev,b));
c = 0;
- ITERATE_RDEV(mddev,rdev2,tmp2) {
+ rdev_for_each(rdev2, tmp2, mddev) {
printk("raid0: comparing %s(%llu)",
bdevname(rdev1->bdev,b),
(unsigned long long)rdev1->size);
@@ -124,7 +124,7 @@ static int create_strip_zones (mddev_t *mddev)
cnt = 0;
smallest = NULL;
zone->dev = conf->devlist;
- ITERATE_RDEV(mddev, rdev1, tmp1) {
+ rdev_for_each(rdev1, tmp1, mddev) {
int j = rdev1->raid_disk;
if (j < 0 || j >= mddev->raid_disks) {
@@ -293,7 +293,7 @@ static int raid0_run (mddev_t *mddev)
/* calculate array device size */
mddev->array_size = 0;
- ITERATE_RDEV(mddev,rdev,tmp)
+ rdev_for_each(rdev, tmp, mddev)
mddev->array_size += rdev->size;
printk("raid0 : md_size is %llu blocks.\n",
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4a69c416e045..5c7fef091cec 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1684,6 +1684,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (!go_faster && conf->nr_waiting)
msleep_interruptible(1000);
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr);
raise_barrier(conf);
conf->next_resync = sector_nr;
@@ -1766,6 +1767,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
return rv;
}
+ if (max_sector > mddev->resync_max)
+ max_sector = mddev->resync_max; /* Don't do IO beyond here */
nr_sectors = 0;
sync_blocks = 0;
do {
@@ -1884,7 +1887,7 @@ static int run(mddev_t *mddev)
if (!conf->r1bio_pool)
goto out_no_mem;
- ITERATE_RDEV(mddev, rdev, tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5cdcc9386200..017f58113c33 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1657,6 +1657,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
return (max_sector - sector_nr) + sectors_skipped;
}
+ if (max_sector > mddev->resync_max)
+ max_sector = mddev->resync_max; /* Don't do IO beyond here */
+
/* make sure whole request will fit in a chunk - if chunks
* are meaningful
*/
@@ -1670,6 +1673,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
if (!go_faster && conf->nr_waiting)
msleep_interruptible(1000);
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+
/* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that
* have bi_end_io, bi_sector, bi_bdev set,
@@ -2021,7 +2026,7 @@ static int run(mddev_t *mddev)
goto out_free_conf;
}
- ITERATE_RDEV(mddev, rdev, tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e8c8157b02fc..2d6f1a51359c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3159,7 +3159,8 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
atomic_inc(&conf->preread_active_stripes);
list_add_tail(&sh->lru, &conf->handle_list);
}
- }
+ } else
+ blk_plug_device(conf->mddev->queue);
}
static void activate_bit_delay(raid5_conf_t *conf)
@@ -3549,7 +3550,8 @@ static int make_request(struct request_queue *q, struct bio * bi)
goto retry;
}
finish_wait(&conf->wait_for_overlap, &w);
- handle_stripe(sh, NULL);
+ set_bit(STRIPE_HANDLE, &sh->state);
+ clear_bit(STRIPE_DELAYED, &sh->state);
release_stripe(sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
@@ -3698,6 +3700,25 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
release_stripe(sh);
first_sector += STRIPE_SECTORS;
}
+ /* If this takes us to the resync_max point where we have to pause,
+ * then we need to write out the superblock.
+ */
+ sector_nr += conf->chunk_size>>9;
+ if (sector_nr >= mddev->resync_max) {
+ /* Cannot proceed until we've updated the superblock... */
+ wait_event(conf->wait_for_overlap,
+ atomic_read(&conf->reshape_stripes) == 0);
+ mddev->reshape_position = conf->expand_progress;
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ md_wakeup_thread(mddev->thread);
+ wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_DEVS, &mddev->flags)
+ || kthread_should_stop());
+ spin_lock_irq(&conf->device_lock);
+ conf->expand_lo = mddev->reshape_position;
+ spin_unlock_irq(&conf->device_lock);
+ wake_up(&conf->wait_for_overlap);
+ }
return conf->chunk_size>>9;
}
@@ -3734,6 +3755,12 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
return reshape_request(mddev, sector_nr, skipped);
+ /* No need to check resync_max as we never do more than one
+ * stripe, and as resync_max will always be on a chunk boundary,
+ * if the check in md_do_sync didn't fire, there is no chance
+ * of overstepping resync_max here
+ */
+
/* if there is too many failed drives and we are trying
* to resync, then assert that we are finished, because there is
* nothing we can do.
@@ -3753,6 +3780,9 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
}
+
+ bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+
pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks);
sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1);
if (sh == NULL) {
@@ -3864,7 +3894,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
* During the scan, completed stripes are saved for us by the interrupt
* handler, so that they will not have to wait for our next wakeup.
*/
-static void raid5d (mddev_t *mddev)
+static void raid5d(mddev_t *mddev)
{
struct stripe_head *sh;
raid5_conf_t *conf = mddev_to_conf(mddev);
@@ -3889,12 +3919,6 @@ static void raid5d (mddev_t *mddev)
activate_bit_delay(conf);
}
- if (list_empty(&conf->handle_list) &&
- atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
- !blk_queue_plugged(mddev->queue) &&
- !list_empty(&conf->delayed_list))
- raid5_activate_delayed(conf);
-
while ((bio = remove_bio_from_retry(conf))) {
int ok;
spin_unlock_irq(&conf->device_lock);
@@ -4108,7 +4132,7 @@ static int run(mddev_t *mddev)
pr_debug("raid5: run(%s) called.\n", mdname(mddev));
- ITERATE_RDEV(mddev,rdev,tmp) {
+ rdev_for_each(rdev, tmp, mddev) {
raid_disk = rdev->raid_disk;
if (raid_disk >= conf->raid_disks
|| raid_disk < 0)
@@ -4521,7 +4545,7 @@ static int raid5_start_reshape(mddev_t *mddev)
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
- ITERATE_RDEV(mddev, rdev, rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk < 0 &&
!test_bit(Faulty, &rdev->flags))
spares++;
@@ -4543,7 +4567,7 @@ static int raid5_start_reshape(mddev_t *mddev)
/* Add some new drives, as many as will fit.
* We know there are enough to make the newly sized array work.
*/
- ITERATE_RDEV(mddev, rdev, rtmp)
+ rdev_for_each(rdev, rtmp, mddev)
if (rdev->raid_disk < 0 &&
!test_bit(Faulty, &rdev->flags)) {
if (raid5_add_disk(mddev, rdev)) {
diff --git a/drivers/md/raid6test/test.c b/drivers/md/raid6test/test.c
index 0d5cd57accd7..559cc41b2585 100644
--- a/drivers/md/raid6test/test.c
+++ b/drivers/md/raid6test/test.c
@@ -1,12 +1,10 @@
/* -*- linux-c -*- ------------------------------------------------------- *
*
- * Copyright 2002 H. Peter Anvin - All Rights Reserved
+ * Copyright 2002-2007 H. Peter Anvin - All Rights Reserved
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
- * Bostom MA 02111-1307, USA; either version 2 of the License, or
- * (at your option) any later version; incorporated herein by reference.
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2 or (at your
+ * option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
@@ -30,67 +28,87 @@ char *dataptrs[NDISKS];
char data[NDISKS][PAGE_SIZE];
char recovi[PAGE_SIZE], recovj[PAGE_SIZE];
-void makedata(void)
+static void makedata(void)
{
int i, j;
- for ( i = 0 ; i < NDISKS ; i++ ) {
- for ( j = 0 ; j < PAGE_SIZE ; j++ ) {
+ for (i = 0; i < NDISKS; i++) {
+ for (j = 0; j < PAGE_SIZE; j++)
data[i][j] = rand();
- }
+
dataptrs[i] = data[i];
}
}
+static char disk_type(int d)
+{
+ switch (d) {
+ case NDISKS-2:
+ return 'P';
+ case NDISKS-1:
+ return 'Q';
+ default:
+ return 'D';
+ }
+}
+
+static int test_disks(int i, int j)
+{
+ int erra, errb;
+
+ memset(recovi, 0xf0, PAGE_SIZE);
+ memset(recovj, 0xba, PAGE_SIZE);
+
+ dataptrs[i] = recovi;
+ dataptrs[j] = recovj;
+
+ raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs);
+
+ erra = memcmp(data[i], recovi, PAGE_SIZE);
+ errb = memcmp(data[j], recovj, PAGE_SIZE);
+
+ if (i < NDISKS-2 && j == NDISKS-1) {
+ /* We don't implement the DQ failure scenario, since it's
+ equivalent to a RAID-5 failure (XOR, then recompute Q) */
+ erra = errb = 0;
+ } else {
+ printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
+ raid6_call.name,
+ i, disk_type(i),
+ j, disk_type(j),
+ (!erra && !errb) ? "OK" :
+ !erra ? "ERRB" :
+ !errb ? "ERRA" : "ERRAB");
+ }
+
+ dataptrs[i] = data[i];
+ dataptrs[j] = data[j];
+
+ return erra || errb;
+}
+
int main(int argc, char *argv[])
{
- const struct raid6_calls * const * algo;
+ const struct raid6_calls *const *algo;
int i, j;
- int erra, errb;
+ int err = 0;
makedata();
- for ( algo = raid6_algos ; *algo ; algo++ ) {
- if ( !(*algo)->valid || (*algo)->valid() ) {
+ for (algo = raid6_algos; *algo; algo++) {
+ if (!(*algo)->valid || (*algo)->valid()) {
raid6_call = **algo;
/* Nuke syndromes */
memset(data[NDISKS-2], 0xee, 2*PAGE_SIZE);
/* Generate assumed good syndrome */
- raid6_call.gen_syndrome(NDISKS, PAGE_SIZE, (void **)&dataptrs);
-
- for ( i = 0 ; i < NDISKS-1 ; i++ ) {
- for ( j = i+1 ; j < NDISKS ; j++ ) {
- memset(recovi, 0xf0, PAGE_SIZE);
- memset(recovj, 0xba, PAGE_SIZE);
-
- dataptrs[i] = recovi;
- dataptrs[j] = recovj;
-
- raid6_dual_recov(NDISKS, PAGE_SIZE, i, j, (void **)&dataptrs);
-
- erra = memcmp(data[i], recovi, PAGE_SIZE);
- errb = memcmp(data[j], recovj, PAGE_SIZE);
-
- if ( i < NDISKS-2 && j == NDISKS-1 ) {
- /* We don't implement the DQ failure scenario, since it's
- equivalent to a RAID-5 failure (XOR, then recompute Q) */
- } else {
- printf("algo=%-8s faila=%3d(%c) failb=%3d(%c) %s\n",
- raid6_call.name,
- i, (i==NDISKS-2)?'P':'D',
- j, (j==NDISKS-1)?'Q':(j==NDISKS-2)?'P':'D',
- (!erra && !errb) ? "OK" :
- !erra ? "ERRB" :
- !errb ? "ERRA" :
- "ERRAB");
- }
-
- dataptrs[i] = data[i];
- dataptrs[j] = data[j];
- }
- }
+ raid6_call.gen_syndrome(NDISKS, PAGE_SIZE,
+ (void **)&dataptrs);
+
+ for (i = 0; i < NDISKS-1; i++)
+ for (j = i+1; j < NDISKS; j++)
+ err += test_disks(i, j);
}
printf("\n");
}
@@ -99,5 +117,8 @@ int main(int argc, char *argv[])
/* Pick the best algorithm test */
raid6_select_algo();
- return 0;
+ if (err)
+ printf("\n*** ERRORS FOUND ***\n");
+
+ return err;
}
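
For readers unfamiliar with the naming in test_disks(): disk NDISKS-2 holds the P (XOR parity) syndrome and disk NDISKS-1 holds the Q (Reed-Solomon) syndrome. As a reference only (these are the standard RAID-6 definitions, not taken from this file), with data blocks D_0 ... D_{n-1} and generator g = {02} in GF(2^8):

    P = \bigoplus_{i=0}^{n-1} D_i, \qquad
    Q = \bigoplus_{i=0}^{n-1} g^{i} \cdot D_i

This is also why the combined D+Q failure case is skipped in test_disks(): the data block can be rebuilt from P and the surviving data by plain XOR, after which Q is simply recomputed, so that pair exercises nothing beyond the RAID-5 path.
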
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 28ddd146c1c5..850b8c6f4577 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -22,7 +22,6 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
-obj-$(CONFIG_SOUND_TVMIXER) += tvmixer.o
obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
obj-$(CONFIG_VIDEO_SAA5246A) += saa5246a.o
diff --git a/drivers/media/video/tvmixer.c b/drivers/media/video/tvmixer.c
deleted file mode 100644
index 9fa5b702e073..000000000000
--- a/drivers/media/video/tvmixer.c
+++ /dev/null
@@ -1,336 +0,0 @@
-/*
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/videodev.h>
-#include <linux/init.h>
-#include <linux/kdev_t.h>
-#include <linux/sound.h>
-#include <linux/soundcard.h>
-
-#include <asm/semaphore.h>
-#include <asm/uaccess.h>
-
-
-#define DEV_MAX 4
-
-static int devnr = -1;
-module_param(devnr, int, 0644);
-
-MODULE_AUTHOR("Gerd Knorr");
-MODULE_LICENSE("GPL");
-
-/* ----------------------------------------------------------------------- */
-
-struct TVMIXER {
- struct i2c_client *dev;
- int minor;
- int count;
-};
-
-static struct TVMIXER devices[DEV_MAX];
-
-static int tvmixer_adapters(struct i2c_adapter *adap);
-static int tvmixer_clients(struct i2c_client *client);
-
-/* ----------------------------------------------------------------------- */
-
-static int mix_to_v4l(int i)
-{
- int r;
-
- r = ((i & 0xff) * 65536 + 50) / 100;
- if (r > 65535) r = 65535;
- if (r < 0) r = 0;
- return r;
-}
-
-static int v4l_to_mix(int i)
-{
- int r;
-
- r = (i * 100 + 32768) / 65536;
- if (r > 100) r = 100;
- if (r < 0) r = 0;
- return r | (r << 8);
-}
-
-static int v4l_to_mix2(int l, int r)
-{
- r = (r * 100 + 32768) / 65536;
- if (r > 100) r = 100;
- if (r < 0) r = 0;
- l = (l * 100 + 32768) / 65536;
- if (l > 100) l = 100;
- if (l < 0) l = 0;
- return (r << 8) | l;
-}
-
-static int tvmixer_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct video_audio va;
- int left,right,ret,val = 0;
- struct TVMIXER *mix = file->private_data;
- struct i2c_client *client = mix->dev;
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
-
- if (NULL == client)
- return -ENODEV;
-
- if (cmd == SOUND_MIXER_INFO) {
- mixer_info info;
- strlcpy(info.id, "tv card", sizeof(info.id));
- strlcpy(info.name, client->name, sizeof(info.name));
- info.modify_counter = 42 /* FIXME */;
- if (copy_to_user(argp, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
- if (cmd == SOUND_OLD_MIXER_INFO) {
- _old_mixer_info info;
- strlcpy(info.id, "tv card", sizeof(info.id));
- strlcpy(info.name, client->name, sizeof(info.name));
- if (copy_to_user(argp, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
- if (cmd == OSS_GETVERSION)
- return put_user(SOUND_VERSION, p);
-
- if (_SIOC_DIR(cmd) & _SIOC_WRITE)
- if (get_user(val, p))
- return -EFAULT;
-
- /* read state */
- memset(&va,0,sizeof(va));
- client->driver->command(client,VIDIOCGAUDIO,&va);
-
- switch (cmd) {
- case MIXER_READ(SOUND_MIXER_RECMASK):
- case MIXER_READ(SOUND_MIXER_CAPS):
- case MIXER_READ(SOUND_MIXER_RECSRC):
- case MIXER_WRITE(SOUND_MIXER_RECSRC):
- ret = 0;
- break;
-
- case MIXER_READ(SOUND_MIXER_STEREODEVS):
- ret = SOUND_MASK_VOLUME;
- break;
- case MIXER_READ(SOUND_MIXER_DEVMASK):
- ret = SOUND_MASK_VOLUME;
- if (va.flags & VIDEO_AUDIO_BASS)
- ret |= SOUND_MASK_BASS;
- if (va.flags & VIDEO_AUDIO_TREBLE)
- ret |= SOUND_MASK_TREBLE;
- break;
-
- case MIXER_WRITE(SOUND_MIXER_VOLUME):
- left = mix_to_v4l(val);
- right = mix_to_v4l(val >> 8);
- va.volume = max(left,right);
- va.balance = (32768*min(left,right)) / (va.volume ? va.volume : 1);
- va.balance = (left<right) ? (65535-va.balance) : va.balance;
- if (va.volume)
- va.flags &= ~VIDEO_AUDIO_MUTE;
- client->driver->command(client,VIDIOCSAUDIO,&va);
- client->driver->command(client,VIDIOCGAUDIO,&va);
- /* fall throuth */
- case MIXER_READ(SOUND_MIXER_VOLUME):
- left = (min(65536 - va.balance,32768) *
- va.volume) / 32768;
- right = (min(va.balance,(u16)32768) *
- va.volume) / 32768;
- ret = v4l_to_mix2(left,right);
- break;
-
- case MIXER_WRITE(SOUND_MIXER_BASS):
- va.bass = mix_to_v4l(val);
- client->driver->command(client,VIDIOCSAUDIO,&va);
- client->driver->command(client,VIDIOCGAUDIO,&va);
- /* fall throuth */
- case MIXER_READ(SOUND_MIXER_BASS):
- ret = v4l_to_mix(va.bass);
- break;
-
- case MIXER_WRITE(SOUND_MIXER_TREBLE):
- va.treble = mix_to_v4l(val);
- client->driver->command(client,VIDIOCSAUDIO,&va);
- client->driver->command(client,VIDIOCGAUDIO,&va);
- /* fall throuth */
- case MIXER_READ(SOUND_MIXER_TREBLE):
- ret = v4l_to_mix(va.treble);
- break;
-
- default:
- return -EINVAL;
- }
- if (put_user(ret, p))
- return -EFAULT;
- return 0;
-}
-
-static int tvmixer_open(struct inode *inode, struct file *file)
-{
- int i, minor = iminor(inode);
- struct TVMIXER *mix = NULL;
- struct i2c_client *client = NULL;
-
- for (i = 0; i < DEV_MAX; i++) {
- if (devices[i].minor == minor) {
- mix = devices+i;
- client = mix->dev;
- break;
- }
- }
-
- if (NULL == client)
- return -ENODEV;
-
- /* lock bttv in memory while the mixer is in use */
- file->private_data = mix;
- if (client->adapter->owner)
- try_module_get(client->adapter->owner);
- return 0;
-}
-
-static int tvmixer_release(struct inode *inode, struct file *file)
-{
- struct TVMIXER *mix = file->private_data;
- struct i2c_client *client;
-
- client = mix->dev;
- if (NULL == client) {
- return -ENODEV;
- }
-
- module_put(client->adapter->owner);
- return 0;
-}
-
-static struct i2c_driver driver = {
- .driver = {
- .name = "tvmixer",
- },
- .id = I2C_DRIVERID_TVMIXER,
- .detach_adapter = tvmixer_adapters,
- .attach_adapter = tvmixer_adapters,
- .detach_client = tvmixer_clients,
-};
-
-static const struct file_operations tvmixer_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .ioctl = tvmixer_ioctl,
- .open = tvmixer_open,
- .release = tvmixer_release,
-};
-
-/* ----------------------------------------------------------------------- */
-
-static int tvmixer_adapters(struct i2c_adapter *adap)
-{
- struct i2c_client *client;
-
- list_for_each_entry(client, &adap->clients, list)
- tvmixer_clients(client);
- return 0;
-}
-
-static int tvmixer_clients(struct i2c_client *client)
-{
- struct video_audio va;
- int i,minor;
-
- if (!(client->adapter->class & I2C_CLASS_TV_ANALOG))
- return -1;
-
- /* unregister ?? */
- for (i = 0; i < DEV_MAX; i++) {
- if (devices[i].dev == client) {
- /* unregister */
- unregister_sound_mixer(devices[i].minor);
- devices[i].dev = NULL;
- devices[i].minor = -1;
- printk("tvmixer: %s unregistered (#1)\n",
- client->name);
- return 0;
- }
- }
-
- /* look for a free slot */
- for (i = 0; i < DEV_MAX; i++)
- if (NULL == devices[i].dev)
- break;
- if (i == DEV_MAX) {
- printk(KERN_WARNING "tvmixer: DEV_MAX too small\n");
- return -1;
- }
-
- /* audio chip with mixer ??? */
- if (NULL == client->driver->command)
- return -1;
- memset(&va,0,sizeof(va));
- if (0 != client->driver->command(client,VIDIOCGAUDIO,&va))
- return -1;
- if (0 == (va.flags & VIDEO_AUDIO_VOLUME))
- return -1;
-
- /* everything is fine, register */
- if ((minor = register_sound_mixer(&tvmixer_fops,devnr)) < 0) {
- printk(KERN_ERR "tvmixer: cannot allocate mixer device\n");
- return -1;
- }
-
- devices[i].minor = minor;
- devices[i].count = 0;
- devices[i].dev = client;
- printk("tvmixer: %s (%s) registered with minor %d\n",
- client->name,client->adapter->name,minor);
-
- return 0;
-}
-
-/* ----------------------------------------------------------------------- */
-
-static int __init tvmixer_init_module(void)
-{
- int i;
-
- for (i = 0; i < DEV_MAX; i++)
- devices[i].minor = -1;
-
- return i2c_add_driver(&driver);
-}
-
-static void __exit tvmixer_cleanup_module(void)
-{
- int i;
-
- i2c_del_driver(&driver);
- for (i = 0; i < DEV_MAX; i++) {
- if (devices[i].minor != -1) {
- unregister_sound_mixer(devices[i].minor);
- printk("tvmixer: %s unregistered (#2)\n",
- devices[i].dev->name);
- }
- }
-}
-
-module_init(tvmixer_init_module);
-module_exit(tvmixer_cleanup_module);
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
index 3e93f8058770..719b17ce83f8 100644
--- a/drivers/media/video/usbvideo/konicawc.c
+++ b/drivers/media/video/usbvideo/konicawc.c
@@ -241,8 +241,6 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
input_dev->evbit[0] = BIT_MASK(EV_KEY);
input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);
- input_dev->private = cam;
-
error = input_register_device(cam->input);
if (error) {
warn("Failed to register camera's input device, err: %d\n",
diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
index 5e7b79501370..a2acba0bcc47 100644
--- a/drivers/media/video/usbvideo/quickcam_messenger.c
+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
@@ -105,8 +105,6 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
input_dev->evbit[0] = BIT_MASK(EV_KEY);
input_dev->keybit[BIT_WORD(BTN_0)] = BIT_MASK(BTN_0);
- input_dev->private = cam;
-
error = input_register_device(cam->input);
if (error) {
warn("Failed to register camera's input device, err: %d\n",
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 25716193a534..0c886c882385 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -15,6 +15,13 @@ config MFD_SM501
interface. The device may be connected by PCI or local bus with
varying functions enabled.
+config MFD_ASIC3
+ bool "Support for Compaq ASIC3"
+ depends on GENERIC_HARDIRQS && ARM
+ ---help---
+ This driver supports the ASIC3 multifunction chip found on many
+ PDAs (mainly iPAQ and HTC based ones).

+
endmenu
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 51432091b323..521cd5cb68af 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_MFD_SM501) += sm501.o
+obj-$(CONFIG_MFD_ASIC3) += asic3.o
obj-$(CONFIG_MCP) += mcp-core.o
obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
new file mode 100644
index 000000000000..63fb1ff3ad10
--- /dev/null
+++ b/drivers/mfd/asic3.c
@@ -0,0 +1,588 @@
+/*
+ * drivers/mfd/asic3.c
+ *
+ * Compaq ASIC3 support.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright 2001 Compaq Computer Corporation.
+ * Copyright 2004-2005 Phil Blundell
+ * Copyright 2007 OpenedHand Ltd.
+ *
+ * Authors: Phil Blundell <pb@handhelds.org>,
+ * Samuel Ortiz <sameo@openedhand.com>
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/asic3.h>
+
+static inline void asic3_write_register(struct asic3 *asic,
+ unsigned int reg, u32 value)
+{
+ iowrite16(value, (unsigned long)asic->mapping +
+ (reg >> asic->bus_shift));
+}
+
+static inline u32 asic3_read_register(struct asic3 *asic,
+ unsigned int reg)
+{
+ return ioread16((unsigned long)asic->mapping +
+ (reg >> asic->bus_shift));
+}
+
+/* IRQs */
+#define MAX_ASIC_ISR_LOOPS 20
+#define ASIC3_GPIO_Base_INCR \
+ (ASIC3_GPIO_B_Base - ASIC3_GPIO_A_Base)
+
+static void asic3_irq_flip_edge(struct asic3 *asic,
+ u32 base, int bit)
+{
+ u16 edge;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ edge = asic3_read_register(asic,
+ base + ASIC3_GPIO_EdgeTrigger);
+ edge ^= bit;
+ asic3_write_register(asic,
+ base + ASIC3_GPIO_EdgeTrigger, edge);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
+{
+ int iter, i;
+ unsigned long flags;
+ struct asic3 *asic;
+
+ desc->chip->ack(irq);
+
+ asic = desc->handler_data;
+
+ for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
+ u32 status;
+ int bank;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ status = asic3_read_register(asic,
+ ASIC3_OFFSET(INTR, PIntStat));
+ spin_unlock_irqrestore(&asic->lock, flags);
+
+ /* Check all ten register bits */
+ if ((status & 0x3ff) == 0)
+ break;
+
+ /* Handle GPIO IRQs */
+ for (bank = 0; bank < ASIC3_NUM_GPIO_BANKS; bank++) {
+ if (status & (1 << bank)) {
+ unsigned long base, istat;
+
+ base = ASIC3_GPIO_A_Base
+ + bank * ASIC3_GPIO_Base_INCR;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ istat = asic3_read_register(asic,
+ base +
+ ASIC3_GPIO_IntStatus);
+ /* Clearing IntStatus */
+ asic3_write_register(asic,
+ base +
+ ASIC3_GPIO_IntStatus, 0);
+ spin_unlock_irqrestore(&asic->lock, flags);
+
+ for (i = 0; i < ASIC3_GPIOS_PER_BANK; i++) {
+ int bit = (1 << i);
+ unsigned int irqnr;
+
+ if (!(istat & bit))
+ continue;
+
+ irqnr = asic->irq_base +
+ (ASIC3_GPIOS_PER_BANK * bank)
+ + i;
+ desc = irq_desc + irqnr;
+ desc->handle_irq(irqnr, desc);
+ if (asic->irq_bothedge[bank] & bit)
+ asic3_irq_flip_edge(asic, base,
+ bit);
+ }
+ }
+ }
+
+ /* Handle remaining IRQs in the status register */
+ for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) {
+ /* They start at bit 4 and go up */
+ if (status & (1 << (i - ASIC3_NUM_GPIOS + 4))) {
+ desc = irq_desc + asic->irq_base + i;
+ desc->handle_irq(asic->irq_base + i,
+ desc);
+ }
+ }
+ }
+
+ if (iter >= MAX_ASIC_ISR_LOOPS)
+ printk(KERN_ERR "%s: interrupt processing overrun\n",
+ __FUNCTION__);
+}
+
+static inline int asic3_irq_to_bank(struct asic3 *asic, int irq)
+{
+ int n;
+
+ n = (irq - asic->irq_base) >> 4;
+
+ return (n * (ASIC3_GPIO_B_Base - ASIC3_GPIO_A_Base));
+}
+
+static inline int asic3_irq_to_index(struct asic3 *asic, int irq)
+{
+ return (irq - asic->irq_base) & 0xf;
+}
+
+static void asic3_mask_gpio_irq(unsigned int irq)
+{
+ struct asic3 *asic = get_irq_chip_data(irq);
+ u32 val, bank, index;
+ unsigned long flags;
+
+ bank = asic3_irq_to_bank(asic, irq);
+ index = asic3_irq_to_index(asic, irq);
+
+ spin_lock_irqsave(&asic->lock, flags);
+ val = asic3_read_register(asic, bank + ASIC3_GPIO_Mask);
+ val |= 1 << index;
+ asic3_write_register(asic, bank + ASIC3_GPIO_Mask, val);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+static void asic3_mask_irq(unsigned int irq)
+{
+ struct asic3 *asic = get_irq_chip_data(irq);
+ int regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ regval = asic3_read_register(asic,
+ ASIC3_INTR_Base +
+ ASIC3_INTR_IntMask);
+
+ regval &= ~(ASIC3_INTMASK_MASK0 <<
+ (irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
+
+ asic3_write_register(asic,
+ ASIC3_INTR_Base +
+ ASIC3_INTR_IntMask,
+ regval);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+static void asic3_unmask_gpio_irq(unsigned int irq)
+{
+ struct asic3 *asic = get_irq_chip_data(irq);
+ u32 val, bank, index;
+ unsigned long flags;
+
+ bank = asic3_irq_to_bank(asic, irq);
+ index = asic3_irq_to_index(asic, irq);
+
+ spin_lock_irqsave(&asic->lock, flags);
+ val = asic3_read_register(asic, bank + ASIC3_GPIO_Mask);
+ val &= ~(1 << index);
+ asic3_write_register(asic, bank + ASIC3_GPIO_Mask, val);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+static void asic3_unmask_irq(unsigned int irq)
+{
+ struct asic3 *asic = get_irq_chip_data(irq);
+ int regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ regval = asic3_read_register(asic,
+ ASIC3_INTR_Base +
+ ASIC3_INTR_IntMask);
+
+ regval |= (ASIC3_INTMASK_MASK0 <<
+ (irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
+
+ asic3_write_register(asic,
+ ASIC3_INTR_Base +
+ ASIC3_INTR_IntMask,
+ regval);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+static int asic3_gpio_irq_type(unsigned int irq, unsigned int type)
+{
+ struct asic3 *asic = get_irq_chip_data(irq);
+ u32 bank, index;
+ u16 trigger, level, edge, bit;
+ unsigned long flags;
+
+ bank = asic3_irq_to_bank(asic, irq);
+ index = asic3_irq_to_index(asic, irq);
+ bit = 1<<index;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ level = asic3_read_register(asic,
+ bank + ASIC3_GPIO_LevelTrigger);
+ edge = asic3_read_register(asic,
+ bank + ASIC3_GPIO_EdgeTrigger);
+ trigger = asic3_read_register(asic,
+ bank + ASIC3_GPIO_TriggerType);
+ asic->irq_bothedge[(irq - asic->irq_base) >> 4] &= ~bit;
+
+ if (type == IRQT_RISING) {
+ trigger |= bit;
+ edge |= bit;
+ } else if (type == IRQT_FALLING) {
+ trigger |= bit;
+ edge &= ~bit;
+ } else if (type == IRQT_BOTHEDGE) {
+ trigger |= bit;
+ if (asic3_gpio_get_value(asic, irq - asic->irq_base))
+ edge &= ~bit;
+ else
+ edge |= bit;
+ asic->irq_bothedge[(irq - asic->irq_base) >> 4] |= bit;
+ } else if (type == IRQT_LOW) {
+ trigger &= ~bit;
+ level &= ~bit;
+ } else if (type == IRQT_HIGH) {
+ trigger &= ~bit;
+ level |= bit;
+ } else {
+ /*
+ * if type == IRQT_NOEDGE, we should mask interrupts, but
+ * be careful to not unmask them if mask was also called.
+ * Probably need internal state for mask.
+ */
+ printk(KERN_NOTICE "asic3: irq type not changed.\n");
+ }
+ asic3_write_register(asic, bank + ASIC3_GPIO_LevelTrigger,
+ level);
+ asic3_write_register(asic, bank + ASIC3_GPIO_EdgeTrigger,
+ edge);
+ asic3_write_register(asic, bank + ASIC3_GPIO_TriggerType,
+ trigger);
+ spin_unlock_irqrestore(&asic->lock, flags);
+ return 0;
+}
+
+static struct irq_chip asic3_gpio_irq_chip = {
+ .name = "ASIC3-GPIO",
+ .ack = asic3_mask_gpio_irq,
+ .mask = asic3_mask_gpio_irq,
+ .unmask = asic3_unmask_gpio_irq,
+ .set_type = asic3_gpio_irq_type,
+};
+
+static struct irq_chip asic3_irq_chip = {
+ .name = "ASIC3",
+ .ack = asic3_mask_irq,
+ .mask = asic3_mask_irq,
+ .unmask = asic3_unmask_irq,
+};
+
+static int asic3_irq_probe(struct platform_device *pdev)
+{
+ struct asic3 *asic = platform_get_drvdata(pdev);
+ unsigned long clksel = 0;
+ unsigned int irq, irq_base;
+
+ asic->irq_nr = platform_get_irq(pdev, 0);
+ if (asic->irq_nr < 0)
+ return asic->irq_nr;
+
+ /* turn on clock to IRQ controller */
+ clksel |= CLOCK_SEL_CX;
+ asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL),
+ clksel);
+
+ irq_base = asic->irq_base;
+
+ for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
+ if (irq < asic->irq_base + ASIC3_NUM_GPIOS)
+ set_irq_chip(irq, &asic3_gpio_irq_chip);
+ else
+ set_irq_chip(irq, &asic3_irq_chip);
+
+ set_irq_chip_data(irq, asic);
+ set_irq_handler(irq, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ }
+
+ asic3_write_register(asic, ASIC3_OFFSET(INTR, IntMask),
+ ASIC3_INTMASK_GINTMASK);
+
+ set_irq_chained_handler(asic->irq_nr, asic3_irq_demux);
+ set_irq_type(asic->irq_nr, IRQT_RISING);
+ set_irq_data(asic->irq_nr, asic);
+
+ return 0;
+}
+
+static void asic3_irq_remove(struct platform_device *pdev)
+{
+ struct asic3 *asic = platform_get_drvdata(pdev);
+ unsigned int irq, irq_base;
+
+ irq_base = asic->irq_base;
+
+ for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
+ set_irq_flags(irq, 0);
+ set_irq_handler(irq, NULL);
+ set_irq_chip(irq, NULL);
+ set_irq_chip_data(irq, NULL);
+ }
+ set_irq_chained_handler(asic->irq_nr, NULL);
+}
+
+/* GPIOs */
+static inline u32 asic3_get_gpio(struct asic3 *asic, unsigned int base,
+ unsigned int function)
+{
+ return asic3_read_register(asic, base + function);
+}
+
+static void asic3_set_gpio(struct asic3 *asic, unsigned int base,
+ unsigned int function, u32 bits, u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&asic->lock, flags);
+ val |= (asic3_read_register(asic, base + function) & ~bits);
+
+ asic3_write_register(asic, base + function, val);
+ spin_unlock_irqrestore(&asic->lock, flags);
+}
+
+#define asic3_get_gpio_a(asic, fn) \
+ asic3_get_gpio(asic, ASIC3_GPIO_A_Base, ASIC3_GPIO_##fn)
+#define asic3_get_gpio_b(asic, fn) \
+ asic3_get_gpio(asic, ASIC3_GPIO_B_Base, ASIC3_GPIO_##fn)
+#define asic3_get_gpio_c(asic, fn) \
+ asic3_get_gpio(asic, ASIC3_GPIO_C_Base, ASIC3_GPIO_##fn)
+#define asic3_get_gpio_d(asic, fn) \
+ asic3_get_gpio(asic, ASIC3_GPIO_D_Base, ASIC3_GPIO_##fn)
+
+#define asic3_set_gpio_a(asic, fn, bits, val) \
+ asic3_set_gpio(asic, ASIC3_GPIO_A_Base, ASIC3_GPIO_##fn, bits, val)
+#define asic3_set_gpio_b(asic, fn, bits, val) \
+ asic3_set_gpio(asic, ASIC3_GPIO_B_Base, ASIC3_GPIO_##fn, bits, val)
+#define asic3_set_gpio_c(asic, fn, bits, val) \
+ asic3_set_gpio(asic, ASIC3_GPIO_C_Base, ASIC3_GPIO_##fn, bits, val)
+#define asic3_set_gpio_d(asic, fn, bits, val) \
+ asic3_set_gpio(asic, ASIC3_GPIO_D_Base, ASIC3_GPIO_##fn, bits, val)
+
+#define asic3_set_gpio_banks(asic, fn, bits, pdata, field) \
+ do { \
+ asic3_set_gpio_a((asic), fn, (bits), (pdata)->gpio_a.field); \
+ asic3_set_gpio_b((asic), fn, (bits), (pdata)->gpio_b.field); \
+ asic3_set_gpio_c((asic), fn, (bits), (pdata)->gpio_c.field); \
+ asic3_set_gpio_d((asic), fn, (bits), (pdata)->gpio_d.field); \
+ } while (0)
+
+int asic3_gpio_get_value(struct asic3 *asic, unsigned gpio)
+{
+ u32 mask = ASIC3_GPIO_bit(gpio);
+
+ switch (gpio >> 4) {
+ case ASIC3_GPIO_BANK_A:
+ return asic3_get_gpio_a(asic, Status) & mask;
+ case ASIC3_GPIO_BANK_B:
+ return asic3_get_gpio_b(asic, Status) & mask;
+ case ASIC3_GPIO_BANK_C:
+ return asic3_get_gpio_c(asic, Status) & mask;
+ case ASIC3_GPIO_BANK_D:
+ return asic3_get_gpio_d(asic, Status) & mask;
+ default:
+ printk(KERN_ERR "%s: invalid GPIO value 0x%x",
+ __FUNCTION__, gpio);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(asic3_gpio_get_value);
+
+void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val)
+{
+ u32 mask = ASIC3_GPIO_bit(gpio);
+ u32 bitval = 0;
+ if (val)
+ bitval = mask;
+
+ switch (gpio >> 4) {
+ case ASIC3_GPIO_BANK_A:
+ asic3_set_gpio_a(asic, Out, mask, bitval);
+ return;
+ case ASIC3_GPIO_BANK_B:
+ asic3_set_gpio_b(asic, Out, mask, bitval);
+ return;
+ case ASIC3_GPIO_BANK_C:
+ asic3_set_gpio_c(asic, Out, mask, bitval);
+ return;
+ case ASIC3_GPIO_BANK_D:
+ asic3_set_gpio_d(asic, Out, mask, bitval);
+ return;
+ default:
+ printk(KERN_ERR "%s: invalid GPIO value 0x%x",
+ __FUNCTION__, gpio);
+ return;
+ }
+}
+EXPORT_SYMBOL(asic3_gpio_set_value);
+
+static int asic3_gpio_probe(struct platform_device *pdev)
+{
+ struct asic3_platform_data *pdata = pdev->dev.platform_data;
+ struct asic3 *asic = platform_get_drvdata(pdev);
+
+ asic3_write_register(asic, ASIC3_GPIO_OFFSET(A, Mask), 0xffff);
+ asic3_write_register(asic, ASIC3_GPIO_OFFSET(B, Mask), 0xffff);
+ asic3_write_register(asic, ASIC3_GPIO_OFFSET(C, Mask), 0xffff);
+ asic3_write_register(asic, ASIC3_GPIO_OFFSET(D, Mask), 0xffff);
+
+ asic3_set_gpio_a(asic, SleepMask, 0xffff, 0xffff);
+ asic3_set_gpio_b(asic, SleepMask, 0xffff, 0xffff);
+ asic3_set_gpio_c(asic, SleepMask, 0xffff, 0xffff);
+ asic3_set_gpio_d(asic, SleepMask, 0xffff, 0xffff);
+
+ if (pdata) {
+ asic3_set_gpio_banks(asic, Out, 0xffff, pdata, init);
+ asic3_set_gpio_banks(asic, Direction, 0xffff, pdata, dir);
+ asic3_set_gpio_banks(asic, SleepMask, 0xffff, pdata,
+ sleep_mask);
+ asic3_set_gpio_banks(asic, SleepOut, 0xffff, pdata, sleep_out);
+ asic3_set_gpio_banks(asic, BattFaultOut, 0xffff, pdata,
+ batt_fault_out);
+ asic3_set_gpio_banks(asic, SleepConf, 0xffff, pdata,
+ sleep_conf);
+ asic3_set_gpio_banks(asic, AltFunction, 0xffff, pdata,
+ alt_function);
+ }
+
+ return 0;
+}
+
+static void asic3_gpio_remove(struct platform_device *pdev)
+{
+ return;
+}
+
+
+/* Core */
+static int asic3_probe(struct platform_device *pdev)
+{
+ struct asic3_platform_data *pdata = pdev->dev.platform_data;
+ struct asic3 *asic;
+ struct resource *mem;
+ unsigned long clksel;
+ int ret;
+
+ asic = kzalloc(sizeof(struct asic3), GFP_KERNEL);
+ if (!asic)
+ return -ENOMEM;
+
+ spin_lock_init(&asic->lock);
+ platform_set_drvdata(pdev, asic);
+ asic->dev = &pdev->dev;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "asic3: no MEM resource\n");
+ goto err_out_1;
+ }
+
+ asic->mapping = ioremap(mem->start, PAGE_SIZE);
+ if (!asic->mapping) {
+ ret = -ENOMEM;
+ printk(KERN_ERR "asic3: couldn't ioremap\n");
+ goto err_out_1;
+ }
+
+ asic->irq_base = pdata->irq_base;
+
+ if (pdata && pdata->bus_shift)
+ asic->bus_shift = 2 - pdata->bus_shift;
+ else
+ asic->bus_shift = 0;
+
+ clksel = 0;
+ asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), clksel);
+
+ ret = asic3_irq_probe(pdev);
+ if (ret < 0) {
+ printk(KERN_ERR "asic3: couldn't probe IRQs\n");
+ goto err_out_2;
+ }
+ asic3_gpio_probe(pdev);
+
+ if (pdata->children) {
+ int i;
+ for (i = 0; i < pdata->n_children; i++) {
+ pdata->children[i]->dev.parent = &pdev->dev;
+ platform_device_register(pdata->children[i]);
+ }
+ }
+
+ printk(KERN_INFO "ASIC3 Core driver\n");
+
+ return 0;
+
+ err_out_2:
+ iounmap(asic->mapping);
+ err_out_1:
+ kfree(asic);
+
+ return ret;
+}
+
+static int asic3_remove(struct platform_device *pdev)
+{
+ struct asic3 *asic = platform_get_drvdata(pdev);
+
+ asic3_gpio_remove(pdev);
+ asic3_irq_remove(pdev);
+
+ asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), 0);
+
+ iounmap(asic->mapping);
+
+ kfree(asic);
+
+ return 0;
+}
+
+static void asic3_shutdown(struct platform_device *pdev)
+{
+}
+
+static struct platform_driver asic3_device_driver = {
+ .driver = {
+ .name = "asic3",
+ },
+ .probe = asic3_probe,
+ .remove = __devexit_p(asic3_remove),
+ .shutdown = asic3_shutdown,
+};
+
+static int __init asic3_init(void)
+{
+ int retval = 0;
+ retval = platform_driver_register(&asic3_device_driver);
+ return retval;
+}
+
+subsys_initcall(asic3_init);
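
The two EXPORT_SYMBOL'd helpers above, asic3_gpio_get_value() and asic3_gpio_set_value(), are the entry points other code is expected to use. A minimal, purely hypothetical sketch of a caller follows; the GPIO numbers and the function name are invented for illustration, and it assumes the prototypes are exposed through <linux/mfd/asic3.h> (the header the driver itself includes):

#include <linux/mfd/asic3.h>

/* Hypothetical GPIO numbers: the bank is selected by gpio >> 4 (see the
 * switch in asic3_gpio_get_value() above), the remaining bits pick the pin. */
#define EXAMPLE_BUTTON_GPIO	0x15
#define EXAMPLE_LED_GPIO	0x27

static void example_sync_led(struct asic3 *asic)
{
	int pressed = asic3_gpio_get_value(asic, EXAMPLE_BUTTON_GPIO);

	/* get_value returns a negative errno for an invalid GPIO number */
	if (pressed >= 0)
		asic3_gpio_set_value(asic, EXAMPLE_LED_GPIO, pressed ? 1 : 0);
}
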
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index b5e67c0ff433..7b5220ca7d7f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -92,6 +92,22 @@ config TIFM_7XX1
To compile this driver as a module, choose M here: the module will
be called tifm_7xx1.
+config ACER_WMI
+ tristate "Acer WMI Laptop Extras (EXPERIMENTAL)"
+ depends on X86
+ depends on EXPERIMENTAL
+ depends on ACPI
+ depends on ACPI_WMI
+ depends on LEDS_CLASS
+ depends on BACKLIGHT_CLASS_DEVICE
+ ---help---
+ This is a driver for newer Acer (and Wistron) laptops. It adds
+ wireless radio and bluetooth control, and on some laptops,
+ exposes the mail LED and LCD backlight.
+
+ If you have an ACPI-WMI compatible Acer/Wistron laptop, say Y or M
+ here.
+
config ASUS_LAPTOP
tristate "Asus Laptop Extras (EXPERIMENTAL)"
depends on X86
@@ -126,6 +142,15 @@ config FUJITSU_LAPTOP
If you have a Fujitsu laptop, say Y or M here.
+config TC1100_WMI
+ tristate "HP Compaq TC1100 Tablet WMI Extras"
+ depends on X86 && !X86_64
+ depends on ACPI
+ depends on ACPI_WMI
+ ---help---
+ This is a driver for the WMI extensions (wireless and bluetooth power
+ control) of the HP Compaq TC1100 tablet.
+
config MSI_LAPTOP
tristate "MSI Laptop Extras"
depends on X86
@@ -219,6 +244,25 @@ config THINKPAD_ACPI_BAY
If you are not sure, say Y here.
+config THINKPAD_ACPI_HOTKEY_POLL
+ bool "Suport NVRAM polling for hot keys"
+ depends on THINKPAD_ACPI
+ default y
+ ---help---
+ Some ThinkPad models benefit from NVRAM polling to detect a few of
+ the hot key press events. If you know your ThinkPad model does not
+ need to do NVRAM polling to support any of the hot keys you use,
+ unselecting this option will save about 1kB of memory.
+
+ ThinkPads T40 and newer, R52 and newer, and X31 and newer are
+ unlikely to need NVRAM polling in their latest BIOS versions.
+
+ NVRAM polling can detect at most the following keys: ThinkPad/Access
+ IBM, Zoom, Switch Display (fn+F7), ThinkLight, Volume up/down/mute,
+ Brightness up/down, Display Expand (fn+F8), Hibernate (fn+F12).
+
+ If you are not sure, say Y here. The driver enables polling only if
+ it is strictly necessary to do so.
config ATMEL_SSC
tristate "Device driver for Atmel SSC peripheral"
@@ -232,4 +276,22 @@ config ATMEL_SSC
If unsure, say N.
+config INTEL_MENLOW
+ tristate "Thermal Management driver for Intel menlow platform"
+ depends on ACPI_THERMAL
+ ---help---
+ ACPI thermal management enhancement driver on
+ Intel Menlow platform.
+
+ If unsure, say N.
+
+config ENCLOSURE_SERVICES
+ tristate "Enclosure Services"
+ default n
+ help
+ Provides support for intelligent enclosures (bays which
+ contain storage devices). You also need either a host
+ driver (SCSI/ATA) which supports enclosures
+ or a SCSI enclosure device (SES) to use these services.
+
endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 87f2685d728f..7f13549cc87e 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -6,8 +6,10 @@ obj- := misc.o # Dummy rule to force built-in.o to be made
obj-$(CONFIG_IBM_ASM) += ibmasm/
obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/
obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
+obj-$(CONFIG_ACER_WMI) += acer-wmi.o
obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
+obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
obj-$(CONFIG_LKDTM) += lkdtm.o
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
@@ -17,3 +19,5 @@ obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
+obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
+obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
diff --git a/drivers/misc/acer-wmi.c b/drivers/misc/acer-wmi.c
new file mode 100644
index 000000000000..a4d677504250
--- /dev/null
+++ b/drivers/misc/acer-wmi.c
@@ -0,0 +1,1109 @@
+/*
+ * Acer WMI Laptop Extras
+ *
+ * Copyright (C) 2007-2008 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ *
+ * Based on acer_acpi:
+ * Copyright (C) 2005-2007 E.M. Smith
+ * Copyright (C) 2007-2008 Carlos Corbacho <cathectic@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define ACER_WMI_VERSION "0.1"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/i8042.h>
+
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Carlos Corbacho");
+MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver");
+MODULE_LICENSE("GPL");
+
+#define ACER_LOGPREFIX "acer-wmi: "
+#define ACER_ERR KERN_ERR ACER_LOGPREFIX
+#define ACER_NOTICE KERN_NOTICE ACER_LOGPREFIX
+#define ACER_INFO KERN_INFO ACER_LOGPREFIX
+
+/*
+ * The following defines quirks to get some specific functions to work
+ * which are known to not be supported over ACPI-WMI (such as the mail LED
+ * on WMID based Acers)
+ */
+struct acer_quirks {
+ const char *vendor;
+ const char *model;
+ u16 quirks;
+};
+
+/*
+ * Magic Number
+ * Meaning is unknown - this number is required for writing to ACPI for AMW0
+ * (it's also used in acerhk when directly accessing the BIOS)
+ */
+#define ACER_AMW0_WRITE 0x9610
+
+/*
+ * Bit masks for the AMW0 interface
+ */
+#define ACER_AMW0_WIRELESS_MASK 0x35
+#define ACER_AMW0_BLUETOOTH_MASK 0x34
+#define ACER_AMW0_MAILLED_MASK 0x31
+
+/*
+ * Method IDs for WMID interface
+ */
+#define ACER_WMID_GET_WIRELESS_METHODID 1
+#define ACER_WMID_GET_BLUETOOTH_METHODID 2
+#define ACER_WMID_GET_BRIGHTNESS_METHODID 3
+#define ACER_WMID_SET_WIRELESS_METHODID 4
+#define ACER_WMID_SET_BLUETOOTH_METHODID 5
+#define ACER_WMID_SET_BRIGHTNESS_METHODID 6
+#define ACER_WMID_GET_THREEG_METHODID 10
+#define ACER_WMID_SET_THREEG_METHODID 11
+
+/*
+ * Acer ACPI method GUIDs
+ */
+#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
+#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3"
+#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
+
+MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB");
+MODULE_ALIAS("wmi:6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3");
+
+/* Temporary workaround until the WMI sysfs interface goes in */
+MODULE_ALIAS("dmi:*:*Acer*:*:");
+
+/*
+ * Interface capability flags
+ */
+#define ACER_CAP_MAILLED (1<<0)
+#define ACER_CAP_WIRELESS (1<<1)
+#define ACER_CAP_BLUETOOTH (1<<2)
+#define ACER_CAP_BRIGHTNESS (1<<3)
+#define ACER_CAP_THREEG (1<<4)
+#define ACER_CAP_ANY (0xFFFFFFFF)
+
+/*
+ * Interface type flags
+ */
+enum interface_flags {
+ ACER_AMW0,
+ ACER_AMW0_V2,
+ ACER_WMID,
+};
+
+#define ACER_DEFAULT_WIRELESS 0
+#define ACER_DEFAULT_BLUETOOTH 0
+#define ACER_DEFAULT_MAILLED 0
+#define ACER_DEFAULT_THREEG 0
+
+static int max_brightness = 0xF;
+
+static int wireless = -1;
+static int bluetooth = -1;
+static int mailled = -1;
+static int brightness = -1;
+static int threeg = -1;
+static int force_series;
+
+module_param(mailled, int, 0444);
+module_param(wireless, int, 0444);
+module_param(bluetooth, int, 0444);
+module_param(brightness, int, 0444);
+module_param(threeg, int, 0444);
+module_param(force_series, int, 0444);
+MODULE_PARM_DESC(wireless, "Set initial state of Wireless hardware");
+MODULE_PARM_DESC(bluetooth, "Set initial state of Bluetooth hardware");
+MODULE_PARM_DESC(mailled, "Set initial state of Mail LED");
+MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness");
+MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware");
+MODULE_PARM_DESC(force_series, "Force a different laptop series");
+
+struct acer_data {
+ int mailled;
+ int wireless;
+ int bluetooth;
+ int threeg;
+ int brightness;
+};
+
+/* Each low-level interface must define at least some of the following */
+struct wmi_interface {
+ /* The WMI device type */
+ u32 type;
+
+ /* The capabilities this interface provides */
+ u32 capability;
+
+ /* Private data for the current interface */
+ struct acer_data data;
+};
+
+/* The static interface pointer, points to the currently detected interface */
+static struct wmi_interface *interface;
+
+/*
+ * Embedded Controller quirks
+ * Some laptops require us to directly access the EC to either enable or query
+ * features that are not available through WMI.
+ */
+
+struct quirk_entry {
+ u8 wireless;
+ u8 mailled;
+ u8 brightness;
+ u8 bluetooth;
+};
+
+static struct quirk_entry *quirks;
+
+static void set_quirks(void)
+{
+ if (quirks->mailled)
+ interface->capability |= ACER_CAP_MAILLED;
+
+ if (quirks->brightness)
+ interface->capability |= ACER_CAP_BRIGHTNESS;
+}
+
+static int dmi_matched(const struct dmi_system_id *dmi)
+{
+ quirks = dmi->driver_data;
+ return 0;
+}
+
+static struct quirk_entry quirk_unknown = {
+};
+
+static struct quirk_entry quirk_acer_travelmate_2490 = {
+ .mailled = 1,
+};
+
+/* This AMW0 laptop has no bluetooth */
+static struct quirk_entry quirk_medion_md_98300 = {
+ .wireless = 1,
+};
+
+static struct dmi_system_id acer_quirks[] = {
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 3100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5630",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5650",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 5680",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer Aspire 9110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Acer TravelMate 2490",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"),
+ },
+ .driver_data = &quirk_acer_travelmate_2490,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Medion MD 98300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "WAM2030"),
+ },
+ .driver_data = &quirk_medion_md_98300,
+ },
+ {}
+};
+
+/* Find which quirks are needed for a particular vendor/model pair */
+static void find_quirks(void)
+{
+ if (!force_series) {
+ dmi_check_system(acer_quirks);
+ } else if (force_series == 2490) {
+ quirks = &quirk_acer_travelmate_2490;
+ }
+
+ if (quirks == NULL)
+ quirks = &quirk_unknown;
+
+ set_quirks();
+}
+
+/*
+ * General interface convenience methods
+ */
+
+static bool has_cap(u32 cap)
+{
+ if ((interface->capability & cap) != 0)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * AMW0 (V1) interface
+ */
+struct wmab_args {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+};
+
+struct wmab_ret {
+ u32 eax;
+ u32 ebx;
+ u32 ecx;
+ u32 edx;
+ u32 eex;
+};
+
+static acpi_status wmab_execute(struct wmab_args *regbuf,
+struct acpi_buffer *result)
+{
+ struct acpi_buffer input;
+ acpi_status status;
+ input.length = sizeof(struct wmab_args);
+ input.pointer = (u8 *)regbuf;
+
+ status = wmi_evaluate_method(AMW0_GUID1, 1, 1, &input, result);
+
+ return status;
+}
+
+static acpi_status AMW0_get_u32(u32 *value, u32 cap,
+struct wmi_interface *iface)
+{
+ int err;
+ u8 result;
+
+ switch (cap) {
+ case ACER_CAP_MAILLED:
+ switch (quirks->mailled) {
+ default:
+ err = ec_read(0xA, &result);
+ if (err)
+ return AE_ERROR;
+ *value = (result >> 7) & 0x1;
+ return AE_OK;
+ }
+ break;
+ case ACER_CAP_WIRELESS:
+ switch (quirks->wireless) {
+ case 1:
+ err = ec_read(0x7B, &result);
+ if (err)
+ return AE_ERROR;
+ *value = result & 0x1;
+ return AE_OK;
+ default:
+ err = ec_read(0xA, &result);
+ if (err)
+ return AE_ERROR;
+ *value = (result >> 2) & 0x1;
+ return AE_OK;
+ }
+ break;
+ case ACER_CAP_BLUETOOTH:
+ switch (quirks->bluetooth) {
+ default:
+ err = ec_read(0xA, &result);
+ if (err)
+ return AE_ERROR;
+ *value = (result >> 4) & 0x1;
+ return AE_OK;
+ }
+ break;
+ case ACER_CAP_BRIGHTNESS:
+ switch (quirks->brightness) {
+ default:
+ err = ec_read(0x83, &result);
+ if (err)
+ return AE_ERROR;
+ *value = result;
+ return AE_OK;
+ }
+ break;
+ default:
+ return AE_BAD_ADDRESS;
+ }
+ return AE_OK;
+}
+
+static acpi_status AMW0_set_u32(u32 value, u32 cap, struct wmi_interface *iface)
+{
+ struct wmab_args args;
+
+ args.eax = ACER_AMW0_WRITE;
+ args.ebx = value ? (1<<8) : 0;
+ args.ecx = args.edx = 0;
+
+ switch (cap) {
+ case ACER_CAP_MAILLED:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ args.ebx |= ACER_AMW0_MAILLED_MASK;
+ break;
+ case ACER_CAP_WIRELESS:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ args.ebx |= ACER_AMW0_WIRELESS_MASK;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ args.ebx |= ACER_AMW0_BLUETOOTH_MASK;
+ break;
+ case ACER_CAP_BRIGHTNESS:
+ if (value > max_brightness)
+ return AE_BAD_PARAMETER;
+ switch (quirks->brightness) {
+ case 1:
+ return ec_write(0x83, value);
+ default:
+ return AE_BAD_ADDRESS;
+ break;
+ }
+ default:
+ return AE_BAD_ADDRESS;
+ }
+
+ /* Actually do the set */
+ return wmab_execute(&args, NULL);
+}
+
+static acpi_status AMW0_find_mailled(void)
+{
+ struct wmab_args args;
+ struct wmab_ret ret;
+ acpi_status status = AE_OK;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+ args.eax = 0x86;
+ args.ebx = args.ecx = args.edx = 0;
+
+ status = wmab_execute(&args, &out);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(struct wmab_ret)) {
+ ret = *((struct wmab_ret *) obj->buffer.pointer);
+ } else {
+ return AE_ERROR;
+ }
+
+ if (ret.eex & 0x1)
+ interface->capability |= ACER_CAP_MAILLED;
+
+ kfree(out.pointer);
+
+ return AE_OK;
+}
+
+static acpi_status AMW0_set_capabilities(void)
+{
+ struct wmab_args args;
+ struct wmab_ret ret;
+ acpi_status status = AE_OK;
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+ args.eax = ACER_AMW0_WRITE;
+ args.ecx = args.edx = 0;
+
+ args.ebx = 0xa2 << 8;
+ args.ebx |= ACER_AMW0_WIRELESS_MASK;
+
+ status = wmab_execute(&args, &out);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(struct wmab_ret)) {
+ ret = *((struct wmab_ret *) obj->buffer.pointer);
+ } else {
+ return AE_ERROR;
+ }
+
+ if (ret.eax & 0x1)
+ interface->capability |= ACER_CAP_WIRELESS;
+
+ args.ebx = 2 << 8;
+ args.ebx |= ACER_AMW0_BLUETOOTH_MASK;
+
+ status = wmab_execute(&args, &out);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER
+ && obj->buffer.length == sizeof(struct wmab_ret)) {
+ ret = *((struct wmab_ret *) obj->buffer.pointer);
+ } else {
+ return AE_ERROR;
+ }
+
+ if (ret.eax & 0x1)
+ interface->capability |= ACER_CAP_BLUETOOTH;
+
+ kfree(out.pointer);
+
+ /*
+ * This appears to be safe to enable, since all Wistron based laptops
+ * appear to use the same EC register for brightness, even if they
+ * differ for wireless, etc
+ */
+ interface->capability |= ACER_CAP_BRIGHTNESS;
+
+ return AE_OK;
+}
+
+static struct wmi_interface AMW0_interface = {
+ .type = ACER_AMW0,
+};
+
+static struct wmi_interface AMW0_V2_interface = {
+ .type = ACER_AMW0_V2,
+};
+
+/*
+ * New interface (The WMID interface)
+ */
+static acpi_status
+WMI_execute_u32(u32 method_id, u32 in, u32 *out)
+{
+ struct acpi_buffer input = { (acpi_size) sizeof(u32), (void *)(&in) };
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ u32 tmp;
+ acpi_status status;
+
+ status = wmi_evaluate_method(WMID_GUID1, 1, method_id, &input, &result);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) result.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(u32)) {
+ tmp = *((u32 *) obj->buffer.pointer);
+ } else {
+ tmp = 0;
+ }
+
+ if (out)
+ *out = tmp;
+
+ kfree(result.pointer);
+
+ return status;
+}
+
+static acpi_status WMID_get_u32(u32 *value, u32 cap,
+struct wmi_interface *iface)
+{
+ acpi_status status;
+ u8 tmp;
+ u32 result, method_id = 0;
+
+ switch (cap) {
+ case ACER_CAP_WIRELESS:
+ method_id = ACER_WMID_GET_WIRELESS_METHODID;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ method_id = ACER_WMID_GET_BLUETOOTH_METHODID;
+ break;
+ case ACER_CAP_BRIGHTNESS:
+ method_id = ACER_WMID_GET_BRIGHTNESS_METHODID;
+ break;
+ case ACER_CAP_THREEG:
+ method_id = ACER_WMID_GET_THREEG_METHODID;
+ break;
+ case ACER_CAP_MAILLED:
+ if (quirks->mailled == 1) {
+ ec_read(0x9f, &tmp);
+ *value = tmp & 0x1;
+ return 0;
+ }
+ default:
+ return AE_BAD_ADDRESS;
+ }
+ status = WMI_execute_u32(method_id, 0, &result);
+
+ if (ACPI_SUCCESS(status))
+ *value = (u8)result;
+
+ return status;
+}
+
+static acpi_status WMID_set_u32(u32 value, u32 cap, struct wmi_interface *iface)
+{
+ u32 method_id = 0;
+ char param;
+
+ switch (cap) {
+ case ACER_CAP_BRIGHTNESS:
+ if (value > max_brightness)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_BRIGHTNESS_METHODID;
+ break;
+ case ACER_CAP_WIRELESS:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_WIRELESS_METHODID;
+ break;
+ case ACER_CAP_BLUETOOTH:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_BLUETOOTH_METHODID;
+ break;
+ case ACER_CAP_THREEG:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ method_id = ACER_WMID_SET_THREEG_METHODID;
+ break;
+ case ACER_CAP_MAILLED:
+ if (value > 1)
+ return AE_BAD_PARAMETER;
+ if (quirks->mailled == 1) {
+ param = value ? 0x92 : 0x93;
+ i8042_command(&param, 0x1059);
+ return 0;
+ }
+ break;
+ default:
+ return AE_BAD_ADDRESS;
+ }
+ return WMI_execute_u32(method_id, (u32)value, NULL);
+}
+
+static acpi_status WMID_set_capabilities(void)
+{
+ struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *obj;
+ acpi_status status;
+ u32 devices;
+
+ status = wmi_query_block(WMID_GUID2, 1, &out);
+ if (ACPI_FAILURE(status))
+ return status;
+
+ obj = (union acpi_object *) out.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(u32)) {
+ devices = *((u32 *) obj->buffer.pointer);
+ } else {
+ return AE_ERROR;
+ }
+
+ /*
+ * We don't yet know which bits indicate these devices,
+ * so assume both are present
+ */
+ interface->capability |= ACER_CAP_WIRELESS;
+ interface->capability |= ACER_CAP_THREEG;
+
+ /* WMID always provides brightness methods */
+ interface->capability |= ACER_CAP_BRIGHTNESS;
+
+ if (devices & 0x10)
+ interface->capability |= ACER_CAP_BLUETOOTH;
+
+ if (!(devices & 0x20))
+ max_brightness = 0x9;
+
+ return status;
+}
+
+static struct wmi_interface wmid_interface = {
+ .type = ACER_WMID,
+};
+
+/*
+ * Generic Device (interface-independent)
+ */
+
+static acpi_status get_u32(u32 *value, u32 cap)
+{
+ acpi_status status = AE_BAD_ADDRESS;
+
+ switch (interface->type) {
+ case ACER_AMW0:
+ status = AMW0_get_u32(value, cap, interface);
+ break;
+ case ACER_AMW0_V2:
+ if (cap == ACER_CAP_MAILLED) {
+ status = AMW0_get_u32(value, cap, interface);
+ break;
+ }
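+ /* fall through: AMW0 v2 uses the WMID methods for everything else */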
+ case ACER_WMID:
+ status = WMID_get_u32(value, cap, interface);
+ break;
+ }
+
+ return status;
+}
+
+static acpi_status set_u32(u32 value, u32 cap)
+{
+ if (interface->capability & cap) {
+ switch (interface->type) {
+ case ACER_AMW0:
+ return AMW0_set_u32(value, cap, interface);
+ case ACER_AMW0_V2:
+ case ACER_WMID:
+ return WMID_set_u32(value, cap, interface);
+ default:
+ return AE_BAD_PARAMETER;
+ }
+ }
+ return AE_BAD_PARAMETER;
+}
+
+static void __init acer_commandline_init(void)
+{
+ /*
+ * These will all fail silently if the value given is invalid, or the
+ * capability isn't available on the given interface
+ */
+ set_u32(mailled, ACER_CAP_MAILLED);
+ set_u32(wireless, ACER_CAP_WIRELESS);
+ set_u32(bluetooth, ACER_CAP_BLUETOOTH);
+ set_u32(threeg, ACER_CAP_THREEG);
+ set_u32(brightness, ACER_CAP_BRIGHTNESS);
+}
+
+/*
+ * LED device (Mail LED only, no other LEDs known yet)
+ */
+static void mail_led_set(struct led_classdev *led_cdev,
+enum led_brightness value)
+{
+ set_u32(value, ACER_CAP_MAILLED);
+}
+
+static struct led_classdev mail_led = {
+ .name = "acer-mail:green",
+ .brightness_set = mail_led_set,
+};
+
+static int __init acer_led_init(struct device *dev)
+{
+ return led_classdev_register(dev, &mail_led);
+}
+
+static void acer_led_exit(void)
+{
+ led_classdev_unregister(&mail_led);
+}
+
+/*
+ * Backlight device
+ */
+static struct backlight_device *acer_backlight_device;
+
+static int read_brightness(struct backlight_device *bd)
+{
+ u32 value;
+ get_u32(&value, ACER_CAP_BRIGHTNESS);
+ return value;
+}
+
+static int update_bl_status(struct backlight_device *bd)
+{
+ set_u32(bd->props.brightness, ACER_CAP_BRIGHTNESS);
+ return 0;
+}
+
+static struct backlight_ops acer_bl_ops = {
+ .get_brightness = read_brightness,
+ .update_status = update_bl_status,
+};
+
+static int __init acer_backlight_init(struct device *dev)
+{
+ struct backlight_device *bd;
+
+ bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops);
+ if (IS_ERR(bd)) {
+ printk(ACER_ERR "Could not register Acer backlight device\n");
+ acer_backlight_device = NULL;
+ return PTR_ERR(bd);
+ }
+
+ acer_backlight_device = bd;
+
+ bd->props.max_brightness = max_brightness;
+ bd->props.brightness = read_brightness(NULL);
+ backlight_update_status(bd);
+ return 0;
+}
+
+static void __exit acer_backlight_exit(void)
+{
+ backlight_device_unregister(acer_backlight_device);
+}
+
+/*
+ * Read/write bool sysfs macro
+ */
+#define show_set_bool(value, cap) \
+static ssize_t \
+show_bool_##value(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ u32 result; \
+ acpi_status status = get_u32(&result, cap); \
+ if (ACPI_SUCCESS(status)) \
+ return sprintf(buf, "%u\n", result); \
+ return sprintf(buf, "Read error\n"); \
+} \
+\
+static ssize_t \
+set_bool_##value(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ u32 tmp = simple_strtoul(buf, NULL, 10); \
+ acpi_status status = set_u32(tmp, cap); \
+ if (ACPI_FAILURE(status)) \
+ return -EINVAL; \
+ return count; \
+} \
+static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
+ show_bool_##value, set_bool_##value);
+
+show_set_bool(wireless, ACER_CAP_WIRELESS);
+show_set_bool(bluetooth, ACER_CAP_BLUETOOTH);
+show_set_bool(threeg, ACER_CAP_THREEG);
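For reference, each show_set_bool() invocation above expands (sketch, whitespace aside) to a show/store pair plus a device attribute; e.g. show_set_bool(wireless, ACER_CAP_WIRELESS) yields roughly:

	static ssize_t show_bool_wireless(struct device *dev,
		struct device_attribute *attr, char *buf)
	{
		u32 result;
		acpi_status status = get_u32(&result, ACER_CAP_WIRELESS);
		if (ACPI_SUCCESS(status))
			return sprintf(buf, "%u\n", result);
		return sprintf(buf, "Read error\n");
	}

	static DEVICE_ATTR(wireless, S_IWUGO | S_IRUGO | S_IWUSR,
		show_bool_wireless, set_bool_wireless);

which is what backs the wireless/bluetooth/threeg files created in create_sysfs() below.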
+
+/*
+ * Read-only 'interface' sysfs attribute
+ */
+static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ switch (interface->type) {
+ case ACER_AMW0:
+ return sprintf(buf, "AMW0\n");
+ case ACER_AMW0_V2:
+ return sprintf(buf, "AMW0 v2\n");
+ case ACER_WMID:
+ return sprintf(buf, "WMID\n");
+ default:
+ return sprintf(buf, "Error!\n");
+ }
+}
+
+static DEVICE_ATTR(interface, S_IRUGO, show_interface, NULL);
+
+/*
+ * Platform device
+ */
+static int __devinit acer_platform_probe(struct platform_device *device)
+{
+ int err;
+
+ if (has_cap(ACER_CAP_MAILLED)) {
+ err = acer_led_init(&device->dev);
+ if (err)
+ goto error_mailled;
+ }
+
+ if (has_cap(ACER_CAP_BRIGHTNESS)) {
+ err = acer_backlight_init(&device->dev);
+ if (err)
+ goto error_brightness;
+ }
+
+ return 0;
+
+error_brightness:
+ acer_led_exit();
+error_mailled:
+ return err;
+}
+
+static int acer_platform_remove(struct platform_device *device)
+{
+ if (has_cap(ACER_CAP_MAILLED))
+ acer_led_exit();
+ if (has_cap(ACER_CAP_BRIGHTNESS))
+ acer_backlight_exit();
+ return 0;
+}
+
+static int acer_platform_suspend(struct platform_device *dev,
+pm_message_t state)
+{
+ u32 value;
+ struct acer_data *data = &interface->data;
+
+ if (!data)
+ return -ENOMEM;
+
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ get_u32(&value, ACER_CAP_WIRELESS);
+ data->wireless = value;
+ }
+
+ if (has_cap(ACER_CAP_BLUETOOTH)) {
+ get_u32(&value, ACER_CAP_BLUETOOTH);
+ data->bluetooth = value;
+ }
+
+ if (has_cap(ACER_CAP_MAILLED)) {
+ get_u32(&value, ACER_CAP_MAILLED);
+ data->mailled = value;
+ }
+
+ if (has_cap(ACER_CAP_BRIGHTNESS)) {
+ get_u32(&value, ACER_CAP_BRIGHTNESS);
+ data->brightness = value;
+ }
+
+ return 0;
+}
+
+static int acer_platform_resume(struct platform_device *device)
+{
+ struct acer_data *data = &interface->data;
+
+ if (!data)
+ return -ENOMEM;
+
+ if (has_cap(ACER_CAP_WIRELESS))
+ set_u32(data->wireless, ACER_CAP_WIRELESS);
+
+ if (has_cap(ACER_CAP_BLUETOOTH))
+ set_u32(data->bluetooth, ACER_CAP_BLUETOOTH);
+
+ if (has_cap(ACER_CAP_THREEG))
+ set_u32(data->threeg, ACER_CAP_THREEG);
+
+ if (has_cap(ACER_CAP_MAILLED))
+ set_u32(data->mailled, ACER_CAP_MAILLED);
+
+ if (has_cap(ACER_CAP_BRIGHTNESS))
+ set_u32(data->brightness, ACER_CAP_BRIGHTNESS);
+
+ return 0;
+}
+
+static struct platform_driver acer_platform_driver = {
+ .driver = {
+ .name = "acer-wmi",
+ .owner = THIS_MODULE,
+ },
+ .probe = acer_platform_probe,
+ .remove = acer_platform_remove,
+ .suspend = acer_platform_suspend,
+ .resume = acer_platform_resume,
+};
+
+static struct platform_device *acer_platform_device;
+
+static int remove_sysfs(struct platform_device *device)
+{
+ if (has_cap(ACER_CAP_WIRELESS))
+ device_remove_file(&device->dev, &dev_attr_wireless);
+
+ if (has_cap(ACER_CAP_BLUETOOTH))
+ device_remove_file(&device->dev, &dev_attr_bluetooth);
+
+ if (has_cap(ACER_CAP_THREEG))
+ device_remove_file(&device->dev, &dev_attr_threeg);
+
+ device_remove_file(&device->dev, &dev_attr_interface);
+
+ return 0;
+}
+
+static int create_sysfs(void)
+{
+ int retval = -ENOMEM;
+
+ if (has_cap(ACER_CAP_WIRELESS)) {
+ retval = device_create_file(&acer_platform_device->dev,
+ &dev_attr_wireless);
+ if (retval)
+ goto error_sysfs;
+ }
+
+ if (has_cap(ACER_CAP_BLUETOOTH)) {
+ retval = device_create_file(&acer_platform_device->dev,
+ &dev_attr_bluetooth);
+ if (retval)
+ goto error_sysfs;
+ }
+
+ if (has_cap(ACER_CAP_THREEG)) {
+ retval = device_create_file(&acer_platform_device->dev,
+ &dev_attr_threeg);
+ if (retval)
+ goto error_sysfs;
+ }
+
+ retval = device_create_file(&acer_platform_device->dev,
+ &dev_attr_interface);
+ if (retval)
+ goto error_sysfs;
+
+ return 0;
+
+error_sysfs:
+ remove_sysfs(acer_platform_device);
+ return retval;
+}
+
+static int __init acer_wmi_init(void)
+{
+ int err;
+
+ printk(ACER_INFO "Acer Laptop ACPI-WMI Extras version %s\n",
+ ACER_WMI_VERSION);
+
+ /*
+ * Detect which ACPI-WMI interface we're using.
+ */
+ if (wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1))
+ interface = &AMW0_V2_interface;
+
+ if (!wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1))
+ interface = &wmid_interface;
+
+ if (wmi_has_guid(WMID_GUID2) && interface) {
+ if (ACPI_FAILURE(WMID_set_capabilities())) {
+ printk(ACER_ERR "Unable to detect available devices\n");
+ return -ENODEV;
+ }
+ } else if (!wmi_has_guid(WMID_GUID2) && interface) {
+ printk(ACER_ERR "Unable to detect available devices\n");
+ return -ENODEV;
+ }
+
+ if (wmi_has_guid(AMW0_GUID1) && !wmi_has_guid(WMID_GUID1)) {
+ interface = &AMW0_interface;
+
+ if (ACPI_FAILURE(AMW0_set_capabilities())) {
+ printk(ACER_ERR "Unable to detect available devices\n");
+ return -ENODEV;
+ }
+ }
+
+ if (wmi_has_guid(AMW0_GUID1)) {
+ if (ACPI_FAILURE(AMW0_find_mailled()))
+ printk(ACER_ERR "Unable to detect mail LED\n");
+ }
+
+ find_quirks();
+
+ if (!interface) {
+ printk(ACER_ERR "No or unsupported WMI interface, unable to ");
+ printk(KERN_CONT "load.\n");
+ return -ENODEV;
+ }
+
+ if (platform_driver_register(&acer_platform_driver)) {
+ printk(ACER_ERR "Unable to register platform driver.\n");
+ goto error_platform_register;
+ }
+ acer_platform_device = platform_device_alloc("acer-wmi", -1);
+ platform_device_add(acer_platform_device);
+
+ err = create_sysfs();
+ if (err)
+ return err;
+
+ /* Override any initial settings with values from the commandline */
+ acer_commandline_init();
+
+ return 0;
+
+error_platform_register:
+ return -ENODEV;
+}
+
+static void __exit acer_wmi_exit(void)
+{
+ remove_sysfs(acer_platform_device);
+ platform_device_del(acer_platform_device);
+ platform_driver_unregister(&acer_platform_driver);
+
+ printk(ACER_INFO "Acer Laptop WMI Extras unloaded\n");
+ return;
+}
+
+module_init(acer_wmi_init);
+module_exit(acer_wmi_exit);
diff --git a/drivers/misc/asus-laptop.c b/drivers/misc/asus-laptop.c
index 7dce318df1bd..7c6dfd03de9f 100644
--- a/drivers/misc/asus-laptop.c
+++ b/drivers/misc/asus-laptop.c
@@ -33,7 +33,6 @@
* Sam Lin - GPS support
*/
-#include <linux/autoconf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -240,7 +239,7 @@ static struct workqueue_struct *led_workqueue;
static int object##_led_wk; \
static DECLARE_WORK(object##_led_work, object##_led_update); \
static struct led_classdev object##_led = { \
- .name = "asus:" ledname, \
+ .name = "asus::" ledname, \
.brightness_set = object##_led_set, \
}
@@ -255,7 +254,7 @@ ASUS_LED(gled, "gaming");
* method is searched within the scope of the handle, can be NULL. The output
* of the method is written is output, which can also be NULL
*
- * returns 1 if write is successful, 0 else.
+ * returns 0 if the write is successful, -1 otherwise.
*/
static int write_acpi_int(acpi_handle handle, const char *method, int val,
struct acpi_buffer *output)
@@ -264,13 +263,19 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
union acpi_object in_obj; //the only param we use
acpi_status status;
+ if (!handle)
+ return 0;
+
params.count = 1;
params.pointer = &in_obj;
in_obj.type = ACPI_TYPE_INTEGER;
in_obj.integer.value = val;
status = acpi_evaluate_object(handle, (char *)method, &params, output);
- return (status == AE_OK);
+ if (status == AE_OK)
+ return 0;
+ else
+ return -1;
}
static int read_wireless_status(int mask)
@@ -322,7 +327,7 @@ static void write_status(acpi_handle handle, int out, int mask)
switch (mask) {
case MLED_ON:
- out = !out & 0x1;
+ out = !(out & 0x1);
break;
case GLED_ON:
out = (out & 0x1) + 1;
@@ -336,7 +341,7 @@ static void write_status(acpi_handle handle, int out, int mask)
break;
}
- if (handle && !write_acpi_int(handle, NULL, out, NULL))
+ if (write_acpi_int(handle, NULL, out, NULL))
printk(ASUS_WARNING " write failed %x\n", mask);
}
@@ -416,7 +421,7 @@ static int set_brightness(struct backlight_device *bd, int value)
value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
/* 0 <= value <= 15 */
- if (!write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
+ if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
printk(ASUS_WARNING "Error changing brightness\n");
ret = -EIO;
}
@@ -546,7 +551,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
rv = parse_arg(buf, count, &value);
if (rv > 0) {
- if (!write_acpi_int(ledd_set_handle, NULL, value, NULL))
+ if (write_acpi_int(ledd_set_handle, NULL, value, NULL))
printk(ASUS_WARNING "LED display write failed\n");
else
hotk->ledd_status = (u32) value;
@@ -591,7 +596,7 @@ static ssize_t store_bluetooth(struct device *dev,
static void set_display(int value)
{
/* no sanity check needed for now */
- if (!write_acpi_int(display_set_handle, NULL, value, NULL))
+ if (write_acpi_int(display_set_handle, NULL, value, NULL))
printk(ASUS_WARNING "Error setting display\n");
return;
}
@@ -648,7 +653,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
*/
static void set_light_sens_switch(int value)
{
- if (!write_acpi_int(ls_switch_handle, NULL, value, NULL))
+ if (write_acpi_int(ls_switch_handle, NULL, value, NULL))
printk(ASUS_WARNING "Error setting light sensor switch\n");
hotk->light_switch = value;
}
@@ -673,7 +678,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
static void set_light_sens_level(int value)
{
- if (!write_acpi_int(ls_level_handle, NULL, value, NULL))
+ if (write_acpi_int(ls_level_handle, NULL, value, NULL))
printk(ASUS_WARNING "Error setting light sensor level\n");
hotk->light_level = value;
}
@@ -861,7 +866,7 @@ static int asus_hotk_get_info(void)
printk(ASUS_WARNING "Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
- if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
+ if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
printk(ASUS_ERR "Hotkey initialization failed\n");
return -ENODEV;
}
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
new file mode 100644
index 000000000000..6fcb0e96adf4
--- /dev/null
+++ b/drivers/misc/enclosure.c
@@ -0,0 +1,484 @@
+/*
+ * Enclosure Services
+ *
+ * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
+ *
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or
+** modify it under the terms of the GNU General Public License
+** version 2 as published by the Free Software Foundation.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+*/
+#include <linux/device.h>
+#include <linux/enclosure.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+static LIST_HEAD(container_list);
+static DEFINE_MUTEX(container_list_lock);
+static struct class enclosure_class;
+static struct class enclosure_component_class;
+
+/**
+ * enclosure_find - find an enclosure given a device
+ * @dev: the device to match against
+ *
+ * Looks through the list of registered enclosures to see
+ * if it can find a match for a device. Returns NULL if no
+ * enclosure is found. Obtains a reference to the enclosure class
+ * device which must be released with class_device_put().
+ */
+struct enclosure_device *enclosure_find(struct device *dev)
+{
+ struct enclosure_device *edev = NULL;
+
+ mutex_lock(&container_list_lock);
+ list_for_each_entry(edev, &container_list, node) {
+ if (edev->cdev.dev == dev) {
+ class_device_get(&edev->cdev);
+ mutex_unlock(&container_list_lock);
+ return edev;
+ }
+ }
+ mutex_unlock(&container_list_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(enclosure_find);
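A hypothetical caller (illustration only) that wants to know whether one of its devices sits behind a registered enclosure would do something like:

	struct enclosure_device *edev = enclosure_find(dev);

	if (edev) {
		/* ... inspect or use edev ... */
		class_device_put(&edev->cdev);	/* drop the reference taken above */
	}

matching the reference rule stated in the comment.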
+
+/**
+ * enclosure_for_each_device - calls a function for each enclosure
+ * @fn: the function to call
+ * @data: the data to pass to each call
+ *
+ * Loops over all the enclosures calling the function.
+ *
+ * Note: this function uses a mutex that is held across the calls to
+ * @fn, so it must be called from non-atomic context; @fn may (although
+ * it should not) sleep or otherwise cause the mutex to be held for
+ * indefinite periods.
+ */
+int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
+ void *data)
+{
+ int error = 0;
+ struct enclosure_device *edev;
+
+ mutex_lock(&container_list_lock);
+ list_for_each_entry(edev, &container_list, node) {
+ error = fn(edev, data);
+ if (error)
+ break;
+ }
+ mutex_unlock(&container_list_lock);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(enclosure_for_each_device);
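As a sketch of the callback contract (names invented for illustration), a walker that simply counts enclosures could look like:

	static int count_enclosures(struct enclosure_device *edev, void *data)
	{
		(*(int *)data)++;
		return 0;	/* returning non-zero stops the iteration */
	}

	/* ... */
	int count = 0;
	enclosure_for_each_device(count_enclosures, &count);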
+
+/**
+ * enclosure_register - register device as an enclosure
+ *
+ * @dev: device containing the enclosure
+ * @name: name of the enclosure, used for the sysfs class device
+ * @components: number of components in the enclosure
+ * @cb: platform callbacks for the enclosure components
+ *
+ * This sets up the device for being an enclosure.  Note that @dev does
+ * not have to be a dedicated enclosure device.  It may be some other type
+ * of device that additionally responds to enclosure services.
+ */
+struct enclosure_device *
+enclosure_register(struct device *dev, const char *name, int components,
+ struct enclosure_component_callbacks *cb)
+{
+ struct enclosure_device *edev =
+ kzalloc(sizeof(struct enclosure_device) +
+ sizeof(struct enclosure_component)*components,
+ GFP_KERNEL);
+ int err, i;
+
+ BUG_ON(!cb);
+
+ if (!edev)
+ return ERR_PTR(-ENOMEM);
+
+ edev->components = components;
+
+ edev->cdev.class = &enclosure_class;
+ edev->cdev.dev = get_device(dev);
+ edev->cb = cb;
+ snprintf(edev->cdev.class_id, BUS_ID_SIZE, "%s", name);
+ err = class_device_register(&edev->cdev);
+ if (err)
+ goto err;
+
+ for (i = 0; i < components; i++)
+ edev->component[i].number = -1;
+
+ mutex_lock(&container_list_lock);
+ list_add_tail(&edev->node, &container_list);
+ mutex_unlock(&container_list_lock);
+
+ return edev;
+
+ err:
+ put_device(edev->cdev.dev);
+ kfree(edev);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(enclosure_register);
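A typical registration from an enclosure-aware driver might look like the following (hypothetical names; the callbacks struct is the one used in the signature above):

	static struct enclosure_component_callbacks my_enc_cb = {
		.get_fault = my_get_fault,	/* hypothetical helpers */
		.set_fault = my_set_fault,
		/* unused callbacks may be left NULL */
	};

	edev = enclosure_register(&pdev->dev, "my_enclosure", 8, &my_enc_cb);
	if (IS_ERR(edev))
		return PTR_ERR(edev);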
+
+static struct enclosure_component_callbacks enclosure_null_callbacks;
+
+/**
+ * enclosure_unregister - remove an enclosure
+ *
+ * @edev: the registered enclosure to remove
+ */
+void enclosure_unregister(struct enclosure_device *edev)
+{
+ int i;
+
+ mutex_lock(&container_list_lock);
+ list_del(&edev->node);
+ mutex_unlock(&container_list_lock);
+
+ for (i = 0; i < edev->components; i++)
+ if (edev->component[i].number != -1)
+ class_device_unregister(&edev->component[i].cdev);
+
+ /* prevent any callbacks into service user */
+ edev->cb = &enclosure_null_callbacks;
+ class_device_unregister(&edev->cdev);
+}
+EXPORT_SYMBOL_GPL(enclosure_unregister);
+
+static void enclosure_release(struct class_device *cdev)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev);
+
+ put_device(cdev->dev);
+ kfree(edev);
+}
+
+static void enclosure_component_release(struct class_device *cdev)
+{
+ if (cdev->dev)
+ put_device(cdev->dev);
+ class_device_put(cdev->parent);
+}
+
+/**
+ * enclosure_component_register - add a particular component to an enclosure
+ * @edev: the enclosure to add the component to
+ * @number: the number of the component
+ * @type: the type of component being added
+ * @name: an optional name to appear in sysfs (leave NULL if none)
+ *
+ * Registers the component. The name is optional for enclosures that
+ * give their components a unique name. If not, leave the field NULL
+ * and a name will be assigned.
+ *
+ * Returns a pointer to the enclosure component or an error.
+ */
+struct enclosure_component *
+enclosure_component_register(struct enclosure_device *edev,
+ unsigned int number,
+ enum enclosure_component_type type,
+ const char *name)
+{
+ struct enclosure_component *ecomp;
+ struct class_device *cdev;
+ int err;
+
+ if (number >= edev->components)
+ return ERR_PTR(-EINVAL);
+
+ ecomp = &edev->component[number];
+
+ if (ecomp->number != -1)
+ return ERR_PTR(-EINVAL);
+
+ ecomp->type = type;
+ ecomp->number = number;
+ cdev = &ecomp->cdev;
+ cdev->parent = class_device_get(&edev->cdev);
+ cdev->class = &enclosure_component_class;
+ if (name)
+ snprintf(cdev->class_id, BUS_ID_SIZE, "%s", name);
+ else
+ snprintf(cdev->class_id, BUS_ID_SIZE, "%u", number);
+
+ err = class_device_register(cdev);
+ if (err)
+ return ERR_PTR(err);
+
+ return ecomp;
+}
+EXPORT_SYMBOL_GPL(enclosure_component_register);
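Continuing the hypothetical registration above, each slot would then be declared once, before any devices are bound to it:

	for (i = 0; i < 8; i++) {
		struct enclosure_component *ecomp;

		ecomp = enclosure_component_register(edev, i,
					ENCLOSURE_COMPONENT_DEVICE, NULL);
		if (IS_ERR(ecomp))
			break;		/* error handling elided */
	}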
+
+/**
+ * enclosure_add_device - add a device as being part of an enclosure
+ * @edev: the enclosure device being added to.
+ * @component: the number of the component
+ * @dev: the device being added
+ *
+ * Declares a real device to reside in slot (or identifier) @num of an
+ * enclosure. This will cause the relevant sysfs links to appear.
+ * This function may also be used to change a device associated with
+ * an enclosure without having to call enclosure_remove_device() in
+ * between.
+ *
+ * Returns zero on success or an error.
+ */
+int enclosure_add_device(struct enclosure_device *edev, int component,
+ struct device *dev)
+{
+ struct class_device *cdev;
+
+ if (!edev || component >= edev->components)
+ return -EINVAL;
+
+ cdev = &edev->component[component].cdev;
+
+ class_device_del(cdev);
+ if (cdev->dev)
+ put_device(cdev->dev);
+ cdev->dev = get_device(dev);
+ return class_device_add(cdev);
+}
+EXPORT_SYMBOL_GPL(enclosure_add_device);
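Once the real device occupying, say, slot 3 is known, the sysfs link is created with a call along these lines (the struct device shown is only an example of a suitable one):

	err = enclosure_add_device(edev, 3, &sdev->sdev_gendev);

and torn down again with enclosure_remove_device(edev, 3) when the device goes away.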
+
+/**
+ * enclosure_remove_device - remove a device from an enclosure
+ * @edev: the enclosure device
+ * @component: the number of the component to remove
+ *
+ * Returns zero on success or an error.
+ *
+ */
+int enclosure_remove_device(struct enclosure_device *edev, int component)
+{
+ struct class_device *cdev;
+
+ if (!edev || component >= edev->components)
+ return -EINVAL;
+
+ cdev = &edev->component[component].cdev;
+
+ class_device_del(cdev);
+ if (cdev->dev)
+ put_device(cdev->dev);
+ cdev->dev = NULL;
+ return class_device_add(cdev);
+}
+EXPORT_SYMBOL_GPL(enclosure_remove_device);
+
+/*
+ * sysfs pieces below
+ */
+
+static ssize_t enclosure_show_components(struct class_device *cdev, char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev);
+
+ return snprintf(buf, 40, "%d\n", edev->components);
+}
+
+static struct class_device_attribute enclosure_attrs[] = {
+ __ATTR(components, S_IRUGO, enclosure_show_components, NULL),
+ __ATTR_NULL
+};
+
+static struct class enclosure_class = {
+ .name = "enclosure",
+ .owner = THIS_MODULE,
+ .release = enclosure_release,
+ .class_dev_attrs = enclosure_attrs,
+};
+
+static const char *const enclosure_status [] = {
+ [ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported",
+ [ENCLOSURE_STATUS_OK] = "OK",
+ [ENCLOSURE_STATUS_CRITICAL] = "critical",
+ [ENCLOSURE_STATUS_NON_CRITICAL] = "non-critical",
+ [ENCLOSURE_STATUS_UNRECOVERABLE] = "unrecoverable",
+ [ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
+ [ENCLOSURE_STATUS_UNKNOWN] = "unknown",
+ [ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
+};
+
+static const char *const enclosure_type [] = {
+ [ENCLOSURE_COMPONENT_DEVICE] = "device",
+ [ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device",
+};
+
+static ssize_t get_component_fault(struct class_device *cdev, char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+ if (edev->cb->get_fault)
+ edev->cb->get_fault(edev, ecomp);
+ return snprintf(buf, 40, "%d\n", ecomp->fault);
+}
+
+static ssize_t set_component_fault(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+ int val = simple_strtoul(buf, NULL, 0);
+
+ if (edev->cb->set_fault)
+ edev->cb->set_fault(edev, ecomp, val);
+ return count;
+}
+
+static ssize_t get_component_status(struct class_device *cdev, char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+ if (edev->cb->get_status)
+ edev->cb->get_status(edev, ecomp);
+ return snprintf(buf, 40, "%s\n", enclosure_status[ecomp->status]);
+}
+
+static ssize_t set_component_status(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(enclosure_status); i++) {
+ if (strncmp(buf, enclosure_status[i],
+ strlen(enclosure_status[i])) == 0 &&
+ (buf[strlen(enclosure_status[i])] == '\n' ||
+ buf[strlen(enclosure_status[i])] == '\0'))
+ break;
+ }
+
+ if (i < ARRAY_SIZE(enclosure_status) && edev->cb->set_status) {
+ edev->cb->set_status(edev, ecomp, i);
+ return count;
+ } else
+ return -EINVAL;
+}
+
+static ssize_t get_component_active(struct class_device *cdev, char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+ if (edev->cb->get_active)
+ edev->cb->get_active(edev, ecomp);
+ return snprintf(buf, 40, "%d\n", ecomp->active);
+}
+
+static ssize_t set_component_active(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+ int val = simple_strtoul(buf, NULL, 0);
+
+ if (edev->cb->set_active)
+ edev->cb->set_active(edev, ecomp, val);
+ return count;
+}
+
+static ssize_t get_component_locate(struct class_device *cdev, char *buf)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+ if (edev->cb->get_locate)
+ edev->cb->get_locate(edev, ecomp);
+ return snprintf(buf, 40, "%d\n", ecomp->locate);
+}
+
+static ssize_t set_component_locate(struct class_device *cdev, const char *buf,
+ size_t count)
+{
+ struct enclosure_device *edev = to_enclosure_device(cdev->parent);
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+ int val = simple_strtoul(buf, NULL, 0);
+
+ if (edev->cb->set_locate)
+ edev->cb->set_locate(edev, ecomp, val);
+ return count;
+}
+
+static ssize_t get_component_type(struct class_device *cdev, char *buf)
+{
+ struct enclosure_component *ecomp = to_enclosure_component(cdev);
+
+ return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]);
+}
+
+
+static struct class_device_attribute enclosure_component_attrs[] = {
+ __ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
+ set_component_fault),
+ __ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
+ set_component_status),
+ __ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
+ set_component_active),
+ __ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
+ set_component_locate),
+ __ATTR(type, S_IRUGO, get_component_type, NULL),
+ __ATTR_NULL
+};
+
+static struct class enclosure_component_class = {
+ .name = "enclosure_component",
+ .owner = THIS_MODULE,
+ .class_dev_attrs = enclosure_component_attrs,
+ .release = enclosure_component_release,
+};
+
+static int __init enclosure_init(void)
+{
+ int err;
+
+ err = class_register(&enclosure_class);
+ if (err)
+ return err;
+ err = class_register(&enclosure_component_class);
+ if (err)
+ goto err_out;
+
+ return 0;
+ err_out:
+ class_unregister(&enclosure_class);
+
+ return err;
+}
+
+static void __exit enclosure_exit(void)
+{
+ class_unregister(&enclosure_component_class);
+ class_unregister(&enclosure_class);
+}
+
+module_init(enclosure_init);
+module_exit(enclosure_exit);
+
+MODULE_AUTHOR("James Bottomley");
+MODULE_DESCRIPTION("Enclosure Services");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/fujitsu-laptop.c b/drivers/misc/fujitsu-laptop.c
index c8d62c268b11..1cfd7f3f1294 100644
--- a/drivers/misc/fujitsu-laptop.c
+++ b/drivers/misc/fujitsu-laptop.c
@@ -50,7 +50,6 @@
#include <linux/dmi.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
-#include <linux/autoconf.h>
#define FUJITSU_DRIVER_VERSION "0.3"
diff --git a/drivers/misc/intel_menlow.c b/drivers/misc/intel_menlow.c
new file mode 100644
index 000000000000..f70984ab1e1b
--- /dev/null
+++ b/drivers/misc/intel_menlow.c
@@ -0,0 +1,526 @@
+/*
+ * intel_menlow.c - Intel menlow Driver for thermal management extension
+ *
+ * Copyright (C) 2008 Intel Corp
+ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This driver creates the sysfs interface for programming the sensors.
+ * It also implements the driver for the Intel Menlow memory controller
+ * (hardware id INT0002), which makes use of the platform-specific ACPI
+ * methods to get/set bandwidth.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+
+#include <linux/thermal.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Thomas Sujith");
+MODULE_AUTHOR("Zhang Rui");
+MODULE_DESCRIPTION("Intel Menlow platform specific driver");
+MODULE_LICENSE("GPL");
+
+/*
+ * Memory controller device control
+ */
+
+#define MEMORY_GET_BANDWIDTH "GTHS"
+#define MEMORY_SET_BANDWIDTH "STHS"
+#define MEMORY_ARG_CUR_BANDWIDTH 1
+#define MEMORY_ARG_MAX_BANDWIDTH 0
+
+static int memory_get_int_max_bandwidth(struct thermal_cooling_device *cdev,
+ unsigned long *max_state)
+{
+ struct acpi_device *device = cdev->devdata;
+ acpi_handle handle = device->handle;
+ unsigned long value;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status = AE_OK;
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = MEMORY_ARG_MAX_BANDWIDTH;
+ status = acpi_evaluate_integer(handle, MEMORY_GET_BANDWIDTH,
+ &arg_list, &value);
+ if (ACPI_FAILURE(status))
+ return -EFAULT;
+
+ *max_state = value - 1;
+ return 0;
+}
+
+static int memory_get_max_bandwidth(struct thermal_cooling_device *cdev,
+ char *buf)
+{
+ unsigned long value;
+ if (memory_get_int_max_bandwidth(cdev, &value))
+ return -EINVAL;
+
+ return sprintf(buf, "%ld\n", value);
+}
+
+static int memory_get_cur_bandwidth(struct thermal_cooling_device *cdev,
+ char *buf)
+{
+ struct acpi_device *device = cdev->devdata;
+ acpi_handle handle = device->handle;
+ unsigned long value;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status = AE_OK;
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = MEMORY_ARG_CUR_BANDWIDTH;
+ status = acpi_evaluate_integer(handle, MEMORY_GET_BANDWIDTH,
+ &arg_list, &value);
+ if (ACPI_FAILURE(status))
+ return -EFAULT;
+
+ return sprintf(buf, "%ld\n", value);
+}
+
+static int memory_set_cur_bandwidth(struct thermal_cooling_device *cdev,
+ unsigned int state)
+{
+ struct acpi_device *device = cdev->devdata;
+ acpi_handle handle = device->handle;
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status;
+ unsigned long temp;
+ unsigned long max_state;
+
+ if (memory_get_int_max_bandwidth(cdev, &max_state))
+ return -EFAULT;
+
+ if (state > max_state)
+ return -EINVAL;
+
+ arg_list.count = 1;
+ arg_list.pointer = &arg;
+ arg.type = ACPI_TYPE_INTEGER;
+ arg.integer.value = state;
+
+ status = acpi_evaluate_integer(handle, MEMORY_SET_BANDWIDTH,
+ &arg_list, &temp);
+
+ printk(KERN_INFO
+ "Bandwidth value was %d: status is %d\n", state, status);
+ if (ACPI_FAILURE(status))
+ return -EFAULT;
+
+ return 0;
+}
+
+static struct thermal_cooling_device_ops memory_cooling_ops = {
+ .get_max_state = memory_get_max_bandwidth,
+ .get_cur_state = memory_get_cur_bandwidth,
+ .set_cur_state = memory_set_cur_bandwidth,
+};
+
+/*
+ * Memory Device Management
+ */
+static int intel_menlow_memory_add(struct acpi_device *device)
+{
+ int result = -ENODEV;
+ acpi_status status = AE_OK;
+ acpi_handle dummy;
+ struct thermal_cooling_device *cdev;
+
+ if (!device)
+ return -EINVAL;
+
+ status = acpi_get_handle(device->handle, MEMORY_GET_BANDWIDTH, &dummy);
+ if (ACPI_FAILURE(status))
+ goto end;
+
+ status = acpi_get_handle(device->handle, MEMORY_SET_BANDWIDTH, &dummy);
+ if (ACPI_FAILURE(status))
+ goto end;
+
+ cdev = thermal_cooling_device_register("Memory controller", device,
+ &memory_cooling_ops);
+ acpi_driver_data(device) = cdev;
+ if (!cdev)
+ result = -ENODEV;
+ else {
+ result = sysfs_create_link(&device->dev.kobj,
+ &cdev->device.kobj, "thermal_cooling");
+ if (result)
+ goto unregister;
+
+ result = sysfs_create_link(&cdev->device.kobj,
+ &device->dev.kobj, "device");
+ if (result) {
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ goto unregister;
+ }
+ }
+
+ end:
+ return result;
+
+ unregister:
+ thermal_cooling_device_unregister(cdev);
+ return result;
+
+}
+
+static int intel_menlow_memory_remove(struct acpi_device *device, int type)
+{
+ struct thermal_cooling_device *cdev = acpi_driver_data(device);
+
+ if (!device || !cdev)
+ return -EINVAL;
+
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+ sysfs_remove_link(&cdev->device.kobj, "device");
+ thermal_cooling_device_unregister(cdev);
+
+ return 0;
+}
+
+static const struct acpi_device_id intel_menlow_memory_ids[] = {
+ {"INT0002", 0},
+ {"", 0},
+};
+
+static struct acpi_driver intel_menlow_memory_driver = {
+ .name = "intel_menlow_thermal_control",
+ .ids = intel_menlow_memory_ids,
+ .ops = {
+ .add = intel_menlow_memory_add,
+ .remove = intel_menlow_memory_remove,
+ },
+};
+
+/*
+ * Sensor control on menlow platform
+ */
+
+#define THERMAL_AUX0 0
+#define THERMAL_AUX1 1
+#define GET_AUX0 "GAX0"
+#define GET_AUX1 "GAX1"
+#define SET_AUX0 "SAX0"
+#define SET_AUX1 "SAX1"
+
+struct intel_menlow_attribute {
+ struct device_attribute attr;
+ struct device *device;
+ acpi_handle handle;
+ struct list_head node;
+};
+
+static LIST_HEAD(intel_menlow_attr_list);
+static DEFINE_MUTEX(intel_menlow_attr_lock);
+
+/*
+ * sensor_get_auxtrip - get the current auxtrip value from the sensor
+ * @handle: ACPI handle of the thermal zone
+ * @index: 0 for AUX0, 1 for AUX1
+ * @value: where to store the result
+ */
+static int sensor_get_auxtrip(acpi_handle handle, int index, int *value)
+{
+ acpi_status status;
+
+ if ((index != 0 && index != 1) || !value)
+ return -EINVAL;
+
+ status = acpi_evaluate_integer(handle, index ? GET_AUX1 : GET_AUX0,
+ NULL, (unsigned long *)value);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * sensor_set_auxtrip - set a new auxtrip value on the sensor
+ * @handle: ACPI handle of the thermal zone
+ * @index: 0 for AUX0, 1 for AUX1
+ * @value: the new trip value
+ */
+static int sensor_set_auxtrip(acpi_handle handle, int index, int value)
+{
+ acpi_status status;
+ union acpi_object arg = {
+ ACPI_TYPE_INTEGER
+ };
+ struct acpi_object_list args = {
+ 1, &arg
+ };
+ int temp;
+
+ if (index != 0 && index != 1)
+ return -EINVAL;
+
+ status = acpi_evaluate_integer(handle, index ? GET_AUX0 : GET_AUX1,
+ NULL, (unsigned long *)&temp);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ if ((index && value < temp) || (!index && value > temp))
+ return -EINVAL;
+
+ arg.integer.value = value;
+ status = acpi_evaluate_integer(handle, index ? SET_AUX1 : SET_AUX0,
+ &args, (unsigned long *)&temp);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* do we need to check the return value of SAX0/SAX1 ? */
+
+ return 0;
+}
+
+#define to_intel_menlow_attr(_attr) \
+ container_of(_attr, struct intel_menlow_attribute, attr)
+
+static ssize_t aux0_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
+ int value;
+ int result;
+
+ result = sensor_get_auxtrip(attr->handle, 0, &value);
+
+ return result ? result : sprintf(buf, "%lu", KELVIN_TO_CELSIUS(value));
+}
+
+static ssize_t aux1_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
+ int value;
+ int result;
+
+ result = sensor_get_auxtrip(attr->handle, 1, &value);
+
+ return result ? result : sprintf(buf, "%lu", KELVIN_TO_CELSIUS(value));
+}
+
+static ssize_t aux0_store(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
+ int value;
+ int result;
+
+ /* Sanity check; should be a positive integer */
+ if (!sscanf(buf, "%d", &value))
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ result = sensor_set_auxtrip(attr->handle, 0, CELSIUS_TO_KELVIN(value));
+ return result ? result : count;
+}
+
+static ssize_t aux1_store(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
+ int value;
+ int result;
+
+ /* Sanity check; should be a positive integer */
+ if (!sscanf(buf, "%d", &value))
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ result = sensor_set_auxtrip(attr->handle, 1, CELSIUS_TO_KELVIN(value));
+ return result ? result : count;
+}
+
+/* BIOS can enable/disable the thermal user application on the Dabney platform */
+#define BIOS_ENABLED "\\_TZ.GSTS"
+static ssize_t bios_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ acpi_status status;
+ unsigned long bios_enabled;
+
+ status = acpi_evaluate_integer(NULL, BIOS_ENABLED, NULL, &bios_enabled);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n", bios_enabled ? "enabled" : "disabled");
+}
+
+static int intel_menlow_add_one_attribute(char *name, int mode, void *show,
+ void *store, struct device *dev,
+ acpi_handle handle)
+{
+ struct intel_menlow_attribute *attr;
+ int result;
+
+ attr = kzalloc(sizeof(struct intel_menlow_attribute), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ attr->attr.attr.name = name;
+ attr->attr.attr.mode = mode;
+ attr->attr.show = show;
+ attr->attr.store = store;
+ attr->device = dev;
+ attr->handle = handle;
+
+ result = device_create_file(dev, &attr->attr);
+ if (result) {
+ kfree(attr);
+ return result;
+ }
+
+ mutex_lock(&intel_menlow_attr_lock);
+ list_add_tail(&attr->node, &intel_menlow_attr_list);
+ mutex_unlock(&intel_menlow_attr_lock);
+
+ return 0;
+}
+
+static acpi_status intel_menlow_register_sensor(acpi_handle handle, u32 lvl,
+ void *context, void **rv)
+{
+ acpi_status status;
+ acpi_handle dummy;
+ struct thermal_zone_device *thermal;
+ int result;
+
+ result = acpi_bus_get_private_data(handle, (void **)&thermal);
+ if (result)
+ return 0;
+
+ /* _TZ must have the AUX0/1 methods */
+ status = acpi_get_handle(handle, GET_AUX0, &dummy);
+ if (ACPI_FAILURE(status))
+ goto not_found;
+
+ status = acpi_get_handle(handle, SET_AUX0, &dummy);
+ if (ACPI_FAILURE(status))
+ goto not_found;
+
+ result = intel_menlow_add_one_attribute("aux0", 0644,
+ aux0_show, aux0_store,
+ &thermal->device, handle);
+ if (result)
+ return AE_ERROR;
+
+ status = acpi_get_handle(handle, GET_AUX1, &dummy);
+ if (ACPI_FAILURE(status))
+ goto not_found;
+
+ status = acpi_get_handle(handle, SET_AUX1, &dummy);
+ if (ACPI_FAILURE(status))
+ goto not_found;
+
+ result = intel_menlow_add_one_attribute("aux1", 0644,
+ aux1_show, aux1_store,
+ &thermal->device, handle);
+ if (result)
+ return AE_ERROR;
+
+ /*
+ * create the "bios_enabled" attribute, which tells user space whether
+ * the thermal user application should be loaded
+ */
+
+ result = intel_menlow_add_one_attribute("bios_enabled", 0444,
+ bios_enabled_show, NULL,
+ &thermal->device, handle);
+ if (result)
+ return AE_ERROR;
+
+ not_found:
+ if (status == AE_NOT_FOUND)
+ return AE_OK;
+ else
+ return status;
+}
+
+static void intel_menlow_unregister_sensor(void)
+{
+ struct intel_menlow_attribute *pos, *next;
+
+ mutex_lock(&intel_menlow_attr_lock);
+ list_for_each_entry_safe(pos, next, &intel_menlow_attr_list, node) {
+ list_del(&pos->node);
+ device_remove_file(pos->device, &pos->attr);
+ kfree(pos);
+ }
+ mutex_unlock(&intel_menlow_attr_lock);
+
+ return;
+}
+
+static int __init intel_menlow_module_init(void)
+{
+ int result = -ENODEV;
+ acpi_status status;
+ unsigned long enable;
+
+ if (acpi_disabled)
+ return result;
+
+ /* Looking for the \_TZ.GSTS method */
+ status = acpi_evaluate_integer(NULL, BIOS_ENABLED, NULL, &enable);
+ if (ACPI_FAILURE(status) || !enable)
+ return -ENODEV;
+
+ /* Looking for ACPI device MEM0 with hardware id INT0002 */
+ result = acpi_bus_register_driver(&intel_menlow_memory_driver);
+ if (result)
+ return result;
+
+ /* Looking for sensors in each ACPI thermal zone */
+ status = acpi_walk_namespace(ACPI_TYPE_THERMAL, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ intel_menlow_register_sensor, NULL, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
+}
+
+static void __exit intel_menlow_module_exit(void)
+{
+ acpi_bus_unregister_driver(&intel_menlow_memory_driver);
+ intel_menlow_unregister_sensor();
+}
+
+module_init(intel_menlow_module_init);
+module_exit(intel_menlow_module_exit);
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 552b7957a92a..c884730c5eaf 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -129,27 +129,28 @@ module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
"crash point is to be hit to trigger action");
-unsigned int jp_do_irq(unsigned int irq)
+static unsigned int jp_do_irq(unsigned int irq)
{
lkdtm_handler();
jprobe_return();
return 0;
}
-irqreturn_t jp_handle_irq_event(unsigned int irq, struct irqaction *action)
+static irqreturn_t jp_handle_irq_event(unsigned int irq,
+ struct irqaction *action)
{
lkdtm_handler();
jprobe_return();
return 0;
}
-void jp_tasklet_action(struct softirq_action *a)
+static void jp_tasklet_action(struct softirq_action *a)
{
lkdtm_handler();
jprobe_return();
}
-void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
lkdtm_handler();
jprobe_return();
@@ -157,23 +158,24 @@ void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
struct scan_control;
-unsigned long jp_shrink_inactive_list(unsigned long max_scan,
- struct zone *zone, struct scan_control *sc)
+static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
+ struct zone *zone,
+ struct scan_control *sc)
{
lkdtm_handler();
jprobe_return();
return 0;
}
-int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
- const enum hrtimer_mode mode)
+static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
{
lkdtm_handler();
jprobe_return();
return 0;
}
-int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
lkdtm_handler();
jprobe_return();
@@ -270,7 +272,7 @@ void lkdtm_handler(void)
}
}
-int lkdtm_module_init(void)
+static int __init lkdtm_module_init(void)
{
int ret;
@@ -331,7 +333,7 @@ int lkdtm_module_init(void)
return 0;
}
-void lkdtm_module_exit(void)
+static void __exit lkdtm_module_exit(void)
{
unregister_jprobe(&lkdtm);
printk(KERN_INFO "lkdtm : Crash point unregistered\n");
diff --git a/drivers/misc/msi-laptop.c b/drivers/misc/msi-laptop.c
index 83679c762925..de898c6938f3 100644
--- a/drivers/misc/msi-laptop.c
+++ b/drivers/misc/msi-laptop.c
@@ -58,7 +58,6 @@
#include <linux/dmi.h>
#include <linux/backlight.h>
#include <linux/platform_device.h>
-#include <linux/autoconf.h>
#define MSI_DRIVER_VERSION "0.5"
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index cd221fd0fb94..7fa61e907e1c 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -25,7 +25,7 @@
#include <asm/atomic.h>
#include <asm/io.h>
-#define PHANTOM_VERSION "n0.9.7"
+#define PHANTOM_VERSION "n0.9.8"
#define PHANTOM_MAX_MINORS 8
@@ -456,8 +456,9 @@ static int phantom_resume(struct pci_dev *pdev)
#endif
static struct pci_device_id phantom_pci_tbl[] __devinitdata = {
- { PCI_DEVICE(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050),
- .class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 },
+ { .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050,
+ .subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_DEVICE_ID_PLX_9050,
+ .class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, phantom_pci_tbl);
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c
index b0f68031b49d..899e3f75f288 100644
--- a/drivers/misc/sony-laptop.c
+++ b/drivers/misc/sony-laptop.c
@@ -73,7 +73,7 @@
if (debug) printk(KERN_WARNING DRV_PFX msg); \
} while (0)
-#define SONY_LAPTOP_DRIVER_VERSION "0.5"
+#define SONY_LAPTOP_DRIVER_VERSION "0.6"
#define SONY_NC_CLASS "sony-nc"
#define SONY_NC_HID "SNY5001"
@@ -146,68 +146,70 @@ struct sony_laptop_keypress {
* and input layer indexes in the keymap
*/
static int sony_laptop_input_index[] = {
- -1, /* no event */
- -1, /* SONYPI_EVENT_JOGDIAL_DOWN */
- -1, /* SONYPI_EVENT_JOGDIAL_UP */
- -1, /* SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */
- -1, /* SONYPI_EVENT_JOGDIAL_UP_PRESSED */
- -1, /* SONYPI_EVENT_JOGDIAL_PRESSED */
- -1, /* SONYPI_EVENT_JOGDIAL_RELEASED */
- 0, /* SONYPI_EVENT_CAPTURE_PRESSED */
- 1, /* SONYPI_EVENT_CAPTURE_RELEASED */
- 2, /* SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
- 3, /* SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
- 4, /* SONYPI_EVENT_FNKEY_ESC */
- 5, /* SONYPI_EVENT_FNKEY_F1 */
- 6, /* SONYPI_EVENT_FNKEY_F2 */
- 7, /* SONYPI_EVENT_FNKEY_F3 */
- 8, /* SONYPI_EVENT_FNKEY_F4 */
- 9, /* SONYPI_EVENT_FNKEY_F5 */
- 10, /* SONYPI_EVENT_FNKEY_F6 */
- 11, /* SONYPI_EVENT_FNKEY_F7 */
- 12, /* SONYPI_EVENT_FNKEY_F8 */
- 13, /* SONYPI_EVENT_FNKEY_F9 */
- 14, /* SONYPI_EVENT_FNKEY_F10 */
- 15, /* SONYPI_EVENT_FNKEY_F11 */
- 16, /* SONYPI_EVENT_FNKEY_F12 */
- 17, /* SONYPI_EVENT_FNKEY_1 */
- 18, /* SONYPI_EVENT_FNKEY_2 */
- 19, /* SONYPI_EVENT_FNKEY_D */
- 20, /* SONYPI_EVENT_FNKEY_E */
- 21, /* SONYPI_EVENT_FNKEY_F */
- 22, /* SONYPI_EVENT_FNKEY_S */
- 23, /* SONYPI_EVENT_FNKEY_B */
- 24, /* SONYPI_EVENT_BLUETOOTH_PRESSED */
- 25, /* SONYPI_EVENT_PKEY_P1 */
- 26, /* SONYPI_EVENT_PKEY_P2 */
- 27, /* SONYPI_EVENT_PKEY_P3 */
- 28, /* SONYPI_EVENT_BACK_PRESSED */
- -1, /* SONYPI_EVENT_LID_CLOSED */
- -1, /* SONYPI_EVENT_LID_OPENED */
- 29, /* SONYPI_EVENT_BLUETOOTH_ON */
- 30, /* SONYPI_EVENT_BLUETOOTH_OFF */
- 31, /* SONYPI_EVENT_HELP_PRESSED */
- 32, /* SONYPI_EVENT_FNKEY_ONLY */
- 33, /* SONYPI_EVENT_JOGDIAL_FAST_DOWN */
- 34, /* SONYPI_EVENT_JOGDIAL_FAST_UP */
- 35, /* SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
- 36, /* SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
- 37, /* SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
- 38, /* SONYPI_EVENT_JOGDIAL_VFAST_UP */
- 39, /* SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
- 40, /* SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
- 41, /* SONYPI_EVENT_ZOOM_PRESSED */
- 42, /* SONYPI_EVENT_THUMBPHRASE_PRESSED */
- 43, /* SONYPI_EVENT_MEYE_FACE */
- 44, /* SONYPI_EVENT_MEYE_OPPOSITE */
- 45, /* SONYPI_EVENT_MEMORYSTICK_INSERT */
- 46, /* SONYPI_EVENT_MEMORYSTICK_EJECT */
- -1, /* SONYPI_EVENT_ANYBUTTON_RELEASED */
- -1, /* SONYPI_EVENT_BATTERY_INSERT */
- -1, /* SONYPI_EVENT_BATTERY_REMOVE */
- -1, /* SONYPI_EVENT_FNKEY_RELEASED */
- 47, /* SONYPI_EVENT_WIRELESS_ON */
- 48, /* SONYPI_EVENT_WIRELESS_OFF */
+ -1, /* 0 no event */
+ -1, /* 1 SONYPI_EVENT_JOGDIAL_DOWN */
+ -1, /* 2 SONYPI_EVENT_JOGDIAL_UP */
+ -1, /* 3 SONYPI_EVENT_JOGDIAL_DOWN_PRESSED */
+ -1, /* 4 SONYPI_EVENT_JOGDIAL_UP_PRESSED */
+ -1, /* 5 SONYPI_EVENT_JOGDIAL_PRESSED */
+ -1, /* 6 SONYPI_EVENT_JOGDIAL_RELEASED */
+ 0, /* 7 SONYPI_EVENT_CAPTURE_PRESSED */
+ 1, /* 8 SONYPI_EVENT_CAPTURE_RELEASED */
+ 2, /* 9 SONYPI_EVENT_CAPTURE_PARTIALPRESSED */
+ 3, /* 10 SONYPI_EVENT_CAPTURE_PARTIALRELEASED */
+ 4, /* 11 SONYPI_EVENT_FNKEY_ESC */
+ 5, /* 12 SONYPI_EVENT_FNKEY_F1 */
+ 6, /* 13 SONYPI_EVENT_FNKEY_F2 */
+ 7, /* 14 SONYPI_EVENT_FNKEY_F3 */
+ 8, /* 15 SONYPI_EVENT_FNKEY_F4 */
+ 9, /* 16 SONYPI_EVENT_FNKEY_F5 */
+ 10, /* 17 SONYPI_EVENT_FNKEY_F6 */
+ 11, /* 18 SONYPI_EVENT_FNKEY_F7 */
+ 12, /* 19 SONYPI_EVENT_FNKEY_F8 */
+ 13, /* 20 SONYPI_EVENT_FNKEY_F9 */
+ 14, /* 21 SONYPI_EVENT_FNKEY_F10 */
+ 15, /* 22 SONYPI_EVENT_FNKEY_F11 */
+ 16, /* 23 SONYPI_EVENT_FNKEY_F12 */
+ 17, /* 24 SONYPI_EVENT_FNKEY_1 */
+ 18, /* 25 SONYPI_EVENT_FNKEY_2 */
+ 19, /* 26 SONYPI_EVENT_FNKEY_D */
+ 20, /* 27 SONYPI_EVENT_FNKEY_E */
+ 21, /* 28 SONYPI_EVENT_FNKEY_F */
+ 22, /* 29 SONYPI_EVENT_FNKEY_S */
+ 23, /* 30 SONYPI_EVENT_FNKEY_B */
+ 24, /* 31 SONYPI_EVENT_BLUETOOTH_PRESSED */
+ 25, /* 32 SONYPI_EVENT_PKEY_P1 */
+ 26, /* 33 SONYPI_EVENT_PKEY_P2 */
+ 27, /* 34 SONYPI_EVENT_PKEY_P3 */
+ 28, /* 35 SONYPI_EVENT_BACK_PRESSED */
+ -1, /* 36 SONYPI_EVENT_LID_CLOSED */
+ -1, /* 37 SONYPI_EVENT_LID_OPENED */
+ 29, /* 38 SONYPI_EVENT_BLUETOOTH_ON */
+ 30, /* 39 SONYPI_EVENT_BLUETOOTH_OFF */
+ 31, /* 40 SONYPI_EVENT_HELP_PRESSED */
+ 32, /* 41 SONYPI_EVENT_FNKEY_ONLY */
+ 33, /* 42 SONYPI_EVENT_JOGDIAL_FAST_DOWN */
+ 34, /* 43 SONYPI_EVENT_JOGDIAL_FAST_UP */
+ 35, /* 44 SONYPI_EVENT_JOGDIAL_FAST_DOWN_PRESSED */
+ 36, /* 45 SONYPI_EVENT_JOGDIAL_FAST_UP_PRESSED */
+ 37, /* 46 SONYPI_EVENT_JOGDIAL_VFAST_DOWN */
+ 38, /* 47 SONYPI_EVENT_JOGDIAL_VFAST_UP */
+ 39, /* 48 SONYPI_EVENT_JOGDIAL_VFAST_DOWN_PRESSED */
+ 40, /* 49 SONYPI_EVENT_JOGDIAL_VFAST_UP_PRESSED */
+ 41, /* 50 SONYPI_EVENT_ZOOM_PRESSED */
+ 42, /* 51 SONYPI_EVENT_THUMBPHRASE_PRESSED */
+ 43, /* 52 SONYPI_EVENT_MEYE_FACE */
+ 44, /* 53 SONYPI_EVENT_MEYE_OPPOSITE */
+ 45, /* 54 SONYPI_EVENT_MEMORYSTICK_INSERT */
+ 46, /* 55 SONYPI_EVENT_MEMORYSTICK_EJECT */
+ -1, /* 56 SONYPI_EVENT_ANYBUTTON_RELEASED */
+ -1, /* 57 SONYPI_EVENT_BATTERY_INSERT */
+ -1, /* 58 SONYPI_EVENT_BATTERY_REMOVE */
+ -1, /* 59 SONYPI_EVENT_FNKEY_RELEASED */
+ 47, /* 60 SONYPI_EVENT_WIRELESS_ON */
+ 48, /* 61 SONYPI_EVENT_WIRELESS_OFF */
+ 49, /* 62 SONYPI_EVENT_ZOOM_IN_PRESSED */
+ 50, /* 63 SONYPI_EVENT_ZOOM_OUT_PRESSED */
};
static int sony_laptop_input_keycode_map[] = {
@@ -260,6 +262,8 @@ static int sony_laptop_input_keycode_map[] = {
KEY_RESERVED, /* 46 SONYPI_EVENT_MEMORYSTICK_EJECT */
KEY_WLAN, /* 47 SONYPI_EVENT_WIRELESS_ON */
KEY_WLAN, /* 48 SONYPI_EVENT_WIRELESS_OFF */
+ KEY_ZOOMIN, /* 49 SONYPI_EVENT_ZOOM_IN_PRESSED */
+ KEY_ZOOMOUT /* 50 SONYPI_EVENT_ZOOM_OUT_PRESSED */
};
/* release buttons after a short delay if pressed */
@@ -311,7 +315,7 @@ static void sony_laptop_report_input_event(u8 event)
break;
default:
- if (event > ARRAY_SIZE (sony_laptop_input_keycode_map)) {
+ if (event > ARRAY_SIZE(sony_laptop_input_index)) {
dprintk("sony_laptop_report_input_event, event not known: %d\n", event);
break;
}
@@ -875,6 +879,15 @@ static const struct dmi_system_id sony_nc_ids[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-C"),
},
},
+ {
+ .ident = "Sony Vaio N Series",
+ .callback = sony_nc_C_enable,
+ .driver_data = sony_C_events,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VGN-N"),
+ },
+ },
{ }
};
@@ -1169,10 +1182,12 @@ static struct acpi_driver sony_nc_driver = {
#define SONYPI_DEVICE_TYPE1 0x00000001
#define SONYPI_DEVICE_TYPE2 0x00000002
#define SONYPI_DEVICE_TYPE3 0x00000004
+#define SONYPI_DEVICE_TYPE4 0x00000008
#define SONYPI_TYPE1_OFFSET 0x04
#define SONYPI_TYPE2_OFFSET 0x12
#define SONYPI_TYPE3_OFFSET 0x12
+#define SONYPI_TYPE4_OFFSET 0x12
struct sony_pic_ioport {
struct acpi_resource_io io1;
@@ -1185,18 +1200,33 @@ struct sony_pic_irq {
struct list_head list;
};
+struct sonypi_eventtypes {
+ u8 data;
+ unsigned long mask;
+ struct sonypi_event *events;
+};
+
+struct device_ctrl {
+ int model;
+ int (*handle_irq)(const u8, const u8);
+ u16 evport_offset;
+ u8 has_camera;
+ u8 has_bluetooth;
+ u8 has_wwan;
+ struct sonypi_eventtypes *event_types;
+};
+
struct sony_pic_dev {
- int model;
- u16 evport_offset;
- u8 camera_power;
- u8 bluetooth_power;
- u8 wwan_power;
+ struct device_ctrl *control;
struct acpi_device *acpi_dev;
struct sony_pic_irq *cur_irq;
struct sony_pic_ioport *cur_ioport;
struct list_head interrupts;
struct list_head ioports;
struct mutex lock;
+ u8 camera_power;
+ u8 bluetooth_power;
+ u8 wwan_power;
};
static struct sony_pic_dev spic_dev = {
@@ -1253,6 +1283,7 @@ static struct sonypi_event sonypi_joggerev[] = {
static struct sonypi_event sonypi_captureev[] = {
{ 0x05, SONYPI_EVENT_CAPTURE_PARTIALPRESSED },
{ 0x07, SONYPI_EVENT_CAPTURE_PRESSED },
+ { 0x40, SONYPI_EVENT_CAPTURE_PRESSED },
{ 0x01, SONYPI_EVENT_CAPTURE_PARTIALRELEASED },
{ 0, 0 }
};
@@ -1289,7 +1320,6 @@ static struct sonypi_event sonypi_pkeyev[] = {
{ 0x01, SONYPI_EVENT_PKEY_P1 },
{ 0x02, SONYPI_EVENT_PKEY_P2 },
{ 0x04, SONYPI_EVENT_PKEY_P3 },
- { 0x5c, SONYPI_EVENT_PKEY_P1 },
{ 0, 0 }
};
@@ -1331,6 +1361,8 @@ static struct sonypi_event sonypi_lidev[] = {
/* The set of possible zoom events */
static struct sonypi_event sonypi_zoomev[] = {
{ 0x39, SONYPI_EVENT_ZOOM_PRESSED },
+ { 0x10, SONYPI_EVENT_ZOOM_IN_PRESSED },
+ { 0x20, SONYPI_EVENT_ZOOM_OUT_PRESSED },
{ 0, 0 }
};
@@ -1361,76 +1393,58 @@ static struct sonypi_event sonypi_batteryev[] = {
{ 0, 0 }
};
-static struct sonypi_eventtypes {
- int model;
- u8 data;
- unsigned long mask;
- struct sonypi_event * events;
-} sony_pic_eventtypes[] = {
- { SONYPI_DEVICE_TYPE1, 0, 0xffffffff, sonypi_releaseev },
- { SONYPI_DEVICE_TYPE1, 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
- { SONYPI_DEVICE_TYPE1, 0x30, SONYPI_LID_MASK, sonypi_lidev },
- { SONYPI_DEVICE_TYPE1, 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
- { SONYPI_DEVICE_TYPE1, 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
- { SONYPI_DEVICE_TYPE1, 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
- { SONYPI_DEVICE_TYPE1, 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
- { SONYPI_DEVICE_TYPE1, 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
- { SONYPI_DEVICE_TYPE1, 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
- { SONYPI_DEVICE_TYPE1, 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
-
- { SONYPI_DEVICE_TYPE2, 0, 0xffffffff, sonypi_releaseev },
- { SONYPI_DEVICE_TYPE2, 0x38, SONYPI_LID_MASK, sonypi_lidev },
- { SONYPI_DEVICE_TYPE2, 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
- { SONYPI_DEVICE_TYPE2, 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
- { SONYPI_DEVICE_TYPE2, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
- { SONYPI_DEVICE_TYPE2, 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
- { SONYPI_DEVICE_TYPE2, 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
- { SONYPI_DEVICE_TYPE2, 0x11, SONYPI_BACK_MASK, sonypi_backev },
- { SONYPI_DEVICE_TYPE2, 0x21, SONYPI_HELP_MASK, sonypi_helpev },
- { SONYPI_DEVICE_TYPE2, 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
- { SONYPI_DEVICE_TYPE2, 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
- { SONYPI_DEVICE_TYPE2, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
- { SONYPI_DEVICE_TYPE2, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
- { SONYPI_DEVICE_TYPE2, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
-
- { SONYPI_DEVICE_TYPE3, 0, 0xffffffff, sonypi_releaseev },
- { SONYPI_DEVICE_TYPE3, 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
- { SONYPI_DEVICE_TYPE3, 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
- { SONYPI_DEVICE_TYPE3, 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
- { SONYPI_DEVICE_TYPE3, 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
- { SONYPI_DEVICE_TYPE3, 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
- { 0 }
+static struct sonypi_eventtypes type1_events[] = {
+ { 0, 0xffffffff, sonypi_releaseev },
+ { 0x70, SONYPI_MEYE_MASK, sonypi_meyeev },
+ { 0x30, SONYPI_LID_MASK, sonypi_lidev },
+ { 0x60, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { 0x10, SONYPI_JOGGER_MASK, sonypi_joggerev },
+ { 0x20, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { 0x30, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+ { 0x40, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0x30, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { 0x40, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { 0 },
+};
+static struct sonypi_eventtypes type2_events[] = {
+ { 0, 0xffffffff, sonypi_releaseev },
+ { 0x38, SONYPI_LID_MASK, sonypi_lidev },
+ { 0x11, SONYPI_JOGGER_MASK, sonypi_joggerev },
+ { 0x61, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { 0x31, SONYPI_BLUETOOTH_MASK, sonypi_blueev },
+ { 0x08, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0x11, SONYPI_BACK_MASK, sonypi_backev },
+ { 0x21, SONYPI_HELP_MASK, sonypi_helpev },
+ { 0x21, SONYPI_ZOOM_MASK, sonypi_zoomev },
+ { 0x20, SONYPI_THUMBPHRASE_MASK, sonypi_thumbphraseev },
+ { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0 },
+};
+static struct sonypi_eventtypes type3_events[] = {
+ { 0, 0xffffffff, sonypi_releaseev },
+ { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
+ { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { 0x31, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0 },
+};
+static struct sonypi_eventtypes type4_events[] = {
+ { 0, 0xffffffff, sonypi_releaseev },
+ { 0x21, SONYPI_FNKEY_MASK, sonypi_fnkeyev },
+ { 0x31, SONYPI_WIRELESS_MASK, sonypi_wlessev },
+ { 0x31, SONYPI_MEMORYSTICK_MASK, sonypi_memorystickev },
+ { 0x41, SONYPI_BATTERY_MASK, sonypi_batteryev },
+ { 0x05, SONYPI_PKEY_MASK, sonypi_pkeyev },
+ { 0x05, SONYPI_ZOOM_MASK, sonypi_zoomev },
+ { 0x05, SONYPI_CAPTURE_MASK, sonypi_captureev },
+ { 0 },
};
-static int sony_pic_detect_device_type(void)
-{
- struct pci_dev *pcidev;
- int model = 0;
-
- if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_82371AB_3, NULL)))
- model = SONYPI_DEVICE_TYPE1;
-
- else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_ICH6_1, NULL)))
- model = SONYPI_DEVICE_TYPE3;
-
- else if ((pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_ICH7_1, NULL)))
- model = SONYPI_DEVICE_TYPE3;
-
- else
- model = SONYPI_DEVICE_TYPE2;
-
- if (pcidev)
- pci_dev_put(pcidev);
-
- printk(KERN_INFO DRV_PFX "detected Type%d model\n",
- model == SONYPI_DEVICE_TYPE1 ? 1 :
- model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
- return model;
-}
-
+/* low level spic calls */
#define ITERATIONS_LONG 10000
#define ITERATIONS_SHORT 10
#define wait_on_command(command, iterations) { \
@@ -1451,7 +1465,7 @@ static u8 sony_pic_call1(u8 dev)
outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
v1 = inb_p(spic_dev.cur_ioport->io1.minimum + 4);
v2 = inb_p(spic_dev.cur_ioport->io1.minimum);
- dprintk("sony_pic_call1: 0x%.4x\n", (v2 << 8) | v1);
+ dprintk("sony_pic_call1(0x%.2x): 0x%.4x\n", dev, (v2 << 8) | v1);
return v2;
}
@@ -1466,7 +1480,7 @@ static u8 sony_pic_call2(u8 dev, u8 fn)
ITERATIONS_LONG);
outb(fn, spic_dev.cur_ioport->io1.minimum);
v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
- dprintk("sony_pic_call2: 0x%.4x\n", v1);
+ dprintk("sony_pic_call2(0x%.2x - 0x%.2x): 0x%.4x\n", dev, fn, v1);
return v1;
}
@@ -1481,10 +1495,105 @@ static u8 sony_pic_call3(u8 dev, u8 fn, u8 v)
wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
outb(v, spic_dev.cur_ioport->io1.minimum);
v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
- dprintk("sony_pic_call3: 0x%.4x\n", v1);
+ dprintk("sony_pic_call3(0x%.2x - 0x%.2x - 0x%.2x): 0x%.4x\n",
+ dev, fn, v, v1);
return v1;
}
+/*
+ * minidrivers for SPIC models
+ */
+static int type4_handle_irq(const u8 data_mask, const u8 ev)
+{
+	/*
+	 * 0x31 could mean we have to take some extra action and wait for
+	 * the next irq for some Type4 models; it will generate a new
+	 * irq and we can read new data from the device:
+	 *	- 0x5c and 0x5f require 0xA0
+	 *	- 0x61 requires 0xB3
+	 */
+ if (data_mask == 0x31) {
+ if (ev == 0x5c || ev == 0x5f)
+ sony_pic_call1(0xA0);
+ else if (ev == 0x61)
+ sony_pic_call1(0xB3);
+ return 0;
+ }
+ return 1;
+}
+
+static struct device_ctrl spic_types[] = {
+ {
+ .model = SONYPI_DEVICE_TYPE1,
+ .handle_irq = NULL,
+ .evport_offset = SONYPI_TYPE1_OFFSET,
+ .event_types = type1_events,
+ },
+ {
+ .model = SONYPI_DEVICE_TYPE2,
+ .handle_irq = NULL,
+ .evport_offset = SONYPI_TYPE2_OFFSET,
+ .event_types = type2_events,
+ },
+ {
+ .model = SONYPI_DEVICE_TYPE3,
+ .handle_irq = NULL,
+ .evport_offset = SONYPI_TYPE3_OFFSET,
+ .event_types = type3_events,
+ },
+ {
+ .model = SONYPI_DEVICE_TYPE4,
+ .handle_irq = type4_handle_irq,
+ .evport_offset = SONYPI_TYPE4_OFFSET,
+ .event_types = type4_events,
+ },
+};
+
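+/* sony_pic_detect_device_type() below indexes spic_types[] directly
+ * (0 = Type1, 1 = Type2, 2 = Type3, 3 = Type4); keep the order in sync. */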
+static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
+{
+ struct pci_dev *pcidev;
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
+ if (pcidev) {
+ dev->control = &spic_types[0];
+ goto out;
+ }
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH6_1, NULL);
+ if (pcidev) {
+ dev->control = &spic_types[2];
+ goto out;
+ }
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH7_1, NULL);
+ if (pcidev) {
+ dev->control = &spic_types[3];
+ goto out;
+ }
+
+ pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_ICH8_4, NULL);
+ if (pcidev) {
+ dev->control = &spic_types[3];
+ goto out;
+ }
+
+ /* default */
+ dev->control = &spic_types[1];
+
+out:
+ if (pcidev)
+ pci_dev_put(pcidev);
+
+ printk(KERN_INFO DRV_PFX "detected Type%d model\n",
+ dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 :
+ dev->control->model == SONYPI_DEVICE_TYPE2 ? 2 :
+ dev->control->model == SONYPI_DEVICE_TYPE3 ? 3 : 4);
+}
+
/* camera tests and poweron/poweroff */
#define SONYPI_CAMERA_PICTURE 5
#define SONYPI_CAMERA_CONTROL 0x10
@@ -2253,7 +2362,7 @@ static int sony_pic_enable(struct acpi_device *device,
buffer.pointer = resource;
/* setup Type 1 resources */
- if (spic_dev.model == SONYPI_DEVICE_TYPE1) {
+ if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) {
/* setup io resources */
resource->res1.type = ACPI_RESOURCE_TYPE_IO;
@@ -2335,39 +2444,49 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
if (dev->cur_ioport->io2.minimum)
data_mask = inb_p(dev->cur_ioport->io2.minimum);
else
- data_mask = inb_p(dev->cur_ioport->io1.minimum + dev->evport_offset);
+ data_mask = inb_p(dev->cur_ioport->io1.minimum +
+ dev->control->evport_offset);
dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
- ev, data_mask, dev->cur_ioport->io1.minimum, dev->evport_offset);
+ ev, data_mask, dev->cur_ioport->io1.minimum,
+ dev->control->evport_offset);
if (ev == 0x00 || ev == 0xff)
return IRQ_HANDLED;
- for (i = 0; sony_pic_eventtypes[i].model; i++) {
-
- if (spic_dev.model != sony_pic_eventtypes[i].model)
- continue;
+ for (i = 0; dev->control->event_types[i].mask; i++) {
- if ((data_mask & sony_pic_eventtypes[i].data) !=
- sony_pic_eventtypes[i].data)
+ if ((data_mask & dev->control->event_types[i].data) !=
+ dev->control->event_types[i].data)
continue;
- if (!(mask & sony_pic_eventtypes[i].mask))
+ if (!(mask & dev->control->event_types[i].mask))
continue;
- for (j = 0; sony_pic_eventtypes[i].events[j].event; j++) {
- if (ev == sony_pic_eventtypes[i].events[j].data) {
+ for (j = 0; dev->control->event_types[i].events[j].event; j++) {
+ if (ev == dev->control->event_types[i].events[j].data) {
device_event =
- sony_pic_eventtypes[i].events[j].event;
+ dev->control->
+ event_types[i].events[j].event;
goto found;
}
}
}
+	/* Still not able to decode the event; try to pass
+	 * it over to the minidriver
+	 */
+ if (dev->control->handle_irq &&
+ dev->control->handle_irq(data_mask, ev) == 0)
+ return IRQ_HANDLED;
+
+ dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
+ ev, data_mask, dev->cur_ioport->io1.minimum,
+ dev->control->evport_offset);
return IRQ_HANDLED;
found:
sony_laptop_report_input_event(device_event);
- acpi_bus_generate_proc_event(spic_dev.acpi_dev, 1, device_event);
+ acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event);
sonypi_compat_report_event(device_event);
return IRQ_HANDLED;
@@ -2429,23 +2548,9 @@ static int sony_pic_add(struct acpi_device *device)
spic_dev.acpi_dev = device;
strcpy(acpi_device_class(device), "sony/hotkey");
- spic_dev.model = sony_pic_detect_device_type();
+ sony_pic_detect_device_type(&spic_dev);
mutex_init(&spic_dev.lock);
- /* model specific characteristics */
- switch(spic_dev.model) {
- case SONYPI_DEVICE_TYPE1:
- spic_dev.evport_offset = SONYPI_TYPE1_OFFSET;
- break;
- case SONYPI_DEVICE_TYPE3:
- spic_dev.evport_offset = SONYPI_TYPE3_OFFSET;
- break;
- case SONYPI_DEVICE_TYPE2:
- default:
- spic_dev.evport_offset = SONYPI_TYPE2_OFFSET;
- break;
- }
-
/* read _PRS resources */
result = sony_pic_possible_resources(device);
if (result) {
diff --git a/drivers/misc/tc1100-wmi.c b/drivers/misc/tc1100-wmi.c
new file mode 100644
index 000000000000..f25e4c974dcf
--- /dev/null
+++ b/drivers/misc/tc1100-wmi.c
@@ -0,0 +1,290 @@
+/*
+ * HP Compaq TC1100 Tablet WMI Extras Driver
+ *
+ * Copyright (C) 2007 Carlos Corbacho <carlos@strangeworlds.co.uk>
+ * Copyright (C) 2004 Jamey Hicks <jamey.hicks@hp.com>
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi.h>
+#include <acpi/actypes.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+#include <linux/platform_device.h>
+
+#define GUID "C364AC71-36DB-495A-8494-B439D472A505"
+
+#define TC1100_INSTANCE_WIRELESS 1
+#define TC1100_INSTANCE_JOGDIAL 2
+
+#define TC1100_LOGPREFIX "tc1100-wmi: "
+#define TC1100_INFO KERN_INFO TC1100_LOGPREFIX
+
+MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho");
+MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505");
+
+static int tc1100_probe(struct platform_device *device);
+static int tc1100_remove(struct platform_device *device);
+static int tc1100_suspend(struct platform_device *device, pm_message_t state);
+static int tc1100_resume(struct platform_device *device);
+
+static struct platform_driver tc1100_driver = {
+ .driver = {
+ .name = "tc1100-wmi",
+ .owner = THIS_MODULE,
+ },
+ .probe = tc1100_probe,
+ .remove = tc1100_remove,
+ .suspend = tc1100_suspend,
+ .resume = tc1100_resume,
+};
+
+static struct platform_device *tc1100_device;
+
+struct tc1100_data {
+ u32 wireless;
+ u32 jogdial;
+};
+
+static struct tc1100_data suspend_data;
+
+/* --------------------------------------------------------------------------
+ Device Management
+ -------------------------------------------------------------------------- */
+
+static int get_state(u32 *out, u8 instance)
+{
+ u32 tmp;
+ acpi_status status;
+ struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+
+ if (!out)
+ return -EINVAL;
+
+ if (instance > 2)
+ return -ENODEV;
+
+ status = wmi_query_block(GUID, instance, &result);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ obj = (union acpi_object *) result.pointer;
+ if (obj && obj->type == ACPI_TYPE_BUFFER &&
+ obj->buffer.length == sizeof(u32)) {
+ tmp = *((u32 *) obj->buffer.pointer);
+ } else {
+ tmp = 0;
+ }
+
+ if (result.length > 0 && result.pointer)
+ kfree(result.pointer);
+
+ switch (instance) {
+ case TC1100_INSTANCE_WIRELESS:
+ *out = (tmp == 3) ? 1 : 0;
+ return 0;
+ case TC1100_INSTANCE_JOGDIAL:
+ *out = (tmp == 1) ? 1 : 0;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static int set_state(u32 *in, u8 instance)
+{
+ u32 value;
+ acpi_status status;
+ struct acpi_buffer input;
+
+ if (!in)
+ return -EINVAL;
+
+ if (instance > 2)
+ return -ENODEV;
+
+ switch (instance) {
+ case TC1100_INSTANCE_WIRELESS:
+ value = (*in) ? 1 : 2;
+ break;
+ case TC1100_INSTANCE_JOGDIAL:
+ value = (*in) ? 0 : 1;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ input.length = sizeof(u32);
+ input.pointer = &value;
+
+ status = wmi_set_block(GUID, instance, &input);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ return 0;
+}
+
+/* --------------------------------------------------------------------------
+ FS Interface (/sys)
+ -------------------------------------------------------------------------- */
+
+/*
+ * Read/write bool sysfs macro
+ */
+#define show_set_bool(value, instance) \
+static ssize_t \
+show_bool_##value(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ u32 result; \
+ acpi_status status = get_state(&result, instance); \
+ if (ACPI_SUCCESS(status)) \
+ return sprintf(buf, "%d\n", result); \
+ return sprintf(buf, "Read error\n"); \
+} \
+\
+static ssize_t \
+set_bool_##value(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ u32 tmp = simple_strtoul(buf, NULL, 10); \
+ acpi_status status = set_state(&tmp, instance); \
+ if (ACPI_FAILURE(status)) \
+ return -EINVAL; \
+ return count; \
+} \
+static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \
+ show_bool_##value, set_bool_##value);
+
+show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
+show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL);
+
+static void remove_fs(void)
+{
+ device_remove_file(&tc1100_device->dev, &dev_attr_wireless);
+ device_remove_file(&tc1100_device->dev, &dev_attr_jogdial);
+}
+
+static int add_fs(void)
+{
+ int ret;
+
+ ret = device_create_file(&tc1100_device->dev, &dev_attr_wireless);
+ if (ret)
+ goto add_sysfs_error;
+
+ ret = device_create_file(&tc1100_device->dev, &dev_attr_jogdial);
+ if (ret)
+ goto add_sysfs_error;
+
+ return ret;
+
+add_sysfs_error:
+ remove_fs();
+ return ret;
+}
+
+/* --------------------------------------------------------------------------
+ Driver Model
+ -------------------------------------------------------------------------- */
+
+static int tc1100_probe(struct platform_device *device)
+{
+ int result = 0;
+
+ result = add_fs();
+ return result;
+}
+
+
+static int tc1100_remove(struct platform_device *device)
+{
+ remove_fs();
+ return 0;
+}
+
+static int tc1100_suspend(struct platform_device *dev, pm_message_t state)
+{
+ int ret;
+
+ ret = get_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
+ if (ret)
+ return ret;
+
+ ret = get_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int tc1100_resume(struct platform_device *dev)
+{
+ int ret;
+
+ ret = set_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS);
+ if (ret)
+ return ret;
+
+ ret = set_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int __init tc1100_init(void)
+{
+ int result = 0;
+
+ if (!wmi_has_guid(GUID))
+ return -ENODEV;
+
+ result = platform_driver_register(&tc1100_driver);
+ if (result)
+ return result;
+
+ tc1100_device = platform_device_alloc("tc1100-wmi", -1);
+ platform_device_add(tc1100_device);
+
+ printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras loaded\n");
+
+ return result;
+}
+
+static void __exit tc1100_exit(void)
+{
+ platform_device_del(tc1100_device);
+ platform_driver_unregister(&tc1100_driver);
+
+ printk(TC1100_INFO "HP Compaq TC1100 Tablet WMI Extras unloaded\n");
+}
+
+module_init(tc1100_init);
+module_exit(tc1100_exit);
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index cf56647a6ca4..7ba1acad5402 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -3,7 +3,7 @@
*
*
* Copyright (C) 2004-2005 Borislav Deianov <borislav@users.sf.net>
- * Copyright (C) 2006-2007 Henrique de Moraes Holschuh <hmh@hmh.eng.br>
+ * Copyright (C) 2006-2008 Henrique de Moraes Holschuh <hmh@hmh.eng.br>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,11 +21,13 @@
* 02110-1301, USA.
*/
-#define IBM_VERSION "0.17"
-#define TPACPI_SYSFS_VERSION 0x020000
+#define TPACPI_VERSION "0.19"
+#define TPACPI_SYSFS_VERSION 0x020200
/*
* Changelog:
+ * 2007-10-20 changelog trimmed down
+ *
* 2007-03-27 0.14 renamed to thinkpad_acpi and moved to
* drivers/misc.
*
@@ -33,89 +35,219 @@
* changelog now lives in git commit history, and will
* not be updated further in-file.
*
- * 2005-08-17 0.12 fix compilation on 2.6.13-rc kernels
* 2005-03-17 0.11 support for 600e, 770x
* thanks to Jamie Lentin <lentinj@dial.pipex.com>
- * support for 770e, G41
- * G40 and G41 don't have a thinklight
- * temperatures no longer experimental
- * experimental brightness control
- * experimental volume control
- * experimental fan enable/disable
- * 2005-01-16 0.10 fix module loading on R30, R31
- * 2005-01-16 0.9 support for 570, R30, R31
- * ultrabay support on A22p, A3x
- * limit arg for cmos, led, beep, drop experimental status
- * more capable led control on A21e, A22p, T20-22, X20
- * experimental temperatures and fan speed
- * experimental embedded controller register dump
- * mark more functions as __init, drop incorrect __exit
- * use MODULE_VERSION
+ *
+ * 2005-01-16 0.9 use MODULE_VERSION
* thanks to Henrik Brix Andersen <brix@gentoo.org>
* fix parameter passing on module loading
* thanks to Rusty Russell <rusty@rustcorp.com.au>
* thanks to Jim Radford <radford@blackbean.org>
* 2004-11-08 0.8 fix init error case, don't return from a macro
* thanks to Chris Wright <chrisw@osdl.org>
- * 2004-10-23 0.7 fix module loading on A21e, A22p, T20, T21, X20
- * fix led control on A21e
- * 2004-10-19 0.6 use acpi_bus_register_driver() to claim HKEY device
- * 2004-10-18 0.5 thinklight support on A21e, G40, R32, T20, T21, X20
- * proc file format changed
- * video_switch command
- * experimental cmos control
- * experimental led control
- * experimental acpi sounds
- * 2004-09-16 0.4 support for module parameters
- * hotkey mask can be prefixed by 0x
- * video output switching
- * video expansion control
- * ultrabay eject support
- * removed lcd brightness/on/off control, didn't work
- * 2004-08-17 0.3 support for R40
- * lcd off, brightness control
- * thinklight on/off
- * 2004-08-14 0.2 support for T series, X20
- * bluetooth enable/disable
- * hotkey events disabled by default
- * removed fan control, currently useless
- * 2004-08-09 0.1 initial release, support for X series
*/
-#include "thinkpad_acpi.h"
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/delay.h>
+
+#include <linux/nvram.h>
+#include <linux/proc_fs.h>
+#include <linux/sysfs.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/platform_device.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/input.h>
+#include <asm/uaccess.h>
+
+#include <linux/dmi.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+
+#include <acpi/acpi_drivers.h>
+#include <acpi/acnamesp.h>
+
+#include <linux/pci_ids.h>
+
+
+/* ThinkPad CMOS commands */
+#define TP_CMOS_VOLUME_DOWN 0
+#define TP_CMOS_VOLUME_UP 1
+#define TP_CMOS_VOLUME_MUTE 2
+#define TP_CMOS_BRIGHTNESS_UP 4
+#define TP_CMOS_BRIGHTNESS_DOWN 5
+
+/* NVRAM Addresses */
+enum tp_nvram_addr {
+ TP_NVRAM_ADDR_HK2 = 0x57,
+ TP_NVRAM_ADDR_THINKLIGHT = 0x58,
+ TP_NVRAM_ADDR_VIDEO = 0x59,
+ TP_NVRAM_ADDR_BRIGHTNESS = 0x5e,
+ TP_NVRAM_ADDR_MIXER = 0x60,
+};
-MODULE_AUTHOR("Borislav Deianov, Henrique de Moraes Holschuh");
-MODULE_DESCRIPTION(IBM_DESC);
-MODULE_VERSION(IBM_VERSION);
-MODULE_LICENSE("GPL");
+/* NVRAM bit masks */
+enum {
+ TP_NVRAM_MASK_HKT_THINKPAD = 0x08,
+ TP_NVRAM_MASK_HKT_ZOOM = 0x20,
+ TP_NVRAM_MASK_HKT_DISPLAY = 0x40,
+ TP_NVRAM_MASK_HKT_HIBERNATE = 0x80,
+ TP_NVRAM_MASK_THINKLIGHT = 0x10,
+ TP_NVRAM_MASK_HKT_DISPEXPND = 0x30,
+ TP_NVRAM_MASK_HKT_BRIGHTNESS = 0x20,
+ TP_NVRAM_MASK_LEVEL_BRIGHTNESS = 0x0f,
+ TP_NVRAM_POS_LEVEL_BRIGHTNESS = 0,
+ TP_NVRAM_MASK_MUTE = 0x40,
+ TP_NVRAM_MASK_HKT_VOLUME = 0x80,
+ TP_NVRAM_MASK_LEVEL_VOLUME = 0x0f,
+ TP_NVRAM_POS_LEVEL_VOLUME = 0,
+};
-/* Please remove this in year 2009 */
-MODULE_ALIAS("ibm_acpi");
+/* ACPI HIDs */
+#define TPACPI_ACPI_HKEY_HID "IBM0068"
-/*
- * DMI matching for module autoloading
- *
- * See http://thinkwiki.org/wiki/List_of_DMI_IDs
- * See http://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
- *
- * Only models listed in thinkwiki will be supported, so add yours
- * if it is not there yet.
+/* Input IDs */
+#define TPACPI_HKEY_INPUT_PRODUCT 0x5054 /* "TP" */
+#define TPACPI_HKEY_INPUT_VERSION 0x4101
+
+
+/****************************************************************************
+ * Main driver
*/
-#define IBM_BIOS_MODULE_ALIAS(__type) \
- MODULE_ALIAS("dmi:bvnIBM:bvr" __type "ET??WW")
-/* Non-ancient thinkpads */
-MODULE_ALIAS("dmi:bvnIBM:*:svnIBM:*:pvrThinkPad*:rvnIBM:*");
-MODULE_ALIAS("dmi:bvnLENOVO:*:svnLENOVO:*:pvrThinkPad*:rvnLENOVO:*");
+#define TPACPI_NAME "thinkpad"
+#define TPACPI_DESC "ThinkPad ACPI Extras"
+#define TPACPI_FILE TPACPI_NAME "_acpi"
+#define TPACPI_URL "http://ibm-acpi.sf.net/"
+#define TPACPI_MAIL "ibm-acpi-devel@lists.sourceforge.net"
+
+#define TPACPI_PROC_DIR "ibm"
+#define TPACPI_ACPI_EVENT_PREFIX "ibm"
+#define TPACPI_DRVR_NAME TPACPI_FILE
+#define TPACPI_HWMON_DRVR_NAME TPACPI_NAME "_hwmon"
+
+#define TPACPI_MAX_ACPI_ARGS 3
+
+/* Debugging */
+#define TPACPI_LOG TPACPI_FILE ": "
+#define TPACPI_ERR KERN_ERR TPACPI_LOG
+#define TPACPI_NOTICE KERN_NOTICE TPACPI_LOG
+#define TPACPI_INFO KERN_INFO TPACPI_LOG
+#define TPACPI_DEBUG KERN_DEBUG TPACPI_LOG
+
+#define TPACPI_DBG_ALL 0xffff
+#define TPACPI_DBG_INIT 0x0001
+#define TPACPI_DBG_EXIT 0x0002
+#define dbg_printk(a_dbg_level, format, arg...) \
+ do { if (dbg_level & a_dbg_level) \
+ printk(TPACPI_DEBUG "%s: " format, __func__ , ## arg); \
+ } while (0)
+#ifdef CONFIG_THINKPAD_ACPI_DEBUG
+#define vdbg_printk(a_dbg_level, format, arg...) \
+ dbg_printk(a_dbg_level, format, ## arg)
+static const char *str_supported(int is_supported);
+#else
+#define vdbg_printk(a_dbg_level, format, arg...)
+#endif
-/* Ancient thinkpad BIOSes have to be identified by
- * BIOS type or model number, and there are far less
- * BIOS types than model numbers... */
-IBM_BIOS_MODULE_ALIAS("I[B,D,H,I,M,N,O,T,W,V,Y,Z]");
-IBM_BIOS_MODULE_ALIAS("1[0,3,6,8,A-G,I,K,M-P,S,T]");
-IBM_BIOS_MODULE_ALIAS("K[U,X-Z]");
+#define onoff(status, bit) ((status) & (1 << (bit)) ? "on" : "off")
+#define enabled(status, bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
+#define strlencmp(a, b) (strncmp((a), (b), strlen(b)))
+
+
+/****************************************************************************
+ * Driver-wide structs and misc. variables
+ */
+
+struct ibm_struct;
+
+struct tp_acpi_drv_struct {
+ const struct acpi_device_id *hid;
+ struct acpi_driver *driver;
-#define __unused __attribute__ ((unused))
+ void (*notify) (struct ibm_struct *, u32);
+ acpi_handle *handle;
+ u32 type;
+ struct acpi_device *device;
+};
+
+struct ibm_struct {
+ char *name;
+
+ int (*read) (char *);
+ int (*write) (char *);
+ void (*exit) (void);
+ void (*resume) (void);
+ void (*suspend) (pm_message_t state);
+
+ struct list_head all_drivers;
+
+ struct tp_acpi_drv_struct *acpi;
+
+ struct {
+ u8 acpi_driver_registered:1;
+ u8 acpi_notify_installed:1;
+ u8 proc_created:1;
+ u8 init_called:1;
+ u8 experimental:1;
+ } flags;
+};
+
+struct ibm_init_struct {
+ char param[32];
+
+ int (*init) (struct ibm_init_struct *);
+ struct ibm_struct *data;
+};
+
+static struct {
+#ifdef CONFIG_THINKPAD_ACPI_BAY
+ u32 bay_status:1;
+ u32 bay_eject:1;
+ u32 bay_status2:1;
+ u32 bay_eject2:1;
+#endif
+ u32 bluetooth:1;
+ u32 hotkey:1;
+ u32 hotkey_mask:1;
+ u32 hotkey_wlsw:1;
+ u32 light:1;
+ u32 light_status:1;
+ u32 bright_16levels:1;
+ u32 wan:1;
+ u32 fan_ctrl_status_undef:1;
+ u32 input_device_registered:1;
+ u32 platform_drv_registered:1;
+ u32 platform_drv_attrs_registered:1;
+ u32 sensors_pdrv_registered:1;
+ u32 sensors_pdrv_attrs_registered:1;
+ u32 sensors_pdev_attrs_registered:1;
+ u32 hotkey_poll_active:1;
+} tp_features;
+
+struct thinkpad_id_data {
+ unsigned int vendor; /* ThinkPad vendor:
+ * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */
+
+ char *bios_version_str; /* Something like 1ZET51WW (1.03z) */
+ char *ec_version_str; /* Something like 1ZHT51WW-1.04a */
+
+ u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */
+ u16 ec_model;
+
+ char *model_str;
+};
+static struct thinkpad_id_data thinkpad_id;
static enum {
TPACPI_LIFE_INIT = 0,
@@ -123,6 +255,9 @@ static enum {
TPACPI_LIFE_EXITING,
} tpacpi_lifecycle;
+static int experimental;
+static u32 dbg_level;
+
/****************************************************************************
****************************************************************************
*
@@ -137,13 +272,13 @@ static enum {
static acpi_handle root_handle;
-#define IBM_HANDLE(object, parent, paths...) \
+#define TPACPI_HANDLE(object, parent, paths...) \
static acpi_handle object##_handle; \
static acpi_handle *object##_parent = &parent##_handle; \
static char *object##_path; \
static char *object##_paths[] = { paths }
-IBM_HANDLE(ec, root, "\\_SB.PCI0.ISA.EC0", /* 240, 240x */
+TPACPI_HANDLE(ec, root, "\\_SB.PCI0.ISA.EC0", /* 240, 240x */
"\\_SB.PCI.ISA.EC", /* 570 */
"\\_SB.PCI0.ISA0.EC0", /* 600e/x, 770e, 770x */
"\\_SB.PCI0.ISA.EC", /* A21e, A2xm/p, T20-22, X20-21 */
@@ -152,20 +287,16 @@ IBM_HANDLE(ec, root, "\\_SB.PCI0.ISA.EC0", /* 240, 240x */
"\\_SB.PCI0.LPC.EC", /* all others */
);
-IBM_HANDLE(ecrd, ec, "ECRD"); /* 570 */
-IBM_HANDLE(ecwr, ec, "ECWR"); /* 570 */
-
-
-/*************************************************************************
- * Misc ACPI handles
- */
+TPACPI_HANDLE(ecrd, ec, "ECRD"); /* 570 */
+TPACPI_HANDLE(ecwr, ec, "ECWR"); /* 570 */
-IBM_HANDLE(cmos, root, "\\UCMS", /* R50, R50e, R50p, R51, T4x, X31, X40 */
+TPACPI_HANDLE(cmos, root, "\\UCMS", /* R50, R50e, R50p, R51, */
+ /* T4x, X31, X40 */
"\\CMOS", /* A3x, G4x, R32, T23, T30, X22-24, X30 */
"\\CMS", /* R40, R40e */
); /* all others */
-IBM_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */
+TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */
"^HKEY", /* R30, R31 */
"HKEY", /* all others */
); /* 570 */
@@ -180,7 +311,7 @@ static int acpi_evalf(acpi_handle handle,
{
char *fmt0 = fmt;
struct acpi_object_list params;
- union acpi_object in_objs[IBM_MAX_ACPI_ARGS];
+ union acpi_object in_objs[TPACPI_MAX_ACPI_ARGS];
struct acpi_buffer result, *resultp;
union acpi_object out_obj;
acpi_status status;
@@ -190,7 +321,7 @@ static int acpi_evalf(acpi_handle handle,
int quiet;
if (!*fmt) {
- printk(IBM_ERR "acpi_evalf() called with empty format\n");
+ printk(TPACPI_ERR "acpi_evalf() called with empty format\n");
return 0;
}
@@ -215,7 +346,7 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- printk(IBM_ERR "acpi_evalf() called "
+ printk(TPACPI_ERR "acpi_evalf() called "
"with invalid format character '%c'\n", c);
return 0;
}
@@ -242,29 +373,19 @@ static int acpi_evalf(acpi_handle handle,
break;
/* add more types as needed */
default:
- printk(IBM_ERR "acpi_evalf() called "
+ printk(TPACPI_ERR "acpi_evalf() called "
"with invalid format character '%c'\n", res_type);
return 0;
}
if (!success && !quiet)
- printk(IBM_ERR "acpi_evalf(%s, %s, ...) failed: %d\n",
+ printk(TPACPI_ERR "acpi_evalf(%s, %s, ...) failed: %d\n",
method, fmt0, status);
return success;
}
-static void __unused acpi_print_int(acpi_handle handle, char *method)
-{
- int i;
-
- if (acpi_evalf(handle, &i, method, "d"))
- printk(IBM_INFO "%s = 0x%x\n", method, i);
- else
- printk(IBM_ERR "error calling %s\n", method);
-}
-
-static int acpi_ec_read(int i, u8 * p)
+static int acpi_ec_read(int i, u8 *p)
{
int v;
@@ -293,6 +414,7 @@ static int acpi_ec_write(int i, u8 v)
return 1;
}
+#if defined(CONFIG_THINKPAD_ACPI_DOCK) || defined(CONFIG_THINKPAD_ACPI_BAY)
static int _sta(acpi_handle handle)
{
int status;
@@ -302,6 +424,7 @@ static int _sta(acpi_handle handle)
return status;
}
+#endif
static int issue_thinkpad_cmos_command(int cmos_cmd)
{
@@ -318,6 +441,10 @@ static int issue_thinkpad_cmos_command(int cmos_cmd)
* ACPI device model
*/
+#define TPACPI_ACPIHANDLE_INIT(object) \
+ drv_acpi_handle_init(#object, &object##_handle, *object##_parent, \
+ object##_paths, ARRAY_SIZE(object##_paths), &object##_path)
+
static void drv_acpi_handle_init(char *name,
acpi_handle *handle, acpi_handle parent,
char **paths, int num_paths, char **path)
@@ -372,25 +499,27 @@ static int __init setup_acpi_notify(struct ibm_struct *ibm)
rc = acpi_bus_get_device(*ibm->acpi->handle, &ibm->acpi->device);
if (rc < 0) {
- printk(IBM_ERR "acpi_bus_get_device(%s) failed: %d\n",
+ printk(TPACPI_ERR "acpi_bus_get_device(%s) failed: %d\n",
ibm->name, rc);
return -ENODEV;
}
acpi_driver_data(ibm->acpi->device) = ibm;
sprintf(acpi_device_class(ibm->acpi->device), "%s/%s",
- IBM_ACPI_EVENT_PREFIX,
+ TPACPI_ACPI_EVENT_PREFIX,
ibm->name);
status = acpi_install_notify_handler(*ibm->acpi->handle,
ibm->acpi->type, dispatch_acpi_notify, ibm);
if (ACPI_FAILURE(status)) {
if (status == AE_ALREADY_EXISTS) {
- printk(IBM_NOTICE "another device driver is already handling %s events\n",
- ibm->name);
+ printk(TPACPI_NOTICE
+ "another device driver is already "
+ "handling %s events\n", ibm->name);
} else {
- printk(IBM_ERR "acpi_install_notify_handler(%s) failed: %d\n",
- ibm->name, status);
+ printk(TPACPI_ERR
+ "acpi_install_notify_handler(%s) failed: %d\n",
+ ibm->name, status);
}
return -ENODEV;
}
@@ -414,18 +543,18 @@ static int __init register_tpacpi_subdriver(struct ibm_struct *ibm)
ibm->acpi->driver = kzalloc(sizeof(struct acpi_driver), GFP_KERNEL);
if (!ibm->acpi->driver) {
- printk(IBM_ERR "kzalloc(ibm->driver) failed\n");
+ printk(TPACPI_ERR "kzalloc(ibm->driver) failed\n");
return -ENOMEM;
}
- sprintf(ibm->acpi->driver->name, "%s_%s", IBM_NAME, ibm->name);
+ sprintf(ibm->acpi->driver->name, "%s_%s", TPACPI_NAME, ibm->name);
ibm->acpi->driver->ids = ibm->acpi->hid;
ibm->acpi->driver->ops.add = &tpacpi_device_add;
rc = acpi_bus_register_driver(ibm->acpi->driver);
if (rc < 0) {
- printk(IBM_ERR "acpi_bus_register_driver(%s) failed: %d\n",
+ printk(TPACPI_ERR "acpi_bus_register_driver(%s) failed: %d\n",
ibm->name, rc);
kfree(ibm->acpi->driver);
ibm->acpi->driver = NULL;
@@ -470,7 +599,7 @@ static int dispatch_procfs_read(char *page, char **start, off_t off,
}
static int dispatch_procfs_write(struct file *file,
- const char __user * userbuf,
+ const char __user *userbuf,
unsigned long count, void *data)
{
struct ibm_struct *ibm = data;
@@ -530,7 +659,22 @@ static struct platform_device *tpacpi_sensors_pdev;
static struct device *tpacpi_hwmon;
static struct input_dev *tpacpi_inputdev;
static struct mutex tpacpi_inputdev_send_mutex;
+static LIST_HEAD(tpacpi_all_drivers);
+
+static int tpacpi_suspend_handler(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct ibm_struct *ibm, *itmp;
+
+ list_for_each_entry_safe(ibm, itmp,
+ &tpacpi_all_drivers,
+ all_drivers) {
+ if (ibm->suspend)
+ (ibm->suspend)(state);
+ }
+ return 0;
+}
static int tpacpi_resume_handler(struct platform_device *pdev)
{
@@ -548,107 +692,36 @@ static int tpacpi_resume_handler(struct platform_device *pdev)
static struct platform_driver tpacpi_pdriver = {
.driver = {
- .name = IBM_DRVR_NAME,
+ .name = TPACPI_DRVR_NAME,
.owner = THIS_MODULE,
},
+ .suspend = tpacpi_suspend_handler,
.resume = tpacpi_resume_handler,
};
static struct platform_driver tpacpi_hwmon_pdriver = {
.driver = {
- .name = IBM_HWMON_DRVR_NAME,
+ .name = TPACPI_HWMON_DRVR_NAME,
.owner = THIS_MODULE,
},
};
/*************************************************************************
- * thinkpad-acpi driver attributes
+ * sysfs support helpers
*/
-/* interface_version --------------------------------------------------- */
-static ssize_t tpacpi_driver_interface_version_show(
- struct device_driver *drv,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", TPACPI_SYSFS_VERSION);
-}
-
-static DRIVER_ATTR(interface_version, S_IRUGO,
- tpacpi_driver_interface_version_show, NULL);
-
-/* debug_level --------------------------------------------------------- */
-static ssize_t tpacpi_driver_debug_show(struct device_driver *drv,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "0x%04x\n", dbg_level);
-}
-
-static ssize_t tpacpi_driver_debug_store(struct device_driver *drv,
- const char *buf, size_t count)
-{
- unsigned long t;
-
- if (parse_strtoul(buf, 0xffff, &t))
- return -EINVAL;
-
- dbg_level = t;
-
- return count;
-}
-
-static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
- tpacpi_driver_debug_show, tpacpi_driver_debug_store);
-
-/* version ------------------------------------------------------------- */
-static ssize_t tpacpi_driver_version_show(struct device_driver *drv,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s v%s\n", IBM_DESC, IBM_VERSION);
-}
-
-static DRIVER_ATTR(version, S_IRUGO,
- tpacpi_driver_version_show, NULL);
-
-/* --------------------------------------------------------------------- */
-
-static struct driver_attribute* tpacpi_driver_attributes[] = {
- &driver_attr_debug_level, &driver_attr_version,
- &driver_attr_interface_version,
+struct attribute_set {
+ unsigned int members, max_members;
+ struct attribute_group group;
};
-static int __init tpacpi_create_driver_attributes(struct device_driver *drv)
-{
- int i, res;
-
- i = 0;
- res = 0;
- while (!res && i < ARRAY_SIZE(tpacpi_driver_attributes)) {
- res = driver_create_file(drv, tpacpi_driver_attributes[i]);
- i++;
- }
-
- return res;
-}
-
-static void tpacpi_remove_driver_attributes(struct device_driver *drv)
-{
- int i;
-
- for(i = 0; i < ARRAY_SIZE(tpacpi_driver_attributes); i++)
- driver_remove_file(drv, tpacpi_driver_attributes[i]);
-}
-
-/*************************************************************************
- * sysfs support helpers
- */
-
struct attribute_set_obj {
struct attribute_set s;
struct attribute *a;
} __attribute__((packed));
static struct attribute_set *create_attr_set(unsigned int max_members,
- const char* name)
+ const char *name)
{
struct attribute_set_obj *sobj;
@@ -668,8 +741,11 @@ static struct attribute_set *create_attr_set(unsigned int max_members,
return &sobj->s;
}
+#define destroy_attr_set(_set) \
+ kfree(_set);
+
/* not multi-threaded safe, use it in a single thread per set */
-static int add_to_attr_set(struct attribute_set* s, struct attribute *attr)
+static int add_to_attr_set(struct attribute_set *s, struct attribute *attr)
{
if (!s || !attr)
return -EINVAL;
@@ -683,7 +759,7 @@ static int add_to_attr_set(struct attribute_set* s, struct attribute *attr)
return 0;
}
-static int add_many_to_attr_set(struct attribute_set* s,
+static int add_many_to_attr_set(struct attribute_set *s,
struct attribute **attr,
unsigned int count)
{
@@ -698,12 +774,15 @@ static int add_many_to_attr_set(struct attribute_set* s,
return 0;
}
-static void delete_attr_set(struct attribute_set* s, struct kobject *kobj)
+static void delete_attr_set(struct attribute_set *s, struct kobject *kobj)
{
sysfs_remove_group(kobj, &s->group);
destroy_attr_set(s);
}
+#define register_attr_set_with_sysfs(_attr_set, _kobj) \
+ sysfs_create_group(_kobj, &_attr_set->group)
+
static int parse_strtoul(const char *buf,
unsigned long max, unsigned long *value)
{
@@ -720,6 +799,84 @@ static int parse_strtoul(const char *buf,
return 0;
}
+/*************************************************************************
+ * thinkpad-acpi driver attributes
+ */
+
+/* interface_version --------------------------------------------------- */
+static ssize_t tpacpi_driver_interface_version_show(
+ struct device_driver *drv,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", TPACPI_SYSFS_VERSION);
+}
+
+static DRIVER_ATTR(interface_version, S_IRUGO,
+ tpacpi_driver_interface_version_show, NULL);
+
+/* debug_level --------------------------------------------------------- */
+static ssize_t tpacpi_driver_debug_show(struct device_driver *drv,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", dbg_level);
+}
+
+static ssize_t tpacpi_driver_debug_store(struct device_driver *drv,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 0xffff, &t))
+ return -EINVAL;
+
+ dbg_level = t;
+
+ return count;
+}
+
+static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
+ tpacpi_driver_debug_show, tpacpi_driver_debug_store);
+
+/* version ------------------------------------------------------------- */
+static ssize_t tpacpi_driver_version_show(struct device_driver *drv,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s v%s\n",
+ TPACPI_DESC, TPACPI_VERSION);
+}
+
+static DRIVER_ATTR(version, S_IRUGO,
+ tpacpi_driver_version_show, NULL);
+
+/* --------------------------------------------------------------------- */
+
+static struct driver_attribute *tpacpi_driver_attributes[] = {
+ &driver_attr_debug_level, &driver_attr_version,
+ &driver_attr_interface_version,
+};
+
+static int __init tpacpi_create_driver_attributes(struct device_driver *drv)
+{
+ int i, res;
+
+ i = 0;
+ res = 0;
+ while (!res && i < ARRAY_SIZE(tpacpi_driver_attributes)) {
+ res = driver_create_file(drv, tpacpi_driver_attributes[i]);
+ i++;
+ }
+
+ return res;
+}
+
+static void tpacpi_remove_driver_attributes(struct device_driver *drv)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tpacpi_driver_attributes); i++)
+ driver_remove_file(drv, tpacpi_driver_attributes[i]);
+}
+
/****************************************************************************
****************************************************************************
*
@@ -734,17 +891,17 @@ static int parse_strtoul(const char *buf,
static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
{
- printk(IBM_INFO "%s v%s\n", IBM_DESC, IBM_VERSION);
- printk(IBM_INFO "%s\n", IBM_URL);
+ printk(TPACPI_INFO "%s v%s\n", TPACPI_DESC, TPACPI_VERSION);
+ printk(TPACPI_INFO "%s\n", TPACPI_URL);
- printk(IBM_INFO "ThinkPad BIOS %s, EC %s\n",
+ printk(TPACPI_INFO "ThinkPad BIOS %s, EC %s\n",
(thinkpad_id.bios_version_str) ?
thinkpad_id.bios_version_str : "unknown",
(thinkpad_id.ec_version_str) ?
thinkpad_id.ec_version_str : "unknown");
if (thinkpad_id.vendor && thinkpad_id.model_str)
- printk(IBM_INFO "%s %s\n",
+ printk(TPACPI_INFO "%s %s\n",
(thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
"IBM" : ((thinkpad_id.vendor ==
PCI_VENDOR_ID_LENOVO) ?
@@ -758,8 +915,8 @@ static int thinkpad_acpi_driver_read(char *p)
{
int len = 0;
- len += sprintf(p + len, "driver:\t\t%s\n", IBM_DESC);
- len += sprintf(p + len, "version:\t%s\n", IBM_VERSION);
+ len += sprintf(p + len, "driver:\t\t%s\n", TPACPI_DESC);
+ len += sprintf(p + len, "version:\t%s\n", TPACPI_VERSION);
return len;
}
@@ -773,15 +930,129 @@ static struct ibm_struct thinkpad_acpi_driver_data = {
* Hotkey subdriver
*/
+enum { /* hot key scan codes (derived from ACPI DSDT) */
+ TP_ACPI_HOTKEYSCAN_FNF1 = 0,
+ TP_ACPI_HOTKEYSCAN_FNF2,
+ TP_ACPI_HOTKEYSCAN_FNF3,
+ TP_ACPI_HOTKEYSCAN_FNF4,
+ TP_ACPI_HOTKEYSCAN_FNF5,
+ TP_ACPI_HOTKEYSCAN_FNF6,
+ TP_ACPI_HOTKEYSCAN_FNF7,
+ TP_ACPI_HOTKEYSCAN_FNF8,
+ TP_ACPI_HOTKEYSCAN_FNF9,
+ TP_ACPI_HOTKEYSCAN_FNF10,
+ TP_ACPI_HOTKEYSCAN_FNF11,
+ TP_ACPI_HOTKEYSCAN_FNF12,
+ TP_ACPI_HOTKEYSCAN_FNBACKSPACE,
+ TP_ACPI_HOTKEYSCAN_FNINSERT,
+ TP_ACPI_HOTKEYSCAN_FNDELETE,
+ TP_ACPI_HOTKEYSCAN_FNHOME,
+ TP_ACPI_HOTKEYSCAN_FNEND,
+ TP_ACPI_HOTKEYSCAN_FNPAGEUP,
+ TP_ACPI_HOTKEYSCAN_FNPAGEDOWN,
+ TP_ACPI_HOTKEYSCAN_FNSPACE,
+ TP_ACPI_HOTKEYSCAN_VOLUMEUP,
+ TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
+ TP_ACPI_HOTKEYSCAN_MUTE,
+ TP_ACPI_HOTKEYSCAN_THINKPAD,
+};
+
+enum { /* Keys available through NVRAM polling */
+ TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U,
+ TPACPI_HKEY_NVRAM_GOOD_MASK = 0x00fb8000U,
+};
+
+enum { /* Positions of some of the keys in hotkey masks */
+ TP_ACPI_HKEY_DISPSWTCH_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF7,
+ TP_ACPI_HKEY_DISPXPAND_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF8,
+ TP_ACPI_HKEY_HIBERNATE_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNF12,
+ TP_ACPI_HKEY_BRGHTUP_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNHOME,
+ TP_ACPI_HKEY_BRGHTDWN_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNEND,
+ TP_ACPI_HKEY_THNKLGHT_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNPAGEUP,
+ TP_ACPI_HKEY_ZOOM_MASK = 1 << TP_ACPI_HOTKEYSCAN_FNSPACE,
+ TP_ACPI_HKEY_VOLUP_MASK = 1 << TP_ACPI_HOTKEYSCAN_VOLUMEUP,
+ TP_ACPI_HKEY_VOLDWN_MASK = 1 << TP_ACPI_HOTKEYSCAN_VOLUMEDOWN,
+ TP_ACPI_HKEY_MUTE_MASK = 1 << TP_ACPI_HOTKEYSCAN_MUTE,
+ TP_ACPI_HKEY_THINKPAD_MASK = 1 << TP_ACPI_HOTKEYSCAN_THINKPAD,
+};
+
+enum { /* NVRAM to ACPI HKEY group map */
+ TP_NVRAM_HKEY_GROUP_HK2 = TP_ACPI_HKEY_THINKPAD_MASK |
+ TP_ACPI_HKEY_ZOOM_MASK |
+ TP_ACPI_HKEY_DISPSWTCH_MASK |
+ TP_ACPI_HKEY_HIBERNATE_MASK,
+ TP_NVRAM_HKEY_GROUP_BRIGHTNESS = TP_ACPI_HKEY_BRGHTUP_MASK |
+ TP_ACPI_HKEY_BRGHTDWN_MASK,
+ TP_NVRAM_HKEY_GROUP_VOLUME = TP_ACPI_HKEY_VOLUP_MASK |
+ TP_ACPI_HKEY_VOLDWN_MASK |
+ TP_ACPI_HKEY_MUTE_MASK,
+};
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+struct tp_nvram_state {
+ u16 thinkpad_toggle:1;
+ u16 zoom_toggle:1;
+ u16 display_toggle:1;
+ u16 thinklight_toggle:1;
+ u16 hibernate_toggle:1;
+ u16 displayexp_toggle:1;
+ u16 display_state:1;
+ u16 brightness_toggle:1;
+ u16 volume_toggle:1;
+ u16 mute:1;
+
+ u8 brightness_level;
+ u8 volume_level;
+};
+
+static struct task_struct *tpacpi_hotkey_task;
+static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */
+static int hotkey_poll_freq = 10; /* Hz */
+static struct mutex hotkey_thread_mutex;
+static struct mutex hotkey_thread_data_mutex;
+static unsigned int hotkey_config_change;
+
+#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+#define hotkey_source_mask 0U
+
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+static struct mutex hotkey_mutex;
+
+static enum { /* Reasons for waking up */
+ TP_ACPI_WAKEUP_NONE = 0, /* None or unknown */
+ TP_ACPI_WAKEUP_BAYEJ, /* Bay ejection request */
+ TP_ACPI_WAKEUP_UNDOCK, /* Undock request */
+} hotkey_wakeup_reason;
+
+static int hotkey_autosleep_ack;
+
static int hotkey_orig_status;
static u32 hotkey_orig_mask;
static u32 hotkey_all_mask;
static u32 hotkey_reserved_mask;
+static u32 hotkey_mask;
+
+static unsigned int hotkey_report_mode;
static u16 *hotkey_keycode_map;
static struct attribute_set *hotkey_dev_attributes;
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
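+/* Bracket hotkey configuration changes: bumping hotkey_config_change
+ * under hotkey_thread_data_mutex lets the NVRAM polling thread detect
+ * the change and drop its cached key state on the next cycle. */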
+#define HOTKEY_CONFIG_CRITICAL_START \
+ do { \
+ mutex_lock(&hotkey_thread_data_mutex); \
+ hotkey_config_change++; \
+ } while (0);
+#define HOTKEY_CONFIG_CRITICAL_END \
+ mutex_unlock(&hotkey_thread_data_mutex);
+#else
+#define HOTKEY_CONFIG_CRITICAL_START
+#define HOTKEY_CONFIG_CRITICAL_END
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
static int hotkey_get_wlsw(int *status)
{
if (!acpi_evalf(hkey_handle, status, "WLSW", "d"))
@@ -789,15 +1060,400 @@ static int hotkey_get_wlsw(int *status)
return 0;
}
+/*
+ * Call with hotkey_mutex held
+ */
+static int hotkey_mask_get(void)
+{
+ u32 m = 0;
+
+ if (tp_features.hotkey_mask) {
+ if (!acpi_evalf(hkey_handle, &m, "DHKN", "d"))
+ return -EIO;
+ }
+ hotkey_mask = m | (hotkey_source_mask & hotkey_mask);
+
+ return 0;
+}
+
+/*
+ * Call with hotkey_mutex held
+ */
+static int hotkey_mask_set(u32 mask)
+{
+ int i;
+ int rc = 0;
+
+ if (tp_features.hotkey_mask) {
+ HOTKEY_CONFIG_CRITICAL_START
+ for (i = 0; i < 32; i++) {
+ u32 m = 1 << i;
+			/* in the firmware mask, enable only keys not in NVRAM
+			 * mode, but enable the key in the cached hotkey_mask
+			 * regardless of mode, or the key will end up
+			 * disabled by hotkey_mask_get() */
+ if (!acpi_evalf(hkey_handle,
+ NULL, "MHKM", "vdd", i + 1,
+ !!((mask & ~hotkey_source_mask) & m))) {
+ rc = -EIO;
+ break;
+ } else {
+ hotkey_mask = (hotkey_mask & ~m) | (mask & m);
+ }
+ }
+ HOTKEY_CONFIG_CRITICAL_END
+
+ /* hotkey_mask_get must be called unconditionally below */
+ if (!hotkey_mask_get() && !rc &&
+ (hotkey_mask & ~hotkey_source_mask) !=
+ (mask & ~hotkey_source_mask)) {
+ printk(TPACPI_NOTICE
+ "requested hot key mask 0x%08x, but "
+ "firmware forced it to 0x%08x\n",
+ mask, hotkey_mask);
+ }
+ } else {
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ HOTKEY_CONFIG_CRITICAL_START
+ hotkey_mask = mask & hotkey_source_mask;
+ HOTKEY_CONFIG_CRITICAL_END
+ hotkey_mask_get();
+ if (hotkey_mask != mask) {
+ printk(TPACPI_NOTICE
+ "requested hot key mask 0x%08x, "
+ "forced to 0x%08x (NVRAM poll mask is "
+ "0x%08x): no firmware mask support\n",
+ mask, hotkey_mask, hotkey_source_mask);
+ }
+#else
+ hotkey_mask_get();
+ rc = -ENXIO;
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+ }
+
+ return rc;
+}
+
+static int hotkey_status_get(int *status)
+{
+ if (!acpi_evalf(hkey_handle, status, "DHKC", "d"))
+ return -EIO;
+
+ return 0;
+}
+
+static int hotkey_status_set(int status)
+{
+ if (!acpi_evalf(hkey_handle, NULL, "MHKC", "vd", status))
+ return -EIO;
+
+ return 0;
+}
+
+static void tpacpi_input_send_radiosw(void)
+{
+ int wlsw;
+
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
+ input_report_switch(tpacpi_inputdev,
+ SW_RADIO, !!wlsw);
+ input_sync(tpacpi_inputdev);
+ }
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+}
+
+static void tpacpi_input_send_key(unsigned int scancode)
+{
+ unsigned int keycode;
+
+ keycode = hotkey_keycode_map[scancode];
+
+ if (keycode != KEY_RESERVED) {
+ mutex_lock(&tpacpi_inputdev_send_mutex);
+
+ input_report_key(tpacpi_inputdev, keycode, 1);
+ if (keycode == KEY_UNKNOWN)
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
+ scancode);
+ input_sync(tpacpi_inputdev);
+
+ input_report_key(tpacpi_inputdev, keycode, 0);
+ if (keycode == KEY_UNKNOWN)
+ input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
+ scancode);
+ input_sync(tpacpi_inputdev);
+
+ mutex_unlock(&tpacpi_inputdev_send_mutex);
+ }
+}
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
+
+static void tpacpi_hotkey_send_key(unsigned int scancode)
+{
+ tpacpi_input_send_key(scancode);
+ if (hotkey_report_mode < 2) {
+ acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device,
+ 0x80, 0x1001 + scancode);
+ }
+}
+
+static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m)
+{
+ u8 d;
+
+ if (m & TP_NVRAM_HKEY_GROUP_HK2) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_HK2);
+ n->thinkpad_toggle = !!(d & TP_NVRAM_MASK_HKT_THINKPAD);
+ n->zoom_toggle = !!(d & TP_NVRAM_MASK_HKT_ZOOM);
+ n->display_toggle = !!(d & TP_NVRAM_MASK_HKT_DISPLAY);
+ n->hibernate_toggle = !!(d & TP_NVRAM_MASK_HKT_HIBERNATE);
+ }
+ if (m & TP_ACPI_HKEY_THNKLGHT_MASK) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_THINKLIGHT);
+ n->thinklight_toggle = !!(d & TP_NVRAM_MASK_THINKLIGHT);
+ }
+ if (m & TP_ACPI_HKEY_DISPXPAND_MASK) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_VIDEO);
+ n->displayexp_toggle =
+ !!(d & TP_NVRAM_MASK_HKT_DISPEXPND);
+ }
+ if (m & TP_NVRAM_HKEY_GROUP_BRIGHTNESS) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS);
+ n->brightness_level = (d & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
+ >> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
+ n->brightness_toggle =
+ !!(d & TP_NVRAM_MASK_HKT_BRIGHTNESS);
+ }
+ if (m & TP_NVRAM_HKEY_GROUP_VOLUME) {
+ d = nvram_read_byte(TP_NVRAM_ADDR_MIXER);
+ n->volume_level = (d & TP_NVRAM_MASK_LEVEL_VOLUME)
+ >> TP_NVRAM_POS_LEVEL_VOLUME;
+ n->mute = !!(d & TP_NVRAM_MASK_MUTE);
+ n->volume_toggle = !!(d & TP_NVRAM_MASK_HKT_VOLUME);
+ }
+}
+
+#define TPACPI_COMPARE_KEY(__scancode, __member) \
+ do { \
+ if ((mask & (1 << __scancode)) && \
+ oldn->__member != newn->__member) \
+ tpacpi_hotkey_send_key(__scancode); \
+ } while (0)
+
+#define TPACPI_MAY_SEND_KEY(__scancode) \
+ do { if (mask & (1 << __scancode)) \
+ tpacpi_hotkey_send_key(__scancode); } while (0)
+
+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
+ struct tp_nvram_state *newn,
+ u32 mask)
+{
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF12, hibernate_toggle);
+
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNPAGEUP, thinklight_toggle);
+
+ TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF8, displayexp_toggle);
+
+ /* handle volume */
+ if (oldn->volume_toggle != newn->volume_toggle) {
+ if (oldn->mute != newn->mute) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
+ }
+ if (oldn->volume_level > newn->volume_level) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+ } else if (oldn->volume_level < newn->volume_level) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+ } else if (oldn->mute == newn->mute) {
+ /* repeated key presses that didn't change state */
+ if (newn->mute) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
+ } else if (newn->volume_level != 0) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
+ } else {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
+ }
+ }
+ }
+
+ /* handle brightness */
+ if (oldn->brightness_toggle != newn->brightness_toggle) {
+ if (oldn->brightness_level < newn->brightness_level) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+ } else if (oldn->brightness_level > newn->brightness_level) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+ } else {
+ /* repeated key presses that didn't change state */
+ if (newn->brightness_level != 0) {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
+ } else {
+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
+ }
+ }
+ }
+}
+
+#undef TPACPI_COMPARE_KEY
+#undef TPACPI_MAY_SEND_KEY
+
+static int hotkey_kthread(void *data)
+{
+ struct tp_nvram_state s[2];
+ u32 mask;
+ unsigned int si, so;
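+	/* double buffering: s[so] holds the last NVRAM state that was
+	 * compared and reported, s[si] receives each fresh poll */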
+ unsigned long t;
+ unsigned int change_detector, must_reset;
+
+ mutex_lock(&hotkey_thread_mutex);
+
+ if (tpacpi_lifecycle == TPACPI_LIFE_EXITING)
+ goto exit;
+
+ set_freezable();
+
+ so = 0;
+ si = 1;
+ t = 0;
+
+ /* Initial state for compares */
+ mutex_lock(&hotkey_thread_data_mutex);
+ change_detector = hotkey_config_change;
+ mask = hotkey_source_mask & hotkey_mask;
+ mutex_unlock(&hotkey_thread_data_mutex);
+ hotkey_read_nvram(&s[so], mask);
+
+ while (!kthread_should_stop() && hotkey_poll_freq) {
+ if (t == 0)
+ t = 1000/hotkey_poll_freq;
+ t = msleep_interruptible(t);
+ if (unlikely(kthread_should_stop()))
+ break;
+ must_reset = try_to_freeze();
+ if (t > 0 && !must_reset)
+ continue;
+
+ mutex_lock(&hotkey_thread_data_mutex);
+ if (must_reset || hotkey_config_change != change_detector) {
+ /* forget old state on thaw or config change */
+ si = so;
+ t = 0;
+ change_detector = hotkey_config_change;
+ }
+ mask = hotkey_source_mask & hotkey_mask;
+ mutex_unlock(&hotkey_thread_data_mutex);
+
+ if (likely(mask)) {
+ hotkey_read_nvram(&s[si], mask);
+ if (likely(si != so)) {
+ hotkey_compare_and_issue_event(&s[so], &s[si],
+ mask);
+ }
+ }
+
+ so = si;
+ si ^= 1;
+ }
+
+exit:
+ mutex_unlock(&hotkey_thread_mutex);
+ return 0;
+}
+
+static void hotkey_poll_stop_sync(void)
+{
+ if (tpacpi_hotkey_task) {
+ if (frozen(tpacpi_hotkey_task) ||
+ freezing(tpacpi_hotkey_task))
+ thaw_process(tpacpi_hotkey_task);
+
+ kthread_stop(tpacpi_hotkey_task);
+ tpacpi_hotkey_task = NULL;
+ mutex_lock(&hotkey_thread_mutex);
+ /* at this point, the thread did exit */
+ mutex_unlock(&hotkey_thread_mutex);
+ }
+}
+
+/* call with hotkey_mutex held */
+static void hotkey_poll_setup(int may_warn)
+{
+ if ((hotkey_source_mask & hotkey_mask) != 0 &&
+ hotkey_poll_freq > 0 &&
+ (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) {
+ if (!tpacpi_hotkey_task) {
+ tpacpi_hotkey_task = kthread_run(hotkey_kthread,
+ NULL,
+ TPACPI_FILE "d");
+ if (IS_ERR(tpacpi_hotkey_task)) {
+ tpacpi_hotkey_task = NULL;
+ printk(TPACPI_ERR
+ "could not create kernel thread "
+ "for hotkey polling\n");
+ }
+ }
+ } else {
+ hotkey_poll_stop_sync();
+ if (may_warn &&
+ hotkey_source_mask != 0 && hotkey_poll_freq == 0) {
+ printk(TPACPI_NOTICE
+ "hot keys 0x%08x require polling, "
+ "which is currently disabled\n",
+ hotkey_source_mask);
+ }
+ }
+}
+
+static void hotkey_poll_setup_safe(int may_warn)
+{
+ mutex_lock(&hotkey_mutex);
+ hotkey_poll_setup(may_warn);
+ mutex_unlock(&hotkey_mutex);
+}
+
+static int hotkey_inputdev_open(struct input_dev *dev)
+{
+ switch (tpacpi_lifecycle) {
+ case TPACPI_LIFE_INIT:
+ /*
+ * hotkey_init will call hotkey_poll_setup_safe
+ * at the appropriate moment
+ */
+ return 0;
+ case TPACPI_LIFE_EXITING:
+ return -EBUSY;
+ case TPACPI_LIFE_RUNNING:
+ hotkey_poll_setup_safe(0);
+ return 0;
+ }
+
+ /* Should only happen if tpacpi_lifecycle is corrupt */
+ BUG();
+ return -EBUSY;
+}
+
+static void hotkey_inputdev_close(struct input_dev *dev)
+{
+ /* disable hotkey polling when possible */
+ if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING)
+ hotkey_poll_setup_safe(0);
+}
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
/* sysfs hotkey enable ------------------------------------------------- */
static ssize_t hotkey_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int res, status;
- u32 mask;
- res = hotkey_get(&status, &mask);
+ res = hotkey_status_get(&status);
if (res)
return res;
@@ -809,15 +1465,12 @@ static ssize_t hotkey_enable_store(struct device *dev,
const char *buf, size_t count)
{
unsigned long t;
- int res, status;
- u32 mask;
+ int res;
if (parse_strtoul(buf, 1, &t))
return -EINVAL;
- res = hotkey_get(&status, &mask);
- if (!res)
- res = hotkey_set(t, mask);
+ res = hotkey_status_set(t);
return (res) ? res : count;
}
@@ -831,14 +1484,15 @@ static ssize_t hotkey_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int res, status;
- u32 mask;
+ int res;
- res = hotkey_get(&status, &mask);
- if (res)
- return res;
+ if (mutex_lock_interruptible(&hotkey_mutex))
+ return -ERESTARTSYS;
+ res = hotkey_mask_get();
+ mutex_unlock(&hotkey_mutex);
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", mask);
+ return (res)?
+ res : snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_mask);
}
static ssize_t hotkey_mask_store(struct device *dev,
@@ -846,15 +1500,21 @@ static ssize_t hotkey_mask_store(struct device *dev,
const char *buf, size_t count)
{
unsigned long t;
- int res, status;
- u32 mask;
+ int res;
if (parse_strtoul(buf, 0xffffffffUL, &t))
return -EINVAL;
- res = hotkey_get(&status, &mask);
- if (!res)
- hotkey_set(status, t);
+ if (mutex_lock_interruptible(&hotkey_mutex))
+ return -ERESTARTSYS;
+
+ res = hotkey_mask_set(t);
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ hotkey_poll_setup(1);
+#endif
+
+ mutex_unlock(&hotkey_mutex);
return (res) ? res : count;
}
@@ -890,7 +1550,8 @@ static ssize_t hotkey_all_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_all_mask);
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+ hotkey_all_mask | hotkey_source_mask);
}
static struct device_attribute dev_attr_hotkey_all_mask =
@@ -902,14 +1563,87 @@ static ssize_t hotkey_recommended_mask_show(struct device *dev,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "0x%08x\n",
- hotkey_all_mask & ~hotkey_reserved_mask);
+ (hotkey_all_mask | hotkey_source_mask)
+ & ~hotkey_reserved_mask);
}
static struct device_attribute dev_attr_hotkey_recommended_mask =
__ATTR(hotkey_recommended_mask, S_IRUGO,
hotkey_recommended_mask_show, NULL);
-/* sysfs hotkey radio_sw ----------------------------------------------- */
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+
+/* sysfs hotkey hotkey_source_mask ------------------------------------- */
+static ssize_t hotkey_source_mask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_source_mask);
+}
+
+static ssize_t hotkey_source_mask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 0xffffffffUL, &t) ||
+ ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&hotkey_mutex))
+ return -ERESTARTSYS;
+
+ HOTKEY_CONFIG_CRITICAL_START
+ hotkey_source_mask = t;
+ HOTKEY_CONFIG_CRITICAL_END
+
+ hotkey_poll_setup(1);
+
+ mutex_unlock(&hotkey_mutex);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_hotkey_source_mask =
+ __ATTR(hotkey_source_mask, S_IWUSR | S_IRUGO,
+ hotkey_source_mask_show, hotkey_source_mask_store);
+
+/* sysfs hotkey hotkey_poll_freq --------------------------------------- */
+static ssize_t hotkey_poll_freq_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_poll_freq);
+}
+
+static ssize_t hotkey_poll_freq_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned long t;
+
+ if (parse_strtoul(buf, 25, &t))
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&hotkey_mutex))
+ return -ERESTARTSYS;
+
+ hotkey_poll_freq = t;
+
+ hotkey_poll_setup(1);
+ mutex_unlock(&hotkey_mutex);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_hotkey_poll_freq =
+ __ATTR(hotkey_poll_freq, S_IWUSR | S_IRUGO,
+ hotkey_poll_freq_show, hotkey_poll_freq_store);
+
+#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
+
+/* sysfs hotkey radio_sw (pollable) ------------------------------------ */
static ssize_t hotkey_radio_sw_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -925,6 +1659,13 @@ static ssize_t hotkey_radio_sw_show(struct device *dev,
static struct device_attribute dev_attr_hotkey_radio_sw =
__ATTR(hotkey_radio_sw, S_IRUGO, hotkey_radio_sw_show, NULL);
+static void hotkey_radio_sw_notify_change(void)
+{
+ if (tp_features.hotkey_wlsw)
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "hotkey_radio_sw");
+}
+
/* sysfs hotkey report_mode -------------------------------------------- */
static ssize_t hotkey_report_mode_show(struct device *dev,
struct device_attribute *attr,
@@ -937,43 +1678,132 @@ static ssize_t hotkey_report_mode_show(struct device *dev,
static struct device_attribute dev_attr_hotkey_report_mode =
__ATTR(hotkey_report_mode, S_IRUGO, hotkey_report_mode_show, NULL);
+/* sysfs wakeup reason (pollable) -------------------------------------- */
+static ssize_t hotkey_wakeup_reason_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason);
+}
+
+static struct device_attribute dev_attr_hotkey_wakeup_reason =
+ __ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
+
+void hotkey_wakeup_reason_notify_change(void)
+{
+ if (tp_features.hotkey_mask)
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "wakeup_reason");
+}
+
+/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */
+static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack);
+}
+
+static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
+ __ATTR(wakeup_hotunplug_complete, S_IRUGO,
+ hotkey_wakeup_hotunplug_complete_show, NULL);
+
+void hotkey_wakeup_hotunplug_complete_notify_change(void)
+{
+ if (tp_features.hotkey_mask)
+ sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
+ "wakeup_hotunplug_complete");
+}
+
/* --------------------------------------------------------------------- */
static struct attribute *hotkey_attributes[] __initdata = {
&dev_attr_hotkey_enable.attr,
+ &dev_attr_hotkey_bios_enabled.attr,
&dev_attr_hotkey_report_mode.attr,
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ &dev_attr_hotkey_mask.attr,
+ &dev_attr_hotkey_all_mask.attr,
+ &dev_attr_hotkey_recommended_mask.attr,
+ &dev_attr_hotkey_source_mask.attr,
+ &dev_attr_hotkey_poll_freq.attr,
+#endif
};
static struct attribute *hotkey_mask_attributes[] __initdata = {
- &dev_attr_hotkey_mask.attr,
- &dev_attr_hotkey_bios_enabled.attr,
&dev_attr_hotkey_bios_mask.attr,
+#ifndef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ &dev_attr_hotkey_mask.attr,
&dev_attr_hotkey_all_mask.attr,
&dev_attr_hotkey_recommended_mask.attr,
+#endif
+ &dev_attr_hotkey_wakeup_reason.attr,
+ &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
};
static int __init hotkey_init(struct ibm_init_struct *iibm)
{
-
+ /* Requirements for changing the default keymaps:
+ *
+ * 1. Many of the keys are mapped to KEY_RESERVED for very
+ * good reasons. Do not change them unless you have deep
+ * knowledge of the IBM and Lenovo ThinkPad firmware for
+ * the various ThinkPad models. The driver behaves
+ * differently for KEY_RESERVED: such keys have their
+ * hot key mask *unset* in mask_recommended, and also
+ * in the initial hot key mask programmed into the
+ * firmware at driver load time, which means the
+ * firmware may react very differently if you change them to
+ * something else;
+ *
+ * 2. You must be subscribed to the linux-thinkpad and
+ * ibm-acpi-devel mailing lists, and you should read the
+ * list archives since 2007 if you want to change the
+ * keymaps. This requirement exists so that you will
+ * know the history of problems with the thinkpad-
+ * acpi driver keymaps, and also so that you will be
+ * listening to any bug reports;
+ *
+ * 3. Do not send thinkpad-acpi specific patches directly
+ * for merging, *ever*. Send them to the linux-acpi
+ * mailing list for comments. Merging is to be done only
+ * through acpi-test and the ACPI maintainer.
+ *
+ * If the above is too much to ask, don't change the keymap.
+ * Ask the thinkpad-acpi maintainer to do it, instead.
+ */
static u16 ibm_keycode_map[] __initdata = {
/* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */
KEY_FN_F1, KEY_FN_F2, KEY_COFFEE, KEY_SLEEP,
KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
- /* Scan codes 0x0C to 0x0F: Other ACPI HKEY hot keys */
+
+ /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
KEY_UNKNOWN, /* 0x0D: FN+INSERT */
KEY_UNKNOWN, /* 0x0E: FN+DELETE */
+
+ /* brightness: firmware always reacts to them, unless
+ * X.org did some tricks in the radeon BIOS scratch
+ * registers of *some* models */
KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
- /* Scan codes 0x10 to 0x1F: Extended ACPI HKEY hot keys */
KEY_RESERVED, /* 0x10: FN+END (brightness down) */
+
+ /* Thinklight: firmware always reacts to it */
KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
+
KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
+
+ /* Volume: firmware always reacts to it and reprograms
+ * the built-in *extra* mixer. Never map it to control
+ * another mixer by default. */
KEY_RESERVED, /* 0x14: VOLUME UP */
KEY_RESERVED, /* 0x15: VOLUME DOWN */
KEY_RESERVED, /* 0x16: MUTE */
+
KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
+
/* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
@@ -983,20 +1813,37 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP,
KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8,
KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND,
- /* Scan codes 0x0C to 0x0F: Other ACPI HKEY hot keys */
+
+ /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */
KEY_UNKNOWN, /* 0x0C: FN+BACKSPACE */
KEY_UNKNOWN, /* 0x0D: FN+INSERT */
KEY_UNKNOWN, /* 0x0E: FN+DELETE */
+
KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
- /* Scan codes 0x10 to 0x1F: Extended ACPI HKEY hot keys */
KEY_RESERVED, /* 0x10: FN+END (brightness down) */
+
KEY_RESERVED, /* 0x11: FN+PGUP (thinklight toggle) */
+
KEY_UNKNOWN, /* 0x12: FN+PGDOWN */
KEY_ZOOM, /* 0x13: FN+SPACE (zoom) */
+
+ /* Volume: z60/z61, T60 (BIOS version?): firmware always
+ * reacts to it and reprograms the built-in *extra* mixer.
+ * Never map it to control another mixer by default.
+ *
+ * T60?, T61, R60?, R61: firmware and EC try to send
+ * these over the regular keyboard, so these are no-ops,
+ * but there are still weird bugs re. MUTE, so do not
+ * change unless you get test reports from all Lenovo
+ * models. May cause the BIOS to interfere with the
+ * HDA mixer.
+ */
KEY_RESERVED, /* 0x14: VOLUME UP */
KEY_RESERVED, /* 0x15: VOLUME DOWN */
KEY_RESERVED, /* 0x16: MUTE */
+
KEY_VENDOR, /* 0x17: Thinkpad/AccessIBM/Lenovo */
+
/* (assignments unknown, please report if found) */
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
@@ -1013,10 +1860,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "initializing hotkey subdriver\n");
BUG_ON(!tpacpi_inputdev);
+ BUG_ON(tpacpi_inputdev->open != NULL ||
+ tpacpi_inputdev->close != NULL);
- IBM_ACPIHANDLE_INIT(hkey);
+ TPACPI_ACPIHANDLE_INIT(hkey);
mutex_init(&hotkey_mutex);
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ mutex_init(&hotkey_thread_mutex);
+ mutex_init(&hotkey_thread_data_mutex);
+#endif
+
/* hotkey not supported on 570 */
tp_features.hotkey = hkey_handle != NULL;
@@ -1024,7 +1878,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
str_supported(tp_features.hotkey));
if (tp_features.hotkey) {
- hotkey_dev_attributes = create_attr_set(8, NULL);
+ hotkey_dev_attributes = create_attr_set(12, NULL);
if (!hotkey_dev_attributes)
return -ENOMEM;
res = add_many_to_attr_set(hotkey_dev_attributes,
@@ -1038,15 +1892,15 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
for HKEY interface version 0x100 */
if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
if ((hkeyv >> 8) != 1) {
- printk(IBM_ERR "unknown version of the "
+ printk(TPACPI_ERR "unknown version of the "
"HKEY interface: 0x%x\n", hkeyv);
- printk(IBM_ERR "please report this to %s\n",
- IBM_MAIL);
+ printk(TPACPI_ERR "please report this to %s\n",
+ TPACPI_MAIL);
} else {
/*
* MHKV 0x100 in A31, R40, R40e,
* T4x, X31, and later
- * */
+ */
tp_features.hotkey_mask = 1;
}
}
@@ -1057,25 +1911,46 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (tp_features.hotkey_mask) {
if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
"MHKA", "qd")) {
- printk(IBM_ERR
+ printk(TPACPI_ERR
"missing MHKA handler, "
"please report this to %s\n",
- IBM_MAIL);
- hotkey_all_mask = 0x080cU; /* FN+F12, FN+F4, FN+F3 */
+ TPACPI_MAIL);
+ /* FN+F12, FN+F4, FN+F3 */
+ hotkey_all_mask = 0x080cU;
}
}
- res = hotkey_get(&hotkey_orig_status, &hotkey_orig_mask);
+ /* hotkey_source_mask *must* be zero for
+ * the first hotkey_mask_get */
+ res = hotkey_status_get(&hotkey_orig_status);
if (!res && tp_features.hotkey_mask) {
- res = add_many_to_attr_set(hotkey_dev_attributes,
- hotkey_mask_attributes,
- ARRAY_SIZE(hotkey_mask_attributes));
+ res = hotkey_mask_get();
+ hotkey_orig_mask = hotkey_mask;
+ if (!res) {
+ res = add_many_to_attr_set(
+ hotkey_dev_attributes,
+ hotkey_mask_attributes,
+ ARRAY_SIZE(hotkey_mask_attributes));
+ }
+ }
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ if (tp_features.hotkey_mask) {
+ hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
+ & ~hotkey_all_mask;
+ } else {
+ hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK;
}
+ vdbg_printk(TPACPI_DBG_INIT,
+ "hotkey source mask 0x%08x, polling freq %d\n",
+ hotkey_source_mask, hotkey_poll_freq);
+#endif
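+ /* Illustrative example of the mask split above: on a model
+ * whose MHKA reports hotkey_all_mask = 0x080c (FN+F3, FN+F4,
+ * FN+F12), hotkey_source_mask becomes
+ * TPACPI_HKEY_NVRAM_GOOD_MASK with bits 2, 3 and 11 cleared,
+ * so NVRAM polling is used only for keys the firmware cannot
+ * report through HKEY itself. */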
+
/* Not all thinkpads have a hardware radio switch */
if (!res && acpi_evalf(hkey_handle, &status, "WLSW", "qd")) {
tp_features.hotkey_wlsw = 1;
- printk(IBM_INFO
+ printk(TPACPI_INFO
"radio switch found; radios are %s\n",
enabled(status, 0));
res = add_to_attr_set(hotkey_dev_attributes,
@@ -1094,7 +1969,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE,
GFP_KERNEL);
if (!hotkey_keycode_map) {
- printk(IBM_ERR "failed to allocate memory for key map\n");
+ printk(TPACPI_ERR
+ "failed to allocate memory for key map\n");
return -ENOMEM;
}
@@ -1133,15 +2009,26 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
dbg_printk(TPACPI_DBG_INIT,
"enabling hot key handling\n");
- res = hotkey_set(1, (hotkey_all_mask & ~hotkey_reserved_mask)
- | hotkey_orig_mask);
+ res = hotkey_status_set(1);
if (res)
return res;
+ res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask)
+ & ~hotkey_reserved_mask)
+ | hotkey_orig_mask);
+ if (res < 0 && res != -ENXIO)
+ return res;
dbg_printk(TPACPI_DBG_INIT,
"legacy hot key reporting over procfs %s\n",
(hotkey_report_mode < 2) ?
"enabled" : "disabled");
+
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ tpacpi_inputdev->open = &hotkey_inputdev_open;
+ tpacpi_inputdev->close = &hotkey_inputdev_close;
+
+ hotkey_poll_setup_safe(1);
+#endif
}
return (tp_features.hotkey)? 0 : 1;
@@ -1149,13 +2036,19 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
static void hotkey_exit(void)
{
- int res;
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ hotkey_poll_stop_sync();
+#endif
if (tp_features.hotkey) {
- dbg_printk(TPACPI_DBG_EXIT, "restoring original hotkey mask\n");
- res = hotkey_set(hotkey_orig_status, hotkey_orig_mask);
- if (res)
- printk(IBM_ERR "failed to restore hotkey to BIOS defaults\n");
+ dbg_printk(TPACPI_DBG_EXIT,
+ "restoring original hot key mask\n");
+ /* no short-circuit boolean operator below! */
+ if ((hotkey_mask_set(hotkey_orig_mask) |
+ hotkey_status_set(hotkey_orig_status)) != 0)
+ printk(TPACPI_ERR
+ "failed to restore hot key mask "
+ "to BIOS defaults\n");
}
if (hotkey_dev_attributes) {
@@ -1164,62 +2057,28 @@ static void hotkey_exit(void)
}
}
-static void tpacpi_input_send_key(unsigned int scancode,
- unsigned int keycode)
-{
- if (keycode != KEY_RESERVED) {
- mutex_lock(&tpacpi_inputdev_send_mutex);
-
- input_report_key(tpacpi_inputdev, keycode, 1);
- if (keycode == KEY_UNKNOWN)
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
- scancode);
- input_sync(tpacpi_inputdev);
-
- input_report_key(tpacpi_inputdev, keycode, 0);
- if (keycode == KEY_UNKNOWN)
- input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
- scancode);
- input_sync(tpacpi_inputdev);
-
- mutex_unlock(&tpacpi_inputdev_send_mutex);
- }
-}
-
-static void tpacpi_input_send_radiosw(void)
-{
- int wlsw;
-
- mutex_lock(&tpacpi_inputdev_send_mutex);
-
- if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
- input_report_switch(tpacpi_inputdev,
- SW_RADIO, !!wlsw);
- input_sync(tpacpi_inputdev);
- }
-
- mutex_unlock(&tpacpi_inputdev_send_mutex);
-}
-
static void hotkey_notify(struct ibm_struct *ibm, u32 event)
{
u32 hkey;
- unsigned int keycode, scancode;
+ unsigned int scancode;
int send_acpi_ev;
int ignore_acpi_ev;
+ int unk_ev;
if (event != 0x80) {
- printk(IBM_ERR "unknown HKEY notification event %d\n", event);
+ printk(TPACPI_ERR
+ "unknown HKEY notification event %d\n", event);
/* forward it to userspace, maybe it knows how to handle it */
- acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
- ibm->acpi->device->dev.bus_id,
- event, 0);
+ acpi_bus_generate_netlink_event(
+ ibm->acpi->device->pnp.device_class,
+ ibm->acpi->device->dev.bus_id,
+ event, 0);
return;
}
while (1) {
if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
- printk(IBM_ERR "failed to retrieve HKEY event\n");
+ printk(TPACPI_ERR "failed to retrieve HKEY event\n");
return;
}
@@ -1228,8 +2087,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
return;
}
- send_acpi_ev = 0;
+ send_acpi_ev = 1;
ignore_acpi_ev = 0;
+ unk_ev = 0;
switch (hkey >> 12) {
case 1:
@@ -1237,104 +2097,139 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
scancode = hkey & 0xfff;
if (scancode > 0 && scancode < 0x21) {
scancode--;
- keycode = hotkey_keycode_map[scancode];
- tpacpi_input_send_key(scancode, keycode);
+ if (!(hotkey_source_mask & (1 << scancode))) {
+ tpacpi_input_send_key(scancode);
+ send_acpi_ev = 0;
+ } else {
+ ignore_acpi_ev = 1;
+ }
} else {
- printk(IBM_ERR
- "hotkey 0x%04x out of range for keyboard map\n",
- hkey);
- send_acpi_ev = 1;
+ unk_ev = 1;
}
break;
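+ /* Worked example (values for illustration): hkey 0x1018 is
+ * hot key scan code 0x18, which indexes keymap slot 0x17
+ * (KEY_VENDOR in the default maps above); it is sent as an
+ * input event unless bit 0x17 is set in hotkey_source_mask,
+ * in which case it is left to the NVRAM polling path. */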
- case 5:
- /* 0x5000-0x5FFF: LID */
- /* we don't handle it through this path, just
- * eat up known LID events */
- if (hkey != 0x5001 && hkey != 0x5002) {
- printk(IBM_ERR
- "unknown LID-related HKEY event: 0x%04x\n",
- hkey);
- send_acpi_ev = 1;
+ case 2:
+ /* Wakeup reason */
+ switch (hkey) {
+ case 0x2304: /* suspend, undock */
+ case 0x2404: /* hibernation, undock */
+ hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
+ ignore_acpi_ev = 1;
+ break;
+ case 0x2305: /* suspend, bay eject */
+ case 0x2405: /* hibernation, bay eject */
+ hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
+ ignore_acpi_ev = 1;
+ break;
+ default:
+ unk_ev = 1;
+ }
+ if (hotkey_wakeup_reason != TP_ACPI_WAKEUP_NONE) {
+ printk(TPACPI_INFO
+ "woke up due to a hot-unplug "
+ "request...\n");
+ hotkey_wakeup_reason_notify_change();
+ }
+ break;
+ case 3:
+ /* bay-related wakeups */
+ if (hkey == 0x3003) {
+ hotkey_autosleep_ack = 1;
+ printk(TPACPI_INFO
+ "bay ejected\n");
+ hotkey_wakeup_hotunplug_complete_notify_change();
} else {
+ unk_ev = 1;
+ }
+ break;
+ case 4:
+ /* dock-related wakeups */
+ if (hkey == 0x4003) {
+ hotkey_autosleep_ack = 1;
+ printk(TPACPI_INFO
+ "undocked\n");
+ hotkey_wakeup_hotunplug_complete_notify_change();
+ } else {
+ unk_ev = 1;
+ }
+ break;
+ case 5:
+ /* 0x5000-0x5FFF: human interface helpers */
+ switch (hkey) {
+ case 0x5010: /* Lenovo new BIOS: brightness changed */
+ case 0x5009: /* X61t: swivel up (tablet mode) */
+ case 0x500a: /* X61t: swivel down (normal mode) */
+ case 0x500b: /* X61t: tablet pen inserted into bay */
+ case 0x500c: /* X61t: tablet pen removed from bay */
+ break;
+ case 0x5001:
+ case 0x5002:
+ /* LID switch events. Do not propagate */
ignore_acpi_ev = 1;
+ break;
+ default:
+ unk_ev = 1;
}
break;
case 7:
/* 0x7000-0x7FFF: misc */
if (tp_features.hotkey_wlsw && hkey == 0x7000) {
tpacpi_input_send_radiosw();
+ hotkey_radio_sw_notify_change();
+ send_acpi_ev = 0;
break;
}
/* fallthrough to default */
default:
- /* case 2: dock-related */
- /* 0x2305 - T43 waking up due to bay lever eject while aslept */
- /* case 3: ultra-bay related. maybe bay in dock? */
- /* 0x3003 - T43 after wake up by bay lever eject (0x2305) */
- printk(IBM_NOTICE "unhandled HKEY event 0x%04x\n", hkey);
- send_acpi_ev = 1;
+ unk_ev = 1;
+ }
+ if (unk_ev) {
+ printk(TPACPI_NOTICE
+ "unhandled HKEY event 0x%04x\n", hkey);
}
/* Legacy events */
- if (!ignore_acpi_ev && (send_acpi_ev || hotkey_report_mode < 2)) {
- acpi_bus_generate_proc_event(ibm->acpi->device, event, hkey);
+ if (!ignore_acpi_ev &&
+ (send_acpi_ev || hotkey_report_mode < 2)) {
+ acpi_bus_generate_proc_event(ibm->acpi->device,
+ event, hkey);
}
/* netlink events */
if (!ignore_acpi_ev && send_acpi_ev) {
- acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
- ibm->acpi->device->dev.bus_id,
- event, hkey);
+ acpi_bus_generate_netlink_event(
+ ibm->acpi->device->pnp.device_class,
+ ibm->acpi->device->dev.bus_id,
+ event, hkey);
}
}
}
-static void hotkey_resume(void)
+static void hotkey_suspend(pm_message_t state)
{
- tpacpi_input_send_radiosw();
+ /* Do these on suspend; we get the events on early resume! */
+ hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
+ hotkey_autosleep_ack = 0;
}
-/*
- * Call with hotkey_mutex held
- */
-static int hotkey_get(int *status, u32 *mask)
-{
- if (!acpi_evalf(hkey_handle, status, "DHKC", "d"))
- return -EIO;
-
- if (tp_features.hotkey_mask)
- if (!acpi_evalf(hkey_handle, mask, "DHKN", "d"))
- return -EIO;
-
- return 0;
-}
-
-/*
- * Call with hotkey_mutex held
- */
-static int hotkey_set(int status, u32 mask)
+static void hotkey_resume(void)
{
- int i;
-
- if (!acpi_evalf(hkey_handle, NULL, "MHKC", "vd", status))
- return -EIO;
-
- if (tp_features.hotkey_mask)
- for (i = 0; i < 32; i++) {
- int bit = ((1 << i) & mask) != 0;
- if (!acpi_evalf(hkey_handle,
- NULL, "MHKM", "vdd", i + 1, bit))
- return -EIO;
- }
-
- return 0;
+ if (hotkey_mask_get())
+ printk(TPACPI_ERR
+ "error while trying to read hot key mask "
+ "from firmware\n");
+ tpacpi_input_send_radiosw();
+ hotkey_radio_sw_notify_change();
+ hotkey_wakeup_reason_notify_change();
+ hotkey_wakeup_hotunplug_complete_notify_change();
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ hotkey_poll_setup_safe(0);
+#endif
}
/* procfs -------------------------------------------------------------- */
static int hotkey_read(char *p)
{
int res, status;
- u32 mask;
int len = 0;
if (!tp_features.hotkey) {
@@ -1344,14 +2239,16 @@ static int hotkey_read(char *p)
if (mutex_lock_interruptible(&hotkey_mutex))
return -ERESTARTSYS;
- res = hotkey_get(&status, &mask);
+ res = hotkey_status_get(&status);
+ if (!res)
+ res = hotkey_mask_get();
mutex_unlock(&hotkey_mutex);
if (res)
return res;
len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
if (tp_features.hotkey_mask) {
- len += sprintf(p + len, "mask:\t\t0x%08x\n", mask);
+ len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_mask);
len += sprintf(p + len,
"commands:\tenable, disable, reset, <mask>\n");
} else {
@@ -1367,7 +2264,6 @@ static int hotkey_write(char *buf)
int res, status;
u32 mask;
char *cmd;
- int do_cmd = 0;
if (!tp_features.hotkey)
return -ENODEV;
@@ -1375,9 +2271,8 @@ static int hotkey_write(char *buf)
if (mutex_lock_interruptible(&hotkey_mutex))
return -ERESTARTSYS;
- res = hotkey_get(&status, &mask);
- if (res)
- goto errexit;
+ status = -1;
+ mask = hotkey_mask;
res = 0;
while ((cmd = next_cmd(&buf))) {
@@ -1396,11 +2291,12 @@ static int hotkey_write(char *buf)
res = -EINVAL;
goto errexit;
}
- do_cmd = 1;
}
+ if (status != -1)
+ res = hotkey_status_set(status);
- if (do_cmd)
- res = hotkey_set(status, mask);
+ if (!res && mask != hotkey_mask)
+ res = hotkey_mask_set(mask);
errexit:
mutex_unlock(&hotkey_mutex);
@@ -1408,7 +2304,7 @@ errexit:
}
static const struct acpi_device_id ibm_htk_device_ids[] = {
- {IBM_HKEY_HID, 0},
+ {TPACPI_ACPI_HKEY_HID, 0},
{"", 0},
};
@@ -1425,6 +2321,7 @@ static struct ibm_struct hotkey_driver_data = {
.write = hotkey_write,
.exit = hotkey_exit,
.resume = hotkey_resume,
+ .suspend = hotkey_suspend,
.acpi = &ibm_hotkey_acpidriver,
};
@@ -1432,6 +2329,16 @@ static struct ibm_struct hotkey_driver_data = {
* Bluetooth subdriver
*/
+enum {
+ /* ACPI GBDC/SBDC bits */
+ TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
+ TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
+ TP_ACPI_BLUETOOTH_UNK = 0x04, /* unknown function */
+};
+
+static int bluetooth_get_radiosw(void);
+static int bluetooth_set_radiosw(int radio_on);
+
/* sysfs bluetooth enable ---------------------------------------------- */
static ssize_t bluetooth_enable_show(struct device *dev,
struct device_attribute *attr,
@@ -1483,7 +2390,7 @@ static int __init bluetooth_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "initializing bluetooth subdriver\n");
- IBM_ACPIHANDLE_INIT(hkey);
+ TPACPI_ACPIHANDLE_INIT(hkey);
/* bluetooth not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
G4x, R30, R31, R40e, R50e, T20-22, X20-21 */
@@ -1596,6 +2503,16 @@ static struct ibm_struct bluetooth_driver_data = {
* Wan subdriver
*/
+enum {
+ /* ACPI GWAN/SWAN bits */
+ TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
+ TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
+ TP_ACPI_WANCARD_UNK = 0x04, /* unknown function */
+};
+
+static int wan_get_radiosw(void);
+static int wan_set_radiosw(int radio_on);
+
/* sysfs wan enable ---------------------------------------------------- */
static ssize_t wan_enable_show(struct device *dev,
struct device_attribute *attr,
@@ -1647,7 +2564,7 @@ static int __init wan_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "initializing wan subdriver\n");
- IBM_ACPIHANDLE_INIT(hkey);
+ TPACPI_ACPIHANDLE_INIT(hkey);
tp_features.wan = hkey_handle &&
acpi_evalf(hkey_handle, &status, "GWAN", "qd");
@@ -1759,17 +2676,41 @@ static struct ibm_struct wan_driver_data = {
* Video subdriver
*/
+enum video_access_mode {
+ TPACPI_VIDEO_NONE = 0,
+ TPACPI_VIDEO_570, /* 570 */
+ TPACPI_VIDEO_770, /* 600e/x, 770e, 770x */
+ TPACPI_VIDEO_NEW, /* all others */
+};
+
+enum { /* video status flags, based on VIDEO_570 */
+ TP_ACPI_VIDEO_S_LCD = 0x01, /* LCD output enabled */
+ TP_ACPI_VIDEO_S_CRT = 0x02, /* CRT output enabled */
+ TP_ACPI_VIDEO_S_DVI = 0x08, /* DVI output enabled */
+};
+
+enum { /* TPACPI_VIDEO_570 constants */
+ TP_ACPI_VIDEO_570_PHSCMD = 0x87, /* unknown magic constant :( */
+ TP_ACPI_VIDEO_570_PHSMASK = 0x03, /* PHS bits that map to
+ * video_status_flags */
+ TP_ACPI_VIDEO_570_PHS2CMD = 0x8b, /* unknown magic constant :( */
+ TP_ACPI_VIDEO_570_PHS2SET = 0x80, /* unknown magic constant :( */
+};
+
static enum video_access_mode video_supported;
static int video_orig_autosw;
-IBM_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */
+static int video_autosw_get(void);
+static int video_autosw_set(int enable);
+
+TPACPI_HANDLE(vid, root, "\\_SB.PCI.AGP.VGA", /* 570 */
"\\_SB.PCI0.AGP0.VID0", /* 600e/x, 770x */
"\\_SB.PCI0.VID0", /* 770e */
"\\_SB.PCI0.VID", /* A21e, G4x, R50e, X30, X40 */
"\\_SB.PCI0.AGP.VID", /* all others */
); /* R30, R31 */
-IBM_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */
+TPACPI_HANDLE(vid2, root, "\\_SB.PCI0.AGPB.VID"); /* G41 */
static int __init video_init(struct ibm_init_struct *iibm)
{
@@ -1777,8 +2718,8 @@ static int __init video_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT, "initializing video subdriver\n");
- IBM_ACPIHANDLE_INIT(vid);
- IBM_ACPIHANDLE_INIT(vid2);
+ TPACPI_ACPIHANDLE_INIT(vid);
+ TPACPI_ACPIHANDLE_INIT(vid2);
if (vid2_handle && acpi_evalf(NULL, &ivga, "\\IVGA", "d") && ivga)
/* G41, assume IVGA doesn't change */
@@ -1809,7 +2750,7 @@ static void video_exit(void)
dbg_printk(TPACPI_DBG_EXIT,
"restoring original video autoswitch mode\n");
if (video_autosw_set(video_orig_autosw))
- printk(IBM_ERR "error while trying to restore original "
+ printk(TPACPI_ERR "error while trying to restore original "
"video autoswitch mode\n");
}
@@ -1882,13 +2823,14 @@ static int video_outputsw_set(int status)
res = acpi_evalf(vid_handle, NULL,
"ASWT", "vdd", status * 0x100, 0);
if (!autosw && video_autosw_set(autosw)) {
- printk(IBM_ERR "video auto-switch left enabled due to error\n");
+ printk(TPACPI_ERR
+ "video auto-switch left enabled due to error\n");
return -EIO;
}
break;
case TPACPI_VIDEO_NEW:
res = acpi_evalf(NULL, NULL, "\\VUPS", "vd", 0x80) &&
- acpi_evalf(NULL, NULL, "\\VSDS", "vdd", status, 1);
+ acpi_evalf(NULL, NULL, "\\VSDS", "vdd", status, 1);
break;
default:
return -ENOSYS;
@@ -1951,7 +2893,8 @@ static int video_outputsw_cycle(void)
return -ENOSYS;
}
if (!autosw && video_autosw_set(autosw)) {
- printk(IBM_ERR "video auto-switch left enabled due to error\n");
+ printk(TPACPI_ERR
+ "video auto-switch left enabled due to error\n");
return -EIO;
}
@@ -2080,16 +3023,16 @@ static struct ibm_struct video_driver_data = {
* Light (thinklight) subdriver
*/
-IBM_HANDLE(lght, root, "\\LGHT"); /* A21e, A2xm/p, T20-22, X20-21 */
-IBM_HANDLE(ledb, ec, "LEDB"); /* G4x */
+TPACPI_HANDLE(lght, root, "\\LGHT"); /* A21e, A2xm/p, T20-22, X20-21 */
+TPACPI_HANDLE(ledb, ec, "LEDB"); /* G4x */
static int __init light_init(struct ibm_init_struct *iibm)
{
vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n");
- IBM_ACPIHANDLE_INIT(ledb);
- IBM_ACPIHANDLE_INIT(lght);
- IBM_ACPIHANDLE_INIT(cmos);
+ TPACPI_ACPIHANDLE_INIT(ledb);
+ TPACPI_ACPIHANDLE_INIT(lght);
+ TPACPI_ACPIHANDLE_INIT(cmos);
/* light not supported on 570, 600e/x, 770e, 770x, G4x, R30, R31 */
tp_features.light = (cmos_handle || lght_handle) && !ledb_handle;
@@ -2167,14 +3110,18 @@ static struct ibm_struct light_driver_data = {
#ifdef CONFIG_THINKPAD_ACPI_DOCK
-IBM_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */
+static void dock_notify(struct ibm_struct *ibm, u32 event);
+static int dock_read(char *p);
+static int dock_write(char *buf);
+
+TPACPI_HANDLE(dock, root, "\\_SB.GDCK", /* X30, X31, X40 */
"\\_SB.PCI0.DOCK", /* 600e/x,770e,770x,A2xm/p,T20-22,X20-21 */
"\\_SB.PCI0.PCI1.DOCK", /* all others */
"\\_SB.PCI.ISA.SLCE", /* 570 */
); /* A21e,G4x,R30,R31,R32,R40,R40e,R50e */
/* don't list other alternatives as we install a notify handler on the 570 */
-IBM_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */
+TPACPI_HANDLE(pci, root, "\\_SB.PCI"); /* 570 */
static const struct acpi_device_id ibm_pci_device_ids[] = {
{PCI_ROOT_HID_STRING, 0},
@@ -2217,7 +3164,7 @@ static int __init dock_init(struct ibm_init_struct *iibm)
{
vdbg_printk(TPACPI_DBG_INIT, "initializing dock subdriver\n");
- IBM_ACPIHANDLE_INIT(dock);
+ TPACPI_ACPIHANDLE_INIT(dock);
vdbg_printk(TPACPI_DBG_INIT, "dock is %s\n",
str_supported(dock_handle != NULL));
@@ -2233,7 +3180,7 @@ static int __init dock_init2(struct ibm_init_struct *iibm)
if (dock_driver_data[0].flags.acpi_driver_registered &&
dock_driver_data[0].flags.acpi_notify_installed) {
- IBM_ACPIHANDLE_INIT(pci);
+ TPACPI_ACPIHANDLE_INIT(pci);
dock2_needed = (pci_handle != NULL);
vdbg_printk(TPACPI_DBG_INIT,
"dock PCI handler for the TP 570 is %s\n",
@@ -2265,7 +3212,7 @@ static void dock_notify(struct ibm_struct *ibm, u32 event)
else if (event == 0 && docked)
data = 3; /* dock */
else {
- printk(IBM_ERR "unknown dock event %d, status %d\n",
+ printk(TPACPI_ERR "unknown dock event %d, status %d\n",
event, _sta(dock_handle));
data = 0; /* unknown */
}
@@ -2321,18 +3268,19 @@ static int dock_write(char *buf)
*/
#ifdef CONFIG_THINKPAD_ACPI_BAY
-IBM_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */
+
+TPACPI_HANDLE(bay, root, "\\_SB.PCI.IDE.SECN.MAST", /* 570 */
"\\_SB.PCI0.IDE0.IDES.IDSM", /* 600e/x, 770e, 770x */
"\\_SB.PCI0.SATA.SCND.MSTR", /* T60, X60, Z60 */
"\\_SB.PCI0.IDE0.SCND.MSTR", /* all others */
); /* A21e, R30, R31 */
-IBM_HANDLE(bay_ej, bay, "_EJ3", /* 600e/x, A2xm/p, A3x */
+TPACPI_HANDLE(bay_ej, bay, "_EJ3", /* 600e/x, A2xm/p, A3x */
"_EJ0", /* all others */
); /* 570,A21e,G4x,R30,R31,R32,R40e,R50e */
-IBM_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV", /* A3x, R32 */
+TPACPI_HANDLE(bay2, root, "\\_SB.PCI0.IDE0.PRIM.SLAV", /* A3x, R32 */
"\\_SB.PCI0.IDE0.IDEP.IDPS", /* 600e/x, 770e, 770x */
); /* all others */
-IBM_HANDLE(bay2_ej, bay2, "_EJ3", /* 600e/x, 770e, A3x */
+TPACPI_HANDLE(bay2_ej, bay2, "_EJ3", /* 600e/x, 770e, A3x */
"_EJ0", /* 770x */
); /* all others */
@@ -2340,12 +3288,12 @@ static int __init bay_init(struct ibm_init_struct *iibm)
{
vdbg_printk(TPACPI_DBG_INIT, "initializing bay subdriver\n");
- IBM_ACPIHANDLE_INIT(bay);
+ TPACPI_ACPIHANDLE_INIT(bay);
if (bay_handle)
- IBM_ACPIHANDLE_INIT(bay_ej);
- IBM_ACPIHANDLE_INIT(bay2);
+ TPACPI_ACPIHANDLE_INIT(bay_ej);
+ TPACPI_ACPIHANDLE_INIT(bay2);
if (bay2_handle)
- IBM_ACPIHANDLE_INIT(bay2_ej);
+ TPACPI_ACPIHANDLE_INIT(bay2_ej);
tp_features.bay_status = bay_handle &&
acpi_evalf(bay_handle, NULL, "_STA", "qv");
@@ -2474,7 +3422,7 @@ static int __init cmos_init(struct ibm_init_struct *iibm)
vdbg_printk(TPACPI_DBG_INIT,
"initializing cmos commands subdriver\n");
- IBM_ACPIHANDLE_INIT(cmos);
+ TPACPI_ACPIHANDLE_INIT(cmos);
vdbg_printk(TPACPI_DBG_INIT, "cmos commands are %s\n",
str_supported(cmos_handle != NULL));
@@ -2538,10 +3486,24 @@ static struct ibm_struct cmos_driver_data = {
* LED subdriver
*/
+enum led_access_mode {
+ TPACPI_LED_NONE = 0,
+ TPACPI_LED_570, /* 570 */
+ TPACPI_LED_OLD, /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
+ TPACPI_LED_NEW, /* all others */
+};
+
+enum { /* For TPACPI_LED_OLD */
+ TPACPI_LED_EC_HLCL = 0x0c, /* EC reg to get led to power on */
+ TPACPI_LED_EC_HLBL = 0x0d, /* EC reg to blink a lit led */
+ TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */
+};
+
static enum led_access_mode led_supported;
-IBM_HANDLE(led, ec, "SLED", /* 570 */
- "SYSL", /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
+TPACPI_HANDLE(led, ec, "SLED", /* 570 */
+ "SYSL", /* 600e/x, 770e, 770x, A21e, A2xm/p, */
+ /* T20-22, X20-21 */
"LED", /* all others */
); /* R30, R31 */
@@ -2549,7 +3511,7 @@ static int __init led_init(struct ibm_init_struct *iibm)
{
vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n");
- IBM_ACPIHANDLE_INIT(led);
+ TPACPI_ACPIHANDLE_INIT(led);
if (!led_handle)
/* led not supported on R30, R31 */
@@ -2638,13 +3600,11 @@ static int led_write(char *buf)
led = 1 << led;
ret = ec_write(TPACPI_LED_EC_HLMS, led);
if (ret >= 0)
- ret =
- ec_write(TPACPI_LED_EC_HLBL,
- led * led_exp_hlbl[ind]);
+ ret = ec_write(TPACPI_LED_EC_HLBL,
+ led * led_exp_hlbl[ind]);
if (ret >= 0)
- ret =
- ec_write(TPACPI_LED_EC_HLCL,
- led * led_exp_hlcl[ind]);
+ ret = ec_write(TPACPI_LED_EC_HLCL,
+ led * led_exp_hlcl[ind]);
if (ret < 0)
return ret;
} else {
@@ -2668,13 +3628,13 @@ static struct ibm_struct led_driver_data = {
* Beep subdriver
*/
-IBM_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */
+TPACPI_HANDLE(beep, ec, "BEEP"); /* all except R30, R31 */
static int __init beep_init(struct ibm_init_struct *iibm)
{
vdbg_printk(TPACPI_DBG_INIT, "initializing beep subdriver\n");
- IBM_ACPIHANDLE_INIT(beep);
+ TPACPI_ACPIHANDLE_INIT(beep);
vdbg_printk(TPACPI_DBG_INIT, "beep is %s\n",
str_supported(beep_handle != NULL));
@@ -2727,8 +3687,109 @@ static struct ibm_struct beep_driver_data = {
* Thermal subdriver
*/
+enum thermal_access_mode {
+ TPACPI_THERMAL_NONE = 0, /* No thermal support */
+ TPACPI_THERMAL_ACPI_TMP07, /* Use ACPI TMP0-7 */
+ TPACPI_THERMAL_ACPI_UPDT, /* Use ACPI TMP0-7 with UPDT */
+ TPACPI_THERMAL_TPEC_8, /* Use ACPI EC regs, 8 sensors */
+ TPACPI_THERMAL_TPEC_16, /* Use ACPI EC regs, 16 sensors */
+};
+
+enum { /* TPACPI_THERMAL_TPEC_* */
+ TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
+ TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
+ TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
+};
+
+#define TPACPI_MAX_THERMAL_SENSORS 16 /* Max thermal sensors supported */
+struct ibm_thermal_sensors_struct {
+ s32 temp[TPACPI_MAX_THERMAL_SENSORS];
+};
+
static enum thermal_access_mode thermal_read_mode;
+/* idx is zero-based */
+static int thermal_get_sensor(int idx, s32 *value)
+{
+ int t;
+ s8 tmp;
+ char tmpi[5];
+
+ t = TP_EC_THERMAL_TMP0;
+
+ switch (thermal_read_mode) {
+#if TPACPI_MAX_THERMAL_SENSORS >= 16
+ case TPACPI_THERMAL_TPEC_16:
+ if (idx >= 8 && idx <= 15) {
+ t = TP_EC_THERMAL_TMP8;
+ idx -= 8;
+ }
+ /* fallthrough */
+#endif
+ case TPACPI_THERMAL_TPEC_8:
+ if (idx <= 7) {
+ if (!acpi_ec_read(t + idx, &tmp))
+ return -EIO;
+ *value = tmp * 1000;
+ return 0;
+ }
+ break;
+
+ case TPACPI_THERMAL_ACPI_UPDT:
+ if (idx <= 7) {
+ snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
+ if (!acpi_evalf(ec_handle, NULL, "UPDT", "v"))
+ return -EIO;
+ if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
+ return -EIO;
+ *value = (t - 2732) * 100;
+ return 0;
+ }
+ break;
+
+ case TPACPI_THERMAL_ACPI_TMP07:
+ if (idx <= 7) {
+ snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
+ if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
+ return -EIO;
+ if (t > 127 || t < -127)
+ t = TP_EC_THERMAL_TMP_NA;
+ *value = t * 1000;
+ return 0;
+ }
+ break;
+
+ case TPACPI_THERMAL_NONE:
+ default:
+ return -ENOSYS;
+ }
+
+ return -EINVAL;
+}
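+/* Example of the ACPI_UPDT conversion above: the TMPx values
+ * refreshed by UPDT appear to be in tenths of a kelvin, so a
+ * raw reading of t = 3032 yields (3032 - 2732) * 100 = 30000
+ * millidegrees Celsius (30.0 C), the same milli-degree scale
+ * as the EC path's tmp * 1000. */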
+
+static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
+{
+ int res, i;
+ int n;
+
+ n = 8;
+ i = 0;
+
+ if (!s)
+ return -EINVAL;
+
+ if (thermal_read_mode == TPACPI_THERMAL_TPEC_16)
+ n = 16;
+
+ for (i = 0 ; i < n; i++) {
+ res = thermal_get_sensor(i, &s->temp[i]);
+ if (res)
+ return res;
+ }
+
+ return n;
+}
+
/* sysfs temp##_input -------------------------------------------------- */
static ssize_t thermal_temp_input_show(struct device *dev,
@@ -2751,7 +3812,8 @@ static ssize_t thermal_temp_input_show(struct device *dev,
}
#define THERMAL_SENSOR_ATTR_TEMP(_idxA, _idxB) \
- SENSOR_ATTR(temp##_idxA##_input, S_IRUGO, thermal_temp_input_show, NULL, _idxB)
+ SENSOR_ATTR(temp##_idxA##_input, S_IRUGO, \
+ thermal_temp_input_show, NULL, _idxB)
static struct sensor_device_attribute sensor_dev_attr_thermal_temp_input[] = {
THERMAL_SENSOR_ATTR_TEMP(1, 0),
@@ -2845,12 +3907,13 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
if (ta1 == 0) {
/* This is sheer paranoia, but we handle it anyway */
if (acpi_tmp7) {
- printk(IBM_ERR
+ printk(TPACPI_ERR
"ThinkPad ACPI EC access misbehaving, "
- "falling back to ACPI TMPx access mode\n");
+ "falling back to ACPI TMPx access "
+ "mode\n");
thermal_read_mode = TPACPI_THERMAL_ACPI_TMP07;
} else {
- printk(IBM_ERR
+ printk(TPACPI_ERR
"ThinkPad ACPI EC access misbehaving, "
"disabling thermal sensors access\n");
thermal_read_mode = TPACPI_THERMAL_NONE;
@@ -2877,7 +3940,7 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
str_supported(thermal_read_mode != TPACPI_THERMAL_NONE),
thermal_read_mode);
- switch(thermal_read_mode) {
+ switch (thermal_read_mode) {
case TPACPI_THERMAL_TPEC_16:
res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
&thermal_temp_input16_group);
@@ -2902,7 +3965,7 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
static void thermal_exit(void)
{
- switch(thermal_read_mode) {
+ switch (thermal_read_mode) {
case TPACPI_THERMAL_TPEC_16:
sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
&thermal_temp_input16_group);
@@ -2919,88 +3982,6 @@ static void thermal_exit(void)
}
}
-/* idx is zero-based */
-static int thermal_get_sensor(int idx, s32 *value)
-{
- int t;
- s8 tmp;
- char tmpi[5];
-
- t = TP_EC_THERMAL_TMP0;
-
- switch (thermal_read_mode) {
-#if TPACPI_MAX_THERMAL_SENSORS >= 16
- case TPACPI_THERMAL_TPEC_16:
- if (idx >= 8 && idx <= 15) {
- t = TP_EC_THERMAL_TMP8;
- idx -= 8;
- }
- /* fallthrough */
-#endif
- case TPACPI_THERMAL_TPEC_8:
- if (idx <= 7) {
- if (!acpi_ec_read(t + idx, &tmp))
- return -EIO;
- *value = tmp * 1000;
- return 0;
- }
- break;
-
- case TPACPI_THERMAL_ACPI_UPDT:
- if (idx <= 7) {
- snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
- if (!acpi_evalf(ec_handle, NULL, "UPDT", "v"))
- return -EIO;
- if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
- return -EIO;
- *value = (t - 2732) * 100;
- return 0;
- }
- break;
-
- case TPACPI_THERMAL_ACPI_TMP07:
- if (idx <= 7) {
- snprintf(tmpi, sizeof(tmpi), "TMP%c", '0' + idx);
- if (!acpi_evalf(ec_handle, &t, tmpi, "d"))
- return -EIO;
- if (t > 127 || t < -127)
- t = TP_EC_THERMAL_TMP_NA;
- *value = t * 1000;
- return 0;
- }
- break;
-
- case TPACPI_THERMAL_NONE:
- default:
- return -ENOSYS;
- }
-
- return -EINVAL;
-}
-
-static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s)
-{
- int res, i;
- int n;
-
- n = 8;
- i = 0;
-
- if (!s)
- return -EINVAL;
-
- if (thermal_read_mode == TPACPI_THERMAL_TPEC_16)
- n = 16;
-
- for(i = 0 ; i < n; i++) {
- res = thermal_get_sensor(i, &s->temp[i]);
- if (res)
- return res;
- }
-
- return n;
-}
-
static int thermal_read(char *p)
{
int len = 0;
@@ -3103,14 +4084,110 @@ static struct ibm_struct ecdump_driver_data = {
* Backlight/brightness subdriver
*/
+#define TPACPI_BACKLIGHT_DEV_NAME "thinkpad_screen"
+
static struct backlight_device *ibm_backlight_device;
+static int brightness_offset = 0x31;
+static int brightness_mode;
+static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */
+
+static struct mutex brightness_mutex;
+
+/*
+ * ThinkPads can read brightness from two places: EC 0x31, or
+ * CMOS NVRAM byte 0x5E, bits 0-3.
+ */
+static int brightness_get(struct backlight_device *bd)
+{
+ u8 lec = 0, lcmos = 0, level = 0;
+
+ if (brightness_mode & 1) {
+ if (!acpi_ec_read(brightness_offset, &lec))
+ return -EIO;
+ lec &= (tp_features.bright_16levels)? 0x0f : 0x07;
+ level = lec;
+ }
+ if (brightness_mode & 2) {
+ lcmos = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
+ & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
+ >> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
+ lcmos &= (tp_features.bright_16levels)? 0x0f : 0x07;
+ level = lcmos;
+ }
+
+ if (brightness_mode == 3 && lec != lcmos) {
+ printk(TPACPI_ERR
+ "CMOS NVRAM (%u) and EC (%u) do not agree "
+ "on display brightness level\n",
+ (unsigned int) lcmos,
+ (unsigned int) lec);
+ return -EIO;
+ }
+
+ return level;
+}
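+/* Worked example, assuming the NVRAM mask/shift select bits
+ * 0-3 of byte 0x5E as described above: on a 16-level model a
+ * raw NVRAM byte of 0x2b gives lcmos = 0x0b, so the reported
+ * brightness level is 11 (out of 0..15). */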
+
+/* May return EINTR which can always be mapped to ERESTARTSYS */
+static int brightness_set(int value)
+{
+ int cmos_cmd, inc, i, res;
+ int current_value;
+
+ if (value > ((tp_features.bright_16levels)? 15 : 7))
+ return -EINVAL;
+
+ res = mutex_lock_interruptible(&brightness_mutex);
+ if (res < 0)
+ return res;
+
+ current_value = brightness_get(NULL);
+ if (current_value < 0) {
+ res = current_value;
+ goto errout;
+ }
+
+ cmos_cmd = value > current_value ?
+ TP_CMOS_BRIGHTNESS_UP :
+ TP_CMOS_BRIGHTNESS_DOWN;
+ inc = (value > current_value)? 1 : -1;
+
+ res = 0;
+ for (i = current_value; i != value; i += inc) {
+ if ((brightness_mode & 2) &&
+ issue_thinkpad_cmos_command(cmos_cmd)) {
+ res = -EIO;
+ goto errout;
+ }
+ if ((brightness_mode & 1) &&
+ !acpi_ec_write(brightness_offset, i + inc)) {
+ res = -EIO;
+ goto errout;
+ }
+ }
+
+errout:
+ mutex_unlock(&brightness_mutex);
+ return res;
+}
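+/* Example of the stepping loop above: moving from level 3 to
+ * level 6 issues three TP_CMOS_BRIGHTNESS_UP commands (when
+ * brightness_mode & 2) and writes 4, 5 and 6 in turn to EC
+ * offset brightness_offset (when brightness_mode & 1), so the
+ * CMOS/NVRAM and EC views stay in step. */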
+
+/* sysfs backlight class ----------------------------------------------- */
+
+static int brightness_update_status(struct backlight_device *bd)
+{
+ /* it is the backlight class's job (caller) to handle
+ * EINTR and other errors properly */
+ return brightness_set(
+ (bd->props.fb_blank == FB_BLANK_UNBLANK &&
+ bd->props.power == FB_BLANK_UNBLANK) ?
+ bd->props.brightness : 0);
+}
static struct backlight_ops ibm_backlight_data = {
- .get_brightness = brightness_get,
- .update_status = brightness_update_status,
+ .get_brightness = brightness_get,
+ .update_status = brightness_update_status,
};
-static struct mutex brightness_mutex;
+/* --------------------------------------------------------------------- */
static int __init tpacpi_query_bcll_levels(acpi_handle handle)
{
@@ -3121,8 +4198,8 @@ static int __init tpacpi_query_bcll_levels(acpi_handle handle)
if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
obj = (union acpi_object *)buffer.pointer;
if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
- printk(IBM_ERR "Unknown BCLL data, "
- "please report this to %s\n", IBM_MAIL);
+ printk(TPACPI_ERR "Unknown BCLL data, "
+ "please report this to %s\n", TPACPI_MAIL);
rc = 0;
} else {
rc = obj->package.count;
@@ -3160,14 +4237,15 @@ static int __init brightness_check_levels(void)
void *found_node = NULL;
if (!vid_handle) {
- IBM_ACPIHANDLE_INIT(vid);
+ TPACPI_ACPIHANDLE_INIT(vid);
}
if (!vid_handle)
return 0;
/* Search for a BCLL package with 16 levels */
status = acpi_walk_namespace(ACPI_TYPE_PACKAGE, vid_handle, 3,
- brightness_find_bcll, NULL, &found_node);
+ brightness_find_bcll, NULL,
+ &found_node);
return (ACPI_SUCCESS(status) && found_node != NULL);
}
@@ -3193,14 +4271,14 @@ static int __init brightness_check_std_acpi_support(void)
void *found_node = NULL;
if (!vid_handle) {
- IBM_ACPIHANDLE_INIT(vid);
+ TPACPI_ACPIHANDLE_INIT(vid);
}
if (!vid_handle)
return 0;
/* Search for a _BCL method, but don't execute it */
status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
- brightness_find_bcl, NULL, &found_node);
+ brightness_find_bcl, NULL, &found_node);
return (ACPI_SUCCESS(status) && found_node != NULL);
}
@@ -3215,12 +4293,14 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
if (!brightness_enable) {
dbg_printk(TPACPI_DBG_INIT,
- "brightness support disabled by module parameter\n");
+ "brightness support disabled by "
+ "module parameter\n");
return 1;
} else if (brightness_enable > 1) {
if (brightness_check_std_acpi_support()) {
- printk(IBM_NOTICE
- "standard ACPI backlight interface available, not loading native one...\n");
+ printk(TPACPI_NOTICE
+ "standard ACPI backlight interface "
+ "available, not loading native one...\n");
return 1;
}
}
@@ -3247,13 +4327,14 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
return 1;
if (tp_features.bright_16levels)
- printk(IBM_INFO "detected a 16-level brightness capable ThinkPad\n");
+ printk(TPACPI_INFO
+ "detected a 16-level brightness capable ThinkPad\n");
ibm_backlight_device = backlight_device_register(
TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
&ibm_backlight_data);
if (IS_ERR(ibm_backlight_device)) {
- printk(IBM_ERR "Could not register backlight device\n");
+ printk(TPACPI_ERR "Could not register backlight device\n");
return PTR_ERR(ibm_backlight_device);
}
vdbg_printk(TPACPI_DBG_INIT, "brightness is supported\n");
@@ -3276,99 +4357,13 @@ static void brightness_exit(void)
}
}
-static int brightness_update_status(struct backlight_device *bd)
-{
- /* it is the backlight class's job (caller) to handle
- * EINTR and other errors properly */
- return brightness_set(
- (bd->props.fb_blank == FB_BLANK_UNBLANK &&
- bd->props.power == FB_BLANK_UNBLANK) ?
- bd->props.brightness : 0);
-}
-
-/*
- * ThinkPads can read brightness from two places: EC 0x31, or
- * CMOS NVRAM byte 0x5E, bits 0-3.
- */
-static int brightness_get(struct backlight_device *bd)
-{
- u8 lec = 0, lcmos = 0, level = 0;
-
- if (brightness_mode & 1) {
- if (!acpi_ec_read(brightness_offset, &lec))
- return -EIO;
- lec &= (tp_features.bright_16levels)? 0x0f : 0x07;
- level = lec;
- };
- if (brightness_mode & 2) {
- lcmos = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
- & TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
- >> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
- lcmos &= (tp_features.bright_16levels)? 0x0f : 0x07;
- level = lcmos;
- }
-
- if (brightness_mode == 3 && lec != lcmos) {
- printk(IBM_ERR
- "CMOS NVRAM (%u) and EC (%u) do not agree "
- "on display brightness level\n",
- (unsigned int) lcmos,
- (unsigned int) lec);
- return -EIO;
- }
-
- return level;
-}
-
-/* May return EINTR which can always be mapped to ERESTARTSYS */
-static int brightness_set(int value)
-{
- int cmos_cmd, inc, i, res;
- int current_value;
-
- if (value > ((tp_features.bright_16levels)? 15 : 7))
- return -EINVAL;
-
- res = mutex_lock_interruptible(&brightness_mutex);
- if (res < 0)
- return res;
-
- current_value = brightness_get(NULL);
- if (current_value < 0) {
- res = current_value;
- goto errout;
- }
-
- cmos_cmd = value > current_value ?
- TP_CMOS_BRIGHTNESS_UP :
- TP_CMOS_BRIGHTNESS_DOWN;
- inc = (value > current_value)? 1 : -1;
-
- res = 0;
- for (i = current_value; i != value; i += inc) {
- if ((brightness_mode & 2) &&
- issue_thinkpad_cmos_command(cmos_cmd)) {
- res = -EIO;
- goto errout;
- }
- if ((brightness_mode & 1) &&
- !acpi_ec_write(brightness_offset, i + inc)) {
- res = -EIO;
- goto errout;;
- }
- }
-
-errout:
- mutex_unlock(&brightness_mutex);
- return res;
-}
-
static int brightness_read(char *p)
{
int len = 0;
int level;
- if ((level = brightness_get(NULL)) < 0) {
+ level = brightness_get(NULL);
+ if (level < 0) {
len += sprintf(p + len, "level:\t\tunreadable\n");
} else {
len += sprintf(p + len, "level:\t\t%d\n", level);
@@ -3425,6 +4420,8 @@ static struct ibm_struct brightness_driver_data = {
* Volume subdriver
*/
+static int volume_offset = 0x30;
+
static int volume_read(char *p)
{
int len = 0;
@@ -3474,8 +4471,11 @@ static int volume_write(char *buf)
} else
return -EINVAL;
- if (new_level != level) { /* mute doesn't change */
- cmos_cmd = new_level > level ? TP_CMOS_VOLUME_UP : TP_CMOS_VOLUME_DOWN;
+ if (new_level != level) {
+ /* mute doesn't change */
+
+ cmos_cmd = (new_level > level) ?
+ TP_CMOS_VOLUME_UP : TP_CMOS_VOLUME_DOWN;
inc = new_level > level ? 1 : -1;
if (mute && (issue_thinkpad_cmos_command(cmos_cmd) ||
@@ -3487,14 +4487,18 @@ static int volume_write(char *buf)
!acpi_ec_write(volume_offset, i + inc))
return -EIO;
- if (mute && (issue_thinkpad_cmos_command(TP_CMOS_VOLUME_MUTE) ||
- !acpi_ec_write(volume_offset,
- new_level + mute)))
+ if (mute &&
+ (issue_thinkpad_cmos_command(TP_CMOS_VOLUME_MUTE) ||
+ !acpi_ec_write(volume_offset, new_level + mute))) {
return -EIO;
+ }
}
- if (new_mute != mute) { /* level doesn't change */
- cmos_cmd = new_mute ? TP_CMOS_VOLUME_MUTE : TP_CMOS_VOLUME_UP;
+ if (new_mute != mute) {
+ /* level doesn't change */
+
+ cmos_cmd = (new_mute) ?
+ TP_CMOS_VOLUME_MUTE : TP_CMOS_VOLUME_UP;
if (issue_thinkpad_cmos_command(cmos_cmd) ||
!acpi_ec_write(volume_offset, level + new_mute))
@@ -3616,26 +4620,377 @@ static struct ibm_struct volume_driver_data = {
* but the ACPI tables just mention level 7.
*/
+enum { /* Fan control constants */
+ fan_status_offset = 0x2f, /* EC register 0x2f */
+ fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM)
+ * 0x84 must be read before 0x85 */
+
+ TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */
+ TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */
+
+ TPACPI_FAN_LAST_LEVEL = 0x100, /* Use cached last-seen fan level */
+};
+
+enum fan_status_access_mode {
+ TPACPI_FAN_NONE = 0, /* No fan status or control */
+ TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */
+ TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */
+};
+
+enum fan_control_access_mode {
+ TPACPI_FAN_WR_NONE = 0, /* No fan control */
+ TPACPI_FAN_WR_ACPI_SFAN, /* Use ACPI SFAN */
+ TPACPI_FAN_WR_TPEC, /* Use ACPI EC reg 0x2f */
+ TPACPI_FAN_WR_ACPI_FANS, /* Use ACPI FANS and EC reg 0x2f */
+};
+
+enum fan_control_commands {
+ TPACPI_FAN_CMD_SPEED = 0x0001, /* speed command */
+ TPACPI_FAN_CMD_LEVEL = 0x0002, /* level command */
+ TPACPI_FAN_CMD_ENABLE = 0x0004, /* enable/disable cmd,
+ * and also watchdog cmd */
+};
+
+static int fan_control_allowed;
+
static enum fan_status_access_mode fan_status_access_mode;
static enum fan_control_access_mode fan_control_access_mode;
static enum fan_control_commands fan_control_commands;
static u8 fan_control_initial_status;
static u8 fan_control_desired_level;
+static int fan_watchdog_maxinterval;
+
+static struct mutex fan_mutex;
static void fan_watchdog_fire(struct work_struct *ignored);
-static int fan_watchdog_maxinterval;
static DECLARE_DELAYED_WORK(fan_watchdog_task, fan_watchdog_fire);
-IBM_HANDLE(fans, ec, "FANS"); /* X31, X40, X41 */
-IBM_HANDLE(gfan, ec, "GFAN", /* 570 */
+TPACPI_HANDLE(fans, ec, "FANS"); /* X31, X40, X41 */
+TPACPI_HANDLE(gfan, ec, "GFAN", /* 570 */
"\\FSPD", /* 600e/x, 770e, 770x */
); /* all others */
-IBM_HANDLE(sfan, ec, "SFAN", /* 570 */
+TPACPI_HANDLE(sfan, ec, "SFAN", /* 570 */
"JFNS", /* 770x-JL */
); /* all others */
/*
+ * Call with fan_mutex held
+ */
+static void fan_update_desired_level(u8 status)
+{
+ if ((status &
+ (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
+ if (status > 7)
+ fan_control_desired_level = 7;
+ else
+ fan_control_desired_level = status;
+ }
+}
+
+static int fan_get_status(u8 *status)
+{
+ u8 s;
+
+ /* TODO:
+ * Add TPACPI_FAN_RD_ACPI_FANS ? */
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_ACPI_GFAN:
+ /* 570, 600e/x, 770e, 770x */
+
+ if (unlikely(!acpi_evalf(gfan_handle, &s, NULL, "d")))
+ return -EIO;
+
+ if (likely(status))
+ *status = s & 0x07;
+
+ break;
+
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!acpi_ec_read(fan_status_offset, &s)))
+ return -EIO;
+
+ if (likely(status))
+ *status = s;
+
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int fan_get_status_safe(u8 *status)
+{
+ int rc;
+ u8 s;
+
+ if (mutex_lock_interruptible(&fan_mutex))
+ return -ERESTARTSYS;
+ rc = fan_get_status(&s);
+ if (!rc)
+ fan_update_desired_level(s);
+ mutex_unlock(&fan_mutex);
+
+ if (status)
+ *status = s;
+
+ return rc;
+}
+
+static int fan_get_speed(unsigned int *speed)
+{
+ u8 hi, lo;
+
+ switch (fan_status_access_mode) {
+ case TPACPI_FAN_RD_TPEC:
+ /* all except 570, 600e/x, 770e, 770x */
+ if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) ||
+ !acpi_ec_read(fan_rpm_offset + 1, &hi)))
+ return -EIO;
+
+ if (likely(speed))
+ *speed = (hi << 8) | lo;
+
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
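+/* Example: EC reads of lo = 0x40 (0x84) and hi = 0x0f (0x85)
+ * give a fan speed of (0x0f << 8) | 0x40 = 0x0f40 = 3904 RPM;
+ * reading 0x84 before 0x85 keeps the two halves consistent,
+ * as noted in the fan control constants above. */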
+
+static int fan_set_level(int level)
+{
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ if (level >= 0 && level <= 7) {
+ if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", level))
+ return -EIO;
+ } else
+ return -EINVAL;
+ break;
+
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ if ((level != TP_EC_FAN_AUTO) &&
+ (level != TP_EC_FAN_FULLSPEED) &&
+ ((level < 0) || (level > 7)))
+ return -EINVAL;
+
+ /* safety net in case the EC does not support the AUTO
+ * or FULLSPEED mode bits and just ignores them */
+ if (level & TP_EC_FAN_FULLSPEED)
+ level |= 7; /* safety min speed 7 */
+ else if (level & TP_EC_FAN_AUTO)
+ level |= 4; /* safety min speed 4 */
+
+ if (!acpi_ec_write(fan_status_offset, level))
+ return -EIO;
+ else
+ tp_features.fan_ctrl_status_undef = 0;
+ break;
+
+ default:
+ return -ENXIO;
+ }
+ return 0;
+}
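+/* Example of the safety net above: a request for
+ * TP_EC_FAN_AUTO (0x80) is written to the EC as 0x84 (auto
+ * mode, minimum level 4) and TP_EC_FAN_FULLSPEED (0x40) as
+ * 0x47, so an EC that ignores the mode bits still keeps the
+ * fan at a reasonable level. */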
+
+static int fan_set_level_safe(int level)
+{
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_interruptible(&fan_mutex))
+ return -ERESTARTSYS;
+
+ if (level == TPACPI_FAN_LAST_LEVEL)
+ level = fan_control_desired_level;
+
+ rc = fan_set_level(level);
+ if (!rc)
+ fan_update_desired_level(level);
+
+ mutex_unlock(&fan_mutex);
+ return rc;
+}
+
+static int fan_set_enable(void)
+{
+ u8 s;
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_interruptible(&fan_mutex))
+ return -ERESTARTSYS;
+
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ rc = fan_get_status(&s);
+ if (rc < 0)
+ break;
+
+ /* Don't go out of emergency fan mode */
+ if (s != 7) {
+ s &= 0x07;
+ s |= TP_EC_FAN_AUTO | 4; /* min fan speed 4 */
+ }
+
+ if (!acpi_ec_write(fan_status_offset, s))
+ rc = -EIO;
+ else {
+ tp_features.fan_ctrl_status_undef = 0;
+ rc = 0;
+ }
+ break;
+
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ rc = fan_get_status(&s);
+ if (rc < 0)
+ break;
+
+ s &= 0x07;
+
+ /* Set fan to at least level 4 */
+ s |= 4;
+
+ if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", s))
+ rc = -EIO;
+ else
+ rc = 0;
+ break;
+
+ default:
+ rc = -ENXIO;
+ }
+
+ mutex_unlock(&fan_mutex);
+ return rc;
+}
+
+static int fan_set_disable(void)
+{
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_interruptible(&fan_mutex))
+ return -ERESTARTSYS;
+
+ rc = 0;
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_FANS:
+ case TPACPI_FAN_WR_TPEC:
+ if (!acpi_ec_write(fan_status_offset, 0x00))
+ rc = -EIO;
+ else {
+ fan_control_desired_level = 0;
+ tp_features.fan_ctrl_status_undef = 0;
+ }
+ break;
+
+ case TPACPI_FAN_WR_ACPI_SFAN:
+ if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", 0x00))
+ rc = -EIO;
+ else
+ fan_control_desired_level = 0;
+ break;
+
+ default:
+ rc = -ENXIO;
+ }
+
+ mutex_unlock(&fan_mutex);
+ return rc;
+}
+
+static int fan_set_speed(int speed)
+{
+ int rc;
+
+ if (!fan_control_allowed)
+ return -EPERM;
+
+ if (mutex_lock_interruptible(&fan_mutex))
+ return -ERESTARTSYS;
+
+ rc = 0;
+ switch (fan_control_access_mode) {
+ case TPACPI_FAN_WR_ACPI_FANS:
+ if (speed >= 0 && speed <= 65535) {
+ if (!acpi_evalf(fans_handle, NULL, NULL, "vddd",
+ speed, speed, speed))
+ rc = -EIO;
+ } else
+ rc = -EINVAL;
+ break;
+
+ default:
+ rc = -ENXIO;
+ }
+
+ mutex_unlock(&fan_mutex);
+ return rc;
+}
+
+static void fan_watchdog_reset(void)
+{
+ static int fan_watchdog_active;
+
+ if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
+ return;
+
+ if (fan_watchdog_active)
+ cancel_delayed_work(&fan_watchdog_task);
+
+ if (fan_watchdog_maxinterval > 0 &&
+ tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
+ fan_watchdog_active = 1;
+ if (!schedule_delayed_work(&fan_watchdog_task,
+ msecs_to_jiffies(fan_watchdog_maxinterval
+ * 1000))) {
+ printk(TPACPI_ERR
+ "failed to schedule the fan watchdog, "
+ "watchdog will not trigger\n");
+ }
+ } else
+ fan_watchdog_active = 0;
+}
+
+static void fan_watchdog_fire(struct work_struct *ignored)
+{
+ int rc;
+
+ if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
+ return;
+
+ printk(TPACPI_NOTICE "fan watchdog: enabling fan\n");
+ rc = fan_set_enable();
+ if (rc < 0) {
+ printk(TPACPI_ERR "fan watchdog: error %d while enabling fan, "
+ "will try again later...\n", -rc);
+ /* reschedule for later */
+ fan_watchdog_reset();
+ }
+}
+
+/*
* SYSFS fan layout: hwmon compatible (device)
*
* pwm*_enable:
@@ -3868,9 +5223,9 @@ static int __init fan_init(struct ibm_init_struct *iibm)
tp_features.fan_ctrl_status_undef = 0;
fan_control_desired_level = 7;
- IBM_ACPIHANDLE_INIT(fans);
- IBM_ACPIHANDLE_INIT(gfan);
- IBM_ACPIHANDLE_INIT(sfan);
+ TPACPI_ACPIHANDLE_INIT(fans);
+ TPACPI_ACPIHANDLE_INIT(gfan);
+ TPACPI_ACPIHANDLE_INIT(sfan);
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
@@ -3896,16 +5251,16 @@ static int __init fan_init(struct ibm_init_struct *iibm)
case 0x3837: /* TP-78 */
case 0x3637: /* TP-76 */
case 0x3037: /* TP-70 */
- printk(IBM_NOTICE
- "fan_init: initial fan status is "
- "unknown, assuming it is in auto "
- "mode\n");
+ printk(TPACPI_NOTICE
+ "fan_init: initial fan status "
+ "is unknown, assuming it is "
+ "in auto mode\n");
tp_features.fan_ctrl_status_undef = 1;
;;
}
}
} else {
- printk(IBM_ERR
+ printk(TPACPI_ERR
"ThinkPad ACPI EC access misbehaving, "
"fan status and control unavailable\n");
return 1;
@@ -3970,333 +5325,20 @@ static int __init fan_init(struct ibm_init_struct *iibm)
return 1;
}
-/*
- * Call with fan_mutex held
- */
-static void fan_update_desired_level(u8 status)
-{
- if ((status &
- (TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
- if (status > 7)
- fan_control_desired_level = 7;
- else
- fan_control_desired_level = status;
- }
-}
-
-static int fan_get_status(u8 *status)
-{
- u8 s;
-
- /* TODO:
- * Add TPACPI_FAN_RD_ACPI_FANS ? */
-
- switch (fan_status_access_mode) {
- case TPACPI_FAN_RD_ACPI_GFAN:
- /* 570, 600e/x, 770e, 770x */
-
- if (unlikely(!acpi_evalf(gfan_handle, &s, NULL, "d")))
- return -EIO;
-
- if (likely(status))
- *status = s & 0x07;
-
- break;
-
- case TPACPI_FAN_RD_TPEC:
- /* all except 570, 600e/x, 770e, 770x */
- if (unlikely(!acpi_ec_read(fan_status_offset, &s)))
- return -EIO;
-
- if (likely(status))
- *status = s;
-
- break;
-
- default:
- return -ENXIO;
- }
-
- return 0;
-}
-
-static int fan_get_status_safe(u8 *status)
-{
- int rc;
- u8 s;
-
- if (mutex_lock_interruptible(&fan_mutex))
- return -ERESTARTSYS;
- rc = fan_get_status(&s);
- if (!rc)
- fan_update_desired_level(s);
- mutex_unlock(&fan_mutex);
-
- if (status)
- *status = s;
-
- return rc;
-}
-
static void fan_exit(void)
{
- vdbg_printk(TPACPI_DBG_EXIT, "cancelling any pending fan watchdog tasks\n");
+ vdbg_printk(TPACPI_DBG_EXIT,
+ "cancelling any pending fan watchdog tasks\n");
/* FIXME: can we really do this unconditionally? */
sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, &fan_attr_group);
- driver_remove_file(&tpacpi_hwmon_pdriver.driver, &driver_attr_fan_watchdog);
+ driver_remove_file(&tpacpi_hwmon_pdriver.driver,
+ &driver_attr_fan_watchdog);
cancel_delayed_work(&fan_watchdog_task);
flush_scheduled_work();
}
-static int fan_get_speed(unsigned int *speed)
-{
- u8 hi, lo;
-
- switch (fan_status_access_mode) {
- case TPACPI_FAN_RD_TPEC:
- /* all except 570, 600e/x, 770e, 770x */
- if (unlikely(!acpi_ec_read(fan_rpm_offset, &lo) ||
- !acpi_ec_read(fan_rpm_offset + 1, &hi)))
- return -EIO;
-
- if (likely(speed))
- *speed = (hi << 8) | lo;
-
- break;
-
- default:
- return -ENXIO;
- }
-
- return 0;
-}
-
-static void fan_watchdog_fire(struct work_struct *ignored)
-{
- int rc;
-
- if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
- return;
-
- printk(IBM_NOTICE "fan watchdog: enabling fan\n");
- rc = fan_set_enable();
- if (rc < 0) {
- printk(IBM_ERR "fan watchdog: error %d while enabling fan, "
- "will try again later...\n", -rc);
- /* reschedule for later */
- fan_watchdog_reset();
- }
-}
-
-static void fan_watchdog_reset(void)
-{
- static int fan_watchdog_active;
-
- if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
- return;
-
- if (fan_watchdog_active)
- cancel_delayed_work(&fan_watchdog_task);
-
- if (fan_watchdog_maxinterval > 0 &&
- tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
- fan_watchdog_active = 1;
- if (!schedule_delayed_work(&fan_watchdog_task,
- msecs_to_jiffies(fan_watchdog_maxinterval
- * 1000))) {
- printk(IBM_ERR "failed to schedule the fan watchdog, "
- "watchdog will not trigger\n");
- }
- } else
- fan_watchdog_active = 0;
-}
-
-static int fan_set_level(int level)
-{
- if (!fan_control_allowed)
- return -EPERM;
-
- switch (fan_control_access_mode) {
- case TPACPI_FAN_WR_ACPI_SFAN:
- if (level >= 0 && level <= 7) {
- if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", level))
- return -EIO;
- } else
- return -EINVAL;
- break;
-
- case TPACPI_FAN_WR_ACPI_FANS:
- case TPACPI_FAN_WR_TPEC:
- if ((level != TP_EC_FAN_AUTO) &&
- (level != TP_EC_FAN_FULLSPEED) &&
- ((level < 0) || (level > 7)))
- return -EINVAL;
-
- /* safety net should the EC not support AUTO
- * or FULLSPEED mode bits and just ignore them */
- if (level & TP_EC_FAN_FULLSPEED)
- level |= 7; /* safety min speed 7 */
- else if (level & TP_EC_FAN_FULLSPEED)
- level |= 4; /* safety min speed 4 */
-
- if (!acpi_ec_write(fan_status_offset, level))
- return -EIO;
- else
- tp_features.fan_ctrl_status_undef = 0;
- break;
-
- default:
- return -ENXIO;
- }
- return 0;
-}
-
-static int fan_set_level_safe(int level)
-{
- int rc;
-
- if (!fan_control_allowed)
- return -EPERM;
-
- if (mutex_lock_interruptible(&fan_mutex))
- return -ERESTARTSYS;
-
- if (level == TPACPI_FAN_LAST_LEVEL)
- level = fan_control_desired_level;
-
- rc = fan_set_level(level);
- if (!rc)
- fan_update_desired_level(level);
-
- mutex_unlock(&fan_mutex);
- return rc;
-}
-
-static int fan_set_enable(void)
-{
- u8 s;
- int rc;
-
- if (!fan_control_allowed)
- return -EPERM;
-
- if (mutex_lock_interruptible(&fan_mutex))
- return -ERESTARTSYS;
-
- switch (fan_control_access_mode) {
- case TPACPI_FAN_WR_ACPI_FANS:
- case TPACPI_FAN_WR_TPEC:
- rc = fan_get_status(&s);
- if (rc < 0)
- break;
-
- /* Don't go out of emergency fan mode */
- if (s != 7) {
- s &= 0x07;
- s |= TP_EC_FAN_AUTO | 4; /* min fan speed 4 */
- }
-
- if (!acpi_ec_write(fan_status_offset, s))
- rc = -EIO;
- else {
- tp_features.fan_ctrl_status_undef = 0;
- rc = 0;
- }
- break;
-
- case TPACPI_FAN_WR_ACPI_SFAN:
- rc = fan_get_status(&s);
- if (rc < 0)
- break;
-
- s &= 0x07;
-
- /* Set fan to at least level 4 */
- s |= 4;
-
- if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", s))
- rc= -EIO;
- else
- rc = 0;
- break;
-
- default:
- rc = -ENXIO;
- }
-
- mutex_unlock(&fan_mutex);
- return rc;
-}
-
-static int fan_set_disable(void)
-{
- int rc;
-
- if (!fan_control_allowed)
- return -EPERM;
-
- if (mutex_lock_interruptible(&fan_mutex))
- return -ERESTARTSYS;
-
- rc = 0;
- switch (fan_control_access_mode) {
- case TPACPI_FAN_WR_ACPI_FANS:
- case TPACPI_FAN_WR_TPEC:
- if (!acpi_ec_write(fan_status_offset, 0x00))
- rc = -EIO;
- else {
- fan_control_desired_level = 0;
- tp_features.fan_ctrl_status_undef = 0;
- }
- break;
-
- case TPACPI_FAN_WR_ACPI_SFAN:
- if (!acpi_evalf(sfan_handle, NULL, NULL, "vd", 0x00))
- rc = -EIO;
- else
- fan_control_desired_level = 0;
- break;
-
- default:
- rc = -ENXIO;
- }
-
-
- mutex_unlock(&fan_mutex);
- return rc;
-}
-
-static int fan_set_speed(int speed)
-{
- int rc;
-
- if (!fan_control_allowed)
- return -EPERM;
-
- if (mutex_lock_interruptible(&fan_mutex))
- return -ERESTARTSYS;
-
- rc = 0;
- switch (fan_control_access_mode) {
- case TPACPI_FAN_WR_ACPI_FANS:
- if (speed >= 0 && speed <= 65535) {
- if (!acpi_evalf(fans_handle, NULL, NULL, "vddd",
- speed, speed, speed))
- rc = -EIO;
- } else
- rc = -EINVAL;
- break;
-
- default:
- rc = -ENXIO;
- }
-
- mutex_unlock(&fan_mutex);
- return rc;
-}
-
static int fan_read(char *p)
{
int len = 0;
@@ -4307,7 +5349,8 @@ static int fan_read(char *p)
switch (fan_status_access_mode) {
case TPACPI_FAN_RD_ACPI_GFAN:
/* 570, 600e/x, 770e, 770x */
- if ((rc = fan_get_status_safe(&status)) < 0)
+ rc = fan_get_status_safe(&status);
+ if (rc < 0)
return rc;
len += sprintf(p + len, "status:\t\t%s\n"
@@ -4317,7 +5360,8 @@ static int fan_read(char *p)
case TPACPI_FAN_RD_TPEC:
/* all except 570, 600e/x, 770e, 770x */
- if ((rc = fan_get_status_safe(&status)) < 0)
+ rc = fan_get_status_safe(&status);
+ if (rc < 0)
return rc;
if (unlikely(tp_features.fan_ctrl_status_undef)) {
@@ -4332,7 +5376,8 @@ static int fan_read(char *p)
len += sprintf(p + len, "status:\t\t%s\n",
(status != 0) ? "enabled" : "disabled");
- if ((rc = fan_get_speed(&speed)) < 0)
+ rc = fan_get_speed(&speed);
+ if (rc < 0)
return rc;
len += sprintf(p + len, "speed:\t\t%d\n", speed);
@@ -4368,8 +5413,8 @@ static int fan_read(char *p)
if (fan_control_commands & TPACPI_FAN_CMD_ENABLE)
len += sprintf(p + len, "commands:\tenable, disable\n"
- "commands:\twatchdog <timeout> (<timeout> is 0 (off), "
- "1-120 (seconds))\n");
+ "commands:\twatchdog <timeout> (<timeout> "
+ "is 0 (off), 1-120 (seconds))\n");
if (fan_control_commands & TPACPI_FAN_CMD_SPEED)
len += sprintf(p + len, "commands:\tspeed <speed>"
@@ -4385,13 +5430,14 @@ static int fan_write_cmd_level(const char *cmd, int *rc)
if (strlencmp(cmd, "level auto") == 0)
level = TP_EC_FAN_AUTO;
else if ((strlencmp(cmd, "level disengaged") == 0) |
- (strlencmp(cmd, "level full-speed") == 0))
+ (strlencmp(cmd, "level full-speed") == 0))
level = TP_EC_FAN_FULLSPEED;
else if (sscanf(cmd, "level %d", &level) != 1)
return 0;
- if ((*rc = fan_set_level_safe(level)) == -ENXIO)
- printk(IBM_ERR "level command accepted for unsupported "
+ *rc = fan_set_level_safe(level);
+ if (*rc == -ENXIO)
+ printk(TPACPI_ERR "level command accepted for unsupported "
"access mode %d", fan_control_access_mode);
return 1;
@@ -4402,8 +5448,9 @@ static int fan_write_cmd_enable(const char *cmd, int *rc)
if (strlencmp(cmd, "enable") != 0)
return 0;
- if ((*rc = fan_set_enable()) == -ENXIO)
- printk(IBM_ERR "enable command accepted for unsupported "
+ *rc = fan_set_enable();
+ if (*rc == -ENXIO)
+ printk(TPACPI_ERR "enable command accepted for unsupported "
"access mode %d", fan_control_access_mode);
return 1;
@@ -4414,8 +5461,9 @@ static int fan_write_cmd_disable(const char *cmd, int *rc)
if (strlencmp(cmd, "disable") != 0)
return 0;
- if ((*rc = fan_set_disable()) == -ENXIO)
- printk(IBM_ERR "disable command accepted for unsupported "
+ *rc = fan_set_disable();
+ if (*rc == -ENXIO)
+ printk(TPACPI_ERR "disable command accepted for unsupported "
"access mode %d", fan_control_access_mode);
return 1;
@@ -4431,8 +5479,9 @@ static int fan_write_cmd_speed(const char *cmd, int *rc)
if (sscanf(cmd, "speed %d", &speed) != 1)
return 0;
- if ((*rc = fan_set_speed(speed)) == -ENXIO)
- printk(IBM_ERR "speed command accepted for unsupported "
+ *rc = fan_set_speed(speed);
+ if (*rc == -ENXIO)
+ printk(TPACPI_ERR "speed command accepted for unsupported "
"access mode %d", fan_control_access_mode);
return 1;
@@ -4496,7 +5545,7 @@ static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s\n", IBM_NAME);
+ return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME);
}
static struct device_attribute dev_attr_thinkpad_acpi_pdev_name =
@@ -4507,14 +5556,12 @@ static struct device_attribute dev_attr_thinkpad_acpi_pdev_name =
/* /proc support */
static struct proc_dir_entry *proc_dir;
-/* Subdriver registry */
-static LIST_HEAD(tpacpi_all_drivers);
-
-
/*
 * Module and infrastructure probe, init and exit handling
*/
+static int force_load;
+
#ifdef CONFIG_THINKPAD_ACPI_DEBUG
static const char * __init str_supported(int is_supported)
{
@@ -4524,6 +5571,48 @@ static const char * __init str_supported(int is_supported)
}
#endif /* CONFIG_THINKPAD_ACPI_DEBUG */
+static void ibm_exit(struct ibm_struct *ibm)
+{
+ dbg_printk(TPACPI_DBG_EXIT, "removing %s\n", ibm->name);
+
+ list_del_init(&ibm->all_drivers);
+
+ if (ibm->flags.acpi_notify_installed) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: acpi_remove_notify_handler\n", ibm->name);
+ BUG_ON(!ibm->acpi);
+ acpi_remove_notify_handler(*ibm->acpi->handle,
+ ibm->acpi->type,
+ dispatch_acpi_notify);
+ ibm->flags.acpi_notify_installed = 0;
+ }
+
+ if (ibm->flags.proc_created) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: remove_proc_entry\n", ibm->name);
+ remove_proc_entry(ibm->name, proc_dir);
+ ibm->flags.proc_created = 0;
+ }
+
+ if (ibm->flags.acpi_driver_registered) {
+ dbg_printk(TPACPI_DBG_EXIT,
+ "%s: acpi_bus_unregister_driver\n", ibm->name);
+ BUG_ON(!ibm->acpi);
+ acpi_bus_unregister_driver(ibm->acpi->driver);
+ kfree(ibm->acpi->driver);
+ ibm->acpi->driver = NULL;
+ ibm->flags.acpi_driver_registered = 0;
+ }
+
+ if (ibm->flags.init_called && ibm->exit) {
+ ibm->exit();
+ ibm->flags.init_called = 0;
+ }
+
+ dbg_printk(TPACPI_DBG_INIT, "finished removing %s\n", ibm->name);
+}
+
static int __init ibm_init(struct ibm_init_struct *iibm)
{
int ret;
@@ -4560,7 +5649,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
if (ibm->acpi->notify) {
ret = setup_acpi_notify(ibm);
if (ret == -ENODEV) {
- printk(IBM_NOTICE "disabling subdriver %s\n",
+ printk(TPACPI_NOTICE "disabling subdriver %s\n",
ibm->name);
ret = 0;
goto err_out;
@@ -4578,7 +5667,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
S_IFREG | S_IRUGO | S_IWUSR,
proc_dir);
if (!entry) {
- printk(IBM_ERR "unable to create proc entry %s\n",
+ printk(TPACPI_ERR "unable to create proc entry %s\n",
ibm->name);
ret = -ENODEV;
goto err_out;
@@ -4604,48 +5693,6 @@ err_out:
return (ret < 0)? ret : 0;
}
-static void ibm_exit(struct ibm_struct *ibm)
-{
- dbg_printk(TPACPI_DBG_EXIT, "removing %s\n", ibm->name);
-
- list_del_init(&ibm->all_drivers);
-
- if (ibm->flags.acpi_notify_installed) {
- dbg_printk(TPACPI_DBG_EXIT,
- "%s: acpi_remove_notify_handler\n", ibm->name);
- BUG_ON(!ibm->acpi);
- acpi_remove_notify_handler(*ibm->acpi->handle,
- ibm->acpi->type,
- dispatch_acpi_notify);
- ibm->flags.acpi_notify_installed = 0;
- ibm->flags.acpi_notify_installed = 0;
- }
-
- if (ibm->flags.proc_created) {
- dbg_printk(TPACPI_DBG_EXIT,
- "%s: remove_proc_entry\n", ibm->name);
- remove_proc_entry(ibm->name, proc_dir);
- ibm->flags.proc_created = 0;
- }
-
- if (ibm->flags.acpi_driver_registered) {
- dbg_printk(TPACPI_DBG_EXIT,
- "%s: acpi_bus_unregister_driver\n", ibm->name);
- BUG_ON(!ibm->acpi);
- acpi_bus_unregister_driver(ibm->acpi->driver);
- kfree(ibm->acpi->driver);
- ibm->acpi->driver = NULL;
- ibm->flags.acpi_driver_registered = 0;
- }
-
- if (ibm->flags.init_called && ibm->exit) {
- ibm->exit();
- ibm->flags.init_called = 0;
- }
-
- dbg_printk(TPACPI_DBG_INIT, "finished removing %s\n", ibm->name);
-}
-
/* Probing */
static void __init get_thinkpad_model_data(struct thinkpad_id_data *tp)
@@ -4715,10 +5762,10 @@ static int __init probe_for_thinkpad(void)
is_thinkpad = (thinkpad_id.model_str != NULL);
/* ec is required because many other handles are relative to it */
- IBM_ACPIHANDLE_INIT(ec);
+ TPACPI_ACPIHANDLE_INIT(ec);
if (!ec_handle) {
if (is_thinkpad)
- printk(IBM_ERR
+ printk(TPACPI_ERR
"Not yet supported ThinkPad detected!\n");
return -ENODEV;
}
@@ -4839,47 +5886,110 @@ static int __init set_ibm_param(const char *val, struct kernel_param *kp)
return -EINVAL;
}
-static int experimental;
module_param(experimental, int, 0);
+MODULE_PARM_DESC(experimental,
+ "Enables experimental features when non-zero");
-static u32 dbg_level;
module_param_named(debug, dbg_level, uint, 0);
+MODULE_PARM_DESC(debug, "Sets debug level bit-mask");
-static int force_load;
module_param(force_load, bool, 0);
+MODULE_PARM_DESC(force_load,
+ "Attempts to load the driver even on a "
+ "mis-identified ThinkPad when true");
-static int fan_control_allowed;
module_param_named(fan_control, fan_control_allowed, bool, 0);
+MODULE_PARM_DESC(fan_control,
+ "Enables setting fan parameters features when true");
-static int brightness_mode;
module_param_named(brightness_mode, brightness_mode, int, 0);
+MODULE_PARM_DESC(brightness_mode,
+ "Selects brightness control strategy: "
+ "0=auto, 1=EC, 2=CMOS, 3=both");
-static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */
module_param(brightness_enable, uint, 0);
+MODULE_PARM_DESC(brightness_enable,
+ "Enables backlight control when 1, disables when 0");
-static unsigned int hotkey_report_mode;
module_param(hotkey_report_mode, uint, 0);
-
-#define IBM_PARAM(feature) \
- module_param_call(feature, set_ibm_param, NULL, NULL, 0)
-
-IBM_PARAM(hotkey);
-IBM_PARAM(bluetooth);
-IBM_PARAM(video);
-IBM_PARAM(light);
+MODULE_PARM_DESC(hotkey_report_mode,
+ "used for backwards compatibility with userspace, "
+ "see documentation");
+
+#define TPACPI_PARAM(feature) \
+ module_param_call(feature, set_ibm_param, NULL, NULL, 0); \
+ MODULE_PARM_DESC(feature, "Simulates thinkpad-aci procfs command " \
+ "at module load, see documentation")
+
+TPACPI_PARAM(hotkey);
+TPACPI_PARAM(bluetooth);
+TPACPI_PARAM(video);
+TPACPI_PARAM(light);
#ifdef CONFIG_THINKPAD_ACPI_DOCK
-IBM_PARAM(dock);
+TPACPI_PARAM(dock);
#endif
#ifdef CONFIG_THINKPAD_ACPI_BAY
-IBM_PARAM(bay);
+TPACPI_PARAM(bay);
#endif /* CONFIG_THINKPAD_ACPI_BAY */
-IBM_PARAM(cmos);
-IBM_PARAM(led);
-IBM_PARAM(beep);
-IBM_PARAM(ecdump);
-IBM_PARAM(brightness);
-IBM_PARAM(volume);
-IBM_PARAM(fan);
+TPACPI_PARAM(cmos);
+TPACPI_PARAM(led);
+TPACPI_PARAM(beep);
+TPACPI_PARAM(ecdump);
+TPACPI_PARAM(brightness);
+TPACPI_PARAM(volume);
+TPACPI_PARAM(fan);
+
+static void thinkpad_acpi_module_exit(void)
+{
+ struct ibm_struct *ibm, *itmp;
+
+ tpacpi_lifecycle = TPACPI_LIFE_EXITING;
+
+ list_for_each_entry_safe_reverse(ibm, itmp,
+ &tpacpi_all_drivers,
+ all_drivers) {
+ ibm_exit(ibm);
+ }
+
+ dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
+
+ if (tpacpi_inputdev) {
+ if (tp_features.input_device_registered)
+ input_unregister_device(tpacpi_inputdev);
+ else
+ input_free_device(tpacpi_inputdev);
+ }
+
+ if (tpacpi_hwmon)
+ hwmon_device_unregister(tpacpi_hwmon);
+
+ if (tp_features.sensors_pdev_attrs_registered)
+ device_remove_file(&tpacpi_sensors_pdev->dev,
+ &dev_attr_thinkpad_acpi_pdev_name);
+ if (tpacpi_sensors_pdev)
+ platform_device_unregister(tpacpi_sensors_pdev);
+ if (tpacpi_pdev)
+ platform_device_unregister(tpacpi_pdev);
+
+ if (tp_features.sensors_pdrv_attrs_registered)
+ tpacpi_remove_driver_attributes(&tpacpi_hwmon_pdriver.driver);
+ if (tp_features.platform_drv_attrs_registered)
+ tpacpi_remove_driver_attributes(&tpacpi_pdriver.driver);
+
+ if (tp_features.sensors_pdrv_registered)
+ platform_driver_unregister(&tpacpi_hwmon_pdriver);
+
+ if (tp_features.platform_drv_registered)
+ platform_driver_unregister(&tpacpi_pdriver);
+
+ if (proc_dir)
+ remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir);
+
+ kfree(thinkpad_id.bios_version_str);
+ kfree(thinkpad_id.ec_version_str);
+ kfree(thinkpad_id.model_str);
+}
+
static int __init thinkpad_acpi_module_init(void)
{
@@ -4902,12 +6012,13 @@ static int __init thinkpad_acpi_module_init(void)
/* Driver initialization */
- IBM_ACPIHANDLE_INIT(ecrd);
- IBM_ACPIHANDLE_INIT(ecwr);
+ TPACPI_ACPIHANDLE_INIT(ecrd);
+ TPACPI_ACPIHANDLE_INIT(ecwr);
- proc_dir = proc_mkdir(IBM_PROC_DIR, acpi_root_dir);
+ proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir);
if (!proc_dir) {
- printk(IBM_ERR "unable to create proc dir " IBM_PROC_DIR);
+ printk(TPACPI_ERR
+ "unable to create proc dir " TPACPI_PROC_DIR);
thinkpad_acpi_module_exit();
return -ENODEV;
}
@@ -4915,7 +6026,8 @@ static int __init thinkpad_acpi_module_init(void)
ret = platform_driver_register(&tpacpi_pdriver);
if (ret) {
- printk(IBM_ERR "unable to register main platform driver\n");
+ printk(TPACPI_ERR
+ "unable to register main platform driver\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -4923,7 +6035,8 @@ static int __init thinkpad_acpi_module_init(void)
ret = platform_driver_register(&tpacpi_hwmon_pdriver);
if (ret) {
- printk(IBM_ERR "unable to register hwmon platform driver\n");
+ printk(TPACPI_ERR
+ "unable to register hwmon platform driver\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -4932,10 +6045,12 @@ static int __init thinkpad_acpi_module_init(void)
ret = tpacpi_create_driver_attributes(&tpacpi_pdriver.driver);
if (!ret) {
tp_features.platform_drv_attrs_registered = 1;
- ret = tpacpi_create_driver_attributes(&tpacpi_hwmon_pdriver.driver);
+ ret = tpacpi_create_driver_attributes(
+ &tpacpi_hwmon_pdriver.driver);
}
if (ret) {
- printk(IBM_ERR "unable to create sysfs driver attributes\n");
+ printk(TPACPI_ERR
+ "unable to create sysfs driver attributes\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -4943,30 +6058,31 @@ static int __init thinkpad_acpi_module_init(void)
/* Device initialization */
- tpacpi_pdev = platform_device_register_simple(IBM_DRVR_NAME, -1,
+ tpacpi_pdev = platform_device_register_simple(TPACPI_DRVR_NAME, -1,
NULL, 0);
if (IS_ERR(tpacpi_pdev)) {
ret = PTR_ERR(tpacpi_pdev);
tpacpi_pdev = NULL;
- printk(IBM_ERR "unable to register platform device\n");
+ printk(TPACPI_ERR "unable to register platform device\n");
thinkpad_acpi_module_exit();
return ret;
}
tpacpi_sensors_pdev = platform_device_register_simple(
- IBM_HWMON_DRVR_NAME,
- -1, NULL, 0);
+ TPACPI_HWMON_DRVR_NAME,
+ -1, NULL, 0);
if (IS_ERR(tpacpi_sensors_pdev)) {
ret = PTR_ERR(tpacpi_sensors_pdev);
tpacpi_sensors_pdev = NULL;
- printk(IBM_ERR "unable to register hwmon platform device\n");
+ printk(TPACPI_ERR
+ "unable to register hwmon platform device\n");
thinkpad_acpi_module_exit();
return ret;
}
ret = device_create_file(&tpacpi_sensors_pdev->dev,
&dev_attr_thinkpad_acpi_pdev_name);
if (ret) {
- printk(IBM_ERR
- "unable to create sysfs hwmon device attributes\n");
+ printk(TPACPI_ERR
+ "unable to create sysfs hwmon device attributes\n");
thinkpad_acpi_module_exit();
return ret;
}
@@ -4975,20 +6091,20 @@ static int __init thinkpad_acpi_module_init(void)
if (IS_ERR(tpacpi_hwmon)) {
ret = PTR_ERR(tpacpi_hwmon);
tpacpi_hwmon = NULL;
- printk(IBM_ERR "unable to register hwmon device\n");
+ printk(TPACPI_ERR "unable to register hwmon device\n");
thinkpad_acpi_module_exit();
return ret;
}
mutex_init(&tpacpi_inputdev_send_mutex);
tpacpi_inputdev = input_allocate_device();
if (!tpacpi_inputdev) {
- printk(IBM_ERR "unable to allocate input device\n");
+ printk(TPACPI_ERR "unable to allocate input device\n");
thinkpad_acpi_module_exit();
return -ENOMEM;
} else {
/* Prepare input device, but don't register */
tpacpi_inputdev->name = "ThinkPad Extra Buttons";
- tpacpi_inputdev->phys = IBM_DRVR_NAME "/input0";
+ tpacpi_inputdev->phys = TPACPI_DRVR_NAME "/input0";
tpacpi_inputdev->id.bustype = BUS_HOST;
tpacpi_inputdev->id.vendor = (thinkpad_id.vendor) ?
thinkpad_id.vendor :
@@ -5007,7 +6123,7 @@ static int __init thinkpad_acpi_module_init(void)
}
ret = input_register_device(tpacpi_inputdev);
if (ret < 0) {
- printk(IBM_ERR "unable to register input device\n");
+ printk(TPACPI_ERR "unable to register input device\n");
thinkpad_acpi_module_exit();
return ret;
} else {
@@ -5018,56 +6134,36 @@ static int __init thinkpad_acpi_module_init(void)
return 0;
}
-static void thinkpad_acpi_module_exit(void)
-{
- struct ibm_struct *ibm, *itmp;
-
- tpacpi_lifecycle = TPACPI_LIFE_EXITING;
-
- list_for_each_entry_safe_reverse(ibm, itmp,
- &tpacpi_all_drivers,
- all_drivers) {
- ibm_exit(ibm);
- }
-
- dbg_printk(TPACPI_DBG_INIT, "finished subdriver exit path...\n");
-
- if (tpacpi_inputdev) {
- if (tp_features.input_device_registered)
- input_unregister_device(tpacpi_inputdev);
- else
- input_free_device(tpacpi_inputdev);
- }
-
- if (tpacpi_hwmon)
- hwmon_device_unregister(tpacpi_hwmon);
-
- if (tp_features.sensors_pdev_attrs_registered)
- device_remove_file(&tpacpi_sensors_pdev->dev,
- &dev_attr_thinkpad_acpi_pdev_name);
- if (tpacpi_sensors_pdev)
- platform_device_unregister(tpacpi_sensors_pdev);
- if (tpacpi_pdev)
- platform_device_unregister(tpacpi_pdev);
-
- if (tp_features.sensors_pdrv_attrs_registered)
- tpacpi_remove_driver_attributes(&tpacpi_hwmon_pdriver.driver);
- if (tp_features.platform_drv_attrs_registered)
- tpacpi_remove_driver_attributes(&tpacpi_pdriver.driver);
+/* Please remove this in year 2009 */
+MODULE_ALIAS("ibm_acpi");
- if (tp_features.sensors_pdrv_registered)
- platform_driver_unregister(&tpacpi_hwmon_pdriver);
+/*
+ * DMI matching for module autoloading
+ *
+ * See http://thinkwiki.org/wiki/List_of_DMI_IDs
+ * See http://thinkwiki.org/wiki/BIOS_Upgrade_Downloads
+ *
+ * Only models listed in thinkwiki will be supported, so add yours
+ * if it is not there yet.
+ */
+#define IBM_BIOS_MODULE_ALIAS(__type) \
+ MODULE_ALIAS("dmi:bvnIBM:bvr" __type "ET??WW")
- if (tp_features.platform_drv_registered)
- platform_driver_unregister(&tpacpi_pdriver);
+/* Non-ancient thinkpads */
+MODULE_ALIAS("dmi:bvnIBM:*:svnIBM:*:pvrThinkPad*:rvnIBM:*");
+MODULE_ALIAS("dmi:bvnLENOVO:*:svnLENOVO:*:pvrThinkPad*:rvnLENOVO:*");
- if (proc_dir)
- remove_proc_entry(IBM_PROC_DIR, acpi_root_dir);
+/* Ancient thinkpad BIOSes have to be identified by
+ * BIOS type or model number, and there are far fewer
+ * BIOS types than model numbers... */
+IBM_BIOS_MODULE_ALIAS("I[B,D,H,I,M,N,O,T,W,V,Y,Z]");
+IBM_BIOS_MODULE_ALIAS("1[0,3,6,8,A-G,I,K,M-P,S,T]");
+IBM_BIOS_MODULE_ALIAS("K[U,X-Z]");
- kfree(thinkpad_id.bios_version_str);
- kfree(thinkpad_id.ec_version_str);
- kfree(thinkpad_id.model_str);
-}
+MODULE_AUTHOR("Borislav Deianov, Henrique de Moraes Holschuh");
+MODULE_DESCRIPTION(TPACPI_DESC);
+MODULE_VERSION(TPACPI_VERSION);
+MODULE_LICENSE("GPL");
module_init(thinkpad_acpi_module_init);
module_exit(thinkpad_acpi_module_exit);
diff --git a/drivers/misc/thinkpad_acpi.h b/drivers/misc/thinkpad_acpi.h
deleted file mode 100644
index 8fba2bbe345e..000000000000
--- a/drivers/misc/thinkpad_acpi.h
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * thinkpad_acpi.h - ThinkPad ACPI Extras
- *
- *
- * Copyright (C) 2004-2005 Borislav Deianov <borislav@users.sf.net>
- * Copyright (C) 2006-2007 Henrique de Moraes Holschuh <hmh@hmh.eng.br>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef __THINKPAD_ACPI_H__
-#define __THINKPAD_ACPI_H__
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-
-#include <linux/nvram.h>
-#include <linux/proc_fs.h>
-#include <linux/sysfs.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/platform_device.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/input.h>
-#include <asm/uaccess.h>
-
-#include <linux/dmi.h>
-#include <linux/jiffies.h>
-#include <linux/workqueue.h>
-
-#include <acpi/acpi_drivers.h>
-#include <acpi/acnamesp.h>
-
-#include <linux/pci_ids.h>
-
-/****************************************************************************
- * Main driver
- */
-
-#define IBM_NAME "thinkpad"
-#define IBM_DESC "ThinkPad ACPI Extras"
-#define IBM_FILE IBM_NAME "_acpi"
-#define IBM_URL "http://ibm-acpi.sf.net/"
-#define IBM_MAIL "ibm-acpi-devel@lists.sourceforge.net"
-
-#define IBM_PROC_DIR "ibm"
-#define IBM_ACPI_EVENT_PREFIX "ibm"
-#define IBM_DRVR_NAME IBM_FILE
-#define IBM_HWMON_DRVR_NAME IBM_NAME "_hwmon"
-
-#define IBM_LOG IBM_FILE ": "
-#define IBM_ERR KERN_ERR IBM_LOG
-#define IBM_NOTICE KERN_NOTICE IBM_LOG
-#define IBM_INFO KERN_INFO IBM_LOG
-#define IBM_DEBUG KERN_DEBUG IBM_LOG
-
-#define IBM_MAX_ACPI_ARGS 3
-
-/* ThinkPad CMOS commands */
-#define TP_CMOS_VOLUME_DOWN 0
-#define TP_CMOS_VOLUME_UP 1
-#define TP_CMOS_VOLUME_MUTE 2
-#define TP_CMOS_BRIGHTNESS_UP 4
-#define TP_CMOS_BRIGHTNESS_DOWN 5
-
-/* ThinkPad CMOS NVRAM constants */
-#define TP_NVRAM_ADDR_BRIGHTNESS 0x5e
-#define TP_NVRAM_MASK_LEVEL_BRIGHTNESS 0x0f
-#define TP_NVRAM_POS_LEVEL_BRIGHTNESS 0
-
-#define onoff(status,bit) ((status) & (1 << (bit)) ? "on" : "off")
-#define enabled(status,bit) ((status) & (1 << (bit)) ? "enabled" : "disabled")
-#define strlencmp(a,b) (strncmp((a), (b), strlen(b)))
-
-/* Debugging */
-#define TPACPI_DBG_ALL 0xffff
-#define TPACPI_DBG_ALL 0xffff
-#define TPACPI_DBG_INIT 0x0001
-#define TPACPI_DBG_EXIT 0x0002
-#define dbg_printk(a_dbg_level, format, arg...) \
- do { if (dbg_level & a_dbg_level) \
- printk(IBM_DEBUG "%s: " format, __func__ , ## arg); } while (0)
-#ifdef CONFIG_THINKPAD_ACPI_DEBUG
-#define vdbg_printk(a_dbg_level, format, arg...) \
- dbg_printk(a_dbg_level, format, ## arg)
-static const char *str_supported(int is_supported);
-#else
-#define vdbg_printk(a_dbg_level, format, arg...)
-#endif
-
-/* Input IDs */
-#define TPACPI_HKEY_INPUT_VENDOR PCI_VENDOR_ID_IBM
-#define TPACPI_HKEY_INPUT_PRODUCT 0x5054 /* "TP" */
-#define TPACPI_HKEY_INPUT_VERSION 0x4101
-
-/* ACPI HIDs */
-#define IBM_HKEY_HID "IBM0068"
-
-/* ACPI helpers */
-static int __must_check acpi_evalf(acpi_handle handle,
- void *res, char *method, char *fmt, ...);
-static int __must_check acpi_ec_read(int i, u8 * p);
-static int __must_check acpi_ec_write(int i, u8 v);
-static int __must_check _sta(acpi_handle handle);
-
-/* ACPI handles */
-static acpi_handle root_handle; /* root namespace */
-static acpi_handle ec_handle; /* EC */
-static acpi_handle ecrd_handle, ecwr_handle; /* 570 EC access */
-static acpi_handle cmos_handle, hkey_handle; /* basic thinkpad handles */
-
-static void drv_acpi_handle_init(char *name,
- acpi_handle *handle, acpi_handle parent,
- char **paths, int num_paths, char **path);
-#define IBM_ACPIHANDLE_INIT(object) \
- drv_acpi_handle_init(#object, &object##_handle, *object##_parent, \
- object##_paths, ARRAY_SIZE(object##_paths), &object##_path)
-
-/* ThinkPad ACPI helpers */
-static int issue_thinkpad_cmos_command(int cmos_cmd);
-
-/* procfs support */
-static struct proc_dir_entry *proc_dir;
-
-/* procfs helpers */
-static int dispatch_procfs_read(char *page, char **start, off_t off,
- int count, int *eof, void *data);
-static int dispatch_procfs_write(struct file *file,
- const char __user * userbuf,
- unsigned long count, void *data);
-static char *next_cmd(char **cmds);
-
-/* sysfs support */
-struct attribute_set {
- unsigned int members, max_members;
- struct attribute_group group;
-};
-
-static struct attribute_set *create_attr_set(unsigned int max_members,
- const char* name);
-#define destroy_attr_set(_set) \
- kfree(_set);
-static int add_to_attr_set(struct attribute_set* s, struct attribute *attr);
-static int add_many_to_attr_set(struct attribute_set* s,
- struct attribute **attr,
- unsigned int count);
-#define register_attr_set_with_sysfs(_attr_set, _kobj) \
- sysfs_create_group(_kobj, &_attr_set->group)
-static void delete_attr_set(struct attribute_set* s, struct kobject *kobj);
-
-static int parse_strtoul(const char *buf, unsigned long max,
- unsigned long *value);
-
-/* Device model */
-static struct platform_device *tpacpi_pdev;
-static struct platform_device *tpacpi_sensors_pdev;
-static struct device *tpacpi_hwmon;
-static struct platform_driver tpacpi_pdriver;
-static struct input_dev *tpacpi_inputdev;
-static int tpacpi_create_driver_attributes(struct device_driver *drv);
-static void tpacpi_remove_driver_attributes(struct device_driver *drv);
-
-/* Module */
-static int experimental;
-static u32 dbg_level;
-static int force_load;
-static unsigned int hotkey_report_mode;
-
-static int thinkpad_acpi_module_init(void);
-static void thinkpad_acpi_module_exit(void);
-
-
-/****************************************************************************
- * Subdrivers
- */
-
-struct ibm_struct;
-
-struct tp_acpi_drv_struct {
- const struct acpi_device_id *hid;
- struct acpi_driver *driver;
-
- void (*notify) (struct ibm_struct *, u32);
- acpi_handle *handle;
- u32 type;
- struct acpi_device *device;
-};
-
-struct ibm_struct {
- char *name;
-
- int (*read) (char *);
- int (*write) (char *);
- void (*exit) (void);
- void (*resume) (void);
-
- struct list_head all_drivers;
-
- struct tp_acpi_drv_struct *acpi;
-
- struct {
- u8 acpi_driver_registered:1;
- u8 acpi_notify_installed:1;
- u8 proc_created:1;
- u8 init_called:1;
- u8 experimental:1;
- } flags;
-};
-
-struct ibm_init_struct {
- char param[32];
-
- int (*init) (struct ibm_init_struct *);
- struct ibm_struct *data;
-};
-
-static struct {
-#ifdef CONFIG_THINKPAD_ACPI_BAY
- u32 bay_status:1;
- u32 bay_eject:1;
- u32 bay_status2:1;
- u32 bay_eject2:1;
-#endif
- u32 bluetooth:1;
- u32 hotkey:1;
- u32 hotkey_mask:1;
- u32 hotkey_wlsw:1;
- u32 light:1;
- u32 light_status:1;
- u32 bright_16levels:1;
- u32 wan:1;
- u32 fan_ctrl_status_undef:1;
- u32 input_device_registered:1;
- u32 platform_drv_registered:1;
- u32 platform_drv_attrs_registered:1;
- u32 sensors_pdrv_registered:1;
- u32 sensors_pdrv_attrs_registered:1;
- u32 sensors_pdev_attrs_registered:1;
-} tp_features;
-
-struct thinkpad_id_data {
- unsigned int vendor; /* ThinkPad vendor:
- * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */
-
- char *bios_version_str; /* Something like 1ZET51WW (1.03z) */
- char *ec_version_str; /* Something like 1ZHT51WW-1.04a */
-
- u16 bios_model; /* Big Endian, TP-1Y = 0x5931, 0 = unknown */
- u16 ec_model;
-
- char *model_str;
-};
-
-static struct thinkpad_id_data thinkpad_id;
-
-static struct list_head tpacpi_all_drivers;
-
-static struct ibm_init_struct ibms_init[];
-static int set_ibm_param(const char *val, struct kernel_param *kp);
-static int ibm_init(struct ibm_init_struct *iibm);
-static void ibm_exit(struct ibm_struct *ibm);
-
-
-/*
- * procfs master subdriver
- */
-static int thinkpad_acpi_driver_init(struct ibm_init_struct *iibm);
-static int thinkpad_acpi_driver_read(char *p);
-
-
-/*
- * Bay subdriver
- */
-
-#ifdef CONFIG_THINKPAD_ACPI_BAY
-static acpi_handle bay_handle, bay_ej_handle;
-static acpi_handle bay2_handle, bay2_ej_handle;
-
-static int bay_init(struct ibm_init_struct *iibm);
-static void bay_notify(struct ibm_struct *ibm, u32 event);
-static int bay_read(char *p);
-static int bay_write(char *buf);
-#endif /* CONFIG_THINKPAD_ACPI_BAY */
-
-
-/*
- * Beep subdriver
- */
-
-static acpi_handle beep_handle;
-
-static int beep_read(char *p);
-static int beep_write(char *buf);
-
-
-/*
- * Bluetooth subdriver
- */
-
-enum {
- /* ACPI GBDC/SBDC bits */
- TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
- TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
- TP_ACPI_BLUETOOTH_UNK = 0x04, /* unknown function */
-};
-
-static int bluetooth_init(struct ibm_init_struct *iibm);
-static int bluetooth_get_radiosw(void);
-static int bluetooth_set_radiosw(int radio_on);
-static int bluetooth_read(char *p);
-static int bluetooth_write(char *buf);
-
-
-/*
- * Brightness (backlight) subdriver
- */
-
-#define TPACPI_BACKLIGHT_DEV_NAME "thinkpad_screen"
-
-static struct backlight_device *ibm_backlight_device;
-static int brightness_offset = 0x31;
-static int brightness_mode;
-static unsigned int brightness_enable; /* 0 = no, 1 = yes, 2 = auto */
-
-static int brightness_init(struct ibm_init_struct *iibm);
-static void brightness_exit(void);
-static int brightness_get(struct backlight_device *bd);
-static int brightness_set(int value);
-static int brightness_update_status(struct backlight_device *bd);
-static int brightness_read(char *p);
-static int brightness_write(char *buf);
-
-
-/*
- * CMOS subdriver
- */
-
-static int cmos_read(char *p);
-static int cmos_write(char *buf);
-
-
-/*
- * Dock subdriver
- */
-
-#ifdef CONFIG_THINKPAD_ACPI_DOCK
-static acpi_handle pci_handle;
-static acpi_handle dock_handle;
-
-static void dock_notify(struct ibm_struct *ibm, u32 event);
-static int dock_read(char *p);
-static int dock_write(char *buf);
-#endif /* CONFIG_THINKPAD_ACPI_DOCK */
-
-
-/*
- * EC dump subdriver
- */
-
-static int ecdump_read(char *p) ;
-static int ecdump_write(char *buf);
-
-
-/*
- * Fan subdriver
- */
-
-enum { /* Fan control constants */
- fan_status_offset = 0x2f, /* EC register 0x2f */
- fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM)
- * 0x84 must be read before 0x85 */
-
- TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */
- TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */
-
- TPACPI_FAN_LAST_LEVEL = 0x100, /* Use cached last-seen fan level */
-};
-
-enum fan_status_access_mode {
- TPACPI_FAN_NONE = 0, /* No fan status or control */
- TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */
- TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */
-};
-
-enum fan_control_access_mode {
- TPACPI_FAN_WR_NONE = 0, /* No fan control */
- TPACPI_FAN_WR_ACPI_SFAN, /* Use ACPI SFAN */
- TPACPI_FAN_WR_TPEC, /* Use ACPI EC reg 0x2f */
- TPACPI_FAN_WR_ACPI_FANS, /* Use ACPI FANS and EC reg 0x2f */
-};
-
-enum fan_control_commands {
- TPACPI_FAN_CMD_SPEED = 0x0001, /* speed command */
- TPACPI_FAN_CMD_LEVEL = 0x0002, /* level command */
- TPACPI_FAN_CMD_ENABLE = 0x0004, /* enable/disable cmd,
- * and also watchdog cmd */
-};
-
-static int fan_control_allowed;
-
-static enum fan_status_access_mode fan_status_access_mode;
-static enum fan_control_access_mode fan_control_access_mode;
-static enum fan_control_commands fan_control_commands;
-static u8 fan_control_initial_status;
-static u8 fan_control_desired_level;
-static int fan_watchdog_maxinterval;
-
-static struct mutex fan_mutex;
-
-static acpi_handle fans_handle, gfan_handle, sfan_handle;
-
-static int fan_init(struct ibm_init_struct *iibm);
-static void fan_exit(void);
-static int fan_get_status(u8 *status);
-static int fan_get_status_safe(u8 *status);
-static int fan_get_speed(unsigned int *speed);
-static void fan_update_desired_level(u8 status);
-static void fan_watchdog_fire(struct work_struct *ignored);
-static void fan_watchdog_reset(void);
-static int fan_set_level(int level);
-static int fan_set_level_safe(int level);
-static int fan_set_enable(void);
-static int fan_set_disable(void);
-static int fan_set_speed(int speed);
-static int fan_read(char *p);
-static int fan_write(char *buf);
-static int fan_write_cmd_level(const char *cmd, int *rc);
-static int fan_write_cmd_enable(const char *cmd, int *rc);
-static int fan_write_cmd_disable(const char *cmd, int *rc);
-static int fan_write_cmd_speed(const char *cmd, int *rc);
-static int fan_write_cmd_watchdog(const char *cmd, int *rc);
-
-
-/*
- * Hotkey subdriver
- */
-
-static int hotkey_orig_status;
-static u32 hotkey_orig_mask;
-
-static struct mutex hotkey_mutex;
-
-static int hotkey_init(struct ibm_init_struct *iibm);
-static void hotkey_exit(void);
-static int hotkey_get(int *status, u32 *mask);
-static int hotkey_set(int status, u32 mask);
-static void hotkey_notify(struct ibm_struct *ibm, u32 event);
-static int hotkey_read(char *p);
-static int hotkey_write(char *buf);
-
-
-/*
- * LED subdriver
- */
-
-enum led_access_mode {
- TPACPI_LED_NONE = 0,
- TPACPI_LED_570, /* 570 */
- TPACPI_LED_OLD, /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20-21 */
- TPACPI_LED_NEW, /* all others */
-};
-
-enum { /* For TPACPI_LED_OLD */
- TPACPI_LED_EC_HLCL = 0x0c, /* EC reg to get led to power on */
- TPACPI_LED_EC_HLBL = 0x0d, /* EC reg to blink a lit led */
- TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */
-};
-
-static enum led_access_mode led_supported;
-static acpi_handle led_handle;
-
-static int led_init(struct ibm_init_struct *iibm);
-static int led_read(char *p);
-static int led_write(char *buf);
-
-/*
- * Light (thinklight) subdriver
- */
-
-static acpi_handle lght_handle, ledb_handle;
-
-static int light_init(struct ibm_init_struct *iibm);
-static int light_read(char *p);
-static int light_write(char *buf);
-
-
-/*
- * Thermal subdriver
- */
-
-enum thermal_access_mode {
- TPACPI_THERMAL_NONE = 0, /* No thermal support */
- TPACPI_THERMAL_ACPI_TMP07, /* Use ACPI TMP0-7 */
- TPACPI_THERMAL_ACPI_UPDT, /* Use ACPI TMP0-7 with UPDT */
- TPACPI_THERMAL_TPEC_8, /* Use ACPI EC regs, 8 sensors */
- TPACPI_THERMAL_TPEC_16, /* Use ACPI EC regs, 16 sensors */
-};
-
-enum { /* TPACPI_THERMAL_TPEC_* */
- TP_EC_THERMAL_TMP0 = 0x78, /* ACPI EC regs TMP 0..7 */
- TP_EC_THERMAL_TMP8 = 0xC0, /* ACPI EC regs TMP 8..15 */
- TP_EC_THERMAL_TMP_NA = -128, /* ACPI EC sensor not available */
-};
-
-#define TPACPI_MAX_THERMAL_SENSORS 16 /* Max thermal sensors supported */
-struct ibm_thermal_sensors_struct {
- s32 temp[TPACPI_MAX_THERMAL_SENSORS];
-};
-
-static enum thermal_access_mode thermal_read_mode;
-
-static int thermal_init(struct ibm_init_struct *iibm);
-static int thermal_get_sensor(int idx, s32 *value);
-static int thermal_get_sensors(struct ibm_thermal_sensors_struct *s);
-static int thermal_read(char *p);
-
-
-/*
- * Video subdriver
- */
-
-enum video_access_mode {
- TPACPI_VIDEO_NONE = 0,
- TPACPI_VIDEO_570, /* 570 */
- TPACPI_VIDEO_770, /* 600e/x, 770e, 770x */
- TPACPI_VIDEO_NEW, /* all others */
-};
-
-enum { /* video status flags, based on VIDEO_570 */
- TP_ACPI_VIDEO_S_LCD = 0x01, /* LCD output enabled */
- TP_ACPI_VIDEO_S_CRT = 0x02, /* CRT output enabled */
- TP_ACPI_VIDEO_S_DVI = 0x08, /* DVI output enabled */
-};
-
-enum { /* TPACPI_VIDEO_570 constants */
- TP_ACPI_VIDEO_570_PHSCMD = 0x87, /* unknown magic constant :( */
- TP_ACPI_VIDEO_570_PHSMASK = 0x03, /* PHS bits that map to
- * video_status_flags */
- TP_ACPI_VIDEO_570_PHS2CMD = 0x8b, /* unknown magic constant :( */
- TP_ACPI_VIDEO_570_PHS2SET = 0x80, /* unknown magic constant :( */
-};
-
-static enum video_access_mode video_supported;
-static int video_orig_autosw;
-static acpi_handle vid_handle, vid2_handle;
-
-static int video_init(struct ibm_init_struct *iibm);
-static void video_exit(void);
-static int video_outputsw_get(void);
-static int video_outputsw_set(int status);
-static int video_autosw_get(void);
-static int video_autosw_set(int enable);
-static int video_outputsw_cycle(void);
-static int video_expand_toggle(void);
-static int video_read(char *p);
-static int video_write(char *buf);
-
-
-/*
- * Volume subdriver
- */
-
-static int volume_offset = 0x30;
-
-static int volume_read(char *p);
-static int volume_write(char *buf);
-
-
-/*
- * Wan subdriver
- */
-
-enum {
- /* ACPI GWAN/SWAN bits */
- TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
- TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
- TP_ACPI_WANCARD_UNK = 0x04, /* unknown function */
-};
-
-static int wan_init(struct ibm_init_struct *iibm);
-static int wan_get_radiosw(void);
-static int wan_set_radiosw(int radio_on);
-static int wan_read(char *p);
-static int wan_write(char *buf);
-
-
-#endif /* __THINKPAD_ACPI_H */
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 8848e8ac705d..e8503341e3b1 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -150,6 +150,14 @@ config MTD_AFS_PARTS
for your particular device. It won't happen automatically. The
'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.
+config MTD_OF_PARTS
+ tristate "Flash partition map based on OF description"
+ depends on PPC_OF && MTD_PARTITIONS
+ help
+ This provides a partition parsing function which derives
+ the partition map from the children of the flash node,
+ as described in Documentation/powerpc/booting-without-of.txt.
+
comment "User Modules And Translation Layers"
config MTD_CHAR
@@ -286,6 +294,9 @@ config MTD_OOPS
buffer in a flash partition where it can be read back at some
later point.
+ To use, add console=ttyMTDx to the kernel command line,
+ where x is the MTD device number to use.
+
source "drivers/mtd/chips/Kconfig"
source "drivers/mtd/maps/Kconfig"
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 7f0b04b4caa7..538e33d11d46 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 1707f98c322c..47794d23a42e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -50,6 +50,7 @@
#define I82802AC 0x00ac
#define MANUFACTURER_ST 0x0020
#define M50LPW080 0x002F
+#define AT49BV640D 0x02de
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -157,6 +158,47 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
}
#endif
+/* Atmel chips don't use the same PRI format as Intel chips */
+static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ struct cfi_pri_atmel atmel_pri;
+ uint32_t features = 0;
+
+ /* Reverse byteswapping */
+ extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
+ extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
+ extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
+
+ memcpy(&atmel_pri, extp, sizeof(atmel_pri));
+ memset((char *)extp + 5, 0, sizeof(*extp) - 5);
+
+ printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
+
+ if (atmel_pri.Features & 0x01) /* chip erase supported */
+ features |= (1<<0);
+ if (atmel_pri.Features & 0x02) /* erase suspend supported */
+ features |= (1<<1);
+ if (atmel_pri.Features & 0x04) /* program suspend supported */
+ features |= (1<<2);
+ if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
+ features |= (1<<9);
+ if (atmel_pri.Features & 0x20) /* page mode read supported */
+ features |= (1<<7);
+ if (atmel_pri.Features & 0x40) /* queued erase supported */
+ features |= (1<<4);
+ if (atmel_pri.Features & 0x80) /* Protection bits supported */
+ features |= (1<<6);
+
+ extp->FeatureSupport = features;
+
+ /* burst write mode not supported */
+ cfi->cfiq->BufWriteTimeoutTyp = 0;
+ cfi->cfiq->BufWriteTimeoutMax = 0;
+}
+
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
@@ -227,13 +269,20 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
/*
* Some chips power-up with all sectors locked by default.
*/
-static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
- printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
- mtd->flags |= MTD_STUPID_LOCK;
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+ if (cfip->FeatureSupport&32) {
+ printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
+ mtd->flags |= MTD_POWERUP_LOCK;
+ }
}
static struct cfi_fixup cfi_fixup_table[] = {
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
@@ -245,7 +294,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
#endif
{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
- { MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
+ { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
{ 0, 0, NULL, NULL }
};
@@ -277,7 +326,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
return NULL;
if (extp->MajorVersion != '1' ||
- (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+ (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
"version %c.%c.\n", extp->MajorVersion,
extp->MinorVersion);
@@ -752,6 +801,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
int ret;
+ DECLARE_WAITQUEUE(wait, current);
retry:
if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
@@ -808,6 +858,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
spin_unlock(contender->mutex);
}
+ /* Check if we already have suspended erase
+ * on this chip. Sleep. */
+ if (mode == FL_ERASING && shared->erasing
+ && shared->erasing->oldstate == FL_ERASING) {
+ spin_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ spin_lock(chip->mutex);
+ goto retry;
+ }
+
/* We now own it */
shared->writing = chip;
if (mode == FL_ERASING)
@@ -2294,7 +2358,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
struct flchip *chip;
int ret = 0;
- if ((mtd->flags & MTD_STUPID_LOCK)
+ if ((mtd->flags & MTD_POWERUP_LOCK)
&& extp && (extp->FeatureSupport & (1 << 5)))
cfi_intelext_save_locks(mtd);
@@ -2405,7 +2469,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
spin_unlock(chip->mutex);
}
- if ((mtd->flags & MTD_STUPID_LOCK)
+ if ((mtd->flags & MTD_POWERUP_LOCK)
&& extp && (extp->FeatureSupport & (1 << 5)))
cfi_intelext_restore_locks(mtd);
}
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 389acc600f5e..d072e87ce4e2 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -185,6 +185,10 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
extp->TopBottom = 2;
else
extp->TopBottom = 3;
+
+ /* burst write mode not supported */
+ cfi->cfiq->BufWriteTimeoutTyp = 0;
+ cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
@@ -213,10 +217,11 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
mtd->lock = cfi_atmel_lock;
mtd->unlock = cfi_atmel_unlock;
- mtd->flags |= MTD_STUPID_LOCK;
+ mtd->flags |= MTD_POWERUP_LOCK;
}
static struct cfi_fixup cfi_fixup_table[] = {
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
@@ -229,7 +234,6 @@ static struct cfi_fixup cfi_fixup_table[] = {
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
- { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
@@ -338,10 +342,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
/* Modify the unlock address if we are in compatibility mode */
if ( /* x16 in x8 mode */
((cfi->device_type == CFI_DEVICETYPE_X8) &&
- (cfi->cfiq->InterfaceDesc == 2)) ||
+ (cfi->cfiq->InterfaceDesc ==
+ CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
/* x32 in x16 mode */
((cfi->device_type == CFI_DEVICETYPE_X16) &&
- (cfi->cfiq->InterfaceDesc == 4)))
+ (cfi->cfiq->InterfaceDesc ==
+ CFI_INTERFACE_X16_BY_X32_ASYNC)))
{
cfi->addr_unlock1 = 0xaaa;
cfi->addr_unlock2 = 0x555;
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index 60e11a0ada97..f651b6ef1c5d 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -370,27 +370,27 @@ static void print_cfi_ident(struct cfi_ident *cfip)
printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
switch(cfip->InterfaceDesc) {
- case 0:
+ case CFI_INTERFACE_X8_ASYNC:
printk(" - x8-only asynchronous interface\n");
break;
- case 1:
+ case CFI_INTERFACE_X16_ASYNC:
printk(" - x16-only asynchronous interface\n");
break;
- case 2:
+ case CFI_INTERFACE_X8_BY_X16_ASYNC:
printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
break;
- case 3:
+ case CFI_INTERFACE_X32_ASYNC:
printk(" - x32-only asynchronous interface\n");
break;
- case 4:
+ case CFI_INTERFACE_X16_BY_X32_ASYNC:
printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
break;
- case 65535:
+ case CFI_INTERFACE_NOT_ALLOWED:
printk(" - Not Allowed / Reserved\n");
break;
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index 2eb696d7b97b..d338b8c92780 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -112,7 +112,7 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
max_chips = 1;
}
- mapsize = (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG;
+ mapsize = sizeof(long) * ( (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG );
chip_map = kzalloc(mapsize, GFP_KERNEL);
if (!chip_map) {
printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index a67b23b87fc0..4be51a86a85c 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -194,8 +194,8 @@ enum uaddr {
struct unlock_addr {
- u32 addr1;
- u32 addr2;
+ uint32_t addr1;
+ uint32_t addr2;
};
@@ -246,16 +246,16 @@ static const struct unlock_addr unlock_addrs[] = {
}
};
-
struct amd_flash_info {
- const __u16 mfr_id;
- const __u16 dev_id;
const char *name;
- const int DevSize;
- const int NumEraseRegions;
- const int CmdSet;
- const __u8 uaddr[4]; /* unlock addrs for 8, 16, 32, 64 */
- const ulong regions[6];
+ const uint16_t mfr_id;
+ const uint16_t dev_id;
+ const uint8_t dev_size;
+ const uint8_t nr_regions;
+ const uint16_t cmd_set;
+ const uint32_t regions[6];
+ const uint8_t devtypes; /* Bitmask for x8, x16 etc. */
+ const uint8_t uaddr; /* unlock addrs for 8, 16, 32, 64 */
};
#define ERASEINFO(size,blocks) (size<<8)|(blocks-1)
@@ -280,12 +280,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F032B,
.name = "AMD AM29F032B",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,64)
}
@@ -293,13 +292,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV160DT,
.name = "AMD AM29LV160DT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
@@ -310,13 +307,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV160DB,
.name = "AMD AM29LV160DB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -327,13 +322,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV400BB,
.name = "AMD AM29LV400BB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -344,13 +337,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV400BT,
.name = "AMD AM29LV400BT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
@@ -361,13 +352,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV800BB,
.name = "AMD AM29LV800BB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -379,13 +368,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29DL800BB,
.name = "AMD AM29DL800BB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 6,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 6,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x08000,1),
@@ -398,13 +385,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29DL800BT,
.name = "AMD AM29DL800BT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 6,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 6,
.regions = {
ERASEINFO(0x10000,14),
ERASEINFO(0x04000,1),
@@ -417,13 +402,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F800BB,
.name = "AMD AM29F800BB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -434,13 +417,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV800BT,
.name = "AMD AM29LV800BT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
@@ -451,13 +432,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F800BT,
.name = "AMD AM29F800BT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
@@ -468,12 +447,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F017D,
.name = "AMD AM29F017D",
- .uaddr = {
- [0] = MTD_UADDR_DONT_CARE /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_DONT_CARE,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
@@ -481,12 +459,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F016D,
.name = "AMD AM29F016D",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
@@ -494,12 +471,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F080,
.name = "AMD AM29F080",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
@@ -507,12 +483,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F040,
.name = "AMD AM29F040",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -520,12 +495,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV040B,
.name = "AMD AM29LV040B",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -533,12 +507,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F002T,
.name = "AMD AM29F002T",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
@@ -549,12 +522,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV512,
.name = "Atmel AT49BV512",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_64KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,1)
}
@@ -562,12 +534,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT29LV512,
.name = "Atmel AT29LV512",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_64KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x80,256),
ERASEINFO(0x80,256)
@@ -576,13 +547,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV16X,
.name = "Atmel AT49BV16X",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,31)
@@ -591,13 +560,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV16XT,
.name = "Atmel AT49BV16XT",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x02000,8)
@@ -606,13 +573,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV32X,
.name = "Atmel AT49BV32X",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
@@ -621,13 +586,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV32XT,
.name = "Atmel AT49BV32XT",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
@@ -636,12 +599,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29F040C,
.name = "Fujitsu MBM29F040C",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8)
}
@@ -649,13 +611,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29F800BA,
.name = "Fujitsu MBM29F800BA",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -666,12 +626,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV650UE,
.name = "Fujitsu MBM29LV650UE",
- .uaddr = {
- [0] = MTD_UADDR_DONT_CARE /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_DONT_CARE,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,128)
}
@@ -679,13 +638,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV320TE,
.name = "Fujitsu MBM29LV320TE",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
@@ -694,13 +651,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV320BE,
.name = "Fujitsu MBM29LV320BE",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
@@ -709,13 +664,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV160TE,
.name = "Fujitsu MBM29LV160TE",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
@@ -726,13 +679,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV160BE,
.name = "Fujitsu MBM29LV160BE",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -743,13 +694,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV800BA,
.name = "Fujitsu MBM29LV800BA",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -760,13 +709,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV800TA,
.name = "Fujitsu MBM29LV800TA",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
@@ -777,13 +724,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV400BC,
.name = "Fujitsu MBM29LV400BC",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -794,13 +739,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV400TC,
.name = "Fujitsu MBM29LV400TC",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
@@ -811,12 +754,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_HYUNDAI,
.dev_id = HY29F002T,
.name = "Hyundai HY29F002T",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
@@ -827,12 +769,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F004B3B,
.name = "Intel 28F004B3B",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 7),
@@ -841,12 +782,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F004B3T,
.name = "Intel 28F004B3T",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 7),
ERASEINFO(0x02000, 8),
@@ -855,13 +795,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F400B3B,
.name = "Intel 28F400B3B",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 7),
@@ -870,13 +808,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F400B3T,
.name = "Intel 28F400B3T",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 7),
ERASEINFO(0x02000, 8),
@@ -885,12 +821,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F008B3B,
.name = "Intel 28F008B3B",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 15),
@@ -899,12 +834,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F008B3T,
.name = "Intel 28F008B3T",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 15),
ERASEINFO(0x02000, 8),
@@ -913,12 +847,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F008S5,
.name = "Intel 28F008S5",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
@@ -926,12 +859,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F016S5,
.name = "Intel 28F016S5",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
@@ -939,12 +871,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F008SA,
.name = "Intel 28F008SA",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000, 16),
}
@@ -952,12 +883,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F800B3B,
.name = "Intel 28F800B3B",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 15),
@@ -966,12 +896,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F800B3T,
.name = "Intel 28F800B3T",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 15),
ERASEINFO(0x02000, 8),
@@ -980,12 +909,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F016B3B,
.name = "Intel 28F016B3B",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 31),
@@ -994,12 +922,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F016S3,
.name = "Intel I28F016S3",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000, 32),
}
@@ -1007,12 +934,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F016B3T,
.name = "Intel 28F016B3T",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 31),
ERASEINFO(0x02000, 8),
@@ -1021,12 +947,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F160B3B,
.name = "Intel 28F160B3B",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 31),
@@ -1035,12 +960,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F160B3T,
.name = "Intel 28F160B3T",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 31),
ERASEINFO(0x02000, 8),
@@ -1049,12 +973,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F320B3B,
.name = "Intel 28F320B3B",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 63),
@@ -1063,12 +986,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F320B3T,
.name = "Intel 28F320B3T",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 63),
ERASEINFO(0x02000, 8),
@@ -1077,12 +999,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F640B3B,
.name = "Intel 28F640B3B",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 127),
@@ -1091,12 +1012,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F640B3T,
.name = "Intel 28F640B3T",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 127),
ERASEINFO(0x02000, 8),
@@ -1105,12 +1025,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I82802AB,
.name = "Intel 82802AB",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -1118,12 +1037,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I82802AC,
.name = "Intel 82802AC",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
@@ -1131,12 +1049,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29LV040C,
.name = "Macronix MX29LV040C",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -1144,13 +1061,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29LV160T,
.name = "MXIC MX29LV160T",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
@@ -1161,13 +1076,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_NEC,
.dev_id = UPD29F064115,
.name = "NEC uPD29F064115",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 3,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 3,
.regions = {
ERASEINFO(0x2000,8),
ERASEINFO(0x10000,126),
@@ -1177,13 +1090,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29LV160B,
.name = "MXIC MX29LV160B",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1194,12 +1105,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F040,
.name = "Macronix MX29F040",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -1207,12 +1117,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F016,
.name = "Macronix MX29F016",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
@@ -1220,12 +1129,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F004T,
.name = "Macronix MX29F004T",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
@@ -1236,12 +1144,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F004B,
.name = "Macronix MX29F004B",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1252,12 +1159,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F002T,
.name = "Macronix MX29F002T",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
@@ -1268,12 +1174,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_PMC,
.dev_id = PM49FL002,
.name = "PMC Pm49FL002",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 64 )
}
@@ -1281,12 +1186,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_PMC,
.dev_id = PM49FL004,
.name = "PMC Pm49FL004",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 128 )
}
@@ -1294,12 +1198,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_PMC,
.dev_id = PM49FL008,
.name = "PMC Pm49FL008",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 256 )
}
@@ -1307,25 +1210,23 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SHARP,
.dev_id = LH28F640BF,
.name = "LH28F640BF",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 1,
- .regions = {
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 1,
+ .regions = {
ERASEINFO(0x40000,16),
}
}, {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39LF512,
.name = "SST 39LF512",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_64KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,16),
}
@@ -1333,12 +1234,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39LF010,
.name = "SST 39LF010",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_128KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_128KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,32),
}
@@ -1346,36 +1246,33 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST29EE020,
.name = "SST 29EE020",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_SST_PAGE,
- .NumEraseRegions= 1,
- .regions = {ERASEINFO(0x01000,64),
- }
- }, {
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_SST_PAGE,
+ .nr_regions = 1,
+ .regions = {ERASEINFO(0x01000,64),
+ }
+ }, {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST29LE020,
.name = "SST 29LE020",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_SST_PAGE,
- .NumEraseRegions= 1,
- .regions = {ERASEINFO(0x01000,64),
- }
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_SST_PAGE,
+ .nr_regions = 1,
+ .regions = {ERASEINFO(0x01000,64),
+ }
}, {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39LF020,
.name = "SST 39LF020",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,64),
}
@@ -1383,12 +1280,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39LF040,
.name = "SST 39LF040",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
@@ -1396,12 +1292,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39SF010A,
.name = "SST 39SF010A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_128KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_128KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,32),
}
@@ -1409,26 +1304,24 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39SF020A,
.name = "SST 39SF020A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,64),
}
}, {
.mfr_id = MANUFACTURER_SST,
- .dev_id = SST49LF040B,
- .name = "SST 49LF040B",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
- .regions = {
+ .dev_id = SST49LF040B,
+ .name = "SST 49LF040B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
ERASEINFO(0x01000,128),
}
}, {
@@ -1436,12 +1329,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF004B,
.name = "SST 49LF004B",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
@@ -1449,12 +1341,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF008A,
.name = "SST 49LF008A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,256),
}
@@ -1462,12 +1353,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF030A,
.name = "SST 49LF030A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,96),
}
@@ -1475,12 +1365,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF040A,
.name = "SST 49LF040A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
@@ -1488,57 +1377,49 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF080A,
.name = "SST 49LF080A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,256),
}
}, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
- .dev_id = SST39LF160,
- .name = "SST 39LF160",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
- [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
- .regions = {
- ERASEINFO(0x1000,256),
- ERASEINFO(0x1000,256)
- }
- }, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
- .dev_id = SST39VF1601,
- .name = "SST 39VF1601",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
- [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
- .regions = {
- ERASEINFO(0x1000,256),
- ERASEINFO(0x1000,256)
- }
-
+ .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .dev_id = SST39LF160,
+ .name = "SST 39LF160",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .dev_id = SST39VF1601,
+ .name = "SST 39VF1601",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
}, {
.mfr_id = MANUFACTURER_ST,
.dev_id = M29F800AB,
.name = "ST M29F800AB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1549,13 +1430,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
.dev_id = M29W800DT,
.name = "ST M29W800DT",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
- [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
@@ -1566,13 +1445,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
.dev_id = M29W800DB,
.name = "ST M29W800DB",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
- [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1583,13 +1460,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
.dev_id = M29W160DT,
.name = "ST M29W160DT",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
@@ -1600,13 +1475,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
.dev_id = M29W160DB,
.name = "ST M29W160DB",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1617,12 +1490,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST,
.dev_id = M29W040B,
.name = "ST M29W040B",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -1630,12 +1502,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST,
.dev_id = M50FW040,
.name = "ST M50FW040",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -1643,12 +1514,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST,
.dev_id = M50FW080,
.name = "ST M50FW080",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
@@ -1656,12 +1526,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST,
.dev_id = M50FW016,
.name = "ST M50FW016",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
@@ -1669,12 +1538,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST,
.dev_id = M50LPW080,
.name = "ST M50LPW080",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
@@ -1682,13 +1550,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVT160,
.name = "Toshiba TC58FVT160",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
@@ -1699,13 +1565,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVB160,
.name = "Toshiba TC58FVB160",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1716,13 +1580,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVB321,
.name = "Toshiba TC58FVB321",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
@@ -1731,13 +1593,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVT321,
.name = "Toshiba TC58FVT321",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
@@ -1746,13 +1606,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVB641,
.name = "Toshiba TC58FVB641",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,127)
@@ -1761,13 +1619,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVT641,
.name = "Toshiba TC58FVT641",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000,127),
ERASEINFO(0x02000,8)
@@ -1776,12 +1632,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_WINBOND,
.dev_id = W49V002A,
.name = "Winbond W49V002A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000, 3),
ERASEINFO(0x08000, 1),
@@ -1791,15 +1646,7 @@ static const struct amd_flash_info jedec_table[] = {
}
};
-
-static int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
-
-static int jedec_probe_chip(struct map_info *map, __u32 base,
- unsigned long *chip_map, struct cfi_private *cfi);
-
-static struct mtd_info *jedec_probe(struct map_info *map);
-
-static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
+static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
struct cfi_private *cfi)
{
map_word result;
@@ -1810,7 +1657,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
return result.x[0] & mask;
}
-static inline u32 jedec_read_id(struct map_info *map, __u32 base,
+static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
struct cfi_private *cfi)
{
map_word result;
@@ -1821,8 +1668,7 @@ static inline u32 jedec_read_id(struct map_info *map, __u32 base,
return result.x[0] & mask;
}
-static inline void jedec_reset(u32 base, struct map_info *map,
- struct cfi_private *cfi)
+static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
{
/* Reset */
@@ -1832,7 +1678,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
* 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
* as they will ignore the writes and dont care what address
* the F0 is written to */
- if(cfi->addr_unlock1) {
+ if (cfi->addr_unlock1) {
DEBUG( MTD_DEBUG_LEVEL3,
"reset unlock called %x %x \n",
cfi->addr_unlock1,cfi->addr_unlock2);
@@ -1841,7 +1687,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
}
cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
- /* Some misdesigned intel chips do not respond for 0xF0 for a reset,
+ /* Some misdesigned Intel chips do not respond for 0xF0 for a reset,
* so ensure we're in read mode. Send both the Intel and the AMD command
* for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so
* this should be safe.
@@ -1851,42 +1697,20 @@ static inline void jedec_reset(u32 base, struct map_info *map,
}
-static inline __u8 finfo_uaddr(const struct amd_flash_info *finfo, int device_type)
-{
- int uaddr_idx;
- __u8 uaddr = MTD_UADDR_NOT_SUPPORTED;
-
- switch ( device_type ) {
- case CFI_DEVICETYPE_X8: uaddr_idx = 0; break;
- case CFI_DEVICETYPE_X16: uaddr_idx = 1; break;
- case CFI_DEVICETYPE_X32: uaddr_idx = 2; break;
- default:
- printk(KERN_NOTICE "MTD: %s(): unknown device_type %d\n",
- __func__, device_type);
- goto uaddr_done;
- }
-
- uaddr = finfo->uaddr[uaddr_idx];
-
- if (uaddr != MTD_UADDR_NOT_SUPPORTED ) {
- /* ASSERT("The unlock addresses for non-8-bit mode
- are bollocks. We don't really need an array."); */
- uaddr = finfo->uaddr[0];
- }
-
- uaddr_done:
- return uaddr;
-}
-
-
static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
{
int i,num_erase_regions;
- __u8 uaddr;
+ uint8_t uaddr;
- printk("Found: %s\n",jedec_table[index].name);
+ if (! (jedec_table[index].devtypes & p_cfi->device_type)) {
+ DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
+ jedec_table[index].name, 4 * (1<<p_cfi->device_type));
+ return 0;
+ }
+
+ printk(KERN_INFO "Found: %s\n",jedec_table[index].name);
- num_erase_regions = jedec_table[index].NumEraseRegions;
+ num_erase_regions = jedec_table[index].nr_regions;
p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
if (!p_cfi->cfiq) {
@@ -1896,9 +1720,9 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
- p_cfi->cfiq->P_ID = jedec_table[index].CmdSet;
- p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions;
- p_cfi->cfiq->DevSize = jedec_table[index].DevSize;
+ p_cfi->cfiq->P_ID = jedec_table[index].cmd_set;
+ p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
+ p_cfi->cfiq->DevSize = jedec_table[index].dev_size;
p_cfi->cfi_mode = CFI_MODE_JEDEC;
for (i=0; i<num_erase_regions; i++){
@@ -1910,14 +1734,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
p_cfi->mfr = jedec_table[index].mfr_id;
p_cfi->id = jedec_table[index].dev_id;
- uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type);
- if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
- kfree( p_cfi->cfiq );
- return 0;
- }
+ uaddr = jedec_table[index].uaddr;
- p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1;
- p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2;
+ /* The table has unlock addresses in _bytes_, and we try not to let
+ our brains explode when we see the datasheets talking about address
+ lines numbered from A-1 to A18. The CFI table has unlock addresses
+ in device-words according to the mode the device is connected in */
+ p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type;
+ p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type;
return 1; /* ok */
}
@@ -1930,14 +1754,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
* be perfect - consequently there should be some module parameters that
* could be manually specified to force the chip info.
*/
-static inline int jedec_match( __u32 base,
+static inline int jedec_match( uint32_t base,
struct map_info *map,
struct cfi_private *cfi,
const struct amd_flash_info *finfo )
{
int rc = 0; /* failure until all tests pass */
u32 mfr, id;
- __u8 uaddr;
+ uint8_t uaddr;
/*
* The IDs must match. For X16 and X32 devices operating in
@@ -1950,8 +1774,8 @@ static inline int jedec_match( __u32 base,
*/
switch (cfi->device_type) {
case CFI_DEVICETYPE_X8:
- mfr = (__u8)finfo->mfr_id;
- id = (__u8)finfo->dev_id;
+ mfr = (uint8_t)finfo->mfr_id;
+ id = (uint8_t)finfo->dev_id;
/* bjd: it seems that if we do this, we can end up
* detecting 16bit flashes as an 8bit device, even though
@@ -1964,12 +1788,12 @@ static inline int jedec_match( __u32 base,
}
break;
case CFI_DEVICETYPE_X16:
- mfr = (__u16)finfo->mfr_id;
- id = (__u16)finfo->dev_id;
+ mfr = (uint16_t)finfo->mfr_id;
+ id = (uint16_t)finfo->dev_id;
break;
case CFI_DEVICETYPE_X32:
- mfr = (__u16)finfo->mfr_id;
- id = (__u32)finfo->dev_id;
+ mfr = (uint16_t)finfo->mfr_id;
+ id = (uint32_t)finfo->dev_id;
break;
default:
printk(KERN_WARNING
@@ -1984,25 +1808,25 @@ static inline int jedec_match( __u32 base,
/* the part size must fit in the memory window */
DEBUG( MTD_DEBUG_LEVEL3,
"MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
- __func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) );
- if ( base + cfi_interleave(cfi) * ( 1 << finfo->DevSize ) > map->size ) {
+ __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
+ if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
DEBUG( MTD_DEBUG_LEVEL3,
"MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
__func__, finfo->mfr_id, finfo->dev_id,
- 1 << finfo->DevSize );
+ 1 << finfo->dev_size );
goto match_done;
}
- uaddr = finfo_uaddr(finfo, cfi->device_type);
- if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
+ if (! (finfo->devtypes & cfi->device_type))
goto match_done;
- }
+
+ uaddr = finfo->uaddr;
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
__func__, cfi->addr_unlock1, cfi->addr_unlock2 );
if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
- && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1 ||
- unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) {
+ && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
+ unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
DEBUG( MTD_DEBUG_LEVEL3,
"MTD %s(): 0x%.4x 0x%.4x did not match\n",
__func__,
@@ -2042,7 +1866,7 @@ static inline int jedec_match( __u32 base,
* were truly frobbing a real device.
*/
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
- if(cfi->addr_unlock1) {
+ if (cfi->addr_unlock1) {
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
}
@@ -2068,8 +1892,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
if (MTD_UADDR_UNNECESSARY == uaddr_idx)
return 0;
- cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1;
- cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2;
+ cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type;
+ cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type;
}
/* Make certain we aren't probing past the end of map */
@@ -2081,19 +1905,11 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
}
/* Ensure the unlock addresses we try stay inside the map */
- probe_offset1 = cfi_build_cmd_addr(
- cfi->addr_unlock1,
- cfi_interleave(cfi),
- cfi->device_type);
- probe_offset2 = cfi_build_cmd_addr(
- cfi->addr_unlock1,
- cfi_interleave(cfi),
- cfi->device_type);
+ probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type);
+ probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type);
if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
((base + probe_offset2 + map_bankwidth(map)) >= map->size))
- {
goto retry;
- }
/* Reset */
jedec_reset(base, map, cfi);
@@ -2128,8 +1944,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
}
goto retry;
} else {
- __u16 mfr;
- __u16 id;
+ uint16_t mfr;
+ uint16_t id;
/* Make sure it is a chip of the same manufacturer and id */
mfr = jedec_read_mfr(map, base, cfi);
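The unlock-address table used above now appears to hold byte-mode (x8) offsets, and the new division by cfi->device_type scales each entry down to a device-word offset at probe time. A minimal stand-alone sketch of that arithmetic, assuming the usual CFI_DEVICETYPE_* encoding (X8 = 1, X16 = 2, X32 = 4):

    /* sketch: byte-mode unlock offset scaled to a device-word offset */
    #include <stdio.h>

    int main(void)
    {
            unsigned int table_addr1 = 0xAAA;   /* e.g. unlock_addrs[].addr1, byte mode */
            unsigned int device_type = 2;       /* CFI_DEVICETYPE_X16 */

            /* prints addr_unlock1 = 0x555, the word-mode unlock address */
            printf("addr_unlock1 = 0x%x\n", table_addr1 / device_type);
            return 0;
    }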
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 23fab14f1637..b44292abd9f7 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -9,7 +9,7 @@
*
* mtdparts=<mtddef>[;<mtddef]
* <mtddef> := <mtd-id>:<partdef>[,<partdef>]
- * <partdef> := <size>[@offset][<name>][ro]
+ * <partdef> := <size>[@offset][<name>][ro][lk]
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
* <name> := '(' NAME ')'
@@ -143,6 +143,13 @@ static struct mtd_partition * newpart(char *s,
s += 2;
}
+ /* if lk is found do NOT unlock the MTD partition*/
+ if (strncmp(s, "lk", 2) == 0)
+ {
+ mask_flags |= MTD_POWERUP_LOCK;
+ s += 2;
+ }
+
/* test if more partitions are following */
if (*s == ',')
{
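The new "lk" suffix keeps a partition locked across boot: it sets MTD_POWERUP_LOCK in mask_flags, which clears that flag on the resulting partition, so the MTD core's automatic power-up unlock (see the mtdcore.c hunk further down) skips it. A hypothetical command line, with the mtd-id "physmap-flash.0" and all sizes invented for illustration:

    mtdparts=physmap-flash.0:512k(bootloader)rolk,4m(kernel),-(rootfs)

Here the bootloader partition is marked read-only ("ro") and left locked ("lk"), while the other partitions are still unlocked at registration if the chip powers up locked.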
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index be4b9948c762..eeaaa9dce6ef 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -4,7 +4,7 @@
* block2mtd.c - create an mtd from a block device
*
* Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
- * Copyright (C) 2004-2006 Jörn Engel <joern@wh.fh-wedel.de>
+ * Copyright (C) 2004-2006 Joern Engel <joern@wh.fh-wedel.de>
*
* Licence: GPL
*/
@@ -485,5 +485,5 @@ module_init(block2mtd_init);
module_exit(block2mtd_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Simon Evans <spse@secret.org.uk> and others");
+MODULE_AUTHOR("Joern Engel <joern@lazybastard.org>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index 90acf57c19bd..846989f292e3 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -632,7 +632,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
len = ((from | 0x1ff) + 1) - from;
/* The ECC will not be calculated correctly if less than 512 is read */
- if (len != 0x200 && eccbuf)
+ if (len != 0x200)
printk(KERN_WARNING
"ECC needs a full sector read (adr: %lx size %lx)\n",
(long) from, (long) len);
@@ -896,7 +896,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
/* Let the caller know we completed it */
*retlen += len;
- if (eccbuf) {
+ {
unsigned char x[8];
size_t dummy;
int ret;
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 2b30b587c6e8..83be3461658f 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -748,7 +748,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd);
/* On interleaved devices the flags for 2nd half 512 are before data */
- if (eccbuf && before)
+ if (before)
fto -= 2;
/* issue the Serial Data In command to initial the Page Program process */
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 4ea50a1dda85..99fd210feaec 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -323,7 +323,7 @@ static int flash_probe (void)
/* put the flash back into command mode */
write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000);
- return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || FLASH_DEVICE_16mbit_BOTTOM));
+ return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || devtype == FLASH_DEVICE_16mbit_BOTTOM));
}
/*
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index a5ed6d232c35..b35e4813a3a5 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -420,7 +420,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
status = dataflash_waitready(priv->spi);
/* Check result of the compare operation */
- if ((status & (1 << 6)) == 1) {
+ if (status & (1 << 6)) {
printk(KERN_ERR "%s: compare page %u, err %d\n",
spi->dev.bus_id, pageaddr, status);
remaining = 0;
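The old test could never fire: masking the status byte with (1 << 6) yields either 0 or 0x40, and neither equals 1, so compare errors were silently ignored. A tiny stand-alone demonstration of the two predicates:

    #include <stdio.h>

    int main(void)
    {
            unsigned int status = 0x40;     /* pretend the compare-mismatch bit is set */

            printf("old test: %d\n", (status & (1 << 6)) == 1);     /* 0 - never true */
            printf("new test: %d\n", (status & (1 << 6)) != 0);     /* 1 - error reported */
            return 0;
    }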
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 56cc1ca7ffd5..180298b92a7a 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -2,7 +2,7 @@
* $Id: phram.c,v 1.16 2005/11/07 11:14:25 gleixner Exp $
*
* Copyright (c) ???? Jochen Schäuble <psionic@psionic.de>
- * Copyright (c) 2003-2004 Jörn Engel <joern@wh.fh-wedel.de>
+ * Copyright (c) 2003-2004 Joern Engel <joern@wh.fh-wedel.de>
*
* Usage:
*
@@ -299,5 +299,5 @@ module_init(init_phram);
module_exit(cleanup_phram);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jörn Engel <joern@wh.fh-wedel.de>");
+MODULE_AUTHOR("Joern Engel <joern@wh.fh-wedel.de>");
MODULE_DESCRIPTION("MTD driver for physical RAM");
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index a592fc04cf78..12c253664eb2 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -110,13 +110,6 @@ config MTD_SUN_UFLASH
Sun Microsystems boardsets. This driver will require CFI support
in the kernel, so if you did not enable CFI previously, do that now.
-config MTD_PNC2000
- tristate "CFI Flash device mapped on Photron PNC-2000"
- depends on X86 && MTD_CFI && MTD_PARTITIONS
- help
- PNC-2000 is the name of Network Camera product from PHOTRON
- Ltd. in Japan. It uses CFI-compliant flash.
-
config MTD_SC520CDP
tristate "CFI Flash device mapped on AMD SC520 CDP"
depends on X86 && MTD_CFI && MTD_CONCAT
@@ -576,7 +569,7 @@ config MTD_BAST_MAXSIZE
default "4"
config MTD_SHARP_SL
- bool "ROM mapped on Sharp SL Series"
+ tristate "ROM mapped on Sharp SL Series"
depends on ARCH_PXA
help
This enables access to the flash chip on the Sharp SL Series of PDAs.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 316382a1401b..a9cbe80f99a0 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -28,7 +28,6 @@ obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
-obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
diff --git a/drivers/mtd/maps/mtx-1_flash.c b/drivers/mtd/maps/mtx-1_flash.c
index d884f2be28f6..2a8fde9b92f0 100644
--- a/drivers/mtd/maps/mtx-1_flash.c
+++ b/drivers/mtd/maps/mtx-1_flash.c
@@ -4,7 +4,7 @@
* $Id: mtx-1_flash.c,v 1.2 2005/11/07 11:14:27 gleixner Exp $
*
* (C) 2005 Bruno Randolf <bruno.randolf@4g-systems.biz>
- * (C) 2005 Jörn Engel <joern@wohnheim.fh-wedel.de>
+ * (C) 2005 Joern Engel <joern@wohnheim.fh-wedel.de>
*
*/
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 28c5ffd75233..f00e04efbe28 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -20,11 +20,15 @@
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
+#include <linux/mtd/concat.h>
#include <asm/io.h>
+#define MAX_RESOURCES 4
+
struct physmap_flash_info {
- struct mtd_info *mtd;
- struct map_info map;
+ struct mtd_info *mtd[MAX_RESOURCES];
+ struct mtd_info *cmtd;
+ struct map_info map[MAX_RESOURCES];
struct resource *res;
#ifdef CONFIG_MTD_PARTITIONS
int nr_parts;
@@ -32,11 +36,11 @@ struct physmap_flash_info {
#endif
};
-
static int physmap_flash_remove(struct platform_device *dev)
{
struct physmap_flash_info *info;
struct physmap_flash_data *physmap_data;
+ int i;
info = platform_get_drvdata(dev);
if (info == NULL)
@@ -45,24 +49,33 @@ static int physmap_flash_remove(struct platform_device *dev)
physmap_data = dev->dev.platform_data;
- if (info->mtd != NULL) {
+#ifdef CONFIG_MTD_CONCAT
+ if (info->cmtd != info->mtd[0]) {
+ del_mtd_device(info->cmtd);
+ mtd_concat_destroy(info->cmtd);
+ }
+#endif
+
+ for (i = 0; i < MAX_RESOURCES; i++) {
+ if (info->mtd[i] != NULL) {
#ifdef CONFIG_MTD_PARTITIONS
- if (info->nr_parts) {
- del_mtd_partitions(info->mtd);
- kfree(info->parts);
- } else if (physmap_data->nr_parts) {
- del_mtd_partitions(info->mtd);
- } else {
- del_mtd_device(info->mtd);
- }
+ if (info->nr_parts) {
+ del_mtd_partitions(info->mtd[i]);
+ kfree(info->parts);
+ } else if (physmap_data->nr_parts) {
+ del_mtd_partitions(info->mtd[i]);
+ } else {
+ del_mtd_device(info->mtd[i]);
+ }
#else
- del_mtd_device(info->mtd);
+ del_mtd_device(info->mtd[i]);
#endif
- map_destroy(info->mtd);
- }
+ map_destroy(info->mtd[i]);
+ }
- if (info->map.virt != NULL)
- iounmap(info->map.virt);
+ if (info->map[i].virt != NULL)
+ iounmap(info->map[i].virt);
+ }
if (info->res != NULL) {
release_resource(info->res);
@@ -82,16 +95,14 @@ static int physmap_flash_probe(struct platform_device *dev)
struct physmap_flash_data *physmap_data;
struct physmap_flash_info *info;
const char **probe_type;
- int err;
+ int err = 0;
+ int i;
+ int devices_found = 0;
physmap_data = dev->dev.platform_data;
if (physmap_data == NULL)
return -ENODEV;
- printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
- (unsigned long long)(dev->resource->end - dev->resource->start + 1),
- (unsigned long long)dev->resource->start);
-
info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
if (info == NULL) {
err = -ENOMEM;
@@ -100,56 +111,83 @@ static int physmap_flash_probe(struct platform_device *dev)
platform_set_drvdata(dev, info);
- info->res = request_mem_region(dev->resource->start,
- dev->resource->end - dev->resource->start + 1,
- dev->dev.bus_id);
- if (info->res == NULL) {
- dev_err(&dev->dev, "Could not reserve memory region\n");
- err = -ENOMEM;
- goto err_out;
- }
+ for (i = 0; i < dev->num_resources; i++) {
+ printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
+ (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
+ (unsigned long long)dev->resource[i].start);
+
+ info->res = request_mem_region(dev->resource[i].start,
+ dev->resource[i].end - dev->resource[i].start + 1,
+ dev->dev.bus_id);
+ if (info->res == NULL) {
+ dev_err(&dev->dev, "Could not reserve memory region\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
- info->map.name = dev->dev.bus_id;
- info->map.phys = dev->resource->start;
- info->map.size = dev->resource->end - dev->resource->start + 1;
- info->map.bankwidth = physmap_data->width;
- info->map.set_vpp = physmap_data->set_vpp;
+ info->map[i].name = dev->dev.bus_id;
+ info->map[i].phys = dev->resource[i].start;
+ info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1;
+ info->map[i].bankwidth = physmap_data->width;
+ info->map[i].set_vpp = physmap_data->set_vpp;
+
+ info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size);
+ if (info->map[i].virt == NULL) {
+ dev_err(&dev->dev, "Failed to ioremap flash region\n");
+ err = EIO;
+ goto err_out;
+ }
- info->map.virt = ioremap(info->map.phys, info->map.size);
- if (info->map.virt == NULL) {
- dev_err(&dev->dev, "Failed to ioremap flash region\n");
- err = EIO;
- goto err_out;
- }
+ simple_map_init(&info->map[i]);
- simple_map_init(&info->map);
+ probe_type = rom_probe_types;
+ for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++)
+ info->mtd[i] = do_map_probe(*probe_type, &info->map[i]);
+ if (info->mtd[i] == NULL) {
+ dev_err(&dev->dev, "map_probe failed\n");
+ err = -ENXIO;
+ goto err_out;
+ } else {
+ devices_found++;
+ }
+ info->mtd[i]->owner = THIS_MODULE;
+ }
- probe_type = rom_probe_types;
- for (; info->mtd == NULL && *probe_type != NULL; probe_type++)
- info->mtd = do_map_probe(*probe_type, &info->map);
- if (info->mtd == NULL) {
- dev_err(&dev->dev, "map_probe failed\n");
+ if (devices_found == 1) {
+ info->cmtd = info->mtd[0];
+ } else if (devices_found > 1) {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+#ifdef CONFIG_MTD_CONCAT
+ info->cmtd = mtd_concat_create(info->mtd, devices_found, dev->dev.bus_id);
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+#else
+ printk(KERN_ERR "physmap-flash: multiple devices "
+ "found but MTD concat support disabled.\n");
err = -ENXIO;
- goto err_out;
+#endif
}
- info->mtd->owner = THIS_MODULE;
+ if (err)
+ goto err_out;
#ifdef CONFIG_MTD_PARTITIONS
- err = parse_mtd_partitions(info->mtd, part_probe_types, &info->parts, 0);
+ err = parse_mtd_partitions(info->cmtd, part_probe_types, &info->parts, 0);
if (err > 0) {
- add_mtd_partitions(info->mtd, info->parts, err);
+ add_mtd_partitions(info->cmtd, info->parts, err);
return 0;
}
if (physmap_data->nr_parts) {
printk(KERN_NOTICE "Using physmap partition information\n");
- add_mtd_partitions(info->mtd, physmap_data->parts,
- physmap_data->nr_parts);
+ add_mtd_partitions(info->cmtd, physmap_data->parts,
+ physmap_data->nr_parts);
return 0;
}
#endif
- add_mtd_device(info->mtd);
+ add_mtd_device(info->cmtd);
return 0;
err_out:
@@ -162,9 +200,11 @@ static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
int ret = 0;
+ int i;
if (info)
- ret = info->mtd->suspend(info->mtd);
+ for (i = 0; i < MAX_RESOURCES; i++)
+ ret |= info->mtd[i]->suspend(info->mtd[i]);
return ret;
}
@@ -172,27 +212,35 @@ static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state
static int physmap_flash_resume(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
+ int i;
+
if (info)
- info->mtd->resume(info->mtd);
+ for (i = 0; i < MAX_RESOURCES; i++)
+ info->mtd[i]->resume(info->mtd[i]);
return 0;
}
static void physmap_flash_shutdown(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
- if (info && info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ int i;
+
+ for (i = 0; i < MAX_RESOURCES; i++)
+ if (info && info->mtd[i]->suspend(info->mtd[i]) == 0)
+ info->mtd[i]->resume(info->mtd[i]);
}
+#else
+#define physmap_flash_suspend NULL
+#define physmap_flash_resume NULL
+#define physmap_flash_shutdown NULL
#endif
static struct platform_driver physmap_flash_driver = {
.probe = physmap_flash_probe,
.remove = physmap_flash_remove,
-#ifdef CONFIG_PM
.suspend = physmap_flash_suspend,
.resume = physmap_flash_resume,
.shutdown = physmap_flash_shutdown,
-#endif
.driver = {
.name = "physmap-flash",
},
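Since the probe loop now walks dev->num_resources (up to MAX_RESOURCES), a board can hand several flash banks to one "physmap-flash" device and, with CONFIG_MTD_CONCAT enabled, see them as a single concatenated MTD. A hypothetical board-file sketch; the base addresses, bank sizes and 16-bit bus width are invented for illustration:

    static struct resource board_flash_resources[] = {
            {
                    .start = 0x20000000,
                    .end   = 0x20000000 + 0x02000000 - 1,   /* 32 MiB bank 0 */
                    .flags = IORESOURCE_MEM,
            }, {
                    .start = 0x22000000,
                    .end   = 0x22000000 + 0x02000000 - 1,   /* 32 MiB bank 1 */
                    .flags = IORESOURCE_MEM,
            },
    };

    static struct physmap_flash_data board_flash_data = {
            .width = 2,                             /* 16-bit bank width */
    };

    static struct platform_device board_flash_device = {
            .name           = "physmap-flash",
            .id             = 0,
            .dev            = { .platform_data = &board_flash_data },
            .resource       = board_flash_resources,
            .num_resources  = ARRAY_SIZE(board_flash_resources),
    };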
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index aeed9ea79714..49acd4171893 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -80,64 +80,6 @@ static int parse_obsolete_partitions(struct of_device *dev,
return nr_parts;
}
-
-static int __devinit parse_partitions(struct of_flash *info,
- struct of_device *dev)
-{
- const char *partname;
- static const char *part_probe_types[]
- = { "cmdlinepart", "RedBoot", NULL };
- struct device_node *dp = dev->node, *pp;
- int nr_parts, i;
-
- /* First look for RedBoot table or partitions on the command
- * line, these take precedence over device tree information */
- nr_parts = parse_mtd_partitions(info->mtd, part_probe_types,
- &info->parts, 0);
- if (nr_parts > 0) {
- add_mtd_partitions(info->mtd, info->parts, nr_parts);
- return 0;
- }
-
- /* First count the subnodes */
- nr_parts = 0;
- for (pp = dp->child; pp; pp = pp->sibling)
- nr_parts++;
-
- if (nr_parts == 0)
- return parse_obsolete_partitions(dev, info, dp);
-
- info->parts = kzalloc(nr_parts * sizeof(*info->parts),
- GFP_KERNEL);
- if (!info->parts)
- return -ENOMEM;
-
- for (pp = dp->child, i = 0; pp; pp = pp->sibling, i++) {
- const u32 *reg;
- int len;
-
- reg = of_get_property(pp, "reg", &len);
- if (!reg || (len != 2*sizeof(u32))) {
- dev_err(&dev->dev, "Invalid 'reg' on %s\n",
- dp->full_name);
- kfree(info->parts);
- info->parts = NULL;
- return -EINVAL;
- }
- info->parts[i].offset = reg[0];
- info->parts[i].size = reg[1];
-
- partname = of_get_property(pp, "label", &len);
- if (!partname)
- partname = of_get_property(pp, "name", &len);
- info->parts[i].name = (char *)partname;
-
- if (of_get_property(pp, "read-only", &len))
- info->parts[i].mask_flags = MTD_WRITEABLE;
- }
-
- return nr_parts;
-}
#else /* MTD_PARTITIONS */
#define OF_FLASH_PARTS(info) (0)
#define parse_partitions(info, dev) (0)
@@ -212,6 +154,10 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
static int __devinit of_flash_probe(struct of_device *dev,
const struct of_device_id *match)
{
+#ifdef CONFIG_MTD_PARTITIONS
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", NULL };
+#endif
struct device_node *dp = dev->node;
struct resource res;
struct of_flash *info;
@@ -274,13 +220,33 @@ static int __devinit of_flash_probe(struct of_device *dev,
}
info->mtd->owner = THIS_MODULE;
- err = parse_partitions(info, dev);
+#ifdef CONFIG_MTD_PARTITIONS
+ /* First look for RedBoot table or partitions on the command
+ * line, these take precedence over device tree information */
+ err = parse_mtd_partitions(info->mtd, part_probe_types,
+ &info->parts, 0);
if (err < 0)
- goto err_out;
+ return err;
+
+#ifdef CONFIG_MTD_OF_PARTS
+ if (err == 0) {
+ err = of_mtd_parse_partitions(&dev->dev, info->mtd,
+ dp, &info->parts);
+ if (err < 0)
+ return err;
+ }
+#endif
+
+ if (err == 0) {
+ err = parse_obsolete_partitions(dev, info, dp);
+ if (err < 0)
+ return err;
+ }
if (err > 0)
- add_mtd_partitions(info->mtd, OF_FLASH_PARTS(info), err);
+ add_mtd_partitions(info->mtd, info->parts, err);
else
+#endif
add_mtd_device(info->mtd);
return 0;
diff --git a/drivers/mtd/maps/pnc2000.c b/drivers/mtd/maps/pnc2000.c
deleted file mode 100644
index d7e16c2d5c44..000000000000
--- a/drivers/mtd/maps/pnc2000.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * pnc2000.c - mapper for Photron PNC-2000 board.
- *
- * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
- *
- * This code is GPL
- *
- * $Id: pnc2000.c,v 1.18 2005/11/07 11:14:28 gleixner Exp $
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-
-#define WINDOW_ADDR 0xbf000000
-#define WINDOW_SIZE 0x00400000
-
-/*
- * MAP DRIVER STUFF
- */
-
-
-static struct map_info pnc_map = {
- .name = "PNC-2000",
- .size = WINDOW_SIZE,
- .bankwidth = 4,
- .phys = 0xFFFFFFFF,
- .virt = (void __iomem *)WINDOW_ADDR,
-};
-
-
-/*
- * MTD 'PARTITIONING' STUFF
- */
-static struct mtd_partition pnc_partitions[3] = {
- {
- .name = "PNC-2000 boot firmware",
- .size = 0x20000,
- .offset = 0
- },
- {
- .name = "PNC-2000 kernel",
- .size = 0x1a0000,
- .offset = 0x20000
- },
- {
- .name = "PNC-2000 filesystem",
- .size = 0x240000,
- .offset = 0x1c0000
- }
-};
-
-/*
- * This is the master MTD device for which all the others are just
- * auto-relocating aliases.
- */
-static struct mtd_info *mymtd;
-
-static int __init init_pnc2000(void)
-{
- printk(KERN_NOTICE "Photron PNC-2000 flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
-
- simple_map_init(&pnc_map);
-
- mymtd = do_map_probe("cfi_probe", &pnc_map);
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
- return add_mtd_partitions(mymtd, pnc_partitions, 3);
- }
-
- return -ENXIO;
-}
-
-static void __exit cleanup_pnc2000(void)
-{
- if (mymtd) {
- del_mtd_partitions(mymtd);
- map_destroy(mymtd);
- }
-}
-
-module_init(init_pnc2000);
-module_exit(cleanup_pnc2000);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp>");
-MODULE_DESCRIPTION("MTD map driver for Photron PNC-2000 board");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index dcfb85840d1e..0fc5584324e3 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -79,7 +79,7 @@ scb2_fixup_mtd(struct mtd_info *mtd)
struct cfi_private *cfi = map->fldrv_priv;
/* barf if this doesn't look right */
- if (cfi->cfiq->InterfaceDesc != 1) {
+ if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) {
printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
cfi->cfiq->InterfaceDesc);
return -1;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 74d9d30edabd..839eed8430a2 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -248,9 +248,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
return -EBUSY;
}
- mutex_init(&new->lock);
list_add_tail(&new->list, &tr->devs);
added:
+ mutex_init(&new->lock);
if (!tr->writesect)
new->readonly = 1;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index a0cee86464ca..5d3ac512ce16 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -481,6 +481,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
struct mtd_oob_buf buf;
struct mtd_oob_ops ops;
+ uint32_t retlen;
if(!(file->f_mode & 2))
return -EPERM;
@@ -520,8 +521,11 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
buf.start &= ~(mtd->oobsize - 1);
ret = mtd->write_oob(mtd, buf.start, &ops);
- if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen,
- sizeof(uint32_t)))
+ if (ops.oobretlen > 0xFFFFFFFFU)
+ ret = -EOVERFLOW;
+ retlen = ops.oobretlen;
+ if (copy_to_user(&((struct mtd_oob_buf *)argp)->length,
+ &retlen, sizeof(buf.length)))
ret = -EFAULT;
kfree(ops.oobbuf);
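After this change MEMWRITEOOB reports the number of OOB bytes actually written back through the length field of the caller's struct mtd_oob_buf, rather than writing at a hard-coded offset into the user buffer, and returns -EOVERFLOW if the count cannot be represented in 32 bits. A hedged user-space sketch; the device path and OOB size are placeholders:

    #include <stdio.h>
    #include <string.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <mtd/mtd-user.h>

    int main(void)
    {
            unsigned char oob[16];
            struct mtd_oob_buf buf;
            int fd = open("/dev/mtd0", O_RDWR);

            if (fd < 0)
                    return 1;

            memset(oob, 0xff, sizeof(oob));
            buf.start  = 0;                 /* page whose OOB area is written */
            buf.length = sizeof(oob);
            buf.ptr    = oob;

            if (ioctl(fd, MEMWRITEOOB, &buf) == 0)
                    /* buf.length now holds the OOB byte count actually written */
                    printf("wrote %u OOB bytes\n", (unsigned int)buf.length);

            close(fd);
            return 0;
    }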
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 6c2645e28371..f7e7890e5bc6 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -61,7 +61,7 @@ int add_mtd_device(struct mtd_info *mtd)
/* Some chips always power up locked. Unlock them now */
if ((mtd->flags & MTD_WRITEABLE)
- && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) {
+ && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
if (mtd->unlock(mtd, 0, mtd->size))
printk(KERN_WARNING
"%s: unlock failed, "
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index f8af627f0b98..d3cf05012b46 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -28,19 +28,26 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#define OOPS_PAGE_SIZE 4096
-static struct mtdoops_context {
+struct mtdoops_context {
int mtd_index;
- struct work_struct work;
+ struct work_struct work_erase;
+ struct work_struct work_write;
struct mtd_info *mtd;
int oops_pages;
int nextpage;
int nextcount;
void *oops_buf;
+
+ /* writecount and disabling ready are spin lock protected */
+ spinlock_t writecount_lock;
int ready;
int writecount;
} oops_cxt;
@@ -62,10 +69,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
erase.mtd = mtd;
erase.callback = mtdoops_erase_callback;
erase.addr = offset;
- if (mtd->erasesize < OOPS_PAGE_SIZE)
- erase.len = OOPS_PAGE_SIZE;
- else
- erase.len = mtd->erasesize;
+ erase.len = mtd->erasesize;
erase.priv = (u_long)&wait_q;
set_current_state(TASK_INTERRUPTIBLE);
@@ -87,7 +91,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
return 0;
}
-static int mtdoops_inc_counter(struct mtdoops_context *cxt)
+static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
struct mtd_info *mtd = cxt->mtd;
size_t retlen;
@@ -103,25 +107,30 @@ static int mtdoops_inc_counter(struct mtdoops_context *cxt)
ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
&retlen, (u_char *) &count);
- if ((retlen != 4) || (ret < 0)) {
+ if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
retlen, ret);
- return 1;
+ schedule_work(&cxt->work_erase);
+ return;
}
/* See if we need to erase the next block */
- if (count != 0xffffffff)
- return 1;
+ if (count != 0xffffffff) {
+ schedule_work(&cxt->work_erase);
+ return;
+ }
printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
cxt->nextpage, cxt->nextcount);
cxt->ready = 1;
- return 0;
}
-static void mtdoops_prepare(struct mtdoops_context *cxt)
+/* Scheduled work - when we can't proceed without erasing a block */
+static void mtdoops_workfunc_erase(struct work_struct *work)
{
+ struct mtdoops_context *cxt =
+ container_of(work, struct mtdoops_context, work_erase);
struct mtd_info *mtd = cxt->mtd;
int i = 0, j, ret, mod;
@@ -136,8 +145,14 @@ static void mtdoops_prepare(struct mtdoops_context *cxt)
cxt->nextpage = 0;
}
- while (mtd->block_isbad &&
- mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) {
+ while (mtd->block_isbad) {
+ ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+ if (!ret)
+ break;
+ if (ret < 0) {
+ printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
+ return;
+ }
badblock:
printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
cxt->nextpage * OOPS_PAGE_SIZE);
@@ -154,34 +169,72 @@ badblock:
for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
- if (ret < 0) {
- if (mtd->block_markbad)
- mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
- goto badblock;
+ if (ret >= 0) {
+ printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
+ cxt->ready = 1;
+ return;
}
- printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
+ if (mtd->block_markbad && (ret == -EIO)) {
+ ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+ if (ret < 0) {
+ printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
+ return;
+ }
+ }
+ goto badblock;
+}
- cxt->ready = 1;
+static void mtdoops_write(struct mtdoops_context *cxt, int panic)
+{
+ struct mtd_info *mtd = cxt->mtd;
+ size_t retlen;
+ int ret;
+
+ if (cxt->writecount < OOPS_PAGE_SIZE)
+ memset(cxt->oops_buf + cxt->writecount, 0xff,
+ OOPS_PAGE_SIZE - cxt->writecount);
+
+ if (panic)
+ ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
+ OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+ else
+ ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
+ OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+
+ cxt->writecount = 0;
+
+ if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
+ printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
+ cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
+
+ mtdoops_inc_counter(cxt);
}
-static void mtdoops_workfunc(struct work_struct *work)
+
+static void mtdoops_workfunc_write(struct work_struct *work)
{
struct mtdoops_context *cxt =
- container_of(work, struct mtdoops_context, work);
+ container_of(work, struct mtdoops_context, work_write);
- mtdoops_prepare(cxt);
-}
+ mtdoops_write(cxt, 0);
+}
-static int find_next_position(struct mtdoops_context *cxt)
+static void find_next_position(struct mtdoops_context *cxt)
{
struct mtd_info *mtd = cxt->mtd;
- int page, maxpos = 0;
+ int ret, page, maxpos = 0;
u32 count, maxcount = 0xffffffff;
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
- mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
+ ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
+ if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
+ printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
+ ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
+ continue;
+ }
+
if (count == 0xffffffff)
continue;
if (maxcount == 0xffffffff) {
@@ -205,20 +258,19 @@ static int find_next_position(struct mtdoops_context *cxt)
cxt->ready = 1;
printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
cxt->nextpage, cxt->nextcount);
- return 0;
+ return;
}
cxt->nextpage = maxpos;
cxt->nextcount = maxcount;
- return mtdoops_inc_counter(cxt);
+ mtdoops_inc_counter(cxt);
}
static void mtdoops_notify_add(struct mtd_info *mtd)
{
struct mtdoops_context *cxt = &oops_cxt;
- int ret;
if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
return;
@@ -229,14 +281,18 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
return;
}
+ if (mtd->erasesize < OOPS_PAGE_SIZE) {
+ printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
+ mtd->index);
+ return;
+ }
+
cxt->mtd = mtd;
cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
- ret = find_next_position(cxt);
- if (ret == 1)
- mtdoops_prepare(cxt);
+ find_next_position(cxt);
- printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index);
+ printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}
static void mtdoops_notify_remove(struct mtd_info *mtd)
@@ -254,31 +310,28 @@ static void mtdoops_console_sync(void)
{
struct mtdoops_context *cxt = &oops_cxt;
struct mtd_info *mtd = cxt->mtd;
- size_t retlen;
- int ret;
+ unsigned long flags;
- if (!cxt->ready || !mtd)
+ if (!cxt->ready || !mtd || cxt->writecount == 0)
return;
- if (cxt->writecount == 0)
+ /*
+ * Once ready is 0 and we've held the lock no further writes to the
+ * buffer will happen
+ */
+ spin_lock_irqsave(&cxt->writecount_lock, flags);
+ if (!cxt->ready) {
+ spin_unlock_irqrestore(&cxt->writecount_lock, flags);
return;
-
- if (cxt->writecount < OOPS_PAGE_SIZE)
- memset(cxt->oops_buf + cxt->writecount, 0xff,
- OOPS_PAGE_SIZE - cxt->writecount);
-
- ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
- OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+ }
cxt->ready = 0;
- cxt->writecount = 0;
-
- if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
- printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
- cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
+ spin_unlock_irqrestore(&cxt->writecount_lock, flags);
- ret = mtdoops_inc_counter(cxt);
- if (ret == 1)
- schedule_work(&cxt->work);
+ if (mtd->panic_write && in_interrupt())
+ /* Interrupt context, we're going to panic so try and log */
+ mtdoops_write(cxt, 1);
+ else
+ schedule_work(&cxt->work_write);
}
static void
@@ -286,7 +339,7 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
{
struct mtdoops_context *cxt = co->data;
struct mtd_info *mtd = cxt->mtd;
- int i;
+ unsigned long flags;
if (!oops_in_progress) {
mtdoops_console_sync();
@@ -296,6 +349,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
if (!cxt->ready || !mtd)
return;
+ /* Locking on writecount ensures sequential writes to the buffer */
+ spin_lock_irqsave(&cxt->writecount_lock, flags);
+
+ /* Check ready status didn't change whilst waiting for the lock */
+ if (!cxt->ready)
+ return;
+
if (cxt->writecount == 0) {
u32 *stamp = cxt->oops_buf;
*stamp = cxt->nextcount;
@@ -305,10 +365,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
count = OOPS_PAGE_SIZE - cxt->writecount;
- for (i = 0; i < count; i++, s++)
- *((char *)(cxt->oops_buf) + cxt->writecount + i) = *s;
+ memcpy(cxt->oops_buf + cxt->writecount, s, count);
+ cxt->writecount += count;
- cxt->writecount = cxt->writecount + count;
+ spin_unlock_irqrestore(&cxt->writecount_lock, flags);
+
+ if (cxt->writecount == OOPS_PAGE_SIZE)
+ mtdoops_console_sync();
}
static int __init mtdoops_console_setup(struct console *co, char *options)
@@ -334,7 +397,6 @@ static struct console mtdoops_console = {
.write = mtdoops_console_write,
.setup = mtdoops_console_setup,
.unblank = mtdoops_console_sync,
- .flags = CON_PRINTBUFFER,
.index = -1,
.data = &oops_cxt,
};
@@ -347,11 +409,12 @@ static int __init mtdoops_console_init(void)
cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
if (!cxt->oops_buf) {
- printk(KERN_ERR "Failed to allocate oops buffer workspace\n");
+ printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
return -ENOMEM;
}
- INIT_WORK(&cxt->work, mtdoops_workfunc);
+ INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
+ INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
register_console(&mtdoops_console);
register_mtd_user(&mtdoops_notifier);
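mtdoops now calls mtd->panic_write directly when it is flushing from interrupt context and falls back to the scheduled write work item otherwise, so a driver only benefits if it exposes a write path that never sleeps. A rough sketch of the driver side; every my_* name is hypothetical:

    /* busy-wait variant of the normal write path: no sleeping, no interrupts */
    static int my_flash_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
                                    size_t *retlen, const u_char *buf)
    {
            return my_flash_write_polled(mtd, to, len, retlen, buf);
    }

    static void my_flash_setup(struct mtd_info *mtd)
    {
            mtd->write = my_flash_write;
            mtd->panic_write = my_flash_panic_write;        /* picked up by mtdoops */
    }

The mtdpart.c hunk below forwards panic_write to each partition only when the master device provides one.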
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 6174a97d7902..c66902df3171 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -151,6 +151,20 @@ static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
len, retlen, buf);
}
+static int part_panic_write (struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct mtd_part *part = PART(mtd);
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+ if (to >= mtd->size)
+ len = 0;
+ else if (to + len > mtd->size)
+ len = mtd->size - to;
+ return part->master->panic_write (part->master, to + part->offset,
+ len, retlen, buf);
+}
+
static int part_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
@@ -352,6 +366,9 @@ int add_mtd_partitions(struct mtd_info *master,
slave->mtd.read = part_read;
slave->mtd.write = part_write;
+ if (master->panic_write)
+ slave->mtd.panic_write = part_panic_write;
+
if(master->point && master->unpoint){
slave->mtd.point = part_point;
slave->mtd.unpoint = part_unpoint;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 246d4512f64b..4a3c6759492b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -93,7 +93,7 @@ config MTD_NAND_AU1550
config MTD_NAND_BF5XX
tristate "Blackfin on-chip NAND Flash Controller driver"
- depends on BF54x && MTD_NAND
+ depends on (BF54x || BF52x) && MTD_NAND
help
This enables the Blackfin on-chip NAND flash controller
@@ -283,6 +283,12 @@ config MTD_NAND_CM_X270
tristate "Support for NAND Flash on CM-X270 modules"
depends on MTD_NAND && MACH_ARMCORE
+config MTD_NAND_PASEMI
+ tristate "NAND support for PA Semi PWRficient"
+ depends on MTD_NAND && PPC_PASEMI
+ help
+ Enables support for NAND Flash interface on PA Semi PWRficient
+ based boards
config MTD_NAND_NANDSIM
tristate "Support for NAND Flash Simulator"
@@ -306,4 +312,22 @@ config MTD_ALAUDA
These two (and possibly other) Alauda-based cardreaders for
SmartMedia and xD allow raw flash access.
+config MTD_NAND_ORION
+ tristate "NAND Flash support for Marvell Orion SoC"
+ depends on ARCH_ORION && MTD_NAND
+ help
+ This enables the NAND flash controller on Orion machines.
+
+ No board specific support is done by this driver, each board
+ must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_FSL_ELBC
+ tristate "NAND support for Freescale eLBC controllers"
+ depends on MTD_NAND && PPC_OF
+ help
+ Various Freescale chips, including the 8313, include a NAND Flash
+ Controller Module with built-in hardware ECC capabilities.
+ Enabling this option will enable you to use this to control
+ external NAND devices.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 3ad6c0165da3..80d575eeee96 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -29,5 +29,8 @@ obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_ALAUDA) += alauda.o
+obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
+obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c
index b2a5672df6e0..c9fb2acf4056 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/at91_nand.c
@@ -156,14 +156,14 @@ static int __init at91_nand_probe(struct platform_device *pdev)
}
#ifdef CONFIG_MTD_PARTITIONS
- if (host->board->partition_info)
- partitions = host->board->partition_info(mtd->size, &num_partitions);
#ifdef CONFIG_MTD_CMDLINE_PARTS
- else {
- mtd->name = "at91_nand";
- num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
- }
+ mtd->name = "at91_nand";
+ num_partitions = parse_mtd_partitions(mtd, part_probes,
+ &partitions, 0);
#endif
+ if (num_partitions <= 0 && host->board->partition_info)
+ partitions = host->board->partition_info(mtd->size,
+ &num_partitions);
if ((!partitions) || (num_partitions == 0)) {
printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index a52f3a737c39..747042ab094a 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -74,7 +74,22 @@ static int hardware_ecc = 1;
static int hardware_ecc;
#endif
-static unsigned short bfin_nfc_pin_req[] = {P_NAND_CE, P_NAND_RB, 0};
+static unsigned short bfin_nfc_pin_req[] =
+ {P_NAND_CE,
+ P_NAND_RB,
+ P_NAND_D0,
+ P_NAND_D1,
+ P_NAND_D2,
+ P_NAND_D3,
+ P_NAND_D4,
+ P_NAND_D5,
+ P_NAND_D6,
+ P_NAND_D7,
+ P_NAND_WE,
+ P_NAND_RE,
+ P_NAND_CLE,
+ P_NAND_ALE,
+ 0};
/*
* Data structures for bf5xx nand flash controller driver
@@ -278,7 +293,6 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
u16 ecc0, ecc1;
u32 code[2];
u8 *p;
- int bytes = 3, i;
/* first 4 bytes ECC code for 256 page size */
ecc0 = bfin_read_NFC_ECC0();
@@ -288,19 +302,24 @@ static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]);
+ /* first 3 bytes in ecc_code for 256 page size */
+ p = (u8 *) code;
+ memcpy(ecc_code, p, 3);
+
/* second 4 bytes ECC code for 512 page size */
if (page_size == 512) {
ecc0 = bfin_read_NFC_ECC2();
ecc1 = bfin_read_NFC_ECC3();
code[1] = (ecc0 & 0x3FF) | ((ecc1 & 0x3FF) << 11);
- bytes = 6;
+
+ /* second 3 bytes in ecc_code for second 256
+ * bytes of 512 page size
+ */
+ p = (u8 *) (code + 1);
+ memcpy((ecc_code + 3), p, 3);
dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]);
}
- p = (u8 *)code;
- for (i = 0; i < bytes; i++)
- ecc_code[i] = p[i];
-
return 0;
}
@@ -507,12 +526,13 @@ static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
init_completion(&info->dma_completion);
+#ifdef CONFIG_BF54x
/* Setup DMAC1 channel mux for NFC which shared with SDH */
val = bfin_read_DMAC1_PERIMUX();
val &= 0xFFFE;
bfin_write_DMAC1_PERIMUX(val);
SSYNC();
-
+#endif
/* Request NFC DMA channel */
ret = request_dma(CH_NFC, "BF5XX NFC driver");
if (ret < 0) {
@@ -744,9 +764,6 @@ static int bf5xx_nand_resume(struct platform_device *dev)
{
struct bf5xx_nand_info *info = platform_get_drvdata(dev);
- if (info)
- bf5xx_nand_hw_init(info);
-
return 0;
}
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 1e811715211a..da6ceaa80ba1 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -11,6 +11,7 @@
#undef DEBUG
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
#include <linux/rslib.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -52,6 +53,7 @@
struct cafe_priv {
struct nand_chip nand;
+ struct mtd_partition *parts;
struct pci_dev *pdev;
void __iomem *mmio;
struct rs_control *rs;
@@ -84,6 +86,10 @@ static unsigned int numtimings;
static int timing[3];
module_param_array(timing, int, &numtimings, 0644);
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "RedBoot", NULL };
+#endif
+
/* Hrm. Why isn't this already conditional on something in the struct device? */
#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
@@ -620,7 +626,9 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
{
struct mtd_info *mtd;
struct cafe_priv *cafe;
+ struct mtd_partition *parts;
uint32_t ctrl;
+ int nr_parts;
int err = 0;
/* Very old versions shared the same PCI ident for all three
@@ -787,7 +795,18 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
goto out_irq;
pci_set_drvdata(pdev, mtd);
+
+ /* We register the whole device first, separate from the partitions */
add_mtd_device(mtd);
+
+#ifdef CONFIG_MTD_PARTITIONS
+ nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
+ if (nr_parts > 0) {
+ cafe->parts = parts;
+ dev_info(&cafe->pdev->dev, "%d RedBoot partitions found\n", nr_parts);
+ add_mtd_partitions(mtd, parts, nr_parts);
+ }
+#endif
goto out;
out_irq:
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
new file mode 100644
index 000000000000..b025dfe0b274
--- /dev/null
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -0,0 +1,1244 @@
+/* Freescale Enhanced Local Bus Controller NAND driver
+ *
+ * Copyright (c) 2006-2007 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ * Scott Wood <scottwood@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+
+
+#define MAX_BANKS 8
+#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
+#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */
+
+struct elbc_bank {
+ __be32 br; /**< Base Register */
+#define BR_BA 0xFFFF8000
+#define BR_BA_SHIFT 15
+#define BR_PS 0x00001800
+#define BR_PS_SHIFT 11
+#define BR_PS_8 0x00000800 /* Port Size 8 bit */
+#define BR_PS_16 0x00001000 /* Port Size 16 bit */
+#define BR_PS_32 0x00001800 /* Port Size 32 bit */
+#define BR_DECC 0x00000600
+#define BR_DECC_SHIFT 9
+#define BR_DECC_OFF 0x00000000 /* HW ECC checking and generation off */
+#define BR_DECC_CHK 0x00000200 /* HW ECC checking on, generation off */
+#define BR_DECC_CHK_GEN 0x00000400 /* HW ECC checking and generation on */
+#define BR_WP 0x00000100
+#define BR_WP_SHIFT 8
+#define BR_MSEL 0x000000E0
+#define BR_MSEL_SHIFT 5
+#define BR_MS_GPCM 0x00000000 /* GPCM */
+#define BR_MS_FCM 0x00000020 /* FCM */
+#define BR_MS_SDRAM 0x00000060 /* SDRAM */
+#define BR_MS_UPMA 0x00000080 /* UPMA */
+#define BR_MS_UPMB 0x000000A0 /* UPMB */
+#define BR_MS_UPMC 0x000000C0 /* UPMC */
+#define BR_V 0x00000001
+#define BR_V_SHIFT 0
+#define BR_RES ~(BR_BA|BR_PS|BR_DECC|BR_WP|BR_MSEL|BR_V)
+
+ __be32 or; /**< Base Register */
+#define OR0 0x5004
+#define OR1 0x500C
+#define OR2 0x5014
+#define OR3 0x501C
+#define OR4 0x5024
+#define OR5 0x502C
+#define OR6 0x5034
+#define OR7 0x503C
+
+#define OR_FCM_AM 0xFFFF8000
+#define OR_FCM_AM_SHIFT 15
+#define OR_FCM_BCTLD 0x00001000
+#define OR_FCM_BCTLD_SHIFT 12
+#define OR_FCM_PGS 0x00000400
+#define OR_FCM_PGS_SHIFT 10
+#define OR_FCM_CSCT 0x00000200
+#define OR_FCM_CSCT_SHIFT 9
+#define OR_FCM_CST 0x00000100
+#define OR_FCM_CST_SHIFT 8
+#define OR_FCM_CHT 0x00000080
+#define OR_FCM_CHT_SHIFT 7
+#define OR_FCM_SCY 0x00000070
+#define OR_FCM_SCY_SHIFT 4
+#define OR_FCM_SCY_1 0x00000010
+#define OR_FCM_SCY_2 0x00000020
+#define OR_FCM_SCY_3 0x00000030
+#define OR_FCM_SCY_4 0x00000040
+#define OR_FCM_SCY_5 0x00000050
+#define OR_FCM_SCY_6 0x00000060
+#define OR_FCM_SCY_7 0x00000070
+#define OR_FCM_RST 0x00000008
+#define OR_FCM_RST_SHIFT 3
+#define OR_FCM_TRLX 0x00000004
+#define OR_FCM_TRLX_SHIFT 2
+#define OR_FCM_EHTR 0x00000002
+#define OR_FCM_EHTR_SHIFT 1
+};
+
+struct elbc_regs {
+ struct elbc_bank bank[8];
+ u8 res0[0x28];
+ __be32 mar; /**< UPM Address Register */
+ u8 res1[0x4];
+ __be32 mamr; /**< UPMA Mode Register */
+ __be32 mbmr; /**< UPMB Mode Register */
+ __be32 mcmr; /**< UPMC Mode Register */
+ u8 res2[0x8];
+ __be32 mrtpr; /**< Memory Refresh Timer Prescaler Register */
+ __be32 mdr; /**< UPM Data Register */
+ u8 res3[0x4];
+ __be32 lsor; /**< Special Operation Initiation Register */
+ __be32 lsdmr; /**< SDRAM Mode Register */
+ u8 res4[0x8];
+ __be32 lurt; /**< UPM Refresh Timer */
+ __be32 lsrt; /**< SDRAM Refresh Timer */
+ u8 res5[0x8];
+ __be32 ltesr; /**< Transfer Error Status Register */
+#define LTESR_BM 0x80000000
+#define LTESR_FCT 0x40000000
+#define LTESR_PAR 0x20000000
+#define LTESR_WP 0x04000000
+#define LTESR_ATMW 0x00800000
+#define LTESR_ATMR 0x00400000
+#define LTESR_CS 0x00080000
+#define LTESR_CC 0x00000001
+#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC)
+ __be32 ltedr; /**< Transfer Error Disable Register */
+ __be32 lteir; /**< Transfer Error Interrupt Register */
+ __be32 lteatr; /**< Transfer Error Attributes Register */
+ __be32 ltear; /**< Transfer Error Address Register */
+ u8 res6[0xC];
+ __be32 lbcr; /**< Configuration Register */
+#define LBCR_LDIS 0x80000000
+#define LBCR_LDIS_SHIFT 31
+#define LBCR_BCTLC 0x00C00000
+#define LBCR_BCTLC_SHIFT 22
+#define LBCR_AHD 0x00200000
+#define LBCR_LPBSE 0x00020000
+#define LBCR_LPBSE_SHIFT 17
+#define LBCR_EPAR 0x00010000
+#define LBCR_EPAR_SHIFT 16
+#define LBCR_BMT 0x0000FF00
+#define LBCR_BMT_SHIFT 8
+#define LBCR_INIT 0x00040000
+ __be32 lcrr; /**< Clock Ratio Register */
+#define LCRR_DBYP 0x80000000
+#define LCRR_DBYP_SHIFT 31
+#define LCRR_BUFCMDC 0x30000000
+#define LCRR_BUFCMDC_SHIFT 28
+#define LCRR_ECL 0x03000000
+#define LCRR_ECL_SHIFT 24
+#define LCRR_EADC 0x00030000
+#define LCRR_EADC_SHIFT 16
+#define LCRR_CLKDIV 0x0000000F
+#define LCRR_CLKDIV_SHIFT 0
+ u8 res7[0x8];
+ __be32 fmr; /**< Flash Mode Register */
+#define FMR_CWTO 0x0000F000
+#define FMR_CWTO_SHIFT 12
+#define FMR_BOOT 0x00000800
+#define FMR_ECCM 0x00000100
+#define FMR_AL 0x00000030
+#define FMR_AL_SHIFT 4
+#define FMR_OP 0x00000003
+#define FMR_OP_SHIFT 0
+ __be32 fir; /**< Flash Instruction Register */
+#define FIR_OP0 0xF0000000
+#define FIR_OP0_SHIFT 28
+#define FIR_OP1 0x0F000000
+#define FIR_OP1_SHIFT 24
+#define FIR_OP2 0x00F00000
+#define FIR_OP2_SHIFT 20
+#define FIR_OP3 0x000F0000
+#define FIR_OP3_SHIFT 16
+#define FIR_OP4 0x0000F000
+#define FIR_OP4_SHIFT 12
+#define FIR_OP5 0x00000F00
+#define FIR_OP5_SHIFT 8
+#define FIR_OP6 0x000000F0
+#define FIR_OP6_SHIFT 4
+#define FIR_OP7 0x0000000F
+#define FIR_OP7_SHIFT 0
+#define FIR_OP_NOP 0x0 /* No operation and end of sequence */
+#define FIR_OP_CA 0x1 /* Issue current column address */
+#define FIR_OP_PA 0x2 /* Issue current block+page address */
+#define FIR_OP_UA 0x3 /* Issue user defined address */
+#define FIR_OP_CM0 0x4 /* Issue command from FCR[CMD0] */
+#define FIR_OP_CM1 0x5 /* Issue command from FCR[CMD1] */
+#define FIR_OP_CM2 0x6 /* Issue command from FCR[CMD2] */
+#define FIR_OP_CM3 0x7 /* Issue command from FCR[CMD3] */
+#define FIR_OP_WB 0x8 /* Write FBCR bytes from FCM buffer */
+#define FIR_OP_WS 0x9 /* Write 1 or 2 bytes from MDR[AS] */
+#define FIR_OP_RB 0xA /* Read FBCR bytes to FCM buffer */
+#define FIR_OP_RS 0xB /* Read 1 or 2 bytes to MDR[AS] */
+#define FIR_OP_CW0 0xC /* Wait then issue FCR[CMD0] */
+#define FIR_OP_CW1 0xD /* Wait then issue FCR[CMD1] */
+#define FIR_OP_RBW 0xE /* Wait then read FBCR bytes */
+#define FIR_OP_RSW 0xE /* Wait then read 1 or 2 bytes */
+ __be32 fcr; /**< Flash Command Register */
+#define FCR_CMD0 0xFF000000
+#define FCR_CMD0_SHIFT 24
+#define FCR_CMD1 0x00FF0000
+#define FCR_CMD1_SHIFT 16
+#define FCR_CMD2 0x0000FF00
+#define FCR_CMD2_SHIFT 8
+#define FCR_CMD3 0x000000FF
+#define FCR_CMD3_SHIFT 0
+ __be32 fbar; /**< Flash Block Address Register */
+#define FBAR_BLK 0x00FFFFFF
+ __be32 fpar; /**< Flash Page Address Register */
+#define FPAR_SP_PI 0x00007C00
+#define FPAR_SP_PI_SHIFT 10
+#define FPAR_SP_MS 0x00000200
+#define FPAR_SP_CI 0x000001FF
+#define FPAR_SP_CI_SHIFT 0
+#define FPAR_LP_PI 0x0003F000
+#define FPAR_LP_PI_SHIFT 12
+#define FPAR_LP_MS 0x00000800
+#define FPAR_LP_CI 0x000007FF
+#define FPAR_LP_CI_SHIFT 0
+ __be32 fbcr; /**< Flash Byte Count Register */
+#define FBCR_BC 0x00000FFF
+ u8 res11[0x8];
+ u8 res8[0xF00];
+};
+
+struct fsl_elbc_ctrl;
+
+/* mtd information per set */
+
+struct fsl_elbc_mtd {
+ struct mtd_info mtd;
+ struct nand_chip chip;
+ struct fsl_elbc_ctrl *ctrl;
+
+ struct device *dev;
+ int bank; /* Chip select bank number */
+ u8 __iomem *vbase; /* Chip select base virtual address */
+ int page_size; /* NAND page size (0=512, 1=2048) */
+ unsigned int fmr; /* FCM Flash Mode Register value */
+};
+
+/* overview of the fsl elbc controller */
+
+struct fsl_elbc_ctrl {
+ struct nand_hw_control controller;
+ struct fsl_elbc_mtd *chips[MAX_BANKS];
+
+ /* device info */
+ struct device *dev;
+ struct elbc_regs __iomem *regs;
+ int irq;
+ wait_queue_head_t irq_wait;
+ unsigned int irq_status; /* status read from LTESR by irq handler */
+ u8 __iomem *addr; /* Address of assigned FCM buffer */
+ unsigned int page; /* Last page written to / read from */
+ unsigned int read_bytes; /* Number of bytes read during command */
+ unsigned int column; /* Saved column from SEQIN */
+ unsigned int index; /* Pointer to next byte to 'read' */
+ unsigned int status; /* status read from LTESR after last op */
+ unsigned int mdr; /* UPM/FCM Data Register value */
+ unsigned int use_mdr; /* Non zero if the MDR is to be set */
+ unsigned int oob; /* Non zero if operating on OOB data */
+ char *oob_poi; /* Place to write ECC after read back */
+};
+
+/* These map to the positions used by the FCM hardware ECC generator */
+
+/* Small Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = {
+ .eccbytes = 3,
+ .eccpos = {6, 7, 8},
+ .oobfree = { {0, 5}, {9, 7} },
+ .oobavail = 12,
+};
+
+/* Small Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = {
+ .eccbytes = 3,
+ .eccpos = {8, 9, 10},
+ .oobfree = { {0, 5}, {6, 2}, {11, 5} },
+ .oobavail = 12,
+};
+
+/* Large Page FLASH with FMR[ECCM] = 0 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = {
+ .eccbytes = 12,
+ .eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56},
+ .oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} },
+ .oobavail = 48,
+};
+
+/* Large Page FLASH with FMR[ECCM] = 1 */
+static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = {
+ .eccbytes = 12,
+ .eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58},
+ .oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} },
+ .oobavail = 48,
+};
+
+/*=================================*/
+
+/*
+ * Set up the FCM hardware block and page address fields, and the fcm
+ * structure addr field to point to the correct FCM buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct elbc_regs __iomem *lbc = ctrl->regs;
+ int buf_num;
+
+ ctrl->page = page_addr;
+
+ out_be32(&lbc->fbar,
+ page_addr >> (chip->phys_erase_shift - chip->page_shift));
+
+ if (priv->page_size) {
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
+ (oob ? FPAR_LP_MS : 0) | column);
+ buf_num = (page_addr & 1) << 2;
+ } else {
+ out_be32(&lbc->fpar,
+ ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
+ (oob ? FPAR_SP_MS : 0) | column);
+ buf_num = page_addr & 7;
+ }
+
+ ctrl->addr = priv->vbase + buf_num * 1024;
+ ctrl->index = column;
+
+ /* for OOB data point to the second half of the buffer */
+ if (oob)
+ ctrl->index += priv->page_size ? 2048 : 512;
+
+ dev_vdbg(ctrl->dev, "set_addr: bank=%d, ctrl->addr=0x%p (0x%p), "
+ "index %x, pes %d ps %d\n",
+ buf_num, ctrl->addr, priv->vbase, ctrl->index,
+ chip->phys_erase_shift, chip->page_shift);
+}
+
+/*
+ * execute FCM command and wait for it to complete
+ */
+static int fsl_elbc_run_command(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct elbc_regs __iomem *lbc = ctrl->regs;
+
+ /* Setup the FMR[OP] to execute without write protection */
+ out_be32(&lbc->fmr, priv->fmr | 3);
+ if (ctrl->use_mdr)
+ out_be32(&lbc->mdr, ctrl->mdr);
+
+ dev_vdbg(ctrl->dev,
+ "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
+ in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
+ dev_vdbg(ctrl->dev,
+ "fsl_elbc_run_command: fbar=%08x fpar=%08x "
+ "fbcr=%08x bank=%d\n",
+ in_be32(&lbc->fbar), in_be32(&lbc->fpar),
+ in_be32(&lbc->fbcr), priv->bank);
+
+ /* execute special operation */
+ out_be32(&lbc->lsor, priv->bank);
+
+ /* wait for FCM complete flag or timeout */
+ ctrl->irq_status = 0;
+ wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
+ FCM_TIMEOUT_MSECS * HZ/1000);
+ ctrl->status = ctrl->irq_status;
+
+ /* store mdr value in case it was needed */
+ if (ctrl->use_mdr)
+ ctrl->mdr = in_be32(&lbc->mdr);
+
+ ctrl->use_mdr = 0;
+
+ dev_vdbg(ctrl->dev,
+ "fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n",
+ ctrl->status, ctrl->mdr, in_be32(&lbc->fmr));
+
+ /* returns 0 on success otherwise non-zero) */
+ return ctrl->status == LTESR_CC ? 0 : -EIO;
+}
+
+static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
+{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct elbc_regs __iomem *lbc = ctrl->regs;
+
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+ (FIR_OP_RBW << FIR_OP4_SHIFT));
+
+ out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_RBW << FIR_OP3_SHIFT));
+
+ if (oob)
+ out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT);
+ else
+ out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
+ }
+}
+
+/* cmdfunc send commands to the FCM */
+static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct elbc_regs __iomem *lbc = ctrl->regs;
+
+ ctrl->use_mdr = 0;
+
+ /* clear the read buffer */
+ ctrl->read_bytes = 0;
+ if (command != NAND_CMD_PAGEPROG)
+ ctrl->index = 0;
+
+ switch (command) {
+ /* READ0 and READ1 read the entire buffer to use hardware ECC. */
+ case NAND_CMD_READ1:
+ column += 256;
+
+ /* fall-through */
+ case NAND_CMD_READ0:
+ dev_dbg(ctrl->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+
+ out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
+ set_addr(mtd, 0, page_addr, 0);
+
+ ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+ ctrl->index += column;
+
+ fsl_elbc_do_read(chip, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* READOOB reads only the OOB because no ECC is performed. */
+ case NAND_CMD_READOOB:
+ dev_vdbg(ctrl->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
+ " 0x%x, column: 0x%x.\n", page_addr, column);
+
+ out_be32(&lbc->fbcr, mtd->oobsize - column);
+ set_addr(mtd, column, page_addr, 1);
+
+ ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+ fsl_elbc_do_read(chip, 1);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* READID must read all 5 possible bytes while CEB is active */
+ case NAND_CMD_READID:
+ dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
+
+ out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_UA << FIR_OP1_SHIFT) |
+ (FIR_OP_RBW << FIR_OP2_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
+ /* 5 bytes for manuf, device and exts */
+ out_be32(&lbc->fbcr, 5);
+ ctrl->read_bytes = 5;
+ ctrl->use_mdr = 1;
+ ctrl->mdr = 0;
+
+ set_addr(mtd, 0, 0, 0);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* ERASE1 stores the block and page address */
+ case NAND_CMD_ERASE1:
+ dev_vdbg(ctrl->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
+ "page_addr: 0x%x.\n", page_addr);
+ set_addr(mtd, 0, page_addr, 0);
+ return;
+
+ /* ERASE2 uses the block and page address from ERASE1 */
+ case NAND_CMD_ERASE2:
+ dev_vdbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
+
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_PA << FIR_OP1_SHIFT) |
+ (FIR_OP_CM1 << FIR_OP2_SHIFT));
+
+ out_be32(&lbc->fcr,
+ (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT));
+
+ out_be32(&lbc->fbcr, 0);
+ ctrl->read_bytes = 0;
+
+ fsl_elbc_run_command(mtd);
+ return;
+
+ /* SEQIN sets up the addr buffer and all registers except the length */
+ case NAND_CMD_SEQIN: {
+ __be32 fcr;
+ dev_vdbg(ctrl->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
+ "page_addr: 0x%x, column: 0x%x.\n",
+ page_addr, column);
+
+ ctrl->column = column;
+ ctrl->oob = 0;
+
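+ /* FCR holds up to three command opcodes for the sequence: CMD1 is
+ * the final PAGEPROG cycle, CMD2 the SEQIN cycle, and CMD0 is filled
+ * in below with the read command that points at the target region.
+ */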
+ fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) |
+ (NAND_CMD_SEQIN << FCR_CMD2_SHIFT);
+
+ if (priv->page_size) {
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CA << FIR_OP1_SHIFT) |
+ (FIR_OP_PA << FIR_OP2_SHIFT) |
+ (FIR_OP_WB << FIR_OP3_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP4_SHIFT));
+
+ fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
+ } else {
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_CM2 << FIR_OP1_SHIFT) |
+ (FIR_OP_CA << FIR_OP2_SHIFT) |
+ (FIR_OP_PA << FIR_OP3_SHIFT) |
+ (FIR_OP_WB << FIR_OP4_SHIFT) |
+ (FIR_OP_CW1 << FIR_OP5_SHIFT));
+
+ if (column >= mtd->writesize) {
+ /* OOB area --> READOOB */
+ column -= mtd->writesize;
+ fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
+ ctrl->oob = 1;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
+ } else {
+ /* Second 256 bytes --> READ1 */
+ fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT;
+ }
+ }
+
+ out_be32(&lbc->fcr, fcr);
+ set_addr(mtd, column, page_addr, ctrl->oob);
+ return;
+ }
+
+ /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+ case NAND_CMD_PAGEPROG: {
+ int full_page;
+ dev_vdbg(ctrl->dev,
+ "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
+ "writing %d bytes.\n", ctrl->index);
+
+ /* if the write did not start at 0 or is not a full page
+ * then set the exact length, otherwise use a full page
+ * write so the HW generates the ECC.
+ */
+ if (ctrl->oob || ctrl->column != 0 ||
+ ctrl->index != mtd->writesize + mtd->oobsize) {
+ out_be32(&lbc->fbcr, ctrl->index);
+ full_page = 0;
+ } else {
+ out_be32(&lbc->fbcr, 0);
+ full_page = 1;
+ }
+
+ fsl_elbc_run_command(mtd);
+
+ /* Read back the page in order to fill in the ECC for the
+ * caller. Is this really needed?
+ */
+ if (full_page && ctrl->oob_poi) {
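+ /* Transfer only the 3 hardware-generated ECC bytes, which the
+ * driver expects at OOB column 6, and copy them into the
+ * caller's OOB buffer.
+ */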
+ out_be32(&lbc->fbcr, 3);
+ set_addr(mtd, 6, page_addr, 1);
+
+ ctrl->read_bytes = mtd->writesize + 9;
+
+ fsl_elbc_do_read(chip, 1);
+ fsl_elbc_run_command(mtd);
+
+ memcpy_fromio(ctrl->oob_poi + 6,
+ &ctrl->addr[ctrl->index], 3);
+ ctrl->index += 3;
+ }
+
+ ctrl->oob_poi = NULL;
+ return;
+ }
+
+ /* CMD_STATUS must read the status byte while CEB is active */
+ /* Note - it does not wait for the ready line */
+ case NAND_CMD_STATUS:
+ out_be32(&lbc->fir,
+ (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+ (FIR_OP_RBW << FIR_OP1_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+ out_be32(&lbc->fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ctrl->read_bytes = 1;
+
+ fsl_elbc_run_command(mtd);
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ setbits8(ctrl->addr, NAND_STATUS_WP);
+ return;
+
+ /* RESET without waiting for the ready line */
+ case NAND_CMD_RESET:
+ dev_dbg(ctrl->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
+ out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
+ out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
+ fsl_elbc_run_command(mtd);
+ return;
+
+ default:
+ dev_err(ctrl->dev,
+ "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
+ command);
+ }
+}
+
+static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip)
+{
+ /* The hardware does not seem to support multiple
+ * chips per bank.
+ */
+}
+
+/*
+ * Write buf to the FCM Controller Data Buffer
+ */
+static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+ if (len < 0) {
+ dev_err(ctrl->dev, "write_buf of %d bytes", len);
+ ctrl->status = 0;
+ return;
+ }
+
+ if ((unsigned int)len > bufsize - ctrl->index) {
+ dev_err(ctrl->dev,
+ "write_buf beyond end of buffer "
+ "(%d requested, %u available)\n",
+ len, bufsize - ctrl->index);
+ len = bufsize - ctrl->index;
+ }
+
+ memcpy_toio(&ctrl->addr[ctrl->index], buf, len);
+ ctrl->index += len;
+}
+
+/*
+ * Read a byte from the FCM hardware buffer if it has any data left;
+ * otherwise report an error and return ERR_BYTE.
+ */
+static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+
+ /* If there are still bytes in the FCM, then use the next byte. */
+ if (ctrl->index < ctrl->read_bytes)
+ return in_8(&ctrl->addr[ctrl->index++]);
+
+ dev_err(ctrl->dev, "read_byte beyond end of buffer\n");
+ return ERR_BYTE;
+}
+
+/*
+ * Read from the FCM Controller Data Buffer
+ */
+static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ int avail;
+
+ if (len < 0)
+ return;
+
+ avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index);
+ memcpy_fromio(buf, &ctrl->addr[ctrl->index], avail);
+ ctrl->index += avail;
+
+ if (len > avail)
+ dev_err(ctrl->dev,
+ "read_buf beyond end of buffer "
+ "(%d requested, %d available)\n",
+ len, avail);
+}
+
+/*
+ * Verify buffer against the FCM Controller Data Buffer
+ */
+static int fsl_elbc_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ int i;
+
+ if (len < 0) {
+ dev_err(ctrl->dev, "write_buf of %d bytes", len);
+ return -EINVAL;
+ }
+
+ if ((unsigned int)len > ctrl->read_bytes - ctrl->index) {
+ dev_err(ctrl->dev,
+ "verify_buf beyond end of buffer "
+ "(%d requested, %u available)\n",
+ len, ctrl->read_bytes - ctrl->index);
+
+ ctrl->index = ctrl->read_bytes;
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i++)
+ if (in_8(&ctrl->addr[ctrl->index + i]) != buf[i])
+ break;
+
+ ctrl->index += len;
+ return i == len && ctrl->status == LTESR_CC ? 0 : -EIO;
+}
+
+/* This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct elbc_regs __iomem *lbc = ctrl->regs;
+
+ if (ctrl->status != LTESR_CC)
+ return NAND_STATUS_FAIL;
+
+ /* Use READ_STATUS command, but wait for the device to be ready */
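+ /* Unlike the CM0 opcode used for the plain STATUS command above,
+ * CW0 also waits for the ready line before the status byte is read.
+ */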
+ ctrl->use_mdr = 0;
+ out_be32(&lbc->fir,
+ (FIR_OP_CW0 << FIR_OP0_SHIFT) |
+ (FIR_OP_RBW << FIR_OP1_SHIFT));
+ out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+ out_be32(&lbc->fbcr, 1);
+ set_addr(mtd, 0, 0, 0);
+ ctrl->read_bytes = 1;
+
+ fsl_elbc_run_command(mtd);
+
+ if (ctrl->status != LTESR_CC)
+ return NAND_STATUS_FAIL;
+
+ /* The chip always seems to report that it is
+ * write-protected, even when it is not.
+ */
+ setbits8(ctrl->addr, NAND_STATUS_WP);
+ return fsl_elbc_read_byte(mtd);
+}
+
+static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct elbc_regs __iomem *lbc = ctrl->regs;
+ unsigned int al;
+
+ /* calculate FMR Address Length field */
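+ /* AL counts page-address bytes beyond the first two: pagemask bits
+ * above bit 15 add one byte, bits above bit 23 add another.
+ */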
+ al = 0;
+ if (chip->pagemask & 0xffff0000)
+ al++;
+ if (chip->pagemask & 0xff000000)
+ al++;
+
+ /* add to ECCM mode set in fsl_elbc_init */
+ priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */
+ (al << FMR_AL_SHIFT);
+
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->numchips = %d\n",
+ chip->numchips);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chipsize = %ld\n",
+ chip->chipsize);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
+ chip->pagemask);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_delay = %d\n",
+ chip->chip_delay);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
+ chip->badblockpos);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
+ chip->chip_shift);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->page_shift = %d\n",
+ chip->page_shift);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
+ chip->phys_erase_shift);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
+ chip->ecclayout);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
+ chip->ecc.mode);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
+ chip->ecc.steps);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
+ chip->ecc.bytes);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
+ chip->ecc.total);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: nand->ecc.layout = %p\n",
+ chip->ecc.layout);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->size = %d\n", mtd->size);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
+ mtd->erasesize);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->writesize = %d\n",
+ mtd->writesize);
+ dev_dbg(ctrl->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
+ mtd->oobsize);
+
+ /* adjust Option Register and ECC to match Flash page size */
+ if (mtd->writesize == 512) {
+ priv->page_size = 0;
+ clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ } else if (mtd->writesize == 2048) {
+ priv->page_size = 1;
+ setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+ /* adjust ecc setup if needed */
+ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+ BR_DECC_CHK_GEN) {
+ chip->ecc.size = 512;
+ chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
+ &fsl_elbc_oob_lp_eccm1 :
+ &fsl_elbc_oob_lp_eccm0;
+ mtd->ecclayout = chip->ecc.layout;
+ mtd->oobavail = chip->ecc.layout->oobavail;
+ }
+ } else {
+ dev_err(ctrl->dev,
+ "fsl_elbc_init: page size %d is not supported\n",
+ mtd->writesize);
+ return -1;
+ }
+
+ /* The default u-boot configuration on MPC8313ERDB causes errors;
+ * more delay is needed. This should be safe for other boards
+ * as well.
+ */
+ setbits32(&lbc->bank[priv->bank].or, 0x70);
+ return 0;
+}
+
+static int fsl_elbc_read_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ uint8_t *buf)
+{
+ fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static void fsl_elbc_write_page(struct mtd_info *mtd,
+ struct nand_chip *chip,
+ const uint8_t *buf)
+{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+
+ fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+ fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+ ctrl->oob_poi = chip->oob_poi;
+}
+
+static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+{
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+ struct elbc_regs __iomem *lbc = ctrl->regs;
+ struct nand_chip *chip = &priv->chip;
+
+ dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
+
+ /* Fill in fsl_elbc_mtd structure */
+ priv->mtd.priv = chip;
+ priv->mtd.owner = THIS_MODULE;
+ priv->fmr = 0; /* rest filled in later */
+
+ /* fill in nand_chip structure */
+ /* set up function call table */
+ chip->read_byte = fsl_elbc_read_byte;
+ chip->write_buf = fsl_elbc_write_buf;
+ chip->read_buf = fsl_elbc_read_buf;
+ chip->verify_buf = fsl_elbc_verify_buf;
+ chip->select_chip = fsl_elbc_select_chip;
+ chip->cmdfunc = fsl_elbc_cmdfunc;
+ chip->waitfunc = fsl_elbc_wait;
+
+ /* set up nand options */
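+ /* The FCM sequences whole transactions itself, so no extra ready
+ * check is needed after reads and auto-increment reads are not used.
+ */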
+ chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+
+ chip->controller = &ctrl->controller;
+ chip->priv = priv;
+
+ chip->ecc.read_page = fsl_elbc_read_page;
+ chip->ecc.write_page = fsl_elbc_write_page;
+
+ /* If CS Base Register selects full hardware ECC then use it */
+ if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+ BR_DECC_CHK_GEN) {
+ chip->ecc.mode = NAND_ECC_HW;
+ /* put in small page settings and adjust later if needed */
+ chip->ecc.layout = (priv->fmr & FMR_ECCM) ?
+ &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0;
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ } else {
+ /* otherwise fall back to default software ECC */
+ chip->ecc.mode = NAND_ECC_SOFT;
+ }
+
+ return 0;
+}
+
+static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
+{
+ struct fsl_elbc_ctrl *ctrl = priv->ctrl;
+
+ nand_release(&priv->mtd);
+
+ if (priv->vbase)
+ iounmap(priv->vbase);
+
+ ctrl->chips[priv->bank] = NULL;
+ kfree(priv);
+
+ return 0;
+}
+
+static int fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
+ struct device_node *node)
+{
+ struct elbc_regs __iomem *lbc = ctrl->regs;
+ struct fsl_elbc_mtd *priv;
+ struct resource res;
+#ifdef CONFIG_MTD_PARTITIONS
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", NULL };
+ struct mtd_partition *parts;
+#endif
+ int ret;
+ int bank;
+
+ /* get, allocate and map the memory resource */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(ctrl->dev, "failed to get resource\n");
+ return ret;
+ }
+
+ /* find which chip select it is connected to */
+ for (bank = 0; bank < MAX_BANKS; bank++)
+ if ((in_be32(&lbc->bank[bank].br) & BR_V) &&
+ (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
+ (in_be32(&lbc->bank[bank].br) &
+ in_be32(&lbc->bank[bank].or) & BR_BA)
+ == res.start)
+ break;
+
+ if (bank >= MAX_BANKS) {
+ dev_err(ctrl->dev, "address did not match any chip selects\n");
+ return -ENODEV;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ctrl->chips[bank] = priv;
+ priv->bank = bank;
+ priv->ctrl = ctrl;
+ priv->dev = ctrl->dev;
+
+ priv->vbase = ioremap(res.start, res.end - res.start + 1);
+ if (!priv->vbase) {
+ dev_err(ctrl->dev, "failed to map chip region\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = fsl_elbc_chip_init(priv);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_ident(&priv->mtd, 1);
+ if (ret)
+ goto err;
+
+ ret = fsl_elbc_chip_init_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+ ret = nand_scan_tail(&priv->mtd);
+ if (ret)
+ goto err;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ /* First look for a RedBoot table or partitions on the command
+ * line; these take precedence over device tree information */
+ ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
+ if (ret < 0)
+ goto err;
+
+#ifdef CONFIG_MTD_OF_PARTS
+ if (ret == 0) {
+ ret = of_mtd_parse_partitions(priv->dev, &priv->mtd,
+ node, &parts);
+ if (ret < 0)
+ goto err;
+ }
+#endif
+
+ if (ret > 0)
+ add_mtd_partitions(&priv->mtd, parts, ret);
+ else
+#endif
+ add_mtd_device(&priv->mtd);
+
+ printk(KERN_INFO "eLBC NAND device at 0x%zx, bank %d\n",
+ res.start, priv->bank);
+ return 0;
+
+err:
+ fsl_elbc_chip_remove(priv);
+ return ret;
+}
+
+static int __devinit fsl_elbc_ctrl_init(struct fsl_elbc_ctrl *ctrl)
+{
+ struct elbc_regs __iomem *lbc = ctrl->regs;
+
+ /* clear event registers */
+ setbits32(&lbc->ltesr, LTESR_NAND_MASK);
+ out_be32(&lbc->lteatr, 0);
+
+ /* Enable interrupts for any detected events */
+ out_be32(&lbc->lteir, LTESR_NAND_MASK);
+
+ ctrl->read_bytes = 0;
+ ctrl->index = 0;
+ ctrl->addr = NULL;
+
+ return 0;
+}
+
+static int __devexit fsl_elbc_ctrl_remove(struct of_device *ofdev)
+{
+ struct fsl_elbc_ctrl *ctrl = dev_get_drvdata(&ofdev->dev);
+ int i;
+
+ for (i = 0; i < MAX_BANKS; i++)
+ if (ctrl->chips[i])
+ fsl_elbc_chip_remove(ctrl->chips[i]);
+
+ if (ctrl->irq)
+ free_irq(ctrl->irq, ctrl);
+
+ if (ctrl->regs)
+ iounmap(ctrl->regs);
+
+ dev_set_drvdata(&ofdev->dev, NULL);
+ kfree(ctrl);
+ return 0;
+}
+
+/* NOTE: This interrupt is also used to report other localbus events,
+ * such as transaction errors on other chipselects. If we want to
+ * capture those, we'll need to move the IRQ code into a shared
+ * LBC driver.
+ */
+
+static irqreturn_t fsl_elbc_ctrl_irq(int irqno, void *data)
+{
+ struct fsl_elbc_ctrl *ctrl = data;
+ struct elbc_regs __iomem *lbc = ctrl->regs;
+ __be32 status = in_be32(&lbc->ltesr) & LTESR_NAND_MASK;
+
+ if (status) {
+ out_be32(&lbc->ltesr, status);
+ out_be32(&lbc->lteatr, 0);
+
+ ctrl->irq_status = status;
+ smp_wmb();
+ wake_up(&ctrl->irq_wait);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/* fsl_elbc_ctrl_probe
+ *
+ * called by the device layer when it finds a device matching
+ * one our driver can handle. This code allocates all of
+ * the resources needed for the controller only. The
+ * resources for the NAND banks themselves are allocated
+ * in the chip probe function.
+ */
+
+static int __devinit fsl_elbc_ctrl_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct device_node *child;
+ struct fsl_elbc_ctrl *ctrl;
+ int ret;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ dev_set_drvdata(&ofdev->dev, ctrl);
+
+ spin_lock_init(&ctrl->controller.lock);
+ init_waitqueue_head(&ctrl->controller.wq);
+ init_waitqueue_head(&ctrl->irq_wait);
+
+ ctrl->regs = of_iomap(ofdev->node, 0);
+ if (!ctrl->regs) {
+ dev_err(&ofdev->dev, "failed to get memory region\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ctrl->irq = of_irq_to_resource(ofdev->node, 0, NULL);
+ if (ctrl->irq == NO_IRQ) {
+ dev_err(&ofdev->dev, "failed to get irq resource\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ctrl->dev = &ofdev->dev;
+
+ ret = fsl_elbc_ctrl_init(ctrl);
+ if (ret < 0)
+ goto err;
+
+ ret = request_irq(ctrl->irq, fsl_elbc_ctrl_irq, 0, "fsl-elbc", ctrl);
+ if (ret != 0) {
+ dev_err(&ofdev->dev, "failed to install irq (%d)\n",
+ ctrl->irq);
+ ret = ctrl->irq;
+ goto err;
+ }
+
+ for_each_child_of_node(ofdev->node, child)
+ if (of_device_is_compatible(child, "fsl,elbc-fcm-nand"))
+ fsl_elbc_chip_probe(ctrl, child);
+
+ return 0;
+
+err:
+ fsl_elbc_ctrl_remove(ofdev);
+ return ret;
+}
+
+static const struct of_device_id fsl_elbc_match[] = {
+ {
+ .compatible = "fsl,elbc",
+ },
+ {}
+};
+
+static struct of_platform_driver fsl_elbc_ctrl_driver = {
+ .driver = {
+ .name = "fsl-elbc",
+ },
+ .match_table = fsl_elbc_match,
+ .probe = fsl_elbc_ctrl_probe,
+ .remove = __devexit_p(fsl_elbc_ctrl_remove),
+};
+
+static int __init fsl_elbc_init(void)
+{
+ return of_register_platform_driver(&fsl_elbc_ctrl_driver);
+}
+
+static void __exit fsl_elbc_exit(void)
+{
+ of_unregister_platform_driver(&fsl_elbc_ctrl_driver);
+}
+
+module_init(fsl_elbc_init);
+module_exit(fsl_elbc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index ddd4fc019042..7acb1a0e7409 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2469,8 +2469,12 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.write_oob = nand_write_oob_std;
case NAND_ECC_HW_SYNDROME:
- if (!chip->ecc.calculate || !chip->ecc.correct ||
- !chip->ecc.hwctl) {
+ if ((!chip->ecc.calculate || !chip->ecc.correct ||
+ !chip->ecc.hwctl) &&
+ (!chip->ecc.read_page ||
+ chip->ecc.read_page == nand_read_page_hwecc ||
+ !chip->ecc.write_page ||
+ chip->ecc.write_page == nand_write_page_hwecc)) {
printk(KERN_WARNING "No ECC functions supplied, "
"Hardware ECC not possible\n");
BUG();
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
new file mode 100644
index 000000000000..9162cca0182b
--- /dev/null
+++ b/drivers/mtd/nand/orion_nand.c
@@ -0,0 +1,171 @@
+/*
+ * drivers/mtd/nand/orion_nand.c
+ *
+ * NAND support for Marvell Orion SoC platforms
+ *
+ * Tzachi Perelstein <tzachi@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/sizes.h>
+#include <asm/arch/platform.h>
+#include <asm/arch/hardware.h>
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+static const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *nc = mtd->priv;
+ struct orion_nand_data *board = nc->priv;
+ u32 offs;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ offs = (1 << board->cle);
+ else if (ctrl & NAND_ALE)
+ offs = (1 << board->ale);
+ else
+ return;
+
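+ /* board->cle/ale name the address line that drives each latch; on a
+ * 16-bit bus the resulting byte offset is doubled.
+ */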
+ if (nc->options & NAND_BUSWIDTH_16)
+ offs <<= 1;
+
+ writeb(cmd, nc->IO_ADDR_W + offs);
+}
+
+static int __init orion_nand_probe(struct platform_device *pdev)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *nc;
+ struct orion_nand_data *board;
+ void __iomem *io_base;
+ int ret = 0;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *partitions = NULL;
+ int num_part = 0;
+#endif
+
+ nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
+ if (!nc) {
+ printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");
+ ret = -ENOMEM;
+ goto no_res;
+ }
+ mtd = (struct mtd_info *)(nc + 1);
+
+ io_base = ioremap(pdev->resource[0].start,
+ pdev->resource[0].end - pdev->resource[0].start + 1);
+ if (!io_base) {
+ printk(KERN_ERR "orion_nand: ioremap failed\n");
+ ret = -EIO;
+ goto no_res;
+ }
+
+ board = pdev->dev.platform_data;
+
+ mtd->priv = nc;
+ mtd->owner = THIS_MODULE;
+
+ nc->priv = board;
+ nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
+ nc->cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->ecc.mode = NAND_ECC_SOFT;
+
+ if (board->width == 16)
+ nc->options |= NAND_BUSWIDTH_16;
+
+ platform_set_drvdata(pdev, mtd);
+
+ if (nand_scan(mtd, 1)) {
+ ret = -ENXIO;
+ goto no_dev;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ mtd->name = "orion_nand";
+ num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
+#endif
+ /* If cmdline partitions have been passed, let them be used */
+ if (num_part <= 0) {
+ num_part = board->nr_parts;
+ partitions = board->parts;
+ }
+
+ if (partitions && num_part > 0)
+ ret = add_mtd_partitions(mtd, partitions, num_part);
+ else
+ ret = add_mtd_device(mtd);
+#else
+ ret = add_mtd_device(mtd);
+#endif
+
+ if (ret) {
+ nand_release(mtd);
+ goto no_dev;
+ }
+
+ return 0;
+
+no_dev:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(io_base);
+no_res:
+ kfree(nc);
+
+ return ret;
+}
+
+static int __devexit orion_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nc = mtd->priv;
+
+ nand_release(mtd);
+
+ iounmap(nc->IO_ADDR_W);
+
+ kfree(nc);
+
+ return 0;
+}
+
+static struct platform_driver orion_nand_driver = {
+ .probe = orion_nand_probe,
+ .remove = orion_nand_remove,
+ .driver = {
+ .name = "orion_nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init orion_nand_init(void)
+{
+ return platform_driver_register(&orion_nand_driver);
+}
+
+static void __exit orion_nand_exit(void)
+{
+ platform_driver_unregister(&orion_nand_driver);
+}
+
+module_init(orion_nand_init);
+module_exit(orion_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tzachi Perelstein");
+MODULE_DESCRIPTION("NAND glue for Orion platforms");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
new file mode 100644
index 000000000000..75c899039023
--- /dev/null
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip NAND flash interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#undef DEBUG
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+
+#define LBICTRL_LPCCTL_NR 0x00004000
+#define CLE_PIN_CTL 15
+#define ALE_PIN_CTL 14
+
+static unsigned int lpcctl;
+static struct mtd_info *pasemi_nand_mtd;
+static const char driver_name[] = "pasemi-nand";
+
+static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
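+ /* Copy through the mapped window in chunks of at most 0x800 bytes. */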
+ while (len > 0x800) {
+ memcpy_fromio(buf, chip->IO_ADDR_R, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_fromio(buf, chip->IO_ADDR_R, len);
+}
+
+static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ while (len > 0x800) {
+ memcpy_toio(chip->IO_ADDR_R, buf, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_toio(chip->IO_ADDR_R, buf, len);
+}
+
+static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
+ else
+ out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
+
+ /* Push out posted writes */
+ eieio();
+ inl(lpcctl);
+}
+
+int pasemi_device_ready(struct mtd_info *mtd)
+{
+ return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
+}
+
+static int __devinit pasemi_nand_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct pci_dev *pdev;
+ struct device_node *np = ofdev->node;
+ struct resource res;
+ struct nand_chip *chip;
+ int err = 0;
+
+ err = of_address_to_resource(np, 0, &res);
+
+ if (err)
+ return -EINVAL;
+
+ /* We only support one device at the moment */
+ if (pasemi_nand_mtd)
+ return -ENODEV;
+
+ pr_debug("pasemi_nand at %lx-%lx\n", res.start, res.end);
+
+ /* Allocate memory for MTD device structure and private data */
+ pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip), GFP_KERNEL);
+ if (!pasemi_nand_mtd) {
+ printk(KERN_WARNING
+ "Unable to allocate PASEMI NAND MTD device structure\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Get pointer to private data */
+ chip = (struct nand_chip *)&pasemi_nand_mtd[1];
+
+ /* Link the private data with the MTD structure */
+ pasemi_nand_mtd->priv = chip;
+ pasemi_nand_mtd->owner = THIS_MODULE;
+
+ chip->IO_ADDR_R = of_iomap(np, 0);
+ chip->IO_ADDR_W = chip->IO_ADDR_R;
+
+ if (!chip->IO_ADDR_R) {
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
+ if (!pdev) {
+ err = -ENODEV;
+ goto out_ior;
+ }
+
+ lpcctl = pci_resource_start(pdev, 0);
+
+ if (!request_region(lpcctl, 4, driver_name)) {
+ err = -EBUSY;
+ goto out_ior;
+ }
+
+ chip->cmd_ctrl = pasemi_hwcontrol;
+ chip->dev_ready = pasemi_device_ready;
+ chip->read_buf = pasemi_read_buf;
+ chip->write_buf = pasemi_write_buf;
+ chip->chip_delay = 0;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Enable the following for a flash based bad block table */
+ chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(pasemi_nand_mtd, 1)) {
+ err = -ENXIO;
+ goto out_lpc;
+ }
+
+ if (add_mtd_device(pasemi_nand_mtd)) {
+ printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
+ err = -ENODEV;
+ goto out_lpc;
+ }
+
+ printk(KERN_INFO "PA Semi NAND flash at %08lx, control at I/O %x\n",
+ res.start, lpcctl);
+
+ return 0;
+
+ out_lpc:
+ release_region(lpcctl, 4);
+ out_ior:
+ iounmap(chip->IO_ADDR_R);
+ out_mtd:
+ kfree(pasemi_nand_mtd);
+ out:
+ return err;
+}
+
+static int __devexit pasemi_nand_remove(struct of_device *ofdev)
+{
+ struct nand_chip *chip;
+
+ if (!pasemi_nand_mtd)
+ return 0;
+
+ chip = pasemi_nand_mtd->priv;
+
+ /* Release resources, unregister device */
+ nand_release(pasemi_nand_mtd);
+
+ release_region(lpcctl, 4);
+
+ iounmap(chip->IO_ADDR_R);
+
+ /* Free the MTD device structure */
+ kfree(pasemi_nand_mtd);
+
+ pasemi_nand_mtd = NULL;
+
+ return 0;
+}
+
+static struct of_device_id pasemi_nand_match[] =
+{
+ {
+ .compatible = "pasemi,localbus-nand",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, pasemi_nand_match);
+
+static struct of_platform_driver pasemi_nand_driver =
+{
+ .name = (char*)driver_name,
+ .match_table = pasemi_nand_match,
+ .probe = pasemi_nand_probe,
+ .remove = pasemi_nand_remove,
+};
+
+static int __init pasemi_nand_init(void)
+{
+ return of_register_platform_driver(&pasemi_nand_driver);
+}
+module_init(pasemi_nand_init);
+
+static void __exit pasemi_nand_exit(void)
+{
+ of_unregister_platform_driver(&pasemi_nand_driver);
+}
+module_exit(pasemi_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index cd725fc5e813..f6d5c2adc4fd 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -110,7 +110,9 @@ out:
static int __devexit plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
+#ifdef CONFIG_MTD_PARTITIONS
struct platform_nand_data *pdata = pdev->dev.platform_data;
+#endif
nand_release(&data->mtd);
#ifdef CONFIG_MTD_PARTITIONS
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 2bd0737572c6..9260ad947524 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -120,6 +120,8 @@ struct s3c2410_nand_info {
int sel_bit;
int mtd_count;
+ unsigned long save_nfconf;
+
enum s3c_cpu_type cpu_type;
};
@@ -364,23 +366,21 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
/* calculate the bit position of the error */
- bit = (diff2 >> 2) & 1;
- bit |= (diff2 >> 3) & 2;
- bit |= (diff2 >> 4) & 4;
+ bit = ((diff2 >> 3) & 1) |
+ ((diff2 >> 4) & 2) |
+ ((diff2 >> 5) & 4);
/* calculate the byte position of the error */
- byte = (diff1 << 1) & 0x80;
- byte |= (diff1 << 2) & 0x40;
- byte |= (diff1 << 3) & 0x20;
- byte |= (diff1 << 4) & 0x10;
-
- byte |= (diff0 >> 3) & 0x08;
- byte |= (diff0 >> 2) & 0x04;
- byte |= (diff0 >> 1) & 0x02;
- byte |= (diff0 >> 0) & 0x01;
-
- byte |= (diff2 << 8) & 0x100;
+ byte = ((diff2 << 7) & 0x100) |
+ ((diff1 << 0) & 0x80) |
+ ((diff1 << 1) & 0x40) |
+ ((diff1 << 2) & 0x20) |
+ ((diff1 << 3) & 0x10) |
+ ((diff0 >> 4) & 0x08) |
+ ((diff0 >> 3) & 0x04) |
+ ((diff0 >> 2) & 0x02) |
+ ((diff0 >> 1) & 0x01);
dev_dbg(info->device, "correcting error bit %d, byte %d\n",
bit, byte);
@@ -399,7 +399,7 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
if ((diff0 & ~(1<<fls(diff0))) == 0)
return 1;
- return 0;
+ return -1;
}
/* ECC functions
@@ -810,6 +810,16 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
struct s3c2410_nand_info *info = platform_get_drvdata(dev);
if (info) {
+ info->save_nfconf = readl(info->regs + S3C2410_NFCONF);
+
+ /* For the moment, we must ensure nFCE is high during
+ * the time we are suspended. This really should be
+ * handled by suspending the MTDs we are using, but
+ * that is currently not the case. */
+
+ writel(info->save_nfconf | info->sel_bit,
+ info->regs + S3C2410_NFCONF);
+
if (!allow_clk_stop(info))
clk_disable(info->clk);
}
@@ -820,11 +830,19 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
static int s3c24xx_nand_resume(struct platform_device *dev)
{
struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+ unsigned long nfconf;
if (info) {
clk_enable(info->clk);
s3c2410_nand_inithw(info, dev);
+ /* Restore the state of the nFCE line. */
+
+ nfconf = readl(info->regs + S3C2410_NFCONF);
+ nfconf &= ~info->sel_bit;
+ nfconf |= info->save_nfconf & info->sel_bit;
+ writel(nfconf, info->regs + S3C2410_NFCONF);
+
if (allow_clk_stop(info))
clk_disable(info->clk);
}
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
new file mode 100644
index 000000000000..f86e06934cd8
--- /dev/null
+++ b/drivers/mtd/ofpart.c
@@ -0,0 +1,74 @@
+/*
+ * Flash partitions described by the OF (or flattened) device tree
+ *
+ * Copyright (C) 2006 MontaVista Software Inc.
+ * Author: Vitaly Wool <vwool@ru.mvista.com>
+ *
+ * Revised to handle newer style flash binding by:
+ * Copyright (C) 2007 David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+int __devinit of_mtd_parse_partitions(struct device *dev,
+ struct mtd_info *mtd,
+ struct device_node *node,
+ struct mtd_partition **pparts)
+{
+ const char *partname;
+ struct device_node *pp;
+ int nr_parts, i;
+
+ /* First count the subnodes */
+ pp = NULL;
+ nr_parts = 0;
+ while ((pp = of_get_next_child(node, pp)))
+ nr_parts++;
+
+ if (nr_parts == 0)
+ return 0;
+
+ *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ pp = NULL;
+ i = 0;
+ while ((pp = of_get_next_child(node, pp))) {
+ const u32 *reg;
+ int len;
+
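+ /* Each partition subnode must carry reg = <offset size>, two 32-bit cells. */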
+ reg = of_get_property(pp, "reg", &len);
+ if (!reg || (len != 2 * sizeof(u32))) {
+ of_node_put(pp);
+ dev_err(dev, "Invalid 'reg' on %s\n", node->full_name);
+ kfree(*pparts);
+ *pparts = NULL;
+ return -EINVAL;
+ }
+ (*pparts)[i].offset = reg[0];
+ (*pparts)[i].size = reg[1];
+
+ partname = of_get_property(pp, "label", &len);
+ if (!partname)
+ partname = of_get_property(pp, "name", &len);
+ (*pparts)[i].name = (char *)partname;
+
+ if (of_get_property(pp, "read-only", &len))
+ (*pparts)[i].mask_flags = MTD_WRITEABLE;
+
+ i++;
+ }
+
+ return nr_parts;
+}
+EXPORT_SYMBOL(of_mtd_parse_partitions);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 1b0b32011415..8d7d21be1541 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mtd/mtd.h>
@@ -170,6 +171,18 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
}
/**
+ * onenand_get_density - [DEFAULT] Get OneNAND density
+ * @param dev_id OneNAND device ID
+ *
+ * Get OneNAND density from device ID
+ */
+static inline int onenand_get_density(int dev_id)
+{
+ int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ return (density & ONENAND_DEVICE_DENSITY_MASK);
+}
+
+/**
* onenand_command - [DEFAULT] Send command to OneNAND device
* @param mtd MTD device structure
* @param cmd the command to be sent
@@ -182,8 +195,7 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len)
{
struct onenand_chip *this = mtd->priv;
- int value, readcmd = 0, block_cmd = 0;
- int block, page;
+ int value, block, page;
/* Address translation */
switch (cmd) {
@@ -198,7 +210,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
case ONENAND_CMD_ERASE:
case ONENAND_CMD_BUFFERRAM:
case ONENAND_CMD_OTP_ACCESS:
- block_cmd = 1;
block = (int) (addr >> this->erase_shift);
page = -1;
break;
@@ -240,11 +251,9 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
value = onenand_block_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
- if (block_cmd) {
- /* Select DataRAM for DDP */
- value = onenand_bufferram_address(this, block);
- this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- }
+ /* Select DataRAM for DDP */
+ value = onenand_bufferram_address(this, block);
+ this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
}
if (page != -1) {
@@ -256,7 +265,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
- readcmd = 1;
break;
default:
@@ -273,12 +281,6 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
/* Write 'BSA, BSC' of DataRAM */
value = onenand_buffer_address(dataram, sectors, count);
this->write_word(value, this->base + ONENAND_REG_START_BUFFER);
-
- if (readcmd) {
- /* Select DataRAM for DDP */
- value = onenand_bufferram_address(this, block);
- this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- }
}
/* Interrupt clear */
@@ -855,6 +857,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
this->command(mtd, ONENAND_CMD_READ, from, writesize);
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
}
}
@@ -913,6 +917,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
/* Now wait for load */
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
}
/*
@@ -923,12 +929,12 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
ops->retlen = read;
ops->oobretlen = oobread;
- if (mtd->ecc_stats.failed - stats.failed)
- return -EBADMSG;
-
if (ret)
return ret;
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}
@@ -944,6 +950,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
int read = 0, thislen, column, oobsize;
size_t len = ops->ooblen;
mtd_oob_mode_t mode = ops->mode;
@@ -977,6 +984,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
+ stats = mtd->ecc_stats;
+
while (read < len) {
cond_resched();
@@ -988,18 +997,16 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
onenand_update_bufferram(mtd, from, 0);
ret = this->wait(mtd, FL_READING);
- /* First copy data and check return value for ECC handling */
+ if (ret && ret != -EBADMSG) {
+ printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
+ break;
+ }
if (mode == MTD_OOB_AUTO)
onenand_transfer_auto_oob(mtd, buf, column, thislen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
- if (ret) {
- printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
- break;
- }
-
read += thislen;
if (read == len)
@@ -1016,7 +1023,14 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
}
ops->oobretlen = read;
- return ret;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return 0;
}
/**
@@ -1106,12 +1120,10 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
+ /* Initial bad block case: 0x2400 or 0x0400 */
if (ctrl & ONENAND_CTRL_ERROR) {
printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl);
- /* Initial bad block case */
- if (ctrl & ONENAND_CTRL_LOAD)
- return ONENAND_BBT_READ_ERROR;
- return ONENAND_BBT_READ_FATAL_ERROR;
+ return ONENAND_BBT_READ_ERROR;
}
if (interrupt & ONENAND_INT_READ) {
@@ -1206,7 +1218,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to)
{
struct onenand_chip *this = mtd->priv;
- char oobbuf[64];
+ u_char *oob_buf = this->oob_buf;
int status, i;
this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize);
@@ -1215,9 +1227,9 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
if (status)
return status;
- this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize);
for (i = 0; i < mtd->oobsize; i++)
- if (buf[i] != 0xFF && buf[i] != oobbuf[i])
+ if (buf[i] != 0xFF && buf[i] != oob_buf[i])
return -EBADMSG;
return 0;
@@ -1273,6 +1285,112 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr,
#define NOTALIGNED(x) ((x & (this->subpagesize - 1)) != 0)
+static void onenand_panic_wait(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned int interrupt;
+ int i;
+
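+ /* Poll the interrupt register (up to roughly 20ms) rather than
+ * sleeping; a panic context cannot schedule.
+ */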
+ for (i = 0; i < 2000; i++) {
+ interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
+ if (interrupt & ONENAND_INT_MASTER)
+ break;
+ udelay(10);
+ }
+}
+
+/**
+ * onenand_panic_write - [MTD Interface] write buffer to FLASH in a panic context
+ * @param mtd MTD device structure
+ * @param to offset to write to
+ * @param len number of bytes to write
+ * @param retlen pointer to variable to store the number of written bytes
+ * @param buf the data to write
+ *
+ * Write with ECC
+ */
+static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct onenand_chip *this = mtd->priv;
+ int column, subpage;
+ int written = 0;
+ int ret = 0;
+
+ if (this->state == FL_PM_SUSPENDED)
+ return -EBUSY;
+
+ /* Wait for any existing operation to clear */
+ onenand_panic_wait(mtd);
+
+ DEBUG(MTD_DEBUG_LEVEL3, "onenand_panic_write: to = 0x%08x, len = %i\n",
+ (unsigned int) to, (int) len);
+
+ /* Initialize retlen, in case of early exit */
+ *retlen = 0;
+
+ /* Do not allow writes past end of device */
+ if (unlikely((to + len) > mtd->size)) {
+ printk(KERN_ERR "onenand_panic_write: Attempt write to past end of device\n");
+ return -EINVAL;
+ }
+
+ /* Reject writes that are not page aligned */
+ if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) {
+ printk(KERN_ERR "onenand_panic_write: Attempt to write not page aligned data\n");
+ return -EINVAL;
+ }
+
+ column = to & (mtd->writesize - 1);
+
+ /* Loop until all data has been written */
+ while (written < len) {
+ int thislen = min_t(int, mtd->writesize - column, len - written);
+ u_char *wbuf = (u_char *) buf;
+
+ this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen);
+
+ /* Partial page write */
+ subpage = thislen < mtd->writesize;
+ if (subpage) {
+ memset(this->page_buf, 0xff, mtd->writesize);
+ memcpy(this->page_buf + column, buf, thislen);
+ wbuf = this->page_buf;
+ }
+
+ this->write_bufferram(mtd, ONENAND_DATARAM, wbuf, 0, mtd->writesize);
+ this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize);
+
+ this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
+
+ onenand_panic_wait(mtd);
+
+ /* In partial page write we don't update bufferram */
+ onenand_update_bufferram(mtd, to, !ret && !subpage);
+ if (ONENAND_IS_2PLANE(this)) {
+ ONENAND_SET_BUFFERRAM1(this);
+ onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage);
+ }
+
+ if (ret) {
+ printk(KERN_ERR "onenand_panic_write: write failed %d\n", ret);
+ break;
+ }
+
+ written += thislen;
+
+ if (written == len)
+ break;
+
+ column = 0;
+ to += thislen;
+ buf += thislen;
+ }
+
+ *retlen = written;
+ return ret;
+}
+
/**
* onenand_fill_auto_oob - [Internal] oob auto-placement transfer
* @param mtd MTD device structure
@@ -1419,7 +1537,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
}
/* Only check verify write turn on */
- ret = onenand_verify(mtd, (u_char *) wbuf, to, thislen);
+ ret = onenand_verify(mtd, buf, to, thislen);
if (ret) {
printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret);
break;
@@ -1435,9 +1553,6 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to,
buf += thislen;
}
- /* Deselect and wake up anyone waiting on the device */
- onenand_release_device(mtd);
-
ops->retlen = written;
return ret;
@@ -2148,7 +2263,7 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
*retlen = 0;
- density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ density = onenand_get_density(this->device_id);
if (density < ONENAND_DEVICE_DENSITY_512Mb)
otp_pages = 20;
else
@@ -2299,7 +2414,8 @@ static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
- unsigned char oob_buf[64];
+ struct onenand_chip *this = mtd->priv;
+ u_char *oob_buf = this->oob_buf;
size_t retlen;
int ret;
@@ -2339,7 +2455,7 @@ static void onenand_check_features(struct mtd_info *mtd)
unsigned int density, process;
/* Lock scheme depends on density and process */
- density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ density = onenand_get_density(this->device_id);
process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT;
/* Lock scheme */
@@ -2388,7 +2504,7 @@ static void onenand_print_device_info(int device, int version)
vcc = device & ONENAND_DEVICE_VCC_MASK;
demuxed = device & ONENAND_DEVICE_IS_DEMUX;
ddp = device & ONENAND_DEVICE_IS_DDP;
- density = device >> ONENAND_DEVICE_DENSITY_SHIFT;
+ density = onenand_get_density(device);
printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
demuxed ? "" : "Muxed ",
ddp ? "(DDP)" : "",
@@ -2480,7 +2596,7 @@ static int onenand_probe(struct mtd_info *mtd)
this->device_id = dev_id;
this->version_id = ver_id;
- density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ density = onenand_get_density(dev_id);
this->chipsize = (16 << density) << 20;
/* Set density mask. it is used for DDP */
if (ONENAND_IS_DDP(this))
@@ -2664,6 +2780,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->write = onenand_write;
mtd->read_oob = onenand_read_oob;
mtd->write_oob = onenand_write_oob;
+ mtd->panic_write = onenand_panic_write;
#ifdef CONFIG_MTD_ONENAND_OTP
mtd->get_fact_prot_info = onenand_get_fact_prot_info;
mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index a61351f88ec0..47474903263c 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -59,16 +59,31 @@ static int parse_redboot_partitions(struct mtd_info *master,
static char nullstring[] = "unallocated";
#endif
+ if ( directory < 0 ) {
+ offset = master->size + directory * master->erasesize;
+ while (master->block_isbad &&
+ master->block_isbad(master, offset)) {
+ if (!offset) {
+ nogood:
+ printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
+ return -EIO;
+ }
+ offset -= master->erasesize;
+ }
+ } else {
+ offset = directory * master->erasesize;
+ while (master->block_isbad &&
+ master->block_isbad(master, offset)) {
+ offset += master->erasesize;
+ if (offset == master->size)
+ goto nogood;
+ }
+ }
buf = vmalloc(master->erasesize);
if (!buf)
return -ENOMEM;
- if ( directory < 0 )
- offset = master->size + directory*master->erasesize;
- else
- offset = directory*master->erasesize;
-
printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
master->name, offset);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 023653977a1a..6ac81e35355c 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -21,11 +21,16 @@
*/
/*
- * This file includes UBI initialization and building of UBI devices. At the
- * moment UBI devices may only be added while UBI is initialized, but dynamic
- * device add/remove functionality is planned. Also, at the moment we only
- * attach UBI devices by scanning, which will become a bottleneck when flashes
- * reach certain large size. Then one may improve UBI and add other methods.
+ * This file includes UBI initialization and building of UBI devices.
+ *
+ * When UBI is initialized, it attaches all the MTD devices specified as the
+ * module load parameters or the kernel boot parameters. If MTD devices were
+ * not specified, UBI does not attach any MTD device, but it is possible to
+ * do so later using the "UBI control device".
+ *
+ * At the moment we only attach UBI devices by scanning, which will become a
+ * bottleneck when flashes reach certain large size. Then one may improve UBI
+ * and add other methods, although it does not seem to be easy to do.
*/
#include <linux/err.h>
@@ -33,7 +38,9 @@
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/stat.h>
+#include <linux/miscdevice.h>
#include <linux/log2.h>
+#include <linux/kthread.h>
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
@@ -43,13 +50,11 @@
* struct mtd_dev_param - MTD device parameter description data structure.
* @name: MTD device name or number string
* @vid_hdr_offs: VID header offset
- * @data_offs: data offset
*/
struct mtd_dev_param
{
char name[MTD_PARAM_LEN_MAX];
int vid_hdr_offs;
- int data_offs;
};
/* Numbers of elements set in the @mtd_dev_param array */
@@ -58,14 +63,27 @@ static int mtd_devs = 0;
/* MTD devices specification parameters */
static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
-/* Number of UBI devices in system */
-int ubi_devices_cnt;
+/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
+struct class *ubi_class;
+
+/* Slab cache for wear-leveling entries */
+struct kmem_cache *ubi_wl_entry_slab;
+
+/* UBI control character device */
+static struct miscdevice ubi_ctrl_cdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ubi_ctrl",
+ .fops = &ubi_ctrl_cdev_operations,
+};
/* All UBI devices in system */
-struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
+static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
-/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
-struct class *ubi_class;
+/* Serializes UBI devices creations and removals */
+DEFINE_MUTEX(ubi_devices_mutex);
+
+/* Protects @ubi_devices and @ubi->ref_count */
+static DEFINE_SPINLOCK(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class, char *buf)
@@ -101,38 +119,150 @@ static struct device_attribute dev_min_io_size =
__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_num =
+ __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+
+/**
+ * ubi_get_device - get UBI device.
+ * @ubi_num: UBI device number
+ *
+ * This function returns UBI device description object for UBI device number
+ * @ubi_num, or %NULL if the device does not exist. This function increases the
+ * device reference count to prevent removal of the device. In other words, the
+ * device cannot be removed if its reference count is not zero.
+ */
+struct ubi_device *ubi_get_device(int ubi_num)
+{
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ ubi = ubi_devices[ubi_num];
+ if (ubi) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi;
+}
+
+/**
+ * ubi_put_device - drop an UBI device reference.
+ * @ubi: UBI device description object
+ */
+void ubi_put_device(struct ubi_device *ubi)
+{
+ spin_lock(&ubi_devices_lock);
+ ubi->ref_count -= 1;
+ put_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+}
+
+/**
+ * ubi_get_by_major - get UBI device description object by character device
+ * major number.
+ * @major: major number
+ *
+ * This function is similar to 'ubi_get_device()', but it searches the device
+ * by its major number.
+ */
+struct ubi_device *ubi_get_by_major(int major)
+{
+ int i;
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+ return ubi;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return NULL;
+}
+
+/**
+ * ubi_major2num - get UBI device number by character device major number.
+ * @major: major number
+ *
+ * This function looks up the UBI device number by its character device major
+ * number. If the UBI device was not found, this function returns -ENODEV,
+ * otherwise the UBI device number is returned.
+ */
+int ubi_major2num(int major)
+{
+ int i, ubi_num = -ENODEV;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ struct ubi_device *ubi = ubi_devices[i];
+
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_num = ubi->ubi_num;
+ break;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi_num;
+}
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct ubi_device *ubi;
+ ssize_t ret;
+ struct ubi_device *ubi;
+ /*
+ * The below code looks weird, but it actually makes sense. We get the
+ * UBI device reference from the contained 'struct ubi_device'. But it
+ * is unclear whether the device has already been removed. Indeed, if the
+ * device was removed before we increased its reference count,
+ * 'ubi_get_device()' will return %NULL and we fail.
+ *
+ * Remember, 'struct ubi_device' is freed in the release function, so
+ * we still can use 'ubi->ubi_num'.
+ */
ubi = container_of(dev, struct ubi_device, dev);
+ ubi = ubi_get_device(ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
if (attr == &dev_eraseblock_size)
- return sprintf(buf, "%d\n", ubi->leb_size);
+ ret = sprintf(buf, "%d\n", ubi->leb_size);
else if (attr == &dev_avail_eraseblocks)
- return sprintf(buf, "%d\n", ubi->avail_pebs);
+ ret = sprintf(buf, "%d\n", ubi->avail_pebs);
else if (attr == &dev_total_eraseblocks)
- return sprintf(buf, "%d\n", ubi->good_peb_count);
+ ret = sprintf(buf, "%d\n", ubi->good_peb_count);
else if (attr == &dev_volumes_count)
- return sprintf(buf, "%d\n", ubi->vol_count);
+ ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
else if (attr == &dev_max_ec)
- return sprintf(buf, "%d\n", ubi->max_ec);
+ ret = sprintf(buf, "%d\n", ubi->max_ec);
else if (attr == &dev_reserved_for_bad)
- return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
+ ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
else if (attr == &dev_bad_peb_count)
- return sprintf(buf, "%d\n", ubi->bad_peb_count);
+ ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
else if (attr == &dev_max_vol_count)
- return sprintf(buf, "%d\n", ubi->vtbl_slots);
+ ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
else if (attr == &dev_min_io_size)
- return sprintf(buf, "%d\n", ubi->min_io_size);
+ ret = sprintf(buf, "%d\n", ubi->min_io_size);
else if (attr == &dev_bgt_enabled)
- return sprintf(buf, "%d\n", ubi->thread_enabled);
+ ret = sprintf(buf, "%d\n", ubi->thread_enabled);
+ else if (attr == &dev_mtd_num)
+ ret = sprintf(buf, "%d\n", ubi->mtd->index);
else
- BUG();
+ ret = -EINVAL;
- return 0;
+ ubi_put_device(ubi);
+ return ret;
}
/* Fake "release" method for UBI devices */
@@ -150,68 +280,44 @@ static int ubi_sysfs_init(struct ubi_device *ubi)
int err;
ubi->dev.release = dev_release;
- ubi->dev.devt = MKDEV(ubi->major, 0);
+ ubi->dev.devt = ubi->cdev.dev;
ubi->dev.class = ubi_class;
sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
err = device_register(&ubi->dev);
if (err)
- goto out;
+ return err;
err = device_create_file(&ubi->dev, &dev_eraseblock_size);
if (err)
- goto out_unregister;
+ return err;
err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
if (err)
- goto out_eraseblock_size;
+ return err;
err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
if (err)
- goto out_avail_eraseblocks;
+ return err;
err = device_create_file(&ubi->dev, &dev_volumes_count);
if (err)
- goto out_total_eraseblocks;
+ return err;
err = device_create_file(&ubi->dev, &dev_max_ec);
if (err)
- goto out_volumes_count;
+ return err;
err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
if (err)
- goto out_volumes_max_ec;
+ return err;
err = device_create_file(&ubi->dev, &dev_bad_peb_count);
if (err)
- goto out_reserved_for_bad;
+ return err;
err = device_create_file(&ubi->dev, &dev_max_vol_count);
if (err)
- goto out_bad_peb_count;
+ return err;
err = device_create_file(&ubi->dev, &dev_min_io_size);
if (err)
- goto out_max_vol_count;
+ return err;
err = device_create_file(&ubi->dev, &dev_bgt_enabled);
if (err)
- goto out_min_io_size;
-
- return 0;
-
-out_min_io_size:
- device_remove_file(&ubi->dev, &dev_min_io_size);
-out_max_vol_count:
- device_remove_file(&ubi->dev, &dev_max_vol_count);
-out_bad_peb_count:
- device_remove_file(&ubi->dev, &dev_bad_peb_count);
-out_reserved_for_bad:
- device_remove_file(&ubi->dev, &dev_reserved_for_bad);
-out_volumes_max_ec:
- device_remove_file(&ubi->dev, &dev_max_ec);
-out_volumes_count:
- device_remove_file(&ubi->dev, &dev_volumes_count);
-out_total_eraseblocks:
- device_remove_file(&ubi->dev, &dev_total_eraseblocks);
-out_avail_eraseblocks:
- device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
-out_eraseblock_size:
- device_remove_file(&ubi->dev, &dev_eraseblock_size);
-out_unregister:
- device_unregister(&ubi->dev);
-out:
- ubi_err("failed to initialize sysfs for %s", ubi->ubi_name);
+ return err;
+ err = device_create_file(&ubi->dev, &dev_mtd_num);
return err;
}
@@ -221,6 +327,7 @@ out:
*/
static void ubi_sysfs_close(struct ubi_device *ubi)
{
+ device_remove_file(&ubi->dev, &dev_mtd_num);
device_remove_file(&ubi->dev, &dev_bgt_enabled);
device_remove_file(&ubi->dev, &dev_min_io_size);
device_remove_file(&ubi->dev, &dev_max_vol_count);
@@ -244,7 +351,7 @@ static void kill_volumes(struct ubi_device *ubi)
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i])
- ubi_free_volume(ubi, i);
+ ubi_free_volume(ubi, ubi->volumes[i]);
}
/**
@@ -259,9 +366,6 @@ static int uif_init(struct ubi_device *ubi)
int i, err;
dev_t dev;
- mutex_init(&ubi->vtbl_mutex);
- spin_lock_init(&ubi->volumes_lock);
-
sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
/*
@@ -278,39 +382,40 @@ static int uif_init(struct ubi_device *ubi)
return err;
}
+ ubi_assert(MINOR(dev) == 0);
cdev_init(&ubi->cdev, &ubi_cdev_operations);
- ubi->major = MAJOR(dev);
- dbg_msg("%s major is %u", ubi->ubi_name, ubi->major);
+ dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev));
ubi->cdev.owner = THIS_MODULE;
- dev = MKDEV(ubi->major, 0);
err = cdev_add(&ubi->cdev, dev, 1);
if (err) {
- ubi_err("cannot add character device %s", ubi->ubi_name);
+ ubi_err("cannot add character device");
goto out_unreg;
}
err = ubi_sysfs_init(ubi);
if (err)
- goto out_cdev;
+ goto out_sysfs;
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i]) {
- err = ubi_add_volume(ubi, i);
- if (err)
+ err = ubi_add_volume(ubi, ubi->volumes[i]);
+ if (err) {
+ ubi_err("cannot add volume %d", i);
goto out_volumes;
+ }
}
return 0;
out_volumes:
kill_volumes(ubi);
+out_sysfs:
ubi_sysfs_close(ubi);
-out_cdev:
cdev_del(&ubi->cdev);
out_unreg:
- unregister_chrdev_region(MKDEV(ubi->major, 0),
- ubi->vtbl_slots + 1);
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
+ ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
return err;
}
@@ -323,7 +428,7 @@ static void uif_close(struct ubi_device *ubi)
kill_volumes(ubi);
ubi_sysfs_close(ubi);
cdev_del(&ubi->cdev);
- unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1);
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}
/**
@@ -384,9 +489,9 @@ out_si:
* assumed:
* o EC header is always at offset zero - this cannot be changed;
* o VID header starts just after the EC header at the closest address
- * aligned to @io->@hdrs_min_io_size;
+ * aligned to @io->hdrs_min_io_size;
* o data starts just after the VID header at the closest address aligned to
- * @io->@min_io_size
+ * @io->min_io_size
*
* This function returns zero in case of success and a negative error code in
* case of failure.
@@ -407,6 +512,9 @@ static int io_init(struct ubi_device *ubi)
return -EINVAL;
}
+ if (ubi->vid_hdr_offset < 0)
+ return -EINVAL;
+
/*
* Note, in this implementation we support MTD devices with 0x7FFFFFFF
* physical eraseblocks maximum.
@@ -424,7 +532,8 @@ static int io_init(struct ubi_device *ubi)
/* Make sure minimal I/O unit is power of 2 */
if (!is_power_of_2(ubi->min_io_size)) {
- ubi_err("bad min. I/O unit");
+ ubi_err("min. I/O unit (%d) is not power of 2",
+ ubi->min_io_size);
return -EINVAL;
}
@@ -453,10 +562,8 @@ static int io_init(struct ubi_device *ubi)
}
/* Similar for the data offset */
- if (ubi->leb_start == 0) {
- ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize;
- ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
- }
+ ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
+ ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
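A worked example of the new data-offset calculation may help; the numbers are illustrative only and do not come from the patch:

/*
 * Assume min_io_size = 2048 (2 KiB NAND pages) and vid_hdr_offset = 512
 * (the VID header in the second 512-byte sub-page). With the 64-byte
 * on-flash header size:
 *
 *	leb_start = ALIGN(512 + 64, 2048) = 2048
 *
 * i.e. user data starts at the second NAND page of every physical
 * eraseblock.
 */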
@@ -514,76 +621,147 @@ static int io_init(struct ubi_device *ubi)
}
/**
- * attach_mtd_dev - attach an MTD device.
- * @mtd_dev: MTD device name or number string
- * @vid_hdr_offset: VID header offset
- * @data_offset: data offset
+ * autoresize - re-size the volume which has the "auto-resize" flag set.
+ * @ubi: UBI device description object
+ * @vol_id: ID of the volume to re-size
*
- * This function attaches an MTD device to UBI. It first treats @mtd_dev as the
- * MTD device name, and tries to open it by this name. If it is unable to open,
- * it tries to convert @mtd_dev to an integer and open the MTD device by its
- * number. Returns zero in case of success and a negative error code in case of
- * failure.
+ * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
+ * the volume table to the largest possible size. See comments in ubi-header.h
+ * for more description of the flag. Returns zero in case of success and a
+ * negative error code in case of failure.
*/
-static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
- int data_offset)
+static int autoresize(struct ubi_device *ubi, int vol_id)
{
- struct ubi_device *ubi;
- struct mtd_info *mtd;
- int i, err;
+ struct ubi_volume_desc desc;
+ struct ubi_volume *vol = ubi->volumes[vol_id];
+ int err, old_reserved_pebs = vol->reserved_pebs;
- mtd = get_mtd_device_nm(mtd_dev);
- if (IS_ERR(mtd)) {
- int mtd_num;
- char *endp;
+ /*
+ * Clear the auto-resize flag in the in-memory copy of the volume
+ * table, and 'ubi_resize_volume()' will propagate this change
+ * to the flash.
+ */
+ ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;
- if (PTR_ERR(mtd) != -ENODEV)
- return PTR_ERR(mtd);
+ if (ubi->avail_pebs == 0) {
+ struct ubi_vtbl_record vtbl_rec;
/*
- * Probably this is not MTD device name but MTD device number -
- * check this out.
+ * No available PEBs to re-size the volume, clear the flag on
+ * flash and exit.
*/
- mtd_num = simple_strtoul(mtd_dev, &endp, 0);
- if (*endp != '\0' || mtd_dev == endp) {
- ubi_err("incorrect MTD device: \"%s\"", mtd_dev);
- return -ENODEV;
- }
-
- mtd = get_mtd_device(NULL, mtd_num);
- if (IS_ERR(mtd))
- return PTR_ERR(mtd);
+ memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
+ sizeof(struct ubi_vtbl_record));
+ err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ if (err)
+ ubi_err("cannot clean auto-resize flag for volume %d",
+ vol_id);
+ } else {
+ desc.vol = vol;
+ err = ubi_resize_volume(&desc,
+ old_reserved_pebs + ubi->avail_pebs);
+ if (err)
+ ubi_err("cannot auto-resize volume %d", vol_id);
}
- /* Check if we already have the same MTD device attached */
- for (i = 0; i < ubi_devices_cnt; i++)
- if (ubi_devices[i]->mtd->index == mtd->index) {
- ubi_err("mtd%d is already attached to ubi%d",
+ if (err)
+ return err;
+
+ ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
+ vol->name, old_reserved_pebs, vol->reserved_pebs);
+ return 0;
+}
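The @autoresize_vol_id field consumed here has to be set somewhere during attach; a hedged sketch of how the volume-table scanning code (presumably in vtbl.c, which is not shown in this diff) could record it:

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
			/* Only one volume may carry the auto-resize flag */
			if (ubi->autoresize_vol_id != -1) {
				ubi_err("more than one auto-resize volume (%d and %d)",
					ubi->autoresize_vol_id, i);
				return -EINVAL;
			}
			ubi->autoresize_vol_id = i;
		}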
+
+/**
+ * ubi_attach_mtd_dev - attach an MTD device.
+ * @mtd: MTD device description object
+ * @ubi_num: number to assign to the new UBI device
+ * @vid_hdr_offset: VID header offset
+ *
+ * This function attaches MTD device @mtd to UBI and assigns @ubi_num to the
+ * newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in which
+ * case this function finds a vacant device number and assigns it
+ * automatically. Returns the new UBI device number in case of success and a
+ * negative error code in case of failure.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
+ */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
+{
+ struct ubi_device *ubi;
+ int i, err;
+
+ /*
+ * Check if we already have the same MTD device attached.
+ *
+ * Note, this function assumes that UBI devices creations and deletions
+ * are serialized, so it does not take the &ubi_devices_lock.
+ */
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && mtd->index == ubi->mtd->index) {
+ dbg_err("mtd%d is already attached to ubi%d",
mtd->index, i);
- err = -EINVAL;
- goto out_mtd;
+ return -EEXIST;
}
+ }
- ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device),
- GFP_KERNEL);
- if (!ubi) {
- err = -ENOMEM;
- goto out_mtd;
+ /*
+ * Make sure this MTD device is not emulated on top of an UBI volume
+ * already. Generally such recursion works, but it causes problems, e.g.,
+ * the UBI module takes a reference to itself by attaching (and thus,
+ * opening) the emulated MTD device, which makes it impossible to unload
+ * the module. Besides, it generally makes
+ * no sense to attach emulated MTD devices, so we prohibit this.
+ */
+ if (mtd->type == MTD_UBIVOLUME) {
+ ubi_err("refuse attaching mtd%d - it is already emulated on "
+ "top of UBI", mtd->index);
+ return -EINVAL;
}
- ubi->ubi_num = ubi_devices_cnt;
- ubi->mtd = mtd;
+ if (ubi_num == UBI_DEV_NUM_AUTO) {
+ /* Search for an empty slot in the @ubi_devices array */
+ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
+ if (!ubi_devices[ubi_num])
+ break;
+ if (ubi_num == UBI_MAX_DEVICES) {
+ dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
+ return -ENFILE;
+ }
+ } else {
+ if (ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ /* Make sure ubi_num is not busy */
+ if (ubi_devices[ubi_num]) {
+ dbg_err("ubi%d already exists", ubi_num);
+ return -EEXIST;
+ }
+ }
- dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d",
- ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset);
+ ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
+ if (!ubi)
+ return -ENOMEM;
+ ubi->mtd = mtd;
+ ubi->ubi_num = ubi_num;
ubi->vid_hdr_offset = vid_hdr_offset;
- ubi->leb_start = data_offset;
+ ubi->autoresize_vol_id = -1;
+
+ mutex_init(&ubi->buf_mutex);
+ mutex_init(&ubi->ckvol_mutex);
+ mutex_init(&ubi->volumes_mutex);
+ spin_lock_init(&ubi->volumes_lock);
+
+ dbg_msg("attaching mtd%d to ubi%d: VID header offset %d",
+ mtd->index, ubi_num, vid_hdr_offset);
+
err = io_init(ubi);
if (err)
goto out_free;
- mutex_init(&ubi->buf_mutex);
ubi->peb_buf1 = vmalloc(ubi->peb_size);
if (!ubi->peb_buf1)
goto out_free;
@@ -605,12 +783,26 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
goto out_free;
}
+ if (ubi->autoresize_vol_id != -1) {
+ err = autoresize(ubi, ubi->autoresize_vol_id);
+ if (err)
+ goto out_detach;
+ }
+
err = uif_init(ubi);
if (err)
goto out_detach;
- ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt);
- ubi_msg("MTD device name: \"%s\"", ubi->mtd->name);
+ ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
+ if (IS_ERR(ubi->bgt_thread)) {
+ err = PTR_ERR(ubi->bgt_thread);
+ ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
+ err);
+ goto out_uif;
+ }
+
+ ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
+ ubi_msg("MTD device name: \"%s\"", mtd->name);
ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
ubi->peb_size, ubi->peb_size >> 10);
@@ -638,9 +830,11 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
wake_up_process(ubi->bgt_thread);
}
- ubi_devices_cnt += 1;
- return 0;
+ ubi_devices[ubi_num] = ubi;
+ return ubi_num;
+out_uif:
+ uif_close(ubi);
out_detach:
ubi_eba_close(ubi);
ubi_wl_close(ubi);
@@ -652,21 +846,58 @@ out_free:
vfree(ubi->dbg_peb_buf);
#endif
kfree(ubi);
-out_mtd:
- put_mtd_device(mtd);
- ubi_devices[ubi_devices_cnt] = NULL;
return err;
}
/**
- * detach_mtd_dev - detach an MTD device.
- * @ubi: UBI device description object
+ * ubi_detach_mtd_dev - detach an MTD device.
+ * @ubi_num: UBI device number to detach from
+ * @anyway: detach MTD even if device reference count is not zero
+ *
+ * This function destroys UBI device number @ubi_num and detaches the
+ * underlying MTD device. Returns zero in case of success, %-EBUSY if the
+ * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
+ * exist.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
*/
-static void detach_mtd_dev(struct ubi_device *ubi)
+int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
- int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index;
+ struct ubi_device *ubi;
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ spin_lock(&ubi_devices_lock);
+ ubi = ubi_devices[ubi_num];
+ if (!ubi) {
+ spin_unlock(&ubi_devices_lock);
+ return -EINVAL;
+ }
+
+ if (ubi->ref_count) {
+ if (!anyway) {
+ spin_unlock(&ubi_devices_lock);
+ return -EBUSY;
+ }
+ /* This may only happen if there is a bug */
+ ubi_err("%s reference count %d, destroy anyway",
+ ubi->ubi_name, ubi->ref_count);
+ }
+ ubi_devices[ubi_num] = NULL;
+ spin_unlock(&ubi_devices_lock);
+ ubi_assert(ubi_num == ubi->ubi_num);
dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+
+ /*
+ * Before freeing anything, we have to stop the background thread to
+ * prevent it from doing anything on this device while we are freeing.
+ */
+ if (ubi->bgt_thread)
+ kthread_stop(ubi->bgt_thread);
+
uif_close(ubi);
ubi_eba_close(ubi);
ubi_wl_close(ubi);
@@ -677,11 +908,37 @@ static void detach_mtd_dev(struct ubi_device *ubi)
#ifdef CONFIG_MTD_UBI_DEBUG
vfree(ubi->dbg_peb_buf);
#endif
- kfree(ubi_devices[ubi_num]);
- ubi_devices[ubi_num] = NULL;
- ubi_devices_cnt -= 1;
- ubi_assert(ubi_devices_cnt >= 0);
- ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num);
+ ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
+ kfree(ubi);
+ return 0;
+}
+
+/**
+ * open_mtd_device - open an MTD device by its name or number.
+ * @mtd_dev: name or number of the device
+ *
+ * This function tries to open an MTD device described by the @mtd_dev string,
+ * which is first treated as an ASCII number; if that fails, it is treated as
+ * the MTD device name. Returns the MTD device description object in case of
+ * success and a negative error code in case of failure.
+ */
+static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
+{
+ struct mtd_info *mtd;
+ int mtd_num;
+ char *endp;
+
+ mtd_num = simple_strtoul(mtd_dev, &endp, 0);
+ if (*endp != '\0' || mtd_dev == endp) {
+ /*
+ * This does not look like an ASCII integer, probably this is
+ * MTD device name.
+ */
+ mtd = get_mtd_device_nm(mtd_dev);
+ } else
+ mtd = get_mtd_device(NULL, mtd_num);
+
+ return mtd;
}
static int __init ubi_init(void)
@@ -693,47 +950,96 @@ static int __init ubi_init(void)
BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
if (mtd_devs > UBI_MAX_DEVICES) {
- printk("UBI error: too many MTD devices, maximum is %d\n",
- UBI_MAX_DEVICES);
+ printk(KERN_ERR "UBI error: too many MTD devices, "
+ "maximum is %d\n", UBI_MAX_DEVICES);
return -EINVAL;
}
+ /* Create base sysfs directory and sysfs files */
ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
- if (IS_ERR(ubi_class))
- return PTR_ERR(ubi_class);
+ if (IS_ERR(ubi_class)) {
+ err = PTR_ERR(ubi_class);
+ printk(KERN_ERR "UBI error: cannot create UBI class\n");
+ goto out;
+ }
err = class_create_file(ubi_class, &ubi_version);
- if (err)
+ if (err) {
+ printk(KERN_ERR "UBI error: cannot create sysfs file\n");
goto out_class;
+ }
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ printk(KERN_ERR "UBI error: cannot register device\n");
+ goto out_version;
+ }
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab)
+ goto out_dev_unreg;
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
struct mtd_dev_param *p = &mtd_dev_param[i];
+ struct mtd_info *mtd;
cond_resched();
- err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs);
- if (err)
+
+ mtd = open_mtd_device(p->name);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ goto out_detach;
+ }
+
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
+ p->vid_hdr_offs);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0) {
+ put_mtd_device(mtd);
+ printk(KERN_ERR "UBI error: cannot attach %s\n",
+ p->name);
goto out_detach;
+ }
}
return 0;
out_detach:
for (k = 0; k < i; k++)
- detach_mtd_dev(ubi_devices[k]);
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ kmem_cache_destroy(ubi_wl_entry_slab);
+out_dev_unreg:
+ misc_deregister(&ubi_ctrl_cdev);
+out_version:
class_remove_file(ubi_class, &ubi_version);
out_class:
class_destroy(ubi_class);
+out:
+ printk(KERN_ERR "UBI error: cannot initialize UBI, error %d\n", err);
return err;
}
module_init(ubi_init);
static void __exit ubi_exit(void)
{
- int i, n = ubi_devices_cnt;
+ int i;
- for (i = 0; i < n; i++)
- detach_mtd_dev(ubi_devices[i]);
+ for (i = 0; i < UBI_MAX_DEVICES; i++)
+ if (ubi_devices[i]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ kmem_cache_destroy(ubi_wl_entry_slab);
+ misc_deregister(&ubi_ctrl_cdev);
class_remove_file(ubi_class, &ubi_version);
class_destroy(ubi_class);
}
@@ -754,7 +1060,8 @@ static int __init bytes_str_to_int(const char *str)
result = simple_strtoul(str, &endp, 0);
if (str == endp || result < 0) {
- printk("UBI error: incorrect bytes count: \"%s\"\n", str);
+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
+ str);
return -EINVAL;
}
@@ -764,15 +1071,14 @@ static int __init bytes_str_to_int(const char *str)
case 'M':
result *= 1024;
case 'K':
- case 'k':
result *= 1024;
- if (endp[1] == 'i' && (endp[2] == '\0' ||
- endp[2] == 'B' || endp[2] == 'b'))
+ if (endp[1] == 'i' && endp[2] == 'B')
endp += 2;
case '\0':
break;
default:
- printk("UBI error: incorrect bytes count: \"%s\"\n", str);
+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
+ str);
return -EINVAL;
}
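For reference, a sketch of what the tightened suffix handling accepts after this change (illustrative values, not from the patch):

/*
 *	bytes_str_to_int("2048")  -> 2048
 *	bytes_str_to_int("4KiB")  -> 4096
 *	bytes_str_to_int("1MiB")  -> 1048576
 *	bytes_str_to_int("4k")    -> -EINVAL (lowercase 'k' is no longer accepted)
 */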
@@ -793,23 +1099,27 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
struct mtd_dev_param *p;
char buf[MTD_PARAM_LEN_MAX];
char *pbuf = &buf[0];
- char *tokens[3] = {NULL, NULL, NULL};
+ char *tokens[2] = {NULL, NULL};
+
+ if (!val)
+ return -EINVAL;
if (mtd_devs == UBI_MAX_DEVICES) {
- printk("UBI error: too many parameters, max. is %d\n",
+ printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
UBI_MAX_DEVICES);
return -EINVAL;
}
len = strnlen(val, MTD_PARAM_LEN_MAX);
if (len == MTD_PARAM_LEN_MAX) {
- printk("UBI error: parameter \"%s\" is too long, max. is %d\n",
- val, MTD_PARAM_LEN_MAX);
+ printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
+ "max. is %d\n", val, MTD_PARAM_LEN_MAX);
return -EINVAL;
}
if (len == 0) {
- printk("UBI warning: empty 'mtd=' parameter - ignored\n");
+ printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
+ "ignored\n");
return 0;
}
@@ -819,11 +1129,12 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
if (buf[len - 1] == '\n')
buf[len - 1] = '\0';
- for (i = 0; i < 3; i++)
+ for (i = 0; i < 2; i++)
tokens[i] = strsep(&pbuf, ",");
if (pbuf) {
- printk("UBI error: too many arguments at \"%s\"\n", val);
+ printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
+ val);
return -EINVAL;
}
@@ -832,13 +1143,9 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
if (tokens[1])
p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
- if (tokens[2])
- p->data_offs = bytes_str_to_int(tokens[2]);
if (p->vid_hdr_offs < 0)
return p->vid_hdr_offs;
- if (p->data_offs < 0)
- return p->data_offs;
mtd_devs += 1;
return 0;
@@ -846,16 +1153,15 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
- "mtd=<name|num>[,<vid_hdr_offs>,<data_offs>]. "
+ "mtd=<name|num>[,<vid_hdr_offs>].\n"
"Multiple \"mtd\" parameters may be specified.\n"
- "MTD devices may be specified by their number or name. "
- "Optional \"vid_hdr_offs\" and \"data_offs\" parameters "
- "specify UBI VID header position and data starting "
- "position to be used by UBI.\n"
- "Example: mtd=content,1984,2048 mtd=4 - attach MTD device"
- "with name content using VID header offset 1984 and data "
- "start 2048, and MTD device number 4 using default "
- "offsets");
+ "MTD devices may be specified by their number or name.\n"
+ "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
+ "header position to be used by UBI.\n"
+ "Example: mtd=content,1984 mtd=4 - attach MTD device "
+ "with name \"content\" using VID header offset 1984, and "
+ "MTD device number 4 with default VID header offset.");
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index fe4da1e96c52..9d6aae5449b6 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -28,6 +28,11 @@
*
* Major and minor numbers are assigned dynamically to both UBI and volume
* character devices.
+ *
+ * Well, there is also a third kind of character device - the UBI control
+ * character device, which allows UBI devices to be manipulated - created and
+ * deleted. In other words, it is used for attaching and detaching MTD
+ * devices.
*/
#include <linux/module.h>
@@ -39,34 +44,6 @@
#include <asm/div64.h>
#include "ubi.h"
-/*
- * Maximum sequence numbers of UBI and volume character device IOCTLs (direct
- * logical eraseblock erase is a debug-only feature).
- */
-#define UBI_CDEV_IOC_MAX_SEQ 2
-#ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
-#define VOL_CDEV_IOC_MAX_SEQ 1
-#else
-#define VOL_CDEV_IOC_MAX_SEQ 2
-#endif
-
-/**
- * major_to_device - get UBI device object by character device major number.
- * @major: major number
- *
- * This function returns a pointer to the UBI device object.
- */
-static struct ubi_device *major_to_device(int major)
-{
- int i;
-
- for (i = 0; i < ubi_devices_cnt; i++)
- if (ubi_devices[i] && ubi_devices[i]->major == major)
- return ubi_devices[i];
- BUG();
- return NULL;
-}
-
/**
* get_exclusive - get exclusive access to an UBI volume.
* @desc: volume descriptor
@@ -124,9 +101,11 @@ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
static int vol_cdev_open(struct inode *inode, struct file *file)
{
struct ubi_volume_desc *desc;
- const struct ubi_device *ubi = major_to_device(imajor(inode));
- int vol_id = iminor(inode) - 1;
- int mode;
+ int vol_id = iminor(inode) - 1, mode, ubi_num;
+
+ ubi_num = ubi_major2num(imajor(inode));
+ if (ubi_num < 0)
+ return ubi_num;
if (file->f_mode & FMODE_WRITE)
mode = UBI_READWRITE;
@@ -135,7 +114,7 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
dbg_msg("open volume %d, mode %d", vol_id, mode);
- desc = ubi_open_volume(ubi->ubi_num, vol_id, mode);
+ desc = ubi_open_volume(ubi_num, vol_id, mode);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -153,8 +132,15 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
if (vol->updating) {
ubi_warn("update of volume %d not finished, volume is damaged",
vol->vol_id);
+ ubi_assert(!vol->changing_leb);
vol->updating = 0;
vfree(vol->upd_buf);
+ } else if (vol->changing_leb) {
+ dbg_msg("only %lld of %lld bytes received for atomic LEB change"
+ " for volume %d:%d, cancel", vol->upd_received,
+ vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id);
+ vol->changing_leb = 0;
+ vfree(vol->upd_buf);
}
ubi_close_volume(desc);
@@ -205,13 +191,13 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- int err, lnum, off, len, vol_id = desc->vol->vol_id, tbuf_size;
+ int err, lnum, off, len, tbuf_size;
size_t count_save = count;
void *tbuf;
uint64_t tmp;
dbg_msg("read %zd bytes from offset %lld of volume %d",
- count, *offp, vol_id);
+ count, *offp, vol->vol_id);
if (vol->updating) {
dbg_err("updating");
@@ -225,7 +211,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
return 0;
if (vol->corrupted)
- dbg_msg("read from corrupted volume %d", vol_id);
+ dbg_msg("read from corrupted volume %d", vol->vol_id);
if (*offp + count > vol->used_bytes)
count_save = count = vol->used_bytes - *offp;
@@ -249,7 +235,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
if (off + len >= vol->usable_leb_size)
len = vol->usable_leb_size - off;
- err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0);
+ err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
if (err)
break;
@@ -289,13 +275,13 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0;
+ int lnum, off, len, tbuf_size, err = 0;
size_t count_save = count;
char *tbuf;
uint64_t tmp;
dbg_msg("requested: write %zd bytes to offset %lld of volume %u",
- count, *offp, desc->vol->vol_id);
+ count, *offp, vol->vol_id);
if (vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
@@ -339,7 +325,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
break;
}
- err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len,
+ err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
UBI_UNKNOWN);
if (err)
break;
@@ -372,22 +358,32 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- if (!vol->updating)
+ if (!vol->updating && !vol->changing_leb)
return vol_cdev_direct_write(file, buf, count, offp);
- err = ubi_more_update_data(ubi, vol->vol_id, buf, count);
+ if (vol->updating)
+ err = ubi_more_update_data(ubi, vol, buf, count);
+ else
+ err = ubi_more_leb_change_data(ubi, vol, buf, count);
+
if (err < 0) {
- ubi_err("cannot write %zd bytes of update data", count);
+ ubi_err("cannot accept more %zd bytes of data, error %d",
+ count, err);
return err;
}
if (err) {
/*
- * Update is finished, @err contains number of actually written
- * bytes now.
+ * The operation is finished, @err contains number of actually
+ * written bytes.
*/
count = err;
+ if (vol->changing_leb) {
+ revoke_exclusive(desc, UBI_READWRITE);
+ return count;
+ }
+
err = ubi_check_volume(ubi, vol->vol_id);
if (err < 0)
return err;
@@ -402,7 +398,6 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
revoke_exclusive(desc, UBI_READWRITE);
}
- *offp += count;
return count;
}
@@ -447,11 +442,46 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
if (err < 0)
break;
- err = ubi_start_update(ubi, vol->vol_id, bytes);
+ err = ubi_start_update(ubi, vol, bytes);
if (bytes == 0)
revoke_exclusive(desc, UBI_READWRITE);
+ break;
+ }
+
+ /* Atomic logical eraseblock change command */
+ case UBI_IOCEBCH:
+ {
+ struct ubi_leb_change_req req;
+
+ err = copy_from_user(&req, argp,
+ sizeof(struct ubi_leb_change_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (desc->mode == UBI_READONLY ||
+ vol->vol_type == UBI_STATIC_VOLUME) {
+ err = -EROFS;
+ break;
+ }
+
+ /* Validate the request */
+ err = -EINVAL;
+ if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
+ req.bytes < 0 || req.bytes > vol->usable_leb_size)
+ break;
+ if (req.dtype != UBI_LONGTERM && req.dtype != UBI_SHORTTERM &&
+ req.dtype != UBI_UNKNOWN)
+ break;
+
+ err = get_exclusive(desc);
+ if (err < 0)
+ break;
- file->f_pos = 0;
+ err = ubi_start_leb_change(ubi, vol, &req);
+ if (req.bytes == 0)
+ revoke_exclusive(desc, UBI_READWRITE);
break;
}
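From user space, the new UBI_IOCEBCH command is a two-step sequence - arm the change with the ioctl, then write exactly the announced number of bytes through the volume node. A hedged sketch (names come from ubi-user.h; the device path and error handling are illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

static int change_leb0(const char *vol_node, const void *buf, int len)
{
	struct ubi_leb_change_req req = {
		.lnum  = 0,		/* LEB to replace atomically */
		.bytes = len,		/* exact amount of data to follow */
		.dtype = UBI_UNKNOWN,
	};
	int fd = open(vol_node, O_RDWR);	/* e.g. "/dev/ubi0_0" */

	if (fd < 0)
		return -1;
	if (ioctl(fd, UBI_IOCEBCH, &req) < 0 ||
	    write(fd, buf, len) != len) {
		close(fd);
		return -1;
	}
	return close(fd);
}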
@@ -467,7 +497,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
break;
}
- if (desc->mode == UBI_READONLY) {
+ if (desc->mode == UBI_READONLY ||
+ vol->vol_type == UBI_STATIC_VOLUME) {
err = -EROFS;
break;
}
@@ -477,13 +508,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
break;
}
- if (vol->vol_type != UBI_DYNAMIC_VOLUME) {
- err = -EROFS;
- break;
- }
-
dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
- err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum);
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
break;
@@ -580,9 +606,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- ubi = major_to_device(imajor(inode));
- if (IS_ERR(ubi))
- return PTR_ERR(ubi);
+ ubi = ubi_get_by_major(imajor(inode));
+ if (!ubi)
+ return -ENODEV;
switch (cmd) {
/* Create volume command */
@@ -591,8 +617,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_mkvol_req req;
dbg_msg("create volume");
- err = copy_from_user(&req, argp,
- sizeof(struct ubi_mkvol_req));
+ err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
if (err) {
err = -EFAULT;
break;
@@ -604,7 +629,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
req.name[req.name_len] = '\0';
+ mutex_lock(&ubi->volumes_mutex);
err = ubi_create_volume(ubi, &req);
+ mutex_unlock(&ubi->volumes_mutex);
if (err)
break;
@@ -633,10 +660,16 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
break;
}
+ mutex_lock(&ubi->volumes_mutex);
err = ubi_remove_volume(desc);
- if (err)
- ubi_close_volume(desc);
+ mutex_unlock(&ubi->volumes_mutex);
+ /*
+ * The volume is deleted (unless an error occurred), and the
+ * 'struct ubi_volume' object will be freed when
+ * 'ubi_close_volume()' calls 'put_device()'.
+ */
+ ubi_close_volume(desc);
break;
}
@@ -648,8 +681,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_rsvol_req req;
dbg_msg("re-size volume");
- err = copy_from_user(&req, argp,
- sizeof(struct ubi_rsvol_req));
+ err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
if (err) {
err = -EFAULT;
break;
@@ -669,7 +701,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
pebs = !!do_div(tmp, desc->vol->usable_leb_size);
pebs += tmp;
+ mutex_lock(&ubi->volumes_mutex);
err = ubi_resize_volume(desc, pebs);
+ mutex_unlock(&ubi->volumes_mutex);
ubi_close_volume(desc);
break;
}
@@ -679,9 +713,93 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
break;
}
+ ubi_put_device(ubi);
return err;
}
+static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ void __user *argp = (void __user *)arg;
+
+ if (!capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ switch (cmd) {
+ /* Attach an MTD device command */
+ case UBI_IOCATT:
+ {
+ struct ubi_attach_req req;
+ struct mtd_info *mtd;
+
+ dbg_msg("attach MTD device");
+ err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (req.mtd_num < 0 ||
+ (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
+ err = -EINVAL;
+ break;
+ }
+
+ mtd = get_mtd_device(NULL, req.mtd_num);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ break;
+ }
+
+ /*
+ * Note, further request verification is done by
+ * 'ubi_attach_mtd_dev()'.
+ */
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0)
+ put_mtd_device(mtd);
+ else
+ /* @err contains UBI device number */
+ err = put_user(err, (__user int32_t *)argp);
+
+ break;
+ }
+
+ /* Detach an MTD device command */
+ case UBI_IOCDET:
+ {
+ int ubi_num;
+
+ dbg_msg("detach MTD device");
+ err = get_user(ubi_num, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_detach_mtd_dev(ubi_num, 0);
+ mutex_unlock(&ubi_devices_mutex);
+ break;
+ }
+
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
+ return err;
+}
+
+/* UBI control character device operations */
+struct file_operations ubi_ctrl_cdev_operations = {
+ .ioctl = ctrl_cdev_ioctl,
+ .owner = THIS_MODULE,
+};
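A user-space sketch of driving the control device defined above. The request layout and ioctl number come from ubi-user.h, while the "/dev/ubi_ctrl" path is an assumption (whatever node is created for the misc device registered in ubi_init()):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

static int attach_mtd0(void)
{
	struct ubi_attach_req req = {
		.ubi_num = UBI_DEV_NUM_AUTO,	/* let UBI pick the number */
		.mtd_num = 0,
		.vid_hdr_offset = 0,		/* 0 means "use the default" */
	};
	int fd = open("/dev/ubi_ctrl", O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, UBI_IOCATT, &req) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return req.ubi_num;	/* filled in by the kernel on success */
}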
+
/* UBI character device operations */
struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 467722eb618b..51c40b17f1ec 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -39,8 +39,9 @@
#ifdef CONFIG_MTD_UBI_DEBUG_MSG
/* Generic debugging message */
-#define dbg_msg(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__)
+#define dbg_msg(fmt, ...) \
+ printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
+ current->pid, __FUNCTION__, ##__VA_ARGS__)
#define ubi_dbg_dump_stack() dump_stack()
@@ -76,36 +77,28 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
/* Messages from the eraseblock association unit */
-#define dbg_eba(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \
- ##__VA_ARGS__)
+#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_eba(fmt, ...) ({})
#endif
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
/* Messages from the wear-leveling unit */
-#define dbg_wl(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \
- ##__VA_ARGS__)
+#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_wl(fmt, ...) ({})
#endif
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
/* Messages from the input/output unit */
-#define dbg_io(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \
- ##__VA_ARGS__)
+#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_io(fmt, ...) ({})
#endif
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
/* Initialization and build messages */
-#define dbg_bld(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \
- ##__VA_ARGS__)
+#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_bld(fmt, ...) ({})
#endif
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 880fa3690352..7ce91ca742b1 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -31,7 +31,7 @@
* logical eraseblock it is locked for reading or writing. The per-logical
* eraseblock locking is implemented by means of the lock tree. The lock tree
* is an RB-tree which refers all the currently locked logical eraseblocks. The
- * lock tree elements are &struct ltree_entry objects. They are indexed by
+ * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
* (@vol_id, @lnum) pairs.
*
* EBA also maintains the global sequence counter which is incremented each
@@ -50,29 +50,6 @@
#define EBA_RESERVED_PEBS 1
/**
- * struct ltree_entry - an entry in the lock tree.
- * @rb: links RB-tree nodes
- * @vol_id: volume ID of the locked logical eraseblock
- * @lnum: locked logical eraseblock number
- * @users: how many tasks are using this logical eraseblock or wait for it
- * @mutex: read/write mutex to implement read/write access serialization to
- * the (@vol_id, @lnum) logical eraseblock
- *
- * When a logical eraseblock is being locked - corresponding &struct ltree_entry
- * object is inserted to the lock tree (@ubi->ltree).
- */
-struct ltree_entry {
- struct rb_node rb;
- int vol_id;
- int lnum;
- int users;
- struct rw_semaphore mutex;
-};
-
-/* Slab cache for lock-tree entries */
-static struct kmem_cache *ltree_slab;
-
-/**
* next_sqnum - get next sequence number.
* @ubi: UBI device description object
*
@@ -101,7 +78,7 @@ static unsigned long long next_sqnum(struct ubi_device *ubi)
*/
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
- if (vol_id == UBI_LAYOUT_VOL_ID)
+ if (vol_id == UBI_LAYOUT_VOLUME_ID)
return UBI_LAYOUT_VOLUME_COMPAT;
return 0;
}
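The lock-tree entry type deleted above evidently moves into ubi.h as 'struct ubi_ltree_entry'; a sketch of its probable shape there, reconstructed from the removed fields (note that @users and @mutex are now initialized in ltree_add_entry() instead of by a slab constructor):

struct ubi_ltree_entry {
	struct rb_node rb;		/* links RB-tree nodes */
	int vol_id;			/* volume ID of the locked LEB */
	int lnum;			/* locked logical eraseblock number */
	int users;			/* tasks using or waiting for this LEB */
	struct rw_semaphore mutex;	/* read/write access serialization */
};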
@@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
* @vol_id: volume ID
* @lnum: logical eraseblock number
*
- * This function returns a pointer to the corresponding &struct ltree_entry
+ * This function returns a pointer to the corresponding &struct ubi_ltree_entry
* object if the logical eraseblock is locked and %NULL if it is not.
* @ubi->ltree_lock has to be locked.
*/
-static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
- int lnum)
+static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
+ int lnum)
{
struct rb_node *p;
p = ubi->ltree.rb_node;
while (p) {
- struct ltree_entry *le;
+ struct ubi_ltree_entry *le;
- le = rb_entry(p, struct ltree_entry, rb);
+ le = rb_entry(p, struct ubi_ltree_entry, rb);
if (vol_id < le->vol_id)
p = p->rb_left;
@@ -155,15 +132,17 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
* Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
* failed.
*/
-static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
- int lnum)
+static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
+ int vol_id, int lnum)
{
- struct ltree_entry *le, *le1, *le_free;
+ struct ubi_ltree_entry *le, *le1, *le_free;
- le = kmem_cache_alloc(ltree_slab, GFP_NOFS);
+ le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
if (!le)
return ERR_PTR(-ENOMEM);
+ le->users = 0;
+ init_rwsem(&le->mutex);
le->vol_id = vol_id;
le->lnum = lnum;
@@ -189,7 +168,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
p = &ubi->ltree.rb_node;
while (*p) {
parent = *p;
- le1 = rb_entry(parent, struct ltree_entry, rb);
+ le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
if (vol_id < le1->vol_id)
p = &(*p)->rb_left;
@@ -211,7 +190,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
spin_unlock(&ubi->ltree_lock);
if (le_free)
- kmem_cache_free(ltree_slab, le_free);
+ kfree(le_free);
return le;
}
@@ -227,7 +206,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
*/
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
- struct ltree_entry *le;
+ struct ubi_ltree_entry *le;
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
@@ -245,7 +224,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
int free = 0;
- struct ltree_entry *le;
+ struct ubi_ltree_entry *le;
spin_lock(&ubi->ltree_lock);
le = ltree_lookup(ubi, vol_id, lnum);
@@ -259,7 +238,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
up_read(&le->mutex);
if (free)
- kmem_cache_free(ltree_slab, le);
+ kfree(le);
}
/**
@@ -273,7 +252,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
*/
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
- struct ltree_entry *le;
+ struct ubi_ltree_entry *le;
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
@@ -283,6 +262,44 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
}
/**
+ * leb_write_trylock - try to lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing if there is no
+ * contention and does nothing if there is contention. Returns %0 in case of
+ * success, %1 in case of contention, and a negative error code in case of
+ * failure.
+ */
+static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ int free;
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ if (down_write_trylock(&le->mutex))
+ return 0;
+
+ /* Contention, cancel */
+ spin_lock(&ubi->ltree_lock);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ free = 1;
+ } else
+ free = 0;
+ spin_unlock(&ubi->ltree_lock);
+ if (free)
+ kfree(le);
+
+ return 1;
+}
+
+/**
* leb_write_unlock - unlock logical eraseblock.
* @ubi: UBI device description object
* @vol_id: volume ID
@@ -291,7 +308,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
int free;
- struct ltree_entry *le;
+ struct ubi_ltree_entry *le;
spin_lock(&ubi->ltree_lock);
le = ltree_lookup(ubi, vol_id, lnum);
@@ -306,23 +323,23 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
up_write(&le->mutex);
if (free)
- kmem_cache_free(ltree_slab, le);
+ kfree(le);
}
/**
* ubi_eba_unmap_leb - un-map logical eraseblock.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @lnum: logical eraseblock number
*
* This function un-maps logical eraseblock @lnum and schedules corresponding
* physical eraseblock for erasure. Returns zero in case of success and a
* negative error code in case of failure.
*/
-int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum)
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum)
{
- int idx = vol_id2idx(ubi, vol_id), err, pnum;
- struct ubi_volume *vol = ubi->volumes[idx];
+ int err, pnum, vol_id = vol->vol_id;
if (ubi->ro_mode)
return -EROFS;
@@ -349,7 +366,7 @@ out_unlock:
/**
* ubi_eba_read_leb - read data.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @lnum: logical eraseblock number
* @buf: buffer to store the read data
* @offset: offset from where to read
@@ -365,12 +382,11 @@ out_unlock:
* returned for any volume type if an ECC error was detected by the MTD device
* driver. Other negative error codes may be returned in case of other errors.
*/
-int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
- int offset, int len, int check)
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check)
{
- int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
+ int err, pnum, scrub = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
- struct ubi_volume *vol = ubi->volumes[idx];
uint32_t uninitialized_var(crc);
err = leb_read_lock(ubi, vol_id, lnum);
@@ -578,7 +594,7 @@ write_error:
/**
* ubi_eba_write_leb - write data to dynamic volume.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @lnum: logical eraseblock number
* @buf: the data to write
* @offset: offset within the logical eraseblock where to write
@@ -586,15 +602,14 @@ write_error:
* @dtype: data type
*
* This function writes data to logical eraseblock @lnum of a dynamic volume
- * @vol_id. Returns zero in case of success and a negative error code in case
+ * @vol. Returns zero in case of success and a negative error code in case
* of failure. In case of error, it is possible that something was still
* written to the flash media, but may be some garbage.
*/
-int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
const void *buf, int offset, int len, int dtype)
{
- int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0;
- struct ubi_volume *vol = ubi->volumes[idx];
+ int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
if (ubi->ro_mode)
@@ -613,7 +628,8 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
if (err) {
ubi_warn("failed to write data to PEB %d", pnum);
if (err == -EIO && ubi->bad_allowed)
- err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len);
+ err = recover_peb(ubi, pnum, vol_id, lnum, buf,
+ offset, len);
if (err)
ubi_ro_mode(ubi);
}
@@ -656,11 +672,14 @@ retry:
goto write_error;
}
- err = ubi_io_write_data(ubi, buf, pnum, offset, len);
- if (err) {
- ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, "
- "PEB %d", len, offset, vol_id, lnum, pnum);
- goto write_error;
+ if (len) {
+ err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ ubi_warn("failed to write %d bytes at offset %d of "
+ "LEB %d:%d, PEB %d", len, offset, vol_id,
+ lnum, pnum);
+ goto write_error;
+ }
}
vol->eba_tbl[lnum] = pnum;
@@ -698,7 +717,7 @@ write_error:
/**
* ubi_eba_write_leb_st - write data to static volume.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
@@ -706,7 +725,7 @@ write_error:
* @used_ebs: how many logical eraseblocks will this volume contain
*
* This function writes data to logical eraseblock @lnum of static volume
- * @vol_id. The @used_ebs argument should contain total number of logical
+ * @vol. The @used_ebs argument should contain total number of logical
* eraseblock in this static volume.
*
* When writing to the last logical eraseblock, the @len argument doesn't have
@@ -718,12 +737,11 @@ write_error:
* volumes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
-int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
- const void *buf, int len, int dtype, int used_ebs)
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype,
+ int used_ebs)
{
- int err, pnum, tries = 0, data_size = len;
- int idx = vol_id2idx(ubi, vol_id);
- struct ubi_volume *vol = ubi->volumes[idx];
+ int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
@@ -819,7 +837,7 @@ write_error:
/*
* ubi_eba_atomic_leb_change - change logical eraseblock atomically.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
@@ -834,17 +852,27 @@ write_error:
* UBI reserves one LEB for the "atomic LEB change" operation, so only one
* LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
*/
-int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
- const void *buf, int len, int dtype)
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype)
{
- int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id);
- struct ubi_volume *vol = ubi->volumes[idx];
+ int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
if (ubi->ro_mode)
return -EROFS;
+ if (len == 0) {
+ /*
+ * Special case when data length is zero. In this case the LEB
+ * has to be unmapped and mapped somewhere else.
+ */
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
+ if (err)
+ return err;
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+ }
+
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
return -ENOMEM;
@@ -928,20 +956,6 @@ write_error:
}
/**
- * ltree_entry_ctor - lock tree entries slab cache constructor.
- * @obj: the lock-tree entry to construct
- * @cache: the lock tree entry slab cache
- * @flags: constructor flags
- */
-static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
-{
- struct ltree_entry *le = obj;
-
- le->users = 0;
- init_rwsem(&le->mutex);
-}
-
-/**
* ubi_eba_copy_leb - copy logical eraseblock.
* @ubi: UBI device description object
* @from: physical eraseblock number from where to copy
@@ -950,14 +964,16 @@ static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
*
* This function copies logical eraseblock from physical eraseblock @from to
* physical eraseblock @to. The @vid_hdr buffer may be changed by this
- * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation
- * was canceled because bit-flips were detected at the target PEB, and a
- * negative error code in case of failure.
+ * function. Returns:
+ * o %0 in case of success;
+ * o %1 if the operation was canceled and should be tried later (e.g.,
+ * because a bit-flip was detected at the target PEB);
+ * o %2 if the volume is being deleted and this LEB should not be moved.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr)
{
- int err, vol_id, lnum, data_size, aldata_size, pnum, idx;
+ int err, vol_id, lnum, data_size, aldata_size, idx;
struct ubi_volume *vol;
uint32_t crc;
@@ -973,51 +989,67 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
data_size = aldata_size =
ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
- /*
- * We do not want anybody to write to this logical eraseblock while we
- * are moving it, so we lock it.
- */
- err = leb_write_lock(ubi, vol_id, lnum);
- if (err)
- return err;
-
- mutex_lock(&ubi->buf_mutex);
-
- /*
- * But the logical eraseblock might have been put by this time.
- * Cancel if it is true.
- */
idx = vol_id2idx(ubi, vol_id);
-
+ spin_lock(&ubi->volumes_lock);
/*
- * We may race with volume deletion/re-size, so we have to hold
- * @ubi->volumes_lock.
+ * Note, we may race with volume deletion, which means that the volume
+ * this logical eraseblock belongs to might be being deleted. Since the
+ * volume deletion unmaps all the volume's logical eraseblocks, it will
+ * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
*/
- spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[idx];
if (!vol) {
- dbg_eba("volume %d was removed meanwhile", vol_id);
+ /* No need to do further work, cancel */
+ dbg_eba("volume %d is being removed, cancel", vol_id);
spin_unlock(&ubi->volumes_lock);
- goto out_unlock;
+ return 2;
}
+ spin_unlock(&ubi->volumes_lock);
- pnum = vol->eba_tbl[lnum];
- if (pnum != from) {
- dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
- "PEB %d, cancel", vol_id, lnum, from, pnum);
- spin_unlock(&ubi->volumes_lock);
- goto out_unlock;
+ /*
+ * We do not want anybody to write to this logical eraseblock while we
+ * are moving it, so lock it.
+ *
+ * Note, we are using non-waiting locking here, because we cannot sleep
+ * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
+ * unmapping the LEB which is mapped to the PEB we are going to move
+ * (@from). This task locks the LEB and goes to sleep in the
+ * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
+ * holding @ubi->move_mutex and go to sleep on the LEB lock. So, if the
+ * LEB is already locked, we just do not move it and return %1.
+ */
+ err = leb_write_trylock(ubi, vol_id, lnum);
+ if (err) {
+ dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
+ return err;
}
- spin_unlock(&ubi->volumes_lock);
- /* OK, now the LEB is locked and we can safely start moving it */
+ /*
+ * The LEB might have been put meanwhile, and the task which put it is
+ * probably waiting on @ubi->move_mutex. No need to continue the work,
+ * cancel it.
+ */
+ if (vol->eba_tbl[lnum] != from) {
+ dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
+ "PEB %d, cancel", vol_id, lnum, from,
+ vol->eba_tbl[lnum]);
+ err = 1;
+ goto out_unlock_leb;
+ }
+ /*
+ * OK, now the LEB is locked and we can safely start moving it. Since
+ * this function utilizes the @ubi->peb_buf1 buffer, which is shared
+ * with some other functions, lock the buffer by taking the
+ * @ubi->buf_mutex.
+ */
+ mutex_lock(&ubi->buf_mutex);
dbg_eba("read %d bytes of data", aldata_size);
err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
if (err && err != UBI_IO_BITFLIPS) {
ubi_warn("error %d while reading data from PEB %d",
err, from);
- goto out_unlock;
+ goto out_unlock_buf;
}
/*
@@ -1053,7 +1085,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err)
- goto out_unlock;
+ goto out_unlock_buf;
cond_resched();
@@ -1062,13 +1094,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
if (err) {
if (err != UBI_IO_BITFLIPS)
ubi_warn("cannot read VID header back from PEB %d", to);
- goto out_unlock;
+ else
+ err = 1;
+ goto out_unlock_buf;
}
if (data_size > 0) {
err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
if (err)
- goto out_unlock;
+ goto out_unlock_buf;
cond_resched();
@@ -1082,7 +1116,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
if (err != UBI_IO_BITFLIPS)
ubi_warn("cannot read data back from PEB %d",
to);
- goto out_unlock;
+ else
+ err = 1;
+ goto out_unlock_buf;
}
cond_resched();
@@ -1090,15 +1126,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
ubi_warn("read data back from PEB %d - it is different",
to);
- goto out_unlock;
+ goto out_unlock_buf;
}
}
ubi_assert(vol->eba_tbl[lnum] == from);
vol->eba_tbl[lnum] = to;
-out_unlock:
+out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
+out_unlock_leb:
leb_write_unlock(ubi, vol_id, lnum);
return err;
}
@@ -1125,14 +1162,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
mutex_init(&ubi->alc_mutex);
ubi->ltree = RB_ROOT;
- if (ubi_devices_cnt == 0) {
- ltree_slab = kmem_cache_create("ubi_ltree_slab",
- sizeof(struct ltree_entry), 0,
- 0, &ltree_entry_ctor);
- if (!ltree_slab)
- return -ENOMEM;
- }
-
ubi->global_sqnum = si->max_sqnum + 1;
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
@@ -1168,6 +1197,15 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
}
}
+ if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+ ubi_err("not enough physical eraseblocks (%d, need %d)",
+ ubi->avail_pebs, EBA_RESERVED_PEBS);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= EBA_RESERVED_PEBS;
+ ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
if (ubi->bad_allowed) {
ubi_calculate_reserved(ubi);
@@ -1184,15 +1222,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
}
- if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
- ubi_err("no enough physical eraseblocks (%d, need %d)",
- ubi->avail_pebs, EBA_RESERVED_PEBS);
- err = -ENOSPC;
- goto out_free;
- }
- ubi->avail_pebs -= EBA_RESERVED_PEBS;
- ubi->rsvd_pebs += EBA_RESERVED_PEBS;
-
dbg_eba("EBA unit is initialized");
return 0;
@@ -1202,8 +1231,6 @@ out_free:
continue;
kfree(ubi->volumes[i]->eba_tbl);
}
- if (ubi_devices_cnt == 0)
- kmem_cache_destroy(ltree_slab);
return err;
}
@@ -1222,6 +1249,4 @@ void ubi_eba_close(const struct ubi_device *ubi)
continue;
kfree(ubi->volumes[i]->eba_tbl);
}
- if (ubi_devices_cnt == 1)
- kmem_cache_destroy(ltree_slab);
}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 41ff74c60e14..d397219238d3 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -129,8 +129,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
if (to_read > total_read)
to_read = total_read;
- err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs,
- to_read, 0);
+ err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0);
if (err)
break;
@@ -187,8 +186,8 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
if (to_write > total_written)
to_write = total_written;
- err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs,
- to_write, UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write,
+ UBI_UNKNOWN);
if (err)
break;
@@ -237,7 +236,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
return -EROFS;
for (i = 0; i < count; i++) {
- err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i);
+ err = ubi_eba_unmap_leb(ubi, vol, lnum + i);
if (err)
goto out_err;
}
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 7c304eec78b5..db3efdef2433 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -173,6 +173,16 @@ retry:
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
ubi_dbg_dump_stack();
+
+ /*
+ * The driver should never return -EBADMSG if it failed to read
+ * all the requested data. But some buggy drivers might do
+ * this, so we change it to -EIO.
+ */
+ if (read != len && err == -EBADMSG) {
+ ubi_assert(0);
+ err = -EIO;
+ }
} else {
ubi_assert(len == read);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 03c774f41549..a70d58823f8d 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -30,23 +30,27 @@
* @ubi_num: UBI device number
* @di: the information is stored here
*
- * This function returns %0 in case of success and a %-ENODEV if there is no
- * such UBI device.
+ * This function returns %0 in case of success, %-EINVAL if the UBI device
+ * number is invalid, and %-ENODEV if there is no such UBI device.
*/
int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
{
- const struct ubi_device *ubi;
+ struct ubi_device *ubi;
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
- if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES ||
- !ubi_devices[ubi_num])
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
return -ENODEV;
- ubi = ubi_devices[ubi_num];
di->ubi_num = ubi->ubi_num;
di->leb_size = ubi->leb_size;
di->min_io_size = ubi->min_io_size;
di->ro_mode = ubi->ro_mode;
- di->cdev = MKDEV(ubi->major, 0);
+ di->cdev = ubi->cdev.dev;
+
+ ubi_put_device(ubi);
return 0;
}
EXPORT_SYMBOL_GPL(ubi_get_device_info);
@@ -73,7 +77,7 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc,
vi->usable_leb_size = vol->usable_leb_size;
vi->name_len = vol->name_len;
vi->name = vol->name;
- vi->cdev = MKDEV(ubi->major, vi->vol_id + 1);
+ vi->cdev = vol->cdev.dev;
}
EXPORT_SYMBOL_GPL(ubi_get_volume_info);
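
With the reworked helper above, in-kernel clients only need what <linux/mtd/ubi.h> exports. A hedged sketch of querying device information from some kernel module (device number 0 is just an example):

#include <linux/kernel.h>
#include <linux/mtd/ubi.h>

/* Sketch: query UBI device 0 and report its geometry. */
static int report_ubi0_info(void)
{
        struct ubi_device_info di;
        int err = ubi_get_device_info(0, &di);

        if (err == -EINVAL)
                printk(KERN_ERR "invalid UBI device number\n");
        else if (err == -ENODEV)
                printk(KERN_ERR "UBI device 0 does not exist\n");
        else if (!err)
                printk(KERN_INFO "ubi%d: LEB size %d, min. I/O unit %d\n",
                       di.ubi_num, di.leb_size, di.min_io_size);
        return err;
}
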
@@ -104,37 +108,39 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
- err = -ENODEV;
- if (ubi_num < 0)
- return ERR_PTR(err);
-
- ubi = ubi_devices[ubi_num];
-
- if (!try_module_get(THIS_MODULE))
- return ERR_PTR(err);
-
- if (ubi_num >= UBI_MAX_DEVICES || !ubi)
- goto out_put;
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
- err = -EINVAL;
- if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
- goto out_put;
if (mode != UBI_READONLY && mode != UBI_READWRITE &&
mode != UBI_EXCLUSIVE)
- goto out_put;
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * First of all, we have to get the UBI device to prevent its removal.
+ */
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
+ err = -EINVAL;
+ goto out_put_ubi;
+ }
desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
if (!desc) {
err = -ENOMEM;
- goto out_put;
+ goto out_put_ubi;
}
+ err = -ENODEV;
+ if (!try_module_get(THIS_MODULE))
+ goto out_free;
+
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
- if (!vol) {
- err = -ENODEV;
+ if (!vol)
goto out_unlock;
- }
err = -EBUSY;
switch (mode) {
@@ -156,21 +162,19 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
vol->exclusive = 1;
break;
}
+ get_device(&vol->dev);
+ vol->ref_count += 1;
spin_unlock(&ubi->volumes_lock);
desc->vol = vol;
desc->mode = mode;
- /*
- * To prevent simultaneous checks of the same volume we use @vtbl_mutex,
- * although it is not the purpose it was introduced for.
- */
- mutex_lock(&ubi->vtbl_mutex);
+ mutex_lock(&ubi->ckvol_mutex);
if (!vol->checked) {
/* This is the first open - check the volume */
err = ubi_check_volume(ubi, vol_id);
if (err < 0) {
- mutex_unlock(&ubi->vtbl_mutex);
+ mutex_unlock(&ubi->ckvol_mutex);
ubi_close_volume(desc);
return ERR_PTR(err);
}
@@ -181,14 +185,17 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
}
vol->checked = 1;
}
- mutex_unlock(&ubi->vtbl_mutex);
+ mutex_unlock(&ubi->ckvol_mutex);
+
return desc;
out_unlock:
spin_unlock(&ubi->volumes_lock);
- kfree(desc);
-out_put:
module_put(THIS_MODULE);
+out_free:
+ kfree(desc);
+out_put_ubi:
+ ubi_put_device(ubi);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ubi_open_volume);
@@ -205,8 +212,8 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
int mode)
{
int i, vol_id = -1, len;
- struct ubi_volume_desc *ret;
struct ubi_device *ubi;
+ struct ubi_volume_desc *ret;
dbg_msg("open volume %s, mode %d", name, mode);
@@ -217,14 +224,12 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
if (len > UBI_VOL_NAME_MAX)
return ERR_PTR(-EINVAL);
- ret = ERR_PTR(-ENODEV);
- if (!try_module_get(THIS_MODULE))
- return ret;
-
- if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num])
- goto out_put;
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
- ubi = ubi_devices[ubi_num];
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
spin_lock(&ubi->volumes_lock);
/* Walk all volumes of this UBI device */
@@ -238,13 +243,16 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
}
spin_unlock(&ubi->volumes_lock);
- if (vol_id < 0)
- goto out_put;
+ if (vol_id >= 0)
+ ret = ubi_open_volume(ubi_num, vol_id, mode);
+ else
+ ret = ERR_PTR(-ENODEV);
- ret = ubi_open_volume(ubi_num, vol_id, mode);
-
-out_put:
- module_put(THIS_MODULE);
+ /*
+ * We should put the UBI device even in case of success, because
+ * 'ubi_open_volume()' took a reference as well.
+ */
+ ubi_put_device(ubi);
return ret;
}
EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
@@ -256,10 +264,11 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
void ubi_close_volume(struct ubi_volume_desc *desc)
{
struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
- spin_lock(&vol->ubi->volumes_lock);
+ spin_lock(&ubi->volumes_lock);
switch (desc->mode) {
case UBI_READONLY:
vol->readers -= 1;
@@ -270,9 +279,12 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
case UBI_EXCLUSIVE:
vol->exclusive = 0;
}
- spin_unlock(&vol->ubi->volumes_lock);
+ vol->ref_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
kfree(desc);
+ put_device(&vol->dev);
+ ubi_put_device(ubi);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(ubi_close_volume);
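
Since ubi_open_volume() now pins both the UBI device (ubi_get_device()) and the volume (get_device() plus @ref_count), and ubi_close_volume() drops every reference it took, a client only has to pair the two calls. A minimal sketch, assuming volume 3 on ubi0 exists and @buf holds at least 128 bytes:

#include <linux/err.h>
#include <linux/mtd/ubi.h>

/* Sketch: read the first 128 bytes of LEB 0 of volume 3 on ubi0. */
static int read_leb0(char *buf)
{
        struct ubi_volume_desc *desc;
        int err;

        desc = ubi_open_volume(0, 3, UBI_READONLY);
        if (IS_ERR(desc))
                return PTR_ERR(desc);   /* -EINVAL, -ENODEV, -EBUSY, ... */

        err = ubi_leb_read(desc, 0, buf, 0, 128, 0);

        /* Drops the volume and UBI device references taken by the open. */
        ubi_close_volume(desc);
        return err;
}
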
@@ -332,7 +344,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
if (len == 0)
return 0;
- err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check);
+ err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
ubi_warn("mark volume %d as corrupted", vol_id);
vol->corrupted = 1;
@@ -399,7 +411,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
if (len == 0)
return 0;
- return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
}
EXPORT_SYMBOL_GPL(ubi_leb_write);
@@ -448,7 +460,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
if (len == 0)
return 0;
- return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
}
EXPORT_SYMBOL_GPL(ubi_leb_change);
@@ -468,9 +480,9 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- int err, vol_id = vol->vol_id;
+ int err;
- dbg_msg("erase LEB %d:%d", vol_id, lnum);
+ dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
@@ -481,7 +493,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
if (vol->upd_marker)
return -EBADF;
- err = ubi_eba_unmap_leb(ubi, vol_id, lnum);
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
return err;
@@ -529,9 +541,8 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
{
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- int vol_id = vol->vol_id;
- dbg_msg("unmap LEB %d:%d", vol_id, lnum);
+ dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
@@ -542,11 +553,55 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
if (vol->upd_marker)
return -EBADF;
- return ubi_eba_unmap_leb(ubi, vol_id, lnum);
+ return ubi_eba_unmap_leb(ubi, vol, lnum);
}
EXPORT_SYMBOL_GPL(ubi_leb_unmap);
/**
+ * ubi_leb_map - map logical eraseblock to a physical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ * @dtype: expected data type
+ *
+ * This function maps an un-mapped logical eraseblock @lnum to a physical
+ * eraseblock. This means that after a successful invocation of this
+ * function the logical eraseblock @lnum will be empty (contain only %0xFF
+ * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
+ * happens.
+ *
+ * This function returns zero in case of success, %-EBADF if the volume is
+ * damaged because of an interrupted update, %-EBADMSG if the logical
+ * eraseblock is already mapped, and other negative error codes in case of
+ * other failures.
+ */
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+
+ dbg_msg("map LEB %d:%d", vol->vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
+ dtype != UBI_UNKNOWN)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (vol->eba_tbl[lnum] >= 0)
+ return -EBADMSG;
+
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_map);
+
+/**
* ubi_is_mapped - check if logical eraseblock is mapped.
* @desc: volume descriptor
* @lnum: logical eraseblock number
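
The new ubi_leb_map() lets a client pre-allocate the physical eraseblock behind a LEB so that the mapping survives an unclean reboot; per the kernel-doc above it fails with %-EBADMSG when the LEB is already mapped. A sketch that avoids that case by checking ubi_is_mapped() first (the helper name is made up; ubi_is_mapped() is assumed to keep its documented 0/1/negative return convention):

/* Sketch: make sure LEB @lnum of an open volume is mapped. */
static int ensure_mapped(struct ubi_volume_desc *desc, int lnum)
{
        int err = ubi_is_mapped(desc, lnum);

        if (err < 0)
                return err;     /* -EINVAL, -EBADF, ... */
        if (err == 1)
                return 0;       /* already mapped, nothing to do */

        /* Maps @lnum to an empty (all 0xFF) physical eraseblock. */
        return ubi_leb_map(desc, lnum, UBI_UNKNOWN);
}
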
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 9e2338c8e2cf..93e052812012 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -79,7 +79,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
else
size = vol->usable_leb_size;
- err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1);
+ err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
if (err) {
if (err == -EBADMSG)
err = 1;
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index c7b0afc9d280..05aa3e7daba1 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -286,9 +286,14 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
* FIXME: but this is anyway obsolete and will be removed at
* some point.
*/
-
dbg_bld("using old crappy leb_ver stuff");
+ if (v1 == v2) {
+ ubi_err("PEB %d and PEB %d have the same version %lld",
+ seb->pnum, pnum, v1);
+ return -EINVAL;
+ }
+
abs = v1 - v2;
if (abs < 0)
abs = -abs;
@@ -390,7 +395,6 @@ out_free_buf:
vfree(buf);
out_free_vidh:
ubi_free_vid_hdr(ubi, vh);
- ubi_assert(err < 0);
return err;
}
@@ -769,7 +773,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
*/
static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
{
- long long ec;
+ long long uninitialized_var(ec);
int err, bitflips = 0, vol_id, ec_corr = 0;
dbg_bld("scan PEB %d", pnum);
@@ -854,7 +858,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum
}
vol_id = be32_to_cpu(vidh->vol_id);
- if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOL_ID) {
+ if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5e941a633030..457710615261 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -94,8 +94,43 @@ enum {
UBI_IO_BITFLIPS
};
-extern int ubi_devices_cnt;
-extern struct ubi_device *ubi_devices[];
+/**
+ * struct ubi_wl_entry - wear-leveling entry.
+ * @rb: link in the corresponding RB-tree
+ * @ec: erase counter
+ * @pnum: physical eraseblock number
+ *
+ * This data structure is used in the WL unit. Each physical eraseblock has a
+ * corresponding &struct ubi_wl_entry object which may be kept in different
+ * RB-trees. See WL unit for details.
+ */
+struct ubi_wl_entry {
+ struct rb_node rb;
+ int ec;
+ int pnum;
+};
+
+/**
+ * struct ubi_ltree_entry - an entry in the lock tree.
+ * @rb: links RB-tree nodes
+ * @vol_id: volume ID of the locked logical eraseblock
+ * @lnum: locked logical eraseblock number
+ * @users: how many tasks are using this logical eraseblock or wait for it
+ * @mutex: read/write mutex to implement read/write access serialization to
+ * the (@vol_id, @lnum) logical eraseblock
+ *
+ * This data structure is used in the EBA unit to implement per-LEB locking.
+ * When a logical eraseblock is being locked, the corresponding
+ * &struct ubi_ltree_entry object is inserted into the lock tree (@ubi->ltree).
+ * See EBA unit for details.
+ */
+struct ubi_ltree_entry {
+ struct rb_node rb;
+ int vol_id;
+ int lnum;
+ int users;
+ struct rw_semaphore mutex;
+};
struct ubi_volume_desc;
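
The lock-tree entry above is keyed by (@vol_id, @lnum); the EBA unit keeps these entries in an RB-tree and takes @mutex in read or write mode to serialize access to one LEB. A simplified sketch of the lookup such a tree needs, assuming the structure above is visible (the real ltree_lookup() in eba.c additionally maintains @users under a spinlock):

#include <linux/rbtree.h>

/* Simplified sketch: find the lock entry for (@vol_id, @lnum), if any. */
static struct ubi_ltree_entry *ltree_lookup_sketch(struct rb_root *root,
                                                   int vol_id, int lnum)
{
        struct rb_node *p = root->rb_node;

        while (p) {
                struct ubi_ltree_entry *le;

                le = rb_entry(p, struct ubi_ltree_entry, rb);
                if (vol_id < le->vol_id)
                        p = p->rb_left;
                else if (vol_id > le->vol_id)
                        p = p->rb_right;
                else if (lnum < le->lnum)
                        p = p->rb_left;
                else if (lnum > le->lnum)
                        p = p->rb_right;
                else
                        return le;      /* this LEB is currently locked */
        }
        return NULL;
}
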
@@ -105,11 +140,10 @@ struct ubi_volume_desc;
* @cdev: character device object to create character device
* @ubi: reference to the UBI device description object
* @vol_id: volume ID
+ * @ref_count: volume reference count
* @readers: number of users holding this volume in read-only mode
* @writers: number of users holding this volume in read-write mode
* @exclusive: whether somebody holds this volume in exclusive mode
- * @removed: if the volume was removed
- * @checked: if this static volume was checked
*
* @reserved_pebs: how many physical eraseblocks are reserved for this volume
* @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
@@ -117,21 +151,30 @@ struct ubi_volume_desc;
* @used_ebs: how many logical eraseblocks in this volume contain data
* @last_eb_bytes: how many bytes are stored in the last logical eraseblock
* @used_bytes: how many bytes of data this volume contains
- * @upd_marker: non-zero if the update marker is set for this volume
- * @corrupted: non-zero if the volume is corrupted (static volumes only)
* @alignment: volume alignment
* @data_pad: how many bytes are not used at the end of physical eraseblocks to
- * satisfy the requested alignment
+ * satisfy the requested alignment
* @name_len: volume name length
* @name: volume name
*
- * @updating: whether the volume is being updated
* @upd_ebs: how many eraseblocks are expected to be updated
- * @upd_bytes: how many bytes are expected to be received
- * @upd_received: how many update bytes were already received
- * @upd_buf: update buffer which is used to collect update data
+ * @ch_lnum: LEB number which is being changed by the atomic LEB change
+ * operation
+ * @ch_dtype: data persistency type which is being changed by the atomic LEB
+ * change operation
+ * @upd_bytes: how many bytes are expected to be received for volume update or
+ * atomic LEB change
+ * @upd_received: how many bytes were already received for volume update or
+ * atomic LEB change
+ * @upd_buf: update buffer which is used to collect update data or data for
+ * atomic LEB change
*
* @eba_tbl: EBA table of this volume (LEB->PEB mapping)
+ * @checked: %1 if this static volume was checked
+ * @corrupted: %1 if the volume is corrupted (static volumes only)
+ * @upd_marker: %1 if the update marker is set for this volume
+ * @updating: %1 if the volume is being updated
+ * @changing_leb: %1 if the atomic LEB change ioctl command is in progress
*
* @gluebi_desc: gluebi UBI volume descriptor
* @gluebi_refcount: reference count of the gluebi MTD device
@@ -150,11 +193,10 @@ struct ubi_volume {
struct cdev cdev;
struct ubi_device *ubi;
int vol_id;
+ int ref_count;
int readers;
int writers;
int exclusive;
- int removed;
- int checked;
int reserved_pebs;
int vol_type;
@@ -162,23 +204,31 @@ struct ubi_volume {
int used_ebs;
int last_eb_bytes;
long long used_bytes;
- int upd_marker;
- int corrupted;
int alignment;
int data_pad;
int name_len;
char name[UBI_VOL_NAME_MAX+1];
- int updating;
int upd_ebs;
+ int ch_lnum;
+ int ch_dtype;
long long upd_bytes;
long long upd_received;
void *upd_buf;
int *eba_tbl;
+ unsigned int checked:1;
+ unsigned int corrupted:1;
+ unsigned int upd_marker:1;
+ unsigned int updating:1;
+ unsigned int changing_leb:1;
#ifdef CONFIG_MTD_UBI_GLUEBI
- /* Gluebi-related stuff may be compiled out */
+ /*
+ * Gluebi-related stuff may be compiled out.
+ * TODO: this should not be built into UBI but should be a separate
+ * ubimtd driver which works on top of UBI and emulates MTD devices.
+ */
struct ubi_volume_desc *gluebi_desc;
int gluebi_refcount;
struct mtd_info gluebi_mtd;
@@ -200,28 +250,31 @@ struct ubi_wl_entry;
/**
* struct ubi_device - UBI device description structure
- * @dev: class device object to use the the Linux device model
+ * @dev: UBI device object to use the Linux device model
* @cdev: character device object to create character device
* @ubi_num: UBI device number
* @ubi_name: UBI device name
- * @major: character device major number
* @vol_count: number of volumes in this UBI device
* @volumes: volumes of this UBI device
* @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs,
- * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers,
- * @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and
- * @vol->eba_tbl.
+ * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
+ * @vol->readers, @vol->writers, @vol->exclusive,
+ * @vol->ref_count, @vol->mapping and @vol->eba_tbl.
+ * @ref_count: count of references on the UBI device
*
* @rsvd_pebs: count of reserved physical eraseblocks
* @avail_pebs: count of available physical eraseblocks
* @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB
- * handling
+ * handling
* @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
*
+ * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
+ * of UBI initialization
* @vtbl_slots: how many slots are available in the volume table
* @vtbl_size: size of the volume table in bytes
* @vtbl: in-RAM volume table copy
- * @vtbl_mutex: protects on-flash volume table
+ * @volumes_mutex: protects on-flash volume table and serializes volume
+ * changes, like creation, deletion, update, resize
*
* @max_ec: current highest erase counter value
* @mean_ec: current mean erase counter value
@@ -238,15 +291,15 @@ struct ubi_wl_entry;
* @prot.pnum: protection tree indexed by physical eraseblock numbers
* @prot.aec: protection tree indexed by absolute erase counter value
* @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
- * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
- * fields
+ * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
+ * fields
+ * @move_mutex: serializes eraseblock moves
* @wl_scheduled: non-zero if the wear-leveling was scheduled
* @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
- * physical eraseblock
+ * physical eraseblock
* @abs_ec: absolute erase counter
* @move_from: physical eraseblock from where the data is being moved
* @move_to: physical eraseblock where the data is being moved to
- * @move_from_put: if the "from" PEB was put
* @move_to_put: if the "to" PEB was put
* @works: list of pending works
* @works_count: count of pending works
@@ -273,13 +326,13 @@ struct ubi_wl_entry;
* @hdrs_min_io_size
* @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
* @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
- * not
+ * not
* @mtd: MTD device descriptor
*
* @peb_buf1: a buffer of PEB size used for different purposes
* @peb_buf2: another buffer of PEB size used for different purposes
* @buf_mutex: protects @peb_buf1 and @peb_buf2
- * @dbg_peb_buf: buffer of PEB size used for debugging
+ * @dbg_peb_buf: buffer of PEB size used for debugging
* @dbg_buf_mutex: protects @dbg_peb_buf
*/
struct ubi_device {
@@ -287,22 +340,24 @@ struct ubi_device {
struct device dev;
int ubi_num;
char ubi_name[sizeof(UBI_NAME_STR)+5];
- int major;
int vol_count;
struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
spinlock_t volumes_lock;
+ int ref_count;
int rsvd_pebs;
int avail_pebs;
int beb_rsvd_pebs;
int beb_rsvd_level;
+ int autoresize_vol_id;
int vtbl_slots;
int vtbl_size;
struct ubi_vtbl_record *vtbl;
- struct mutex vtbl_mutex;
+ struct mutex volumes_mutex;
int max_ec;
+ /* TODO: mean_ec is not updated run-time, fix */
int mean_ec;
/* EBA unit's stuff */
@@ -320,12 +375,13 @@ struct ubi_device {
struct rb_root aec;
} prot;
spinlock_t wl_lock;
+ struct mutex move_mutex;
+ struct rw_semaphore work_sem;
int wl_scheduled;
struct ubi_wl_entry **lookuptbl;
unsigned long long abs_ec;
struct ubi_wl_entry *move_from;
struct ubi_wl_entry *move_to;
- int move_from_put;
int move_to_put;
struct list_head works;
int works_count;
@@ -355,15 +411,19 @@ struct ubi_device {
void *peb_buf1;
void *peb_buf2;
struct mutex buf_mutex;
+ struct mutex ckvol_mutex;
#ifdef CONFIG_MTD_UBI_DEBUG
void *dbg_peb_buf;
struct mutex dbg_buf_mutex;
#endif
};
+extern struct kmem_cache *ubi_wl_entry_slab;
+extern struct file_operations ubi_ctrl_cdev_operations;
extern struct file_operations ubi_cdev_operations;
extern struct file_operations ubi_vol_cdev_operations;
extern struct class *ubi_class;
+extern struct mutex ubi_devices_mutex;
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -374,13 +434,18 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
int ubi_remove_volume(struct ubi_volume_desc *desc);
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
-int ubi_add_volume(struct ubi_device *ubi, int vol_id);
-void ubi_free_volume(struct ubi_device *ubi, int vol_id);
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
/* upd.c */
-int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes);
-int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes);
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
const void __user *buf, int count);
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ const struct ubi_leb_change_req *req);
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count);
/* misc.c */
int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length);
@@ -399,16 +464,17 @@ void ubi_gluebi_updated(struct ubi_volume *vol);
#endif
/* eba.c */
-int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum);
-int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
- int offset, int len, int check);
-int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum);
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check);
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
const void *buf, int offset, int len, int dtype);
-int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
- const void *buf, int len, int dtype,
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype,
int used_ebs);
-int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
- const void *buf, int len, int dtype);
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype);
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr);
int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
@@ -421,6 +487,7 @@ int ubi_wl_flush(struct ubi_device *ubi);
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
void ubi_wl_close(struct ubi_device *ubi);
+int ubi_thread(void *u);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -439,6 +506,14 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
+/* build.c */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
+int ubi_detach_mtd_dev(int ubi_num, int anyway);
+struct ubi_device *ubi_get_device(int ubi_num);
+void ubi_put_device(struct ubi_device *ubi);
+struct ubi_device *ubi_get_by_major(int major);
+int ubi_major2num(int major);
+
/*
* ubi_rb_for_each_entry - walk an RB-tree.
* @rb: a pointer to type 'struct rb_node' to use as a loop counter
@@ -523,8 +598,10 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
*/
static inline void ubi_ro_mode(struct ubi_device *ubi)
{
- ubi->ro_mode = 1;
- ubi_warn("switch to read-only mode");
+ if (!ubi->ro_mode) {
+ ubi->ro_mode = 1;
+ ubi_warn("switch to read-only mode");
+ }
}
/**
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0efc586a8328..ddaa1a56cc69 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -22,7 +22,8 @@
*/
/*
- * This file contains implementation of the volume update functionality.
+ * This file contains implementation of the volume update and atomic LEB change
+ * functionality.
*
* The update operation is based on the per-volume update marker which is
* stored in the volume table. The update marker is set before the update
@@ -45,29 +46,31 @@
/**
* set_update_marker - set update marker.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
*
- * This function sets the update marker flag for volume @vol_id. Returns zero
+ * This function sets the update marker flag for volume @vol. Returns zero
* in case of success and a negative error code in case of failure.
*/
-static int set_update_marker(struct ubi_device *ubi, int vol_id)
+static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
{
int err;
struct ubi_vtbl_record vtbl_rec;
- struct ubi_volume *vol = ubi->volumes[vol_id];
- dbg_msg("set update marker for volume %d", vol_id);
+ dbg_msg("set update marker for volume %d", vol->vol_id);
if (vol->upd_marker) {
- ubi_assert(ubi->vtbl[vol_id].upd_marker);
+ ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
dbg_msg("already set");
return 0;
}
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+ memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
+ sizeof(struct ubi_vtbl_record));
vtbl_rec.upd_marker = 1;
- err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ mutex_lock(&ubi->volumes_mutex);
+ err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+ mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 1;
return err;
}
@@ -75,23 +78,24 @@ static int set_update_marker(struct ubi_device *ubi, int vol_id)
/**
* clear_update_marker - clear update marker.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @bytes: new data size in bytes
*
- * This function clears the update marker for volume @vol_id, sets new volume
+ * This function clears the update marker for volume @vol, sets new volume
* data size and clears the "corrupted" flag (static volumes only). Returns
* zero in case of success and a negative error code in case of failure.
*/
-static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long bytes)
+static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes)
{
int err;
uint64_t tmp;
struct ubi_vtbl_record vtbl_rec;
- struct ubi_volume *vol = ubi->volumes[vol_id];
- dbg_msg("clear update marker for volume %d", vol_id);
+ dbg_msg("clear update marker for volume %d", vol->vol_id);
- memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
+ memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
+ sizeof(struct ubi_vtbl_record));
ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
vtbl_rec.upd_marker = 0;
@@ -106,7 +110,9 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
vol->last_eb_bytes = vol->usable_leb_size;
}
- err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ mutex_lock(&ubi->volumes_mutex);
+ err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
+ mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 0;
return err;
}
@@ -114,35 +120,36 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
/**
* ubi_start_update - start volume update.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @bytes: update bytes
*
* This function starts volume update operation. If @bytes is zero, the volume
* is just wiped out. Returns zero in case of success and a negative error code
* in case of failure.
*/
-int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
+int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
+ long long bytes)
{
int i, err;
uint64_t tmp;
- struct ubi_volume *vol = ubi->volumes[vol_id];
- dbg_msg("start update of volume %d, %llu bytes", vol_id, bytes);
+ dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes);
+ ubi_assert(!vol->updating && !vol->changing_leb);
vol->updating = 1;
- err = set_update_marker(ubi, vol_id);
+ err = set_update_marker(ubi, vol);
if (err)
return err;
/* Before updating - wipe out the volume */
for (i = 0; i < vol->reserved_pebs; i++) {
- err = ubi_eba_unmap_leb(ubi, vol_id, i);
+ err = ubi_eba_unmap_leb(ubi, vol, i);
if (err)
return err;
}
if (bytes == 0) {
- err = clear_update_marker(ubi, vol_id, 0);
+ err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
err = ubi_wl_flush(ubi);
@@ -163,9 +170,42 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
}
/**
+ * ubi_start_leb_change - start atomic LEB change.
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @req: operation request
+ *
+ * This function starts atomic LEB change operation. Returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ const struct ubi_leb_change_req *req)
+{
+ ubi_assert(!vol->updating && !vol->changing_leb);
+
+ dbg_msg("start changing LEB %d:%d, %u bytes",
+ vol->vol_id, req->lnum, req->bytes);
+ if (req->bytes == 0)
+ return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
+ req->dtype);
+
+ vol->upd_bytes = req->bytes;
+ vol->upd_received = 0;
+ vol->changing_leb = 1;
+ vol->ch_lnum = req->lnum;
+ vol->ch_dtype = req->dtype;
+
+ vol->upd_buf = vmalloc(req->bytes);
+ if (!vol->upd_buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
* write_leb - write update data.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: data size
@@ -191,26 +231,22 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
- int len, int used_ebs)
+static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int len, int used_ebs)
{
- int err, l;
- struct ubi_volume *vol = ubi->volumes[vol_id];
+ int err;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
- l = ALIGN(len, ubi->min_io_size);
- memset(buf + len, 0xFF, l - len);
+ int l = ALIGN(len, ubi->min_io_size);
+
+ memset(buf + len, 0xFF, l - len);
- l = ubi_calc_data_len(ubi, buf, l);
- if (l == 0) {
+ len = ubi_calc_data_len(ubi, buf, l);
+ if (len == 0) {
dbg_msg("all %d bytes contain 0xFF - skip", len);
return 0;
}
- if (len != l)
- dbg_msg("skip last %d bytes (0xFF)", len - l);
- err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l,
- UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN);
} else {
/*
* When writing static volume, and this is the last logical
@@ -222,7 +258,7 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
* contain zeros, not random trash.
*/
memset(buf + len, 0, vol->usable_leb_size - len);
- err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len,
+ err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
UBI_UNKNOWN, used_ebs);
}
@@ -236,16 +272,15 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
* @count: how many bytes to write
*
* This function writes more data to the volume which is being updated. It may
- * be called arbitrary number of times until all of the update data arrive.
- * This function returns %0 in case of success, number of bytes written during
- * the last call if the whole volume update was successfully finished, and a
+ * be called an arbitrary number of times until all the update data arrives. This
+ * function returns %0 in case of success, number of bytes written during the
+ * last call if the whole volume update has been successfully finished, and a
* negative error code in case of failure.
*/
-int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
+int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
const void __user *buf, int count)
{
uint64_t tmp;
- struct ubi_volume *vol = ubi->volumes[vol_id];
int lnum, offs, err = 0, len, to_write = count;
dbg_msg("write %d of %lld bytes, %lld already passed",
@@ -290,8 +325,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
* is the last chunk, it's time to flush the buffer.
*/
ubi_assert(flush_len <= vol->usable_leb_size);
- err = write_leb(ubi, vol_id, lnum, vol->upd_buf,
- flush_len, vol->upd_ebs);
+ err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
+ vol->upd_ebs);
if (err)
return err;
}
@@ -318,8 +353,8 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
if (len == vol->usable_leb_size ||
vol->upd_received + len == vol->upd_bytes) {
- err = write_leb(ubi, vol_id, lnum, vol->upd_buf, len,
- vol->upd_ebs);
+ err = write_leb(ubi, vol, lnum, vol->upd_buf,
+ len, vol->upd_ebs);
if (err)
break;
}
@@ -333,16 +368,70 @@ int ubi_more_update_data(struct ubi_device *ubi, int vol_id,
ubi_assert(vol->upd_received <= vol->upd_bytes);
if (vol->upd_received == vol->upd_bytes) {
/* The update is finished, clear the update marker */
- err = clear_update_marker(ubi, vol_id, vol->upd_bytes);
+ err = clear_update_marker(ubi, vol, vol->upd_bytes);
if (err)
return err;
err = ubi_wl_flush(ubi);
if (err == 0) {
+ vol->updating = 0;
err = to_write;
vfree(vol->upd_buf);
- vol->updating = 0;
}
}
return err;
}
+
+/**
+ * ubi_more_leb_change_data - accept more data for atomic LEB change.
+ * @vol: volume description object
+ * @buf: write data (user-space memory buffer)
+ * @count: how many bytes to write
+ *
+ * This function accepts more data for the volume which is under the
+ * "atomic LEB change" operation. It may be called an arbitrary number of times
+ * until all data arrives. This function returns %0 in case of success, number
+ * of bytes written during the last call if the whole "atomic LEB change"
+ * operation has been successfully finished, and a negative error code in case
+ * of failure.
+ */
+int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
+ const void __user *buf, int count)
+{
+ int err;
+
+ dbg_msg("write %d of %lld bytes, %lld already passed",
+ count, vol->upd_bytes, vol->upd_received);
+
+ if (ubi->ro_mode)
+ return -EROFS;
+
+ if (vol->upd_received + count > vol->upd_bytes)
+ count = vol->upd_bytes - vol->upd_received;
+
+ err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count);
+ if (err)
+ return -EFAULT;
+
+ vol->upd_received += count;
+
+ if (vol->upd_received == vol->upd_bytes) {
+ int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size);
+
+ memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes);
+ len = ubi_calc_data_len(ubi, vol->upd_buf, len);
+ err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum,
+ vol->upd_buf, len, UBI_UNKNOWN);
+ if (err)
+ return err;
+ }
+
+ ubi_assert(vol->upd_received <= vol->upd_bytes);
+ if (vol->upd_received == vol->upd_bytes) {
+ vol->changing_leb = 0;
+ err = count;
+ vfree(vol->upd_buf);
+ }
+
+ return err;
+}
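
Taken together, ubi_start_leb_change() records @ch_lnum and @ch_dtype and allocates @upd_buf, ubi_more_leb_change_data() accumulates the user data, and once @upd_received reaches @upd_bytes the buffer is padded with 0xFF, trimmed by ubi_calc_data_len() and handed to ubi_eba_atomic_leb_change(). A condensed sketch of that flow from the caller's side (in the real driver the caller is the volume cdev ioctl path; error handling is trimmed and the data is assumed to arrive in a single chunk):

/* Sketch: drive one atomic LEB change with the two functions above. */
static int change_leb_once(struct ubi_device *ubi, struct ubi_volume *vol,
                           const struct ubi_leb_change_req *req,
                           const void __user *data)
{
        int err;

        err = ubi_start_leb_change(ubi, vol, req);      /* allocates upd_buf */
        if (err)
                return err;

        /* May be called several times; here everything arrives at once. */
        err = ubi_more_leb_change_data(ubi, vol, data, req->bytes);

        /* A positive return means "bytes accepted by the final call". */
        return err < 0 ? err : 0;
}
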
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 88629a320c2b..a3ca2257e601 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -63,21 +63,30 @@ static struct device_attribute attr_vol_upd_marker =
* B. process 2 removes volume Y;
* C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
*
- * What we want to do in a situation like that is to return error when the file
- * is read. This is done by means of the 'removed' flag and the 'vol_lock' of
- * the UBI volume description object.
+ * In this situation, this function will return %-ENODEV because it will find
+ * out that the volume was removed from the @ubi->volumes array.
*/
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+ struct ubi_device *ubi;
- spin_lock(&vol->ubi->volumes_lock);
- if (vol->removed) {
- spin_unlock(&vol->ubi->volumes_lock);
+ ubi = ubi_get_device(vol->ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ spin_lock(&ubi->volumes_lock);
+ if (!ubi->volumes[vol->vol_id]) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_put_device(ubi);
return -ENODEV;
}
+ /* Take a reference to prevent volume removal */
+ vol->ref_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
if (attr == &attr_vol_reserved_ebs)
ret = sprintf(buf, "%d\n", vol->reserved_pebs);
else if (attr == &attr_vol_type) {
@@ -94,15 +103,22 @@ static ssize_t vol_attribute_show(struct device *dev,
ret = sprintf(buf, "%d\n", vol->corrupted);
else if (attr == &attr_vol_alignment)
ret = sprintf(buf, "%d\n", vol->alignment);
- else if (attr == &attr_vol_usable_eb_size) {
+ else if (attr == &attr_vol_usable_eb_size)
ret = sprintf(buf, "%d\n", vol->usable_leb_size);
- } else if (attr == &attr_vol_data_bytes)
+ else if (attr == &attr_vol_data_bytes)
ret = sprintf(buf, "%lld\n", vol->used_bytes);
else if (attr == &attr_vol_upd_marker)
ret = sprintf(buf, "%d\n", vol->upd_marker);
else
- BUG();
- spin_unlock(&vol->ubi->volumes_lock);
+ /* This must be a bug */
+ ret = -EINVAL;
+
+ /* We've done the operation, drop volume and UBI device references */
+ spin_lock(&ubi->volumes_lock);
+ vol->ref_count -= 1;
+ ubi_assert(vol->ref_count >= 0);
+ spin_unlock(&ubi->volumes_lock);
+ ubi_put_device(ubi);
return ret;
}
@@ -110,7 +126,7 @@ static ssize_t vol_attribute_show(struct device *dev,
static void vol_release(struct device *dev)
{
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
- ubi_assert(vol->removed);
+
kfree(vol);
}
@@ -152,9 +168,7 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
if (err)
return err;
err = device_create_file(&vol->dev, &attr_vol_upd_marker);
- if (err)
- return err;
- return 0;
+ return err;
}
/**
@@ -180,16 +194,18 @@ static void volume_sysfs_close(struct ubi_volume *vol)
* @req: volume creation request
*
* This function creates volume described by @req. If @req->vol_id is
- * %UBI_VOL_NUM_AUTO, this function automatically assigne ID to the new volume
+ * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
* and saves it in @req->vol_id. Returns zero in case of success and a negative
- * error code in case of failure.
+ * error code in case of failure. Note, the caller has to have the
+ * @ubi->volumes_mutex locked.
*/
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
{
- int i, err, vol_id = req->vol_id;
+ int i, err, vol_id = req->vol_id, dont_free = 0;
struct ubi_volume *vol;
struct ubi_vtbl_record vtbl_rec;
uint64_t bytes;
+ dev_t dev;
if (ubi->ro_mode)
return -EROFS;
@@ -199,7 +215,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
return -ENOMEM;
spin_lock(&ubi->volumes_lock);
-
if (vol_id == UBI_VOL_NUM_AUTO) {
/* Find unused volume ID */
dbg_msg("search for vacant volume ID");
@@ -252,6 +267,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
}
ubi->avail_pebs -= vol->reserved_pebs;
ubi->rsvd_pebs += vol->reserved_pebs;
+ spin_unlock(&ubi->volumes_lock);
vol->vol_id = vol_id;
vol->alignment = req->alignment;
@@ -259,10 +275,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->vol_type = req->vol_type;
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len + 1);
- vol->exclusive = 1;
vol->ubi = ubi;
- ubi->volumes[vol_id] = vol;
- spin_unlock(&ubi->volumes_lock);
/*
* Finish all pending erases because there may be some LEBs belonging
@@ -299,9 +312,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
- err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1);
+ dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
+ err = cdev_add(&vol->cdev, dev, 1);
if (err) {
- ubi_err("cannot add character device for volume %d", vol_id);
+ ubi_err("cannot add character device");
goto out_mapping;
}
@@ -311,12 +325,15 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
- vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1);
+ vol->dev.devt = dev;
vol->dev.class = ubi_class;
+
sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
- if (err)
+ if (err) {
+ ubi_err("cannot register device");
goto out_gluebi;
+ }
err = volume_sysfs_init(ubi, vol);
if (err)
@@ -339,15 +356,27 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
goto out_sysfs;
spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
ubi->vol_count += 1;
- vol->exclusive = 0;
spin_unlock(&ubi->volumes_lock);
paranoid_check_volumes(ubi);
return 0;
+out_sysfs:
+ /*
+ * We have registered our device, we should not free the volume*
+ * description object in this function in case of an error - it is
+ * freed by the release function.
+ *
+ * Get device reference to prevent the release function from being
+ * called just after sysfs has been closed.
+ */
+ dont_free = 1;
+ get_device(&vol->dev);
+ volume_sysfs_close(vol);
out_gluebi:
- err = ubi_destroy_gluebi(vol);
+ ubi_destroy_gluebi(vol);
out_cdev:
cdev_del(&vol->cdev);
out_mapping:
@@ -356,26 +385,13 @@ out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
- ubi->volumes[vol_id] = NULL;
out_unlock:
spin_unlock(&ubi->volumes_lock);
- kfree(vol);
- return err;
-
- /*
- * We are registered, so @vol is destroyed in the release function and
- * we have to de-initialize differently.
- */
-out_sysfs:
- err = ubi_destroy_gluebi(vol);
- cdev_del(&vol->cdev);
- kfree(vol->eba_tbl);
- spin_lock(&ubi->volumes_lock);
- ubi->rsvd_pebs -= vol->reserved_pebs;
- ubi->avail_pebs += vol->reserved_pebs;
- ubi->volumes[vol_id] = NULL;
- spin_unlock(&ubi->volumes_lock);
- volume_sysfs_close(vol);
+ if (dont_free)
+ put_device(&vol->dev);
+ else
+ kfree(vol);
+ ubi_err("cannot create volume %d, error %d", vol_id, err);
return err;
}
@@ -385,7 +401,8 @@ out_sysfs:
*
* This function removes volume described by @desc. The volume has to be opened
* in "exclusive" mode. Returns zero in case of success and a negative error
- * code in case of failure.
+ * code in case of failure. The caller has to have the @ubi->volumes_mutex
+ * locked.
*/
int ubi_remove_volume(struct ubi_volume_desc *desc)
{
@@ -400,30 +417,36 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
if (ubi->ro_mode)
return -EROFS;
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ /*
+ * The volume is busy, probably someone is reading one of its
+ * sysfs files.
+ */
+ err = -EBUSY;
+ goto out_unlock;
+ }
+ ubi->volumes[vol_id] = NULL;
+ spin_unlock(&ubi->volumes_lock);
+
err = ubi_destroy_gluebi(vol);
if (err)
- return err;
+ goto out_err;
err = ubi_change_vtbl_record(ubi, vol_id, NULL);
if (err)
- return err;
+ goto out_err;
for (i = 0; i < vol->reserved_pebs; i++) {
- err = ubi_eba_unmap_leb(ubi, vol_id, i);
+ err = ubi_eba_unmap_leb(ubi, vol, i);
if (err)
- return err;
+ goto out_err;
}
- spin_lock(&ubi->volumes_lock);
- vol->removed = 1;
- ubi->volumes[vol_id] = NULL;
- spin_unlock(&ubi->volumes_lock);
-
kfree(vol->eba_tbl);
vol->eba_tbl = NULL;
cdev_del(&vol->cdev);
volume_sysfs_close(vol);
- kfree(desc);
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
@@ -441,8 +464,15 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
spin_unlock(&ubi->volumes_lock);
paranoid_check_volumes(ubi);
- module_put(THIS_MODULE);
return 0;
+
+out_err:
+ ubi_err("cannot remove volume %d, error %d", vol_id, err);
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ return err;
}
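
Volume removal (like creation and resize) now runs under @ubi->volumes_mutex and bails out with %-EBUSY while somebody still holds a reference to the volume. A sketch of how a caller wraps it (in the real driver this is the cdev ioctl path; the descriptor is assumed to be open in exclusive mode):

/* Sketch: serialize a volume removal against other volume operations. */
static int remove_volume_locked(struct ubi_device *ubi,
                                struct ubi_volume_desc *desc)
{
        int err;

        mutex_lock(&ubi->volumes_mutex);
        err = ubi_remove_volume(desc);  /* may return -EBUSY if referenced */
        mutex_unlock(&ubi->volumes_mutex);

        return err;
}
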
/**
@@ -450,8 +480,9 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
* @desc: volume descriptor
* @reserved_pebs: new size in physical eraseblocks
*
- * This function returns zero in case of success, and a negative error code in
- * case of failure.
+ * This function re-sizes the volume and returns zero in case of success, and a
+ * negative error code in case of failure. The caller has to have the
+ * @ubi->volumes_mutex locked.
*/
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
{
@@ -466,8 +497,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
dbg_msg("re-size volume %d to from %d to %d PEBs",
vol_id, vol->reserved_pebs, reserved_pebs);
- ubi_assert(desc->mode == UBI_EXCLUSIVE);
- ubi_assert(vol == ubi->volumes[vol_id]);
if (vol->vol_type == UBI_STATIC_VOLUME &&
reserved_pebs < vol->used_ebs) {
@@ -487,6 +516,14 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = UBI_LEB_UNMAPPED;
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ spin_unlock(&ubi->volumes_lock);
+ err = -EBUSY;
+ goto out_free;
+ }
+ spin_unlock(&ubi->volumes_lock);
+
/* Reserve physical eraseblocks */
pebs = reserved_pebs - vol->reserved_pebs;
if (pebs > 0) {
@@ -516,7 +553,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (pebs < 0) {
for (i = 0; i < -pebs; i++) {
- err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i);
+ err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
if (err)
goto out_acc;
}
@@ -565,27 +602,28 @@ out_free:
/**
* ubi_add_volume - add volume.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
*
- * This function adds an existin volume and initializes all its data
- * structures. Returnes zero in case of success and a negative error code in
+ * This function adds an existing volume and initializes all its data
+ * structures. Returns zero in case of success and a negative error code in
* case of failure.
*/
-int ubi_add_volume(struct ubi_device *ubi, int vol_id)
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
- int err;
- struct ubi_volume *vol = ubi->volumes[vol_id];
+ int err, vol_id = vol->vol_id;
+ dev_t dev;
dbg_msg("add volume %d", vol_id);
ubi_dbg_dump_vol_info(vol);
- ubi_assert(vol);
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
- err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1);
+ dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
+ err = cdev_add(&vol->cdev, dev, 1);
if (err) {
- ubi_err("cannot add character device for volume %d", vol_id);
+ ubi_err("cannot add character device for volume %d, error %d",
+ vol_id, err);
return err;
}
@@ -595,7 +633,7 @@ int ubi_add_volume(struct ubi_device *ubi, int vol_id)
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
- vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1);
+ vol->dev.devt = dev;
vol->dev.class = ubi_class;
sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
@@ -623,22 +661,19 @@ out_cdev:
/**
* ubi_free_volume - free volume.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
*
- * This function frees all resources for volume @vol_id but does not remove it.
+ * This function frees all resources for volume @vol but does not remove it.
* Used only when the UBI device is detached.
*/
-void ubi_free_volume(struct ubi_device *ubi, int vol_id)
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
int err;
- struct ubi_volume *vol = ubi->volumes[vol_id];
- dbg_msg("free volume %d", vol_id);
- ubi_assert(vol);
+ dbg_msg("free volume %d", vol->vol_id);
- vol->removed = 1;
+ ubi->volumes[vol->vol_id] = NULL;
err = ubi_destroy_gluebi(vol);
- ubi->volumes[vol_id] = NULL;
cdev_del(&vol->cdev);
volume_sysfs_close(vol);
}
@@ -708,11 +743,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
goto fail;
}
- if (vol->upd_marker != 0 && vol->upd_marker != 1) {
- ubi_err("bad upd_marker");
- goto fail;
- }
-
if (vol->upd_marker && vol->corrupted) {
dbg_err("update marker and corrupted simultaneously");
goto fail;
@@ -747,7 +777,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
n = (long long)vol->used_ebs * vol->usable_leb_size;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
- if (vol->corrupted != 0) {
+ if (vol->corrupted) {
ubi_err("corrupted dynamic volume");
goto fail;
}
@@ -764,10 +794,6 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id)
goto fail;
}
} else {
- if (vol->corrupted != 0 && vol->corrupted != 1) {
- ubi_err("bad corrupted");
- goto fail;
- }
if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
ubi_err("bad used_ebs");
goto fail;
@@ -820,9 +846,7 @@ static void paranoid_check_volumes(struct ubi_device *ubi)
{
int i;
- mutex_lock(&ubi->vtbl_mutex);
for (i = 0; i < ubi->vtbl_slots; i++)
paranoid_check_volume(ubi, i);
- mutex_unlock(&ubi->vtbl_mutex);
}
#endif
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 25b3bd61c7ec..56fc3fbce838 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -86,8 +86,10 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
{
int i, err;
uint32_t crc;
+ struct ubi_volume *layout_vol;
ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
+ layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
if (!vtbl_rec)
vtbl_rec = &empty_vtbl_record;
@@ -96,31 +98,25 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
vtbl_rec->crc = cpu_to_be32(crc);
}
- mutex_lock(&ubi->vtbl_mutex);
memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
- err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i);
- if (err) {
- mutex_unlock(&ubi->vtbl_mutex);
+ err = ubi_eba_unmap_leb(ubi, layout_vol, i);
+ if (err)
return err;
- }
- err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0,
+
+ err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
ubi->vtbl_size, UBI_LONGTERM);
- if (err) {
- mutex_unlock(&ubi->vtbl_mutex);
+ if (err)
return err;
- }
}
paranoid_vtbl_check(ubi);
- mutex_unlock(&ubi->vtbl_mutex);
- return ubi_wl_flush(ubi);
+ return 0;
}
/**
- * vol_til_check - check if volume table is not corrupted and contains sensible
- * data.
- *
+ * vtbl_check - check if volume table is not corrupted and contains sensible
+ * data.
* @ubi: UBI device description object
* @vtbl: volume table
*
@@ -273,7 +269,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
* this volume table copy was found during scanning. It has to be wiped
* out.
*/
- sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
+ sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
if (sv)
old_seb = ubi_scan_find_seb(sv, copy);
@@ -285,7 +281,7 @@ retry:
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
- vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOL_ID);
+ vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
vid_hdr->data_size = vid_hdr->used_ebs =
vid_hdr->data_pad = cpu_to_be32(0);
@@ -518,6 +514,17 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->name[vol->name_len] = '\0';
vol->vol_id = i;
+ if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
+ /* Auto re-size flag may be set only for one volume */
+ if (ubi->autoresize_vol_id != -1) {
+ ubi_err("more than one auto-resize volume (%d "
+ "and %d)", ubi->autoresize_vol_id, i);
+ return -EINVAL;
+ }
+
+ ubi->autoresize_vol_id = i;
+ }
+
ubi_assert(!ubi->volumes[i]);
ubi->volumes[i] = vol;
ubi->vol_count += 1;
@@ -568,6 +575,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->last_eb_bytes = sv->last_data_size;
}
+ /* And add the layout volume */
vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
if (!vol)
return -ENOMEM;
@@ -582,7 +590,8 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->last_eb_bytes = vol->reserved_pebs;
vol->used_bytes =
(long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
- vol->vol_id = UBI_LAYOUT_VOL_ID;
+ vol->vol_id = UBI_LAYOUT_VOLUME_ID;
+ vol->ref_count = 1;
ubi_assert(!ubi->volumes[i]);
ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
@@ -734,7 +743,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);
- sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOL_ID);
+ sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
if (!sv) {
/*
* No logical eraseblocks belonging to the layout volume were
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6330c8cc72b5..a471a491f0ab 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -117,21 +117,6 @@
#define WL_MAX_FAILURES 32
/**
- * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
- * @ec: erase counter
- * @pnum: physical eraseblock number
- *
- * Each physical eraseblock has a corresponding &struct wl_entry object which
- * may be kept in different RB-trees.
- */
-struct ubi_wl_entry {
- struct rb_node rb;
- int ec;
- int pnum;
-};
-
-/**
* struct ubi_wl_prot_entry - PEB protection entry.
* @rb_pnum: link in the @wl->prot.pnum RB-tree
* @rb_aec: link in the @wl->prot.aec RB-tree
@@ -216,9 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
#define paranoid_check_in_wl_tree(e, root)
#endif
-/* Slab cache for wear-leveling entries */
-static struct kmem_cache *wl_entries_slab;
-
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
* @e: the wear-leveling entry to add
@@ -267,15 +249,26 @@ static int do_work(struct ubi_device *ubi)
int err;
struct ubi_work *wrk;
- spin_lock(&ubi->wl_lock);
+ cond_resched();
+ /*
+ * @ubi->work_sem is used to synchronize with the workers. Workers take
+ * it in read mode, so many of them may be doing work at a time. But
+ * the queue flush code has to be sure the whole queue of works is
+ * done, and it takes @work_sem in write mode.
+ */
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
if (list_empty(&ubi->works)) {
spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
return 0;
}
wrk = list_entry(ubi->works.next, struct ubi_work, list);
list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
spin_unlock(&ubi->wl_lock);
/*
@@ -286,11 +279,8 @@ static int do_work(struct ubi_device *ubi)
err = wrk->func(ubi, wrk, 0);
if (err)
ubi_err("work failed with error code %d", err);
+ up_read(&ubi->work_sem);
- spin_lock(&ubi->wl_lock);
- ubi->works_count -= 1;
- ubi_assert(ubi->works_count >= 0);
- spin_unlock(&ubi->wl_lock);
return err;
}
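
@ubi->work_sem turns work execution into the read side and queue flushing into the write side of a reader/writer semaphore: many workers may run at once, but a writer cannot proceed until all of them have finished. A minimal sketch of the flush side this enables (illustrative only, not claimed to be the body of ubi_wl_flush()):

/*
 * Sketch: wait until every worker that took @work_sem in read mode (as
 * do_work() above does) has dropped it again.
 */
static void wait_for_running_works(struct ubi_device *ubi)
{
        down_write(&ubi->work_sem);
        up_write(&ubi->work_sem);
}
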
@@ -549,8 +539,12 @@ retry:
* prot_tree_del - remove a physical eraseblock from the protection trees
* @ubi: UBI device description object
* @pnum: the physical eraseblock to remove
+ *
+ * This function removes PEB @pnum from the protection trees and returns zero
+ * in case of success and %-ENODEV if the PEB was not found in the protection
+ * trees.
*/
-static void prot_tree_del(struct ubi_device *ubi, int pnum)
+static int prot_tree_del(struct ubi_device *ubi, int pnum)
{
struct rb_node *p;
struct ubi_wl_prot_entry *pe = NULL;
@@ -561,7 +555,7 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
if (pnum == pe->e->pnum)
- break;
+ goto found;
if (pnum < pe->e->pnum)
p = p->rb_left;
@@ -569,10 +563,14 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
p = p->rb_right;
}
+ return -ENODEV;
+
+found:
ubi_assert(pe->e->pnum == pnum);
rb_erase(&pe->rb_aec, &ubi->prot.aec);
rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
kfree(pe);
+ return 0;
}
/**
@@ -744,7 +742,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int cancel)
{
- int err, put = 0;
+ int err, put = 0, scrubbing = 0, protect = 0;
+ struct ubi_wl_prot_entry *uninitialized_var(pe);
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
@@ -757,21 +756,17 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (!vid_hdr)
return -ENOMEM;
+ mutex_lock(&ubi->move_mutex);
spin_lock(&ubi->wl_lock);
+ ubi_assert(!ubi->move_from && !ubi->move_to);
+ ubi_assert(!ubi->move_to_put);
- /*
- * Only one WL worker at a time is supported at this implementation, so
- * make sure a PEB is not being moved already.
- */
- if (ubi->move_to || !ubi->free.rb_node ||
+ if (!ubi->free.rb_node ||
(!ubi->used.rb_node && !ubi->scrub.rb_node)) {
/*
- * Only one WL worker at a time is supported at this
- * implementation, so if a LEB is already being moved, cancel.
- *
- * No free physical eraseblocks? Well, we cancel wear-leveling
- * then. It will be triggered again when a free physical
- * eraseblock appears.
+ * No free physical eraseblocks? Well, they must be waiting in
+ * the queue to be erased. Cancel movement - it will be
+ * triggered again when a free physical eraseblock appears.
*
* No used physical eraseblocks? They must be temporarily
* protected from being moved. They will be moved to the
@@ -780,10 +775,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("cancel WL, a list is empty: free %d, used %d",
!ubi->free.rb_node, !ubi->used.rb_node);
- ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return 0;
+ goto out_cancel;
}
if (!ubi->scrub.rb_node) {
@@ -798,27 +790,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
e1->ec, e2->ec);
- ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return 0;
+ goto out_cancel;
}
paranoid_check_in_wl_tree(e1, &ubi->used);
rb_erase(&e1->rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
} else {
+ /* Perform scrubbing */
+ scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
paranoid_check_in_wl_tree(e1, &ubi->scrub);
- rb_erase(&e1->rb, &ubi->scrub);
+ rb_erase(&e1->rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
paranoid_check_in_wl_tree(e2, &ubi->free);
rb_erase(&e2->rb, &ubi->free);
- ubi_assert(!ubi->move_from && !ubi->move_to);
- ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
ubi->move_from = e1;
ubi->move_to = e2;
spin_unlock(&ubi->wl_lock);
@@ -828,6 +817,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* We so far do not know which logical eraseblock our physical
* eraseblock (@e1) belongs to. We have to read the volume identifier
* header first.
+ *
+ * Note, we are protected from this PEB being unmapped and erased. The
+ * 'ubi_wl_put_peb()' would wait for the move to finish if the PEB
+ * being moved were unmapped.
*/
err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
@@ -842,32 +835,51 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* likely have the VID header in place.
*/
dbg_wl("PEB %d has no VID header", e1->pnum);
- err = 0;
- } else {
- ubi_err("error %d while reading VID header from PEB %d",
- err, e1->pnum);
- if (err > 0)
- err = -EIO;
+ goto out_not_moved;
}
- goto error;
+
+ ubi_err("error %d while reading VID header from PEB %d",
+ err, e1->pnum);
+ if (err > 0)
+ err = -EIO;
+ goto out_error;
}
err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
if (err) {
- if (err == UBI_IO_BITFLIPS)
- err = 0;
- goto error;
+
+ if (err < 0)
+ goto out_error;
+ if (err == 1)
+ goto out_not_moved;
+
+ /*
+ * For some reason the LEB was not moved - it might be because
+ * the volume is being deleted. We should prevent this PEB from
+ * being selected for wear-leveling movement for some "time",
+ * so put it into the protection tree.
+ */
+
+ dbg_wl("cancelled moving PEB %d", e1->pnum);
+ pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
+ if (!pe) {
+ err = -ENOMEM;
+ goto out_error;
+ }
+
+ protect = 1;
}
ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
+ if (protect)
+ prot_tree_add(ubi, e1, pe, protect);
if (!ubi->move_to_put)
wl_tree_add(e2, &ubi->used);
else
put = 1;
ubi->move_from = ubi->move_to = NULL;
- ubi->move_from_put = ubi->move_to_put = 0;
- ubi->wl_scheduled = 0;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
if (put) {
@@ -877,62 +889,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
err = schedule_erase(ubi, e2, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e2);
- ubi_ro_mode(ubi);
- }
+ if (err)
+ goto out_error;
}
- err = schedule_erase(ubi, e1, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e1);
- ubi_ro_mode(ubi);
+ if (!protect) {
+ err = schedule_erase(ubi, e1, 0);
+ if (err)
+ goto out_error;
}
+
dbg_wl("done");
- return err;
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
/*
- * Some error occurred. @e1 was not changed, so return it back. @e2
- * might be changed, schedule it for erasure.
+ * For some reason the LEB was not moved; it might be an error, it might
+ * be something else. @e1 was not changed, so put it back. @e2 might
+ * be changed, schedule it for erasure.
*/
-error:
- if (err)
- dbg_wl("error %d occurred, cancel operation", err);
- ubi_assert(err <= 0);
-
+out_not_moved:
ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
- ubi->wl_scheduled = 0;
- if (ubi->move_from_put)
- put = 1;
+ if (scrubbing)
+ wl_tree_add(e1, &ubi->scrub);
else
wl_tree_add(e1, &ubi->used);
ubi->move_from = ubi->move_to = NULL;
- ubi->move_from_put = ubi->move_to_put = 0;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- if (put) {
- /*
- * Well, the target PEB was put meanwhile, schedule it for
- * erasure.
- */
- dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
- err = schedule_erase(ubi, e1, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e1);
- ubi_ro_mode(ubi);
- }
- }
-
err = schedule_erase(ubi, e2, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e2);
- ubi_ro_mode(ubi);
- }
+ if (err)
+ goto out_error;
+
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
+
+out_error:
+ ubi_err("error %d while moving PEB %d to PEB %d",
+ err, e1->pnum, e2->pnum);
- yield();
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ spin_lock(&ubi->wl_lock);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ kmem_cache_free(ubi_wl_entry_slab, e1);
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+ ubi_ro_mode(ubi);
+
+ mutex_unlock(&ubi->move_mutex);
return err;
+
+out_cancel:
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ mutex_unlock(&ubi->move_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
}
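
The rewritten worker above funnels every exit through dedicated labels - a normal return, out_not_moved for a benign cancellation, out_error for hard failures and out_cancel when there is nothing to do - so the move_mutex and vid_hdr cleanup happens in exactly one place per outcome. A minimal sketch of that label-per-outcome style, with illustrative names only (copy_step() stands in for ubi_eba_copy_leb()):

#include <linux/mutex.h>

struct move_ctx {
	struct mutex lock;
};

/* 0 = moved, >0 = not moved (benign), <0 = hard error */
static int copy_step(struct move_ctx *c)
{
	return 0;
}

static int do_move(struct move_ctx *c)
{
	int err;

	mutex_lock(&c->lock);

	err = copy_step(c);
	if (err < 0)
		goto out_error;
	if (err > 0)
		goto out_not_moved;

	mutex_unlock(&c->lock);
	return 0;

out_not_moved:
	/* Benign: put things back, report success, let a later pass retry. */
	mutex_unlock(&c->lock);
	return 0;

out_error:
	mutex_unlock(&c->lock);
	return err;
}
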
/**
@@ -1020,7 +1037,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
kfree(wl_wrk);
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
return 0;
}
@@ -1049,7 +1066,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
@@ -1119,8 +1136,7 @@ out_ro:
}
/**
- * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling
- * unit.
+ * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
* @ubi: UBI device description object
* @pnum: physical eraseblock to return
* @torture: if this physical eraseblock has to be tortured
@@ -1128,7 +1144,7 @@ out_ro:
* This function is called to return physical eraseblock @pnum to the pool of
* free physical eraseblocks. The @torture flag has to be set if an I/O error
* occurred to this @pnum and it has to be tested. This function returns zero
- * in case of success and a negative error code in case of failure.
+ * in case of success, and a negative error code in case of failure.
*/
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
@@ -1139,8 +1155,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
ubi_assert(pnum >= 0);
ubi_assert(pnum < ubi->peb_count);
+retry:
spin_lock(&ubi->wl_lock);
-
e = ubi->lookuptbl[pnum];
if (e == ubi->move_from) {
/*
@@ -1148,17 +1164,22 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
* be moved. It will be scheduled for erasure in the
* wear-leveling worker.
*/
- dbg_wl("PEB %d is being moved", pnum);
- ubi_assert(!ubi->move_from_put);
- ubi->move_from_put = 1;
+ dbg_wl("PEB %d is being moved, wait", pnum);
spin_unlock(&ubi->wl_lock);
- return 0;
+
+ /* Wait for the WL worker by taking the @ubi->move_mutex */
+ mutex_lock(&ubi->move_mutex);
+ mutex_unlock(&ubi->move_mutex);
+ goto retry;
} else if (e == ubi->move_to) {
/*
* User is putting the physical eraseblock which was selected
* as the target the data is moved to. It may happen if the EBA
- * unit already re-mapped the LEB but the WL unit did has not
- * put the PEB to the "used" tree.
+ * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but
+ * the WL unit has not yet put the PEB into the "used" tree;
+ * it is about to do so. So we just set a flag which will
+ * tell the WL worker that the PEB is not needed anymore and
+ * should be scheduled for erasure.
*/
dbg_wl("PEB %d is the target of data moving", pnum);
ubi_assert(!ubi->move_to_put);
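
Note how the first branch above waits for an in-flight move: instead of setting a move_from_put flag, the caller drops @wl_lock, takes and immediately releases @ubi->move_mutex - which the wear-leveling worker holds for the whole move - and then retries. A minimal sketch of that lock/unlock-as-a-wait pattern, with illustrative names:

#include <linux/mutex.h>
#include <linux/spinlock.h>

struct dev_state {
	spinlock_t lock;		/* protects @moving */
	struct mutex move_mutex;	/* held by the worker for a whole move */
	int moving;			/* set while a move is in progress */
};

static void put_block(struct dev_state *d)
{
retry:
	spin_lock(&d->lock);
	if (d->moving) {
		spin_unlock(&d->lock);
		/* Block until the worker drops the mutex, then re-check. */
		mutex_lock(&d->move_mutex);
		mutex_unlock(&d->move_mutex);
		goto retry;
	}
	/* ... the block is not being moved, safe to proceed ... */
	spin_unlock(&d->lock);
}
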
@@ -1172,8 +1193,15 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
} else if (in_wl_tree(e, &ubi->scrub)) {
paranoid_check_in_wl_tree(e, &ubi->scrub);
rb_erase(&e->rb, &ubi->scrub);
- } else
- prot_tree_del(ubi, e->pnum);
+ } else {
+ err = prot_tree_del(ubi, e->pnum);
+ if (err) {
+ ubi_err("PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ }
}
spin_unlock(&ubi->wl_lock);
@@ -1227,8 +1255,17 @@ retry:
if (in_wl_tree(e, &ubi->used)) {
paranoid_check_in_wl_tree(e, &ubi->used);
rb_erase(&e->rb, &ubi->used);
- } else
- prot_tree_del(ubi, pnum);
+ } else {
+ int err;
+
+ err = prot_tree_del(ubi, e->pnum);
+ if (err) {
+ ubi_err("PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ }
wl_tree_add(e, &ubi->scrub);
spin_unlock(&ubi->wl_lock);
@@ -1249,17 +1286,32 @@ retry:
*/
int ubi_wl_flush(struct ubi_device *ubi)
{
- int err, pending_count;
-
- pending_count = ubi->works_count;
-
- dbg_wl("flush (%d pending works)", pending_count);
+ int err;
/*
* Erase while the pending works queue is not empty, but not more than
* the number of currently pending works.
*/
- while (pending_count-- > 0) {
+ dbg_wl("flush (%d pending works)", ubi->works_count);
+ while (ubi->works_count) {
+ err = do_work(ubi);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Make sure all the works which are being done in parallel have
+ * finished.
+ */
+ down_write(&ubi->work_sem);
+ up_write(&ubi->work_sem);
+
+ /*
+ * And in case the last work was done by the WL worker and it cancelled
+ * the LEB movement, flush again.
+ */
+ while (ubi->works_count) {
+ dbg_wl("flush more (%d pending works)", ubi->works_count);
err = do_work(ubi);
if (err)
return err;
@@ -1294,7 +1346,7 @@ static void tree_destroy(struct rb_root *root)
rb->rb_right = NULL;
}
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
}
}
}
@@ -1303,7 +1355,7 @@ static void tree_destroy(struct rb_root *root)
* ubi_thread - UBI background thread.
* @u: the UBI device description object pointer
*/
-static int ubi_thread(void *u)
+int ubi_thread(void *u)
{
int failures = 0;
struct ubi_device *ubi = u;
@@ -1394,36 +1446,22 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->used = ubi->free = ubi->scrub = RB_ROOT;
ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
+ mutex_init(&ubi->move_mutex);
+ init_rwsem(&ubi->work_sem);
ubi->max_ec = si->max_ec;
INIT_LIST_HEAD(&ubi->works);
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
- ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
- if (IS_ERR(ubi->bgt_thread)) {
- err = PTR_ERR(ubi->bgt_thread);
- ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
- err);
- return err;
- }
-
- if (ubi_devices_cnt == 0) {
- wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
- sizeof(struct ubi_wl_entry),
- 0, 0, NULL);
- if (!wl_entries_slab)
- return -ENOMEM;
- }
-
err = -ENOMEM;
ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
if (!ubi->lookuptbl)
- goto out_free;
+ return err;
list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@@ -1431,7 +1469,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
e->ec = seb->ec;
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, 0)) {
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
}
@@ -1439,7 +1477,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
list_for_each_entry(seb, &si->free, u.list) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@@ -1453,7 +1491,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
list_for_each_entry(seb, &si->corr, u.list) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@@ -1461,7 +1499,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
e->ec = seb->ec;
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, 0)) {
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
}
@@ -1470,7 +1508,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@@ -1510,8 +1548,6 @@ out_free:
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
kfree(ubi->lookuptbl);
- if (ubi_devices_cnt == 0)
- kmem_cache_destroy(wl_entries_slab);
return err;
}
@@ -1541,7 +1577,7 @@ static void protection_trees_destroy(struct ubi_device *ubi)
rb->rb_right = NULL;
}
- kmem_cache_free(wl_entries_slab, pe->e);
+ kmem_cache_free(ubi_wl_entry_slab, pe->e);
kfree(pe);
}
}
@@ -1553,10 +1589,6 @@ static void protection_trees_destroy(struct ubi_device *ubi)
*/
void ubi_wl_close(struct ubi_device *ubi)
{
- dbg_wl("disable \"%s\"", ubi->bgt_name);
- if (ubi->bgt_thread)
- kthread_stop(ubi->bgt_thread);
-
dbg_wl("close the UBI wear-leveling unit");
cancel_pending(ubi);
@@ -1565,8 +1597,6 @@ void ubi_wl_close(struct ubi_device *ubi)
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
kfree(ubi->lookuptbl);
- if (ubi_devices_cnt == 1)
- kmem_cache_destroy(wl_entries_slab);
}
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f234ba3f0404..9cc25fd80b60 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -920,8 +920,7 @@ config ENC28J60
---help---
Support for the Microchip ENC28J60 Ethernet chip.
- To compile this driver as a module, choose M here and read
- <file:Documentation/networking/net-modules.txt>. The module will be
+ To compile this driver as a module, choose M here. The module will be
called enc28j60.
config ENC28J60_WRITEVERIFY
@@ -1738,10 +1737,8 @@ config SC92031
config CPMAC
tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
- depends on NET_ETHERNET && EXPERIMENTAL && AR7
+ depends on NET_ETHERNET && EXPERIMENTAL && AR7 && BROKEN
select PHYLIB
- select FIXED_PHY
- select FIXED_MII_100_FDX
help
TI AR7 CPMAC Ethernet support
@@ -2041,8 +2038,7 @@ config IGB
More specific information on configuring the driver is in
<file:Documentation/networking/e1000.txt>.
- To compile this driver as a module, choose M here and read
- <file:Documentation/networking/net-modules.txt>. The module
+ To compile this driver as a module, choose M here. The module
will be called igb.
source "drivers/net/ixp2000/Kconfig"
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 6ccebb830ff9..c85194f2cd2d 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -845,15 +845,6 @@ static void cpmac_adjust_link(struct net_device *dev)
spin_unlock(&priv->lock);
}
-static int cpmac_link_update(struct net_device *dev,
- struct fixed_phy_status *status)
-{
- status->link = 1;
- status->speed = 100;
- status->duplex = 1;
- return 0;
-}
-
static int cpmac_open(struct net_device *dev)
{
int i, size, res;
@@ -996,11 +987,11 @@ static int external_switch;
static int __devinit cpmac_probe(struct platform_device *pdev)
{
int rc, phy_id, i;
+ int mdio_bus_id = cpmac_mii.id;
struct resource *mem;
struct cpmac_priv *priv;
struct net_device *dev;
struct plat_cpmac_data *pdata;
- struct fixed_info *fixed_phy;
DECLARE_MAC_BUF(mac);
pdata = pdev->dev.platform_data;
@@ -1014,9 +1005,23 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
}
if (phy_id == PHY_MAX_ADDR) {
- if (external_switch || dumb_switch)
+ if (external_switch || dumb_switch) {
+ struct fixed_phy_status status = {};
+
+ mdio_bus_id = 0;
+
+ /*
+ * FIXME: this should be in the platform code!
+ * Since there is not platform code at all (that is,
+ * no mainline users of that driver), place it here
+ * for now.
+ */
phy_id = 0;
- else {
+ status.link = 1;
+ status.duplex = 1;
+ status.speed = 100;
+ fixed_phy_add(PHY_POLL, phy_id, &status);
+ } else {
printk(KERN_ERR "cpmac: no PHY present\n");
return -ENODEV;
}
@@ -1060,32 +1065,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug_level, 0xff);
memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
- if (phy_id == 31) {
- snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, cpmac_mii.id,
- phy_id);
- } else {
- /* Let's try to get a free fixed phy... */
- for (i = 0; i < MAX_PHY_AMNT; i++) {
- fixed_phy = fixed_mdio_get_phydev(i);
- if (!fixed_phy)
- continue;
- if (!fixed_phy->phydev->attached_dev) {
- strncpy(priv->phy_name,
- fixed_phy->phydev->dev.bus_id,
- BUS_ID_SIZE);
- fixed_mdio_set_link_update(fixed_phy->phydev,
- &cpmac_link_update);
- goto phy_found;
- }
- }
- if (netif_msg_drv(priv))
- printk(KERN_ERR "%s: Could not find fixed PHY\n",
- dev->name);
- rc = -ENODEV;
- goto fail;
- }
+ snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
-phy_found:
priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phy)) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index d48c396bdabb..901c824bfe6d 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1070,9 +1070,7 @@ void *cxgb_alloc_mem(unsigned long size)
*/
void cxgb_free_mem(void *addr)
{
- unsigned long p = (unsigned long)addr;
-
- if (p >= VMALLOC_START && p < VMALLOC_END)
+ if (is_vmalloc_addr(addr))
vfree(addr);
else
kfree(addr);
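
The cxgb3 hunk above drops the open-coded VMALLOC_START/VMALLOC_END range test in favour of is_vmalloc_addr(). The usual pairing is an allocator that falls back from kmalloc() to vmalloc() for large buffers and a free routine that no longer needs to remember which path was taken; a minimal sketch, with illustrative names:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *alloc_big(unsigned long size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vmalloc(size);	/* fall back to vmalloc space */
	return p;
}

static void free_big(void *p)
{
	if (is_vmalloc_addr(p))		/* no flag needed to pick the right free */
		vfree(p);
	else
		kfree(p);
}
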
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 36342230a6de..d4843d014bc9 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -323,8 +323,8 @@ enum {
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
-#define NVREG_MIISTAT_MASK 0x000f
-#define NVREG_MIISTAT_MASK2 0x000f
+#define NVREG_MIISTAT_MASK_RW 0x0007
+#define NVREG_MIISTAT_MASK_ALL 0x000f
NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008
@@ -624,6 +624,9 @@ union ring_type {
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2
+#define NV_RESTART_TX 0x1
+#define NV_RESTART_RX 0x2
+
/* statistics */
struct nv_ethtool_str {
char name[ETH_GSTRING_LEN];
@@ -1061,7 +1064,7 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
u32 reg;
int retval;
- writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
reg = readl(base + NvRegMIIControl);
if (reg & NVREG_MIICTL_INUSE) {
@@ -1432,16 +1435,30 @@ static void nv_mac_reset(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
+ u32 temp1, temp2, temp3;
dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
+
writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
+
+ /* save registers since they will be cleared on reset */
+ temp1 = readl(base + NvRegMacAddrA);
+ temp2 = readl(base + NvRegMacAddrB);
+ temp3 = readl(base + NvRegTransmitPoll);
+
writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
pci_push(base);
udelay(NV_MAC_RESET_DELAY);
writel(0, base + NvRegMacReset);
pci_push(base);
udelay(NV_MAC_RESET_DELAY);
+
+ /* restore saved registers */
+ writel(temp1, base + NvRegMacAddrA);
+ writel(temp2, base + NvRegMacAddrB);
+ writel(temp3, base + NvRegTransmitPoll);
+
writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
pci_push(base);
}
@@ -2767,6 +2784,7 @@ static int nv_update_linkspeed(struct net_device *dev)
int mii_status;
int retval = 0;
u32 control_1000, status_1000, phyreg, pause_flags, txreg;
+ u32 txrxFlags = 0;
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
@@ -2862,6 +2880,16 @@ set_speed:
np->duplex = newdup;
np->linkspeed = newls;
+ /* The transmitter and receiver must be restarted for safe update */
+ if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
+ txrxFlags |= NV_RESTART_TX;
+ nv_stop_tx(dev);
+ }
+ if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
+ txrxFlags |= NV_RESTART_RX;
+ nv_stop_rx(dev);
+ }
+
if (np->gigabit == PHY_GIGABIT) {
phyreg = readl(base + NvRegRandomSeed);
phyreg &= ~(0x3FF00);
@@ -2950,6 +2978,11 @@ set_speed:
}
nv_update_pause(dev, pause_flags);
+ if (txrxFlags & NV_RESTART_TX)
+ nv_start_tx(dev);
+ if (txrxFlags & NV_RESTART_RX)
+ nv_start_rx(dev);
+
return retval;
}
@@ -2976,7 +3009,7 @@ static void nv_link_irq(struct net_device *dev)
u32 miistat;
miistat = readl(base + NvRegMIIStatus);
- writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
if (miistat & (NVREG_MIISTAT_LINKCHANGE))
@@ -4851,7 +4884,7 @@ static int nv_open(struct net_device *dev)
writel(0, base + NvRegMIIMask);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
- writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
@@ -4889,7 +4922,7 @@ static int nv_open(struct net_device *dev)
nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base);
- writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base);
@@ -4912,7 +4945,7 @@ static int nv_open(struct net_device *dev)
{
u32 miistat;
miistat = readl(base + NvRegMIIStatus);
- writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
}
/* set linkspeed to invalid value, thus force nv_update_linkspeed
@@ -5280,7 +5313,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
phystate &= ~NVREG_ADAPTCTL_RUNNING;
writel(phystate, base + NvRegAdapterControl);
}
- writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+ writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
if (id->driver_data & DEV_HAS_MGMT_UNIT) {
/* management unit running on the mac? */
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 100bf410bf5f..6a647d95e6ea 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -127,7 +127,7 @@ int gfar_mdio_reset(struct mii_bus *bus)
struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
unsigned int timeout = PHY_INIT_TIMEOUT;
- spin_lock_bh(&bus->mdio_lock);
+ mutex_lock(&bus->mdio_lock);
/* Reset the management interface */
gfar_write(&regs->miimcfg, MIIMCFG_RESET);
@@ -140,7 +140,7 @@ int gfar_mdio_reset(struct mii_bus *bus)
timeout--)
cpu_relax();
- spin_unlock_bh(&bus->mdio_lock);
+ mutex_unlock(&bus->mdio_lock);
if(timeout <= 0) {
printk(KERN_ERR "%s: The MII Bus is stuck!\n",
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index 11b83dae00ac..e04bf9926441 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -262,8 +262,8 @@ static void tm_isr(struct scc_priv *priv);
static int io[MAX_NUM_DEVS] __initdata = { 0, };
-/* Beware! hw[] is also used in cleanup_module(). */
-static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;
+/* Beware! hw[] is also used in dmascc_exit(). */
+static struct scc_hardware hw[NUM_TYPES] = HARDWARE;
/* Global variables */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 419861cbc65e..58d3bb622da6 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -1020,7 +1020,7 @@ static const struct ethtool_ops ops = {
.get_link = veth_get_link,
};
-static struct net_device * __init veth_probe_one(int vlan,
+static struct net_device *veth_probe_one(int vlan,
struct vio_dev *vio_dev)
{
struct net_device *dev;
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index a021a6e72641..d0bf206632ca 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -136,8 +136,6 @@ struct ixgbe_ring {
u16 head;
u16 tail;
- /* To protect race between sender and clean_tx_irq */
- spinlock_t tx_lock;
struct ixgbe_queue_stats stats;
@@ -174,7 +172,6 @@ struct ixgbe_adapter {
struct vlan_group *vlgrp;
u16 bd_number;
u16 rx_buf_len;
- atomic_t irq_sem;
struct work_struct reset_task;
/* TX */
@@ -244,6 +241,7 @@ extern const char ixgbe_driver_version[];
extern int ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
+extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
extern void ixgbe_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 36353447716d..a119cbd8dbb8 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -103,21 +103,41 @@ static int ixgbe_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = 0;
+ bool link_up;
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
+ if (hw->phy.media_type == ixgbe_media_type_copper) {
+ ecmd->supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_TP | SUPPORTED_Autoneg);
+
+ ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+
+ ecmd->port = PORT_TP;
+ } else {
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising = (ADVERTISED_10000baseT_Full |
+ ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ }
- if (netif_carrier_ok(adapter->netdev)) {
- ecmd->speed = SPEED_10000;
+ adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up);
+ if (link_up) {
+ ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ SPEED_10000 : SPEED_1000;
ecmd->duplex = DUPLEX_FULL;
} else {
ecmd->speed = -1;
ecmd->duplex = -1;
}
- ecmd->autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -125,17 +145,17 @@ static int ixgbe_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
- if (ecmd->autoneg == AUTONEG_ENABLE ||
- ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
- return -EINVAL;
-
- if (netif_running(adapter->netdev)) {
- ixgbe_down(adapter);
- ixgbe_reset(adapter);
- ixgbe_up(adapter);
- } else {
- ixgbe_reset(adapter);
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+ (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ return -EINVAL;
+ /* in this case we currently only support 10Gb/FULL */
+ break;
+ default:
+ break;
}
return 0;
@@ -147,7 +167,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- pause->autoneg = AUTONEG_DISABLE;
+ pause->autoneg = (hw->fc.type == ixgbe_fc_full ? 1 : 0);
if (hw->fc.type == ixgbe_fc_rx_pause) {
pause->rx_pause = 1;
@@ -165,10 +185,8 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- if (pause->autoneg == AUTONEG_ENABLE)
- return -EINVAL;
-
- if (pause->rx_pause && pause->tx_pause)
+ if ((pause->autoneg == AUTONEG_ENABLE) ||
+ (pause->rx_pause && pause->tx_pause))
hw->fc.type = ixgbe_fc_full;
else if (pause->rx_pause && !pause->tx_pause)
hw->fc.type = ixgbe_fc_rx_pause;
@@ -176,15 +194,15 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
hw->fc.type = ixgbe_fc_tx_pause;
else if (!pause->rx_pause && !pause->tx_pause)
hw->fc.type = ixgbe_fc_none;
+ else
+ return -EINVAL;
hw->fc.original_type = hw->fc.type;
- if (netif_running(adapter->netdev)) {
- ixgbe_down(adapter);
- ixgbe_up(adapter);
- } else {
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
ixgbe_reset(adapter);
- }
return 0;
}
@@ -203,12 +221,10 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
else
adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
- if (netif_running(netdev)) {
- ixgbe_down(adapter);
- ixgbe_up(adapter);
- } else {
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
ixgbe_reset(adapter);
- }
return 0;
}
@@ -662,7 +678,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
return 0;
}
- if (netif_running(adapter->netdev))
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (netif_running(netdev))
ixgbe_down(adapter);
/*
@@ -733,6 +752,7 @@ err_setup:
if (netif_running(adapter->netdev))
ixgbe_up(adapter);
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
}
@@ -820,11 +840,8 @@ static int ixgbe_nway_reset(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev)) {
- ixgbe_down(adapter);
- ixgbe_reset(adapter);
- ixgbe_up(adapter);
- }
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 3732dd6c4b2a..ead49e54f31b 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -87,6 +87,25 @@ MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ /* Let firmware take over control of h/w */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+ ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+}
+
+static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
+{
+ u32 ctrl_ext;
+
+ /* Let firmware know the driver has taken over */
+ ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+ ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+}
#ifdef DEBUG
/**
@@ -165,6 +184,15 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
return false;
}
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+
/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
@@ -177,18 +205,34 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int i, eop;
bool cleaned = false;
- int count = 0;
+ unsigned int total_tx_bytes = 0, total_tx_packets = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
- for (cleaned = false; !cleaned;) {
+ cleaned = false;
+ while (!cleaned) {
tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
cleaned = (i == eop);
tx_ring->stats.bytes += tx_buffer_info->length;
+ if (cleaned) {
+ struct sk_buff *skb = tx_buffer_info->skb;
+#ifdef NETIF_F_TSO
+ unsigned int segs, bytecount;
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_tx_packets += segs;
+ total_tx_bytes += bytecount;
+#else
+ total_tx_packets++;
+ total_tx_bytes += skb->len;
+#endif
+ }
ixgbe_unmap_and_free_tx_resource(adapter,
tx_buffer_info);
tx_desc->wb.status = 0;
@@ -204,29 +248,36 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
/* weight of a sort for tx, avoid endless transmit cleanup */
- if (count++ >= tx_ring->work_limit)
+ if (total_tx_packets >= tx_ring->work_limit)
break;
}
tx_ring->next_to_clean = i;
-#define TX_WAKE_THRESHOLD 32
- spin_lock(&tx_ring->tx_lock);
-
- if (cleaned && netif_carrier_ok(netdev) &&
- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) &&
- !test_bit(__IXGBE_DOWN, &adapter->state))
- netif_wake_queue(netdev);
-
- spin_unlock(&tx_ring->tx_lock);
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (total_tx_packets && netif_carrier_ok(netdev) &&
+ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_queue_stopped(netdev) &&
+ !test_bit(__IXGBE_DOWN, &adapter->state)) {
+ netif_wake_queue(netdev);
+ adapter->restart_queue++;
+ }
+ }
if (adapter->detect_tx_hung)
if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
netif_stop_queue(netdev);
- if (count >= tx_ring->work_limit)
+ if (total_tx_packets >= tx_ring->work_limit)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
+ adapter->net_stats.tx_bytes += total_tx_bytes;
+ adapter->net_stats.tx_packets += total_tx_packets;
+ cleaned = total_tx_packets ? true : false;
return cleaned;
}
@@ -255,25 +306,40 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
}
}
+/**
+ * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
u32 status_err,
struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
- /* Ignore Checksum bit is set */
+ /* Ignore Checksum bit is set, or rx csum disabled */
if ((status_err & IXGBE_RXD_STAT_IXSM) ||
- !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
return;
- /* TCP/UDP checksum error bit is set */
- if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) {
- /* let the stack verify checksum errors */
+
+ /* if IP and error */
+ if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+ (status_err & IXGBE_RXDADV_ERR_IPE)) {
adapter->hw_csum_rx_error++;
return;
}
+
+ if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ return;
+
+ if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
/* It must be a TCP or UDP packet with a valid checksum */
- if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_rx_good++;
}
@@ -379,6 +445,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
u16 hdr_info, vlan_tag;
bool is_vlan, cleaned = false;
int cleaned_count = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
upper_len = 0;
@@ -458,6 +525,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
}
ixgbe_rx_checksum(adapter, staterr, skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
skb->protocol = eth_type_trans(skb, netdev);
ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
netdev->last_rx = jiffies;
@@ -486,6 +558,9 @@ next_desc:
if (cleaned_count)
ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_packets += total_rx_packets;
+
return cleaned;
}
@@ -535,7 +610,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (!test_bit(__IXGBE_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer, jiffies);
}
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
+
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
return IRQ_HANDLED;
}
@@ -713,7 +790,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
/* Disable interrupts and register for poll. The flush of the
* posted write is intentionally left out. */
- atomic_inc(&adapter->irq_sem);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
__netif_rx_schedule(netdev, &adapter->napi);
}
@@ -801,7 +877,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
- atomic_inc(&adapter->irq_sem);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
@@ -813,15 +888,13 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
**/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
- if (atomic_dec_and_test(&adapter->irq_sem)) {
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
- (IXGBE_EIMS_ENABLE_MASK &
- ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
- IXGBE_EIMS_ENABLE_MASK);
- IXGBE_WRITE_FLUSH(&adapter->hw);
- }
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
+ (IXGBE_EIMS_ENABLE_MASK &
+ ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
+ IXGBE_EIMS_ENABLE_MASK);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
@@ -1040,7 +1113,8 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u32 ctrl;
- ixgbe_irq_disable(adapter);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_disable(adapter);
adapter->vlgrp = grp;
if (grp) {
@@ -1051,7 +1125,8 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
}
- ixgbe_irq_enable(adapter);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter);
}
static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -1066,9 +1141,13 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- ixgbe_irq_disable(adapter);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_disable(adapter);
+
vlan_group_set_device(adapter->vlgrp, vid, NULL);
- ixgbe_irq_enable(adapter);
+
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter);
/* remove VID from filter table */
ixgbe_set_vfta(&adapter->hw, vid, 0, false);
@@ -1170,6 +1249,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
u32 txdctl, rxdctl, mhadd;
int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ ixgbe_get_hw_control(adapter);
+
if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
IXGBE_FLAG_MSI_ENABLED)) {
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1224,6 +1305,16 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
return 0;
}
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ msleep(1);
+ ixgbe_down(adapter);
+ ixgbe_up(adapter);
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+
int ixgbe_up(struct ixgbe_adapter *adapter)
{
/* hardware has been reset, we need to reload some things */
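
ixgbe_reinit_locked() above serializes full down/up cycles (triggered from ethtool, MTU changes and the reset task) by spinning on the __IXGBE_RESETTING bit: test_and_set_bit() acts as a try-lock and msleep(1) backs off until the current reset finishes. A minimal sketch of that bit-as-lock pattern, with made-up names:

#include <linux/bitops.h>
#include <linux/delay.h>

enum { __MY_RESETTING };		/* bit number in @state */

static unsigned long state;

static void my_down(void) { /* stop the device */ }
static void my_up(void)   { /* bring the device back up */ }

static void my_reinit_locked(void)
{
	/* Only one caller at a time may run the down/up cycle. */
	while (test_and_set_bit(__MY_RESETTING, &state))
		msleep(1);

	my_down();
	my_up();

	clear_bit(__MY_RESETTING, &state);
}
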
@@ -1408,7 +1499,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
msleep(10);
napi_disable(&adapter->napi);
- atomic_set(&adapter->irq_sem, 0);
ixgbe_irq_disable(adapter);
@@ -1447,6 +1537,8 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+ ixgbe_release_hw_control(adapter);
+
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1481,7 +1573,8 @@ static int ixgbe_clean(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
netif_rx_complete(netdev, napi);
- ixgbe_irq_enable(adapter);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable(adapter);
}
return work_done;
@@ -1506,8 +1599,7 @@ static void ixgbe_reset_task(struct work_struct *work)
adapter->tx_timeout_count++;
- ixgbe_down(adapter);
- ixgbe_up(adapter);
+ ixgbe_reinit_locked(adapter);
}
/**
@@ -1590,7 +1682,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
return -ENOMEM;
}
- atomic_set(&adapter->irq_sem, 1);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -1634,7 +1725,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
txdr->next_to_use = 0;
txdr->next_to_clean = 0;
txdr->work_limit = txdr->count;
- spin_lock_init(&txdr->tx_lock);
return 0;
}
@@ -1828,10 +1918,8 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
- if (netif_running(netdev)) {
- ixgbe_down(adapter);
- ixgbe_up(adapter);
- }
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
return 0;
}
@@ -1852,14 +1940,8 @@ static int ixgbe_open(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int err;
- u32 ctrl_ext;
u32 num_rx_queues = adapter->num_rx_queues;
- /* Let firmware know the driver has taken over */
- ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
-
try_intr_reinit:
/* allocate transmit descriptors */
err = ixgbe_setup_all_tx_resources(adapter);
@@ -1910,6 +1992,7 @@ try_intr_reinit:
return 0;
err_up:
+ ixgbe_release_hw_control(adapter);
ixgbe_free_irq(adapter);
err_req_irq:
ixgbe_free_all_rx_resources(adapter);
@@ -1935,7 +2018,6 @@ err_setup_tx:
static int ixgbe_close(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 ctrl_ext;
ixgbe_down(adapter);
ixgbe_free_irq(adapter);
@@ -1943,9 +2025,7 @@ static int ixgbe_close(struct net_device *netdev)
ixgbe_free_all_tx_resources(adapter);
ixgbe_free_all_rx_resources(adapter);
- ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
- ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+ ixgbe_release_hw_control(adapter);
return 0;
}
@@ -1957,22 +2037,26 @@ static int ixgbe_close(struct net_device *netdev)
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u64 good_rx, missed_rx, bprc;
+ u64 total_mpc = 0;
+ u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
- good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC);
- missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6));
- missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7));
- adapter->stats.gprc += (good_rx - missed_rx);
-
- adapter->stats.mpc[0] += missed_rx;
+ for (i = 0; i < 8; i++) {
+ /* for packet buffers not used, the register should read 0 */
+ mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+ missed_rx += mpc;
+ adapter->stats.mpc[i] += mpc;
+ total_mpc += adapter->stats.mpc[i];
+ adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+ }
+ adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+ /* work around hardware counting issue */
+ adapter->stats.gprc -= missed_rx;
+
+ /* 82598 hardware only has a 32 bit counter in the high register */
adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+ adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+ adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
adapter->stats.bprc += bprc;
adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
@@ -1984,35 +2068,37 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
-
adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
- adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
- adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+ adapter->stats.lxontxc += lxon;
+ lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+ adapter->stats.lxofftxc += lxoff;
adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
- adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
- adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0));
+ adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+ /*
+ * 82598 errata - tx of flow control packets is included in tx counters
+ */
+ xon_off_tot = lxon + lxoff;
+ adapter->stats.gptc -= xon_off_tot;
+ adapter->stats.mptc -= xon_off_tot;
+ adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
- adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+ adapter->stats.ptc64 -= xon_off_tot;
adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
- adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
/* Fill out the OS statistics structure */
- adapter->net_stats.rx_packets = adapter->stats.gprc;
- adapter->net_stats.tx_packets = adapter->stats.gptc;
- adapter->net_stats.rx_bytes = adapter->stats.gorc;
- adapter->net_stats.tx_bytes = adapter->stats.gotc;
adapter->net_stats.multicast = adapter->stats.mprc;
/* Rx Errors */
@@ -2021,8 +2107,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->net_stats.rx_dropped = 0;
adapter->net_stats.rx_length_errors = adapter->stats.rlec;
adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
- adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0];
-
+ adapter->net_stats.rx_missed_errors = total_mpc;
}
/**
@@ -2076,15 +2161,6 @@ static void ixgbe_watchdog(unsigned long data)
round_jiffies(jiffies + 2 * HZ));
}
-#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
- (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-
static int ixgbe_tso(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, u8 *hdr_len)
@@ -2356,6 +2432,37 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
+static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbe_ring *tx_ring, int size)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in case another CPU has just
+ * made room available. */
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int ixgbe_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbe_ring *tx_ring, int size)
+{
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+}
+
+
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
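
With the per-ring tx_lock gone, the transmit and cleanup paths above rely on the classic lockless stop/wake handshake: the sender stops the queue, issues a memory barrier and re-checks the free-descriptor count, while the cleaner publishes its progress with a barrier before testing whether the queue is stopped. A minimal sketch of both sides, assuming an illustrative free_descs() helper in place of IXGBE_DESC_UNUSED():

#include <linux/netdevice.h>

#define WAKE_THRESHOLD	32

static int free_descs(void)
{
	return 0;	/* stand-in for IXGBE_DESC_UNUSED(tx_ring) */
}

/* Transmit path: stop, then re-check in case the cleaner just made room. */
static int maybe_stop(struct net_device *dev, int needed)
{
	if (free_descs() >= needed)
		return 0;

	netif_stop_queue(dev);
	smp_mb();			/* pairs with the barrier in the cleaner */
	if (free_descs() < needed)
		return -EBUSY;

	netif_wake_queue(dev);		/* room appeared meanwhile, undo the stop */
	return 0;
}

/* Cleanup path: publish the new free count before testing "stopped". */
static void after_clean(struct net_device *dev)
{
	smp_mb();
	if (netif_queue_stopped(dev) && free_descs() >= WAKE_THRESHOLD)
		netif_wake_queue(dev);
}
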
@@ -2363,7 +2470,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int len = skb->len;
unsigned int first;
unsigned int tx_flags = 0;
- unsigned long flags = 0;
u8 hdr_len;
int tso;
unsigned int mss = 0;
@@ -2389,14 +2495,10 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
for (f = 0; f < nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
- spin_lock_irqsave(&tx_ring->tx_lock, flags);
- if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) {
+ if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
adapter->tx_busy++;
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_BUSY;
}
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
tx_flags |= IXGBE_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
@@ -2423,11 +2525,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
netdev->trans_start = jiffies;
- spin_lock_irqsave(&tx_ring->tx_lock, flags);
- /* Make sure there is space in the ring for the next send. */
- if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED)
- netif_stop_queue(netdev);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+ ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
}
@@ -2697,6 +2795,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
return 0;
err_register:
+ ixgbe_release_hw_control(adapter);
err_hw_init:
err_sw_init:
err_eeprom:
@@ -2732,6 +2831,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ ixgbe_release_hw_control(adapter);
+
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 535a4461d88c..61dc4951d6b0 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -617,9 +617,6 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
int err;
#define QUERY_ADAPTER_OUT_SIZE 0x100
-#define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00
-#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04
-#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08
#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
#define QUERY_ADAPTER_VSD_OFFSET 0x20
@@ -633,9 +630,6 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
if (err)
goto out;
- MLX4_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET);
- MLX4_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET);
- MLX4_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 7e1dd9e25cfb..e16dec890413 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -99,9 +99,6 @@ struct mlx4_dev_cap {
};
struct mlx4_adapter {
- u32 vendor_id;
- u32 device_id;
- u32 revision_id;
char board_id[MLX4_BOARD_ID_LEN];
u8 inta_pin;
};
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 89b3f0b7cdc0..08bfc130a33e 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
-static const char mlx4_version[] __devinitdata =
+static char mlx4_version[] __devinitdata =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -163,7 +163,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
-static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
+static int mlx4_load_fw(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
@@ -197,8 +197,8 @@ err_free:
return err;
}
-static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
- int cmpt_entry_sz)
+static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
+ int cmpt_entry_sz)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
@@ -534,7 +534,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
}
priv->eq_table.inta_pin = adapter.inta_pin;
- dev->rev_id = adapter.revision_id;
memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
return 0;
@@ -688,7 +687,7 @@ err_uar_table_free:
return err;
}
-static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
+static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry entries[MLX4_NUM_EQ];
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0c05a10bae3b..9c9e308d0917 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -122,7 +122,7 @@ static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
spin_unlock(&buddy->lock);
}
-static int __devinit mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
+static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
{
int i, s;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 651c2699d5e1..b528ce77c406 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1652,6 +1652,11 @@ static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
}
}
+static inline __be16 sum16_as_be(__sum16 sum)
+{
+ return (__force __be16)sum;
+}
+
/**
* eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw
*
@@ -1689,7 +1694,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- BUG_ON(skb->protocol != ETH_P_IP);
+ BUG_ON(skb->protocol != htons(ETH_P_IP));
cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
ETH_GEN_IP_V_4_CHECKSUM |
@@ -1698,10 +1703,10 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
cmd_sts |= ETH_UDP_FRAME;
- desc->l4i_chk = udp_hdr(skb)->check;
+ desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
break;
case IPPROTO_TCP:
- desc->l4i_chk = tcp_hdr(skb)->check;
+ desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
break;
default:
BUG();
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 36a7ba3134ce..3b78a3819bb3 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -230,10 +230,11 @@ static char mii_preamble_required = 0;
static int tc574_config(struct pcmcia_device *link);
static void tc574_release(struct pcmcia_device *link);
-static void mdio_sync(kio_addr_t ioaddr, int bits);
-static int mdio_read(kio_addr_t ioaddr, int phy_id, int location);
-static void mdio_write(kio_addr_t ioaddr, int phy_id, int location, int value);
-static unsigned short read_eeprom(kio_addr_t ioaddr, int index);
+static void mdio_sync(unsigned int ioaddr, int bits);
+static int mdio_read(unsigned int ioaddr, int phy_id, int location);
+static void mdio_write(unsigned int ioaddr, int phy_id, int location,
+ int value);
+static unsigned short read_eeprom(unsigned int ioaddr, int index);
static void tc574_wait_for_completion(struct net_device *dev, int cmd);
static void tc574_reset(struct net_device *dev);
@@ -341,7 +342,7 @@ static int tc574_config(struct pcmcia_device *link)
tuple_t tuple;
__le16 buf[32];
int last_fn, last_ret, i, j;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
__be16 *phys_addr;
char *cardname;
__u32 config;
@@ -515,7 +516,7 @@ static int tc574_resume(struct pcmcia_device *link)
static void dump_status(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
EL3WINDOW(1);
printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
"%02x, tx free %04x\n", inw(ioaddr+EL3_STATUS),
@@ -544,7 +545,7 @@ static void tc574_wait_for_completion(struct net_device *dev, int cmd)
/* Read a word from the EEPROM using the regular EEPROM access register.
Assume that we are in register window zero.
*/
-static unsigned short read_eeprom(kio_addr_t ioaddr, int index)
+static unsigned short read_eeprom(unsigned int ioaddr, int index)
{
int timer;
outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd);
@@ -572,9 +573,9 @@ static unsigned short read_eeprom(kio_addr_t ioaddr, int index)
/* Generate the preamble required for initial synchronization and
a few older transceivers. */
-static void mdio_sync(kio_addr_t ioaddr, int bits)
+static void mdio_sync(unsigned int ioaddr, int bits)
{
- kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
/* Establish sync by sending at least 32 logic ones. */
while (-- bits >= 0) {
@@ -583,12 +584,12 @@ static void mdio_sync(kio_addr_t ioaddr, int bits)
}
}
-static int mdio_read(kio_addr_t ioaddr, int phy_id, int location)
+static int mdio_read(unsigned int ioaddr, int phy_id, int location)
{
int i;
int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
unsigned int retval = 0;
- kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
if (mii_preamble_required)
mdio_sync(ioaddr, 32);
@@ -608,10 +609,10 @@ static int mdio_read(kio_addr_t ioaddr, int phy_id, int location)
return (retval>>1) & 0xffff;
}
-static void mdio_write(kio_addr_t ioaddr, int phy_id, int location, int value)
+static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value)
{
int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
- kio_addr_t mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+ unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt;
int i;
if (mii_preamble_required)
@@ -637,7 +638,7 @@ static void tc574_reset(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
int i;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
unsigned long flags;
tc574_wait_for_completion(dev, TotalReset|0x10);
@@ -695,7 +696,7 @@ static void tc574_reset(struct net_device *dev)
mdio_write(ioaddr, lp->phys, 4, lp->advertising);
if (!auto_polarity) {
/* works for TDK 78Q2120 series MII's */
- int i = mdio_read(ioaddr, lp->phys, 16) | 0x20;
+ i = mdio_read(ioaddr, lp->phys, 16) | 0x20;
mdio_write(ioaddr, lp->phys, 16, i);
}
@@ -741,7 +742,7 @@ static int el3_open(struct net_device *dev)
static void el3_tx_timeout(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
dump_status(dev);
@@ -756,7 +757,7 @@ static void el3_tx_timeout(struct net_device *dev)
static void pop_tx_status(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i;
/* Clear the Tx status stack. */
@@ -779,7 +780,7 @@ static void pop_tx_status(struct net_device *dev)
static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct el3_private *lp = netdev_priv(dev);
unsigned long flags;
@@ -813,7 +814,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
unsigned status;
int work_budget = max_interrupt_work;
int handled = 0;
@@ -907,7 +908,7 @@ static void media_check(unsigned long arg)
{
struct net_device *dev = (struct net_device *) arg;
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
unsigned long flags;
unsigned short /* cable, */ media, partner;
@@ -996,7 +997,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
static void update_stats(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u8 rx, tx, up;
DEBUG(2, "%s: updating the statistics.\n", dev->name);
@@ -1033,7 +1034,7 @@ static void update_stats(struct net_device *dev)
static int el3_rx(struct net_device *dev, int worklimit)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
short rx_status;
DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
@@ -1094,7 +1095,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 *data = (u16 *)&rq->ifr_ifru;
int phy = lp->phys & 0x1f;
@@ -1148,7 +1149,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void set_rx_mode(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
if (dev->flags & IFF_PROMISC)
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
@@ -1161,7 +1162,7 @@ static void set_rx_mode(struct net_device *dev)
static int el3_close(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
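Editorial sketch (not part of the patch): this and the following PCMCIA driver diffs all make the same mechanical substitution, replacing the old kio_addr_t typedef for I/O port numbers with a plain unsigned int that is handed straight to the port accessors. A minimal illustration of the resulting idiom; fake_inw() is a stand-in for the real inw() and the register offset is made up:

#include <stdio.h>

static unsigned short fake_inw(unsigned int port)
{
        return (unsigned short)(port & 0xffff);   /* stand-in for the real inw() */
}

static void dump_status(unsigned int ioaddr)
{
        /* register offsets are simply added to the unsigned int base */
        unsigned int status_reg = ioaddr + 0x0e;

        printf("status at %#x: %#06x\n", status_reg, (unsigned)fake_inw(status_reg));
}

int main(void)
{
        unsigned int ioaddr = 0x300;    /* a typical legacy I/O base */

        dump_status(ioaddr);
        return 0;
}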
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index e862d14ece79..1b1abb19c911 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -145,7 +145,7 @@ DRV_NAME ".c " DRV_VERSION " 2001/10/13 00:08:50 (David Hinds)";
static int tc589_config(struct pcmcia_device *link);
static void tc589_release(struct pcmcia_device *link);
-static u16 read_eeprom(kio_addr_t ioaddr, int index);
+static u16 read_eeprom(unsigned int ioaddr, int index);
static void tc589_reset(struct net_device *dev);
static void media_check(unsigned long arg);
static int el3_config(struct net_device *dev, struct ifmap *map);
@@ -254,7 +254,7 @@ static int tc589_config(struct pcmcia_device *link)
__le16 buf[32];
__be16 *phys_addr;
int last_fn, last_ret, i, j, multi = 0, fifo;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
DECLARE_MAC_BUF(mac);
@@ -403,7 +403,7 @@ static void tc589_wait_for_completion(struct net_device *dev, int cmd)
Read a word from the EEPROM using the regular EEPROM access register.
Assume that we are in register window zero.
*/
-static u16 read_eeprom(kio_addr_t ioaddr, int index)
+static u16 read_eeprom(unsigned int ioaddr, int index)
{
int i;
outw(EEPROM_READ + index, ioaddr + 10);
@@ -421,7 +421,7 @@ static u16 read_eeprom(kio_addr_t ioaddr, int index)
static void tc589_set_xcvr(struct net_device *dev, int if_port)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
EL3WINDOW(0);
switch (if_port) {
@@ -443,7 +443,7 @@ static void tc589_set_xcvr(struct net_device *dev, int if_port)
static void dump_status(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
EL3WINDOW(1);
printk(KERN_INFO " irq status %04x, rx status %04x, tx status "
"%02x tx free %04x\n", inw(ioaddr+EL3_STATUS),
@@ -459,7 +459,7 @@ static void dump_status(struct net_device *dev)
/* Reset and restore all of the 3c589 registers. */
static void tc589_reset(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i;
EL3WINDOW(0);
@@ -567,7 +567,7 @@ static int el3_open(struct net_device *dev)
static void el3_tx_timeout(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name);
dump_status(dev);
@@ -582,7 +582,7 @@ static void el3_tx_timeout(struct net_device *dev)
static void pop_tx_status(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i;
/* Clear the Tx status stack. */
@@ -604,7 +604,7 @@ static void pop_tx_status(struct net_device *dev)
static int el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct el3_private *priv = netdev_priv(dev);
unsigned long flags;
@@ -641,7 +641,7 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
__u16 status;
int i = 0, handled = 1;
@@ -727,7 +727,7 @@ static void media_check(unsigned long arg)
{
struct net_device *dev = (struct net_device *)(arg);
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 media, errs;
unsigned long flags;
@@ -828,7 +828,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
static void update_stats(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
DEBUG(2, "%s: updating the statistics.\n", dev->name);
/* Turn off statistics updates while reading. */
@@ -855,7 +855,7 @@ static void update_stats(struct net_device *dev)
static int el3_rx(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int worklimit = 32;
short rx_status;
@@ -909,7 +909,7 @@ static void set_multicast_list(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 opts = SetRxFilter | RxStation | RxBroadcast;
if (!pcmcia_dev_present(link)) return;
@@ -924,7 +924,7 @@ static int el3_close(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
DEBUG(1, "%s: shutting down ethercard.\n", dev->name);
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 6d342f6c14f6..e8a63e483a2b 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -96,8 +96,8 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id);
static void ei_watchdog(u_long arg);
static void axnet_reset_8390(struct net_device *dev);
-static int mdio_read(kio_addr_t addr, int phy_id, int loc);
-static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value);
+static int mdio_read(unsigned int addr, int phy_id, int loc);
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value);
static void get_8390_hdr(struct net_device *,
struct e8390_pkt_hdr *, int);
@@ -203,7 +203,7 @@ static void axnet_detach(struct pcmcia_device *link)
static int get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i, j;
/* This is based on drivers/net/ne.c */
@@ -473,7 +473,7 @@ static int axnet_resume(struct pcmcia_device *link)
#define MDIO_MASK 0x0f
#define MDIO_ENB_IN 0x02
-static void mdio_sync(kio_addr_t addr)
+static void mdio_sync(unsigned int addr)
{
int bits;
for (bits = 0; bits < 32; bits++) {
@@ -482,7 +482,7 @@ static void mdio_sync(kio_addr_t addr)
}
}
-static int mdio_read(kio_addr_t addr, int phy_id, int loc)
+static int mdio_read(unsigned int addr, int phy_id, int loc)
{
u_int cmd = (0xf6<<10)|(phy_id<<5)|loc;
int i, retval = 0;
@@ -501,7 +501,7 @@ static int mdio_read(kio_addr_t addr, int phy_id, int loc)
return (retval>>1) & 0xffff;
}
-static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
{
u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
int i;
@@ -575,7 +575,7 @@ static int axnet_close(struct net_device *dev)
static void axnet_reset_8390(struct net_device *dev)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
int i;
ei_status.txing = ei_status.dmaing = 0;
@@ -610,8 +610,8 @@ static void ei_watchdog(u_long arg)
{
struct net_device *dev = (struct net_device *)(arg);
axnet_dev_t *info = PRIV(dev);
- kio_addr_t nic_base = dev->base_addr;
- kio_addr_t mii_addr = nic_base + AXNET_MII_EEP;
+ unsigned int nic_base = dev->base_addr;
+ unsigned int mii_addr = nic_base + AXNET_MII_EEP;
u_short link;
if (!netif_device_present(dev)) goto reschedule;
@@ -681,7 +681,7 @@ static int axnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
axnet_dev_t *info = PRIV(dev);
u16 *data = (u16 *)&rq->ifr_ifru;
- kio_addr_t mii_addr = dev->base_addr + AXNET_MII_EEP;
+ unsigned int mii_addr = dev->base_addr + AXNET_MII_EEP;
switch (cmd) {
case SIOCGMIIPHY:
data[0] = info->phy_id;
@@ -703,7 +703,7 @@ static void get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr,
int ring_page)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
outb_p(0, nic_base + EN0_RSARLO); /* On page boundary */
outb_p(ring_page, nic_base + EN0_RSARHI);
@@ -721,7 +721,7 @@ static void get_8390_hdr(struct net_device *dev,
static void block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
int xfer_count = count;
char *buf = skb->data;
@@ -744,7 +744,7 @@ static void block_input(struct net_device *dev, int count,
static void block_output(struct net_device *dev, int count,
const u_char *buf, const int start_page)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
#ifdef PCMCIA_DEBUG
if (ei_debug > 4)
@@ -991,7 +991,7 @@ static int ax_open(struct net_device *dev)
*
* Opposite of ax_open(). Only used when "ifconfig <devname> down" is done.
*/
-int ax_close(struct net_device *dev)
+static int ax_close(struct net_device *dev)
{
unsigned long flags;
@@ -1014,7 +1014,7 @@ int ax_close(struct net_device *dev)
* completed (or failed) - i.e. never posted a Tx related interrupt.
*/
-void ei_tx_timeout(struct net_device *dev)
+static void ei_tx_timeout(struct net_device *dev)
{
long e8390_base = dev->base_addr;
struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
@@ -1087,8 +1087,8 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
ei_local->irqlock = 1;
- send_length = ETH_ZLEN < length ? length : ETH_ZLEN;
-
+ send_length = max(length, ETH_ZLEN);
+
/*
* We have two Tx slots available for use. Find the first free
* slot, and then perform some sanity checks. With two Tx bufs,
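Editorial sketch (not part of the patch): besides making ax_close() and ei_tx_timeout() static, the axnet hunk replaces an open-coded ternary with max() when padding a frame up to the Ethernet minimum. A rough userspace equivalent; ETH_ZLEN is 60 (minimum frame length without the FCS) and MAX() stands in for the kernel macro, which additionally enforces matching argument types:

#include <stdio.h>

#define ETH_ZLEN 60                              /* minimum Ethernet frame length, no FCS */
#define MAX(a, b) ((a) > (b) ? (a) : (b))        /* stand-in for the kernel's max() */

int main(void)
{
        int length = 42;                         /* a short frame */
        int send_length = MAX(length, ETH_ZLEN); /* padded up to the 60-byte minimum */

        printf("send_length = %d\n", send_length);
        return 0;
}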
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 949c6df74c97..8f328a03847b 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -298,7 +298,8 @@ do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
static int mfc_try_io_port(struct pcmcia_device *link)
{
int i, ret;
- static const kio_addr_t serial_base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
+ static const unsigned int serial_base[5] =
+ { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
for (i = 0; i < 5; i++) {
link->io.BasePort2 = serial_base[i];
@@ -316,7 +317,7 @@ static int mfc_try_io_port(struct pcmcia_device *link)
static int ungermann_try_io_port(struct pcmcia_device *link)
{
int ret;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
/*
Ungermann-Bass Access/CARD accepts 0x300,0x320,0x340,0x360
0x380,0x3c0 only for ioport.
@@ -342,7 +343,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
cisparse_t parse;
u_short buf[32];
int i, last_fn = 0, last_ret = 0, ret;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
cardtype_t cardtype;
char *card_name = "unknown";
u_char *node_id;
@@ -610,7 +611,7 @@ static int fmvj18x_setup_mfc(struct pcmcia_device *link)
u_char __iomem *base;
int i, j;
struct net_device *dev = link->priv;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
/* Allocate a small memory window */
req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE;
@@ -735,7 +736,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
{
struct net_device *dev = dev_id;
local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
unsigned short tx_stat, rx_stat;
ioaddr = dev->base_addr;
@@ -789,7 +790,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
static void fjn_tx_timeout(struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
printk(KERN_NOTICE "%s: transmit timed out with status %04x, %s?\n",
dev->name, htons(inw(ioaddr + TX_STATUS)),
@@ -819,7 +820,7 @@ static void fjn_tx_timeout(struct net_device *dev)
static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
short length = skb->len;
if (length < ETH_ZLEN)
@@ -892,7 +893,7 @@ static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void fjn_reset(struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i;
DEBUG(4, "fjn_reset(%s) called.\n",dev->name);
@@ -971,7 +972,7 @@ static void fjn_reset(struct net_device *dev)
static void fjn_rx(struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int boguscount = 10; /* 5 -> 10: by agy 19940922 */
DEBUG(4, "%s: in rx_packet(), rx_status %02x.\n",
@@ -1125,7 +1126,7 @@ static int fjn_close(struct net_device *dev)
{
struct local_info_t *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
DEBUG(4, "fjn_close('%s').\n", dev->name);
@@ -1168,7 +1169,7 @@ static struct net_device_stats *fjn_get_stats(struct net_device *dev)
static void set_rx_mode(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_char mc_filter[8]; /* Multicast hash filter */
u_long flags;
int i;
@@ -1197,8 +1198,7 @@ static void set_rx_mode(struct net_device *dev)
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
struct dev_mc_list *mclist;
- int i;
-
+
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
i++, mclist = mclist->next) {
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index a355a93b908b..cfcbea9b7e2e 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -518,7 +518,7 @@ mace_read
assuming that during normal operation, the MACE is always in
bank 0.
---------------------------------------------------------------------------- */
-static int mace_read(mace_private *lp, kio_addr_t ioaddr, int reg)
+static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
{
int data = 0xFF;
unsigned long flags;
@@ -545,7 +545,8 @@ mace_write
are assuming that during normal operation, the MACE is always in
bank 0.
---------------------------------------------------------------------------- */
-static void mace_write(mace_private *lp, kio_addr_t ioaddr, int reg, int data)
+static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
+ int data)
{
unsigned long flags;
@@ -567,7 +568,7 @@ static void mace_write(mace_private *lp, kio_addr_t ioaddr, int reg, int data)
mace_init
Resets the MACE chip.
---------------------------------------------------------------------------- */
-static int mace_init(mace_private *lp, kio_addr_t ioaddr, char *enet_addr)
+static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
{
int i;
int ct = 0;
@@ -657,7 +658,7 @@ static int nmclan_config(struct pcmcia_device *link)
tuple_t tuple;
u_char buf[64];
int i, last_ret, last_fn;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
DECLARE_MAC_BUF(mac);
DEBUG(0, "nmclan_config(0x%p)\n", link);
@@ -839,7 +840,7 @@ mace_open
---------------------------------------------------------------------------- */
static int mace_open(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
mace_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
@@ -862,7 +863,7 @@ mace_close
---------------------------------------------------------------------------- */
static int mace_close(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
mace_private *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
@@ -935,7 +936,7 @@ static void mace_tx_timeout(struct net_device *dev)
static int mace_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
netif_stop_queue(dev);
@@ -996,7 +997,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
mace_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
int status;
int IntrCnt = MACE_MAX_IR_ITERATIONS;
@@ -1140,7 +1141,7 @@ mace_rx
static int mace_rx(struct net_device *dev, unsigned char RxCnt)
{
mace_private *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
unsigned char rx_framecnt;
unsigned short rx_status;
@@ -1302,7 +1303,7 @@ update_stats
card's SRAM fast enough. If this happens, something is
seriously wrong with the hardware.
---------------------------------------------------------------------------- */
-static void update_stats(kio_addr_t ioaddr, struct net_device *dev)
+static void update_stats(unsigned int ioaddr, struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
@@ -1448,7 +1449,7 @@ static void restore_multicast_list(struct net_device *dev)
mace_private *lp = netdev_priv(dev);
int num_addrs = lp->multicast_num_addrs;
int *ladrf = lp->multicast_ladrf;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i;
DEBUG(2, "%s: restoring Rx mode to %d addresses.\n",
@@ -1540,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
static void restore_multicast_list(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
mace_private *lp = netdev_priv(dev);
DEBUG(2, "%s: restoring Rx mode to %d addresses.\n", dev->name,
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 9ba56aa26a1b..6323988dfa1d 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -349,7 +349,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link)
static hw_info_t *get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_char prom[32];
int i, j;
@@ -425,7 +425,7 @@ static hw_info_t *get_dl10019(struct pcmcia_device *link)
static hw_info_t *get_ax88190(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i, j;
/* Not much of a test, but the alternatives are messy */
@@ -521,7 +521,7 @@ static int pcnet_config(struct pcmcia_device *link)
int i, last_ret, last_fn, start_pg, stop_pg, cm_offset;
int has_shmem = 0;
u_short buf[64];
- hw_info_t *hw_info;
+ hw_info_t *local_hw_info;
DECLARE_MAC_BUF(mac);
DEBUG(0, "pcnet_config(0x%p)\n", link);
@@ -590,23 +590,23 @@ static int pcnet_config(struct pcmcia_device *link)
dev->if_port = 0;
}
- hw_info = get_hwinfo(link);
- if (hw_info == NULL)
- hw_info = get_prom(link);
- if (hw_info == NULL)
- hw_info = get_dl10019(link);
- if (hw_info == NULL)
- hw_info = get_ax88190(link);
- if (hw_info == NULL)
- hw_info = get_hwired(link);
-
- if (hw_info == NULL) {
+ local_hw_info = get_hwinfo(link);
+ if (local_hw_info == NULL)
+ local_hw_info = get_prom(link);
+ if (local_hw_info == NULL)
+ local_hw_info = get_dl10019(link);
+ if (local_hw_info == NULL)
+ local_hw_info = get_ax88190(link);
+ if (local_hw_info == NULL)
+ local_hw_info = get_hwired(link);
+
+ if (local_hw_info == NULL) {
printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
" address for io base %#3lx\n", dev->base_addr);
goto failed;
}
- info->flags = hw_info->flags;
+ info->flags = local_hw_info->flags;
/* Check for user overrides */
info->flags |= (delay_output) ? DELAY_OUTPUT : 0;
if ((link->manf_id == MANFID_SOCKET) &&
@@ -756,7 +756,7 @@ static int pcnet_resume(struct pcmcia_device *link)
#define MDIO_DATA_READ 0x10
#define MDIO_MASK 0x0f
-static void mdio_sync(kio_addr_t addr)
+static void mdio_sync(unsigned int addr)
{
int bits, mask = inb(addr) & MDIO_MASK;
for (bits = 0; bits < 32; bits++) {
@@ -765,7 +765,7 @@ static void mdio_sync(kio_addr_t addr)
}
}
-static int mdio_read(kio_addr_t addr, int phy_id, int loc)
+static int mdio_read(unsigned int addr, int phy_id, int loc)
{
u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
int i, retval = 0, mask = inb(addr) & MDIO_MASK;
@@ -784,7 +784,7 @@ static int mdio_read(kio_addr_t addr, int phy_id, int loc)
return (retval>>1) & 0xffff;
}
-static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
+static void mdio_write(unsigned int addr, int phy_id, int loc, int value)
{
u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
int i, mask = inb(addr) & MDIO_MASK;
@@ -818,10 +818,10 @@ static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
#define DL19FDUPLX 0x0400 /* DL10019 Full duplex mode */
-static int read_eeprom(kio_addr_t ioaddr, int location)
+static int read_eeprom(unsigned int ioaddr, int location)
{
int i, retval = 0;
- kio_addr_t ee_addr = ioaddr + DLINK_EEPROM;
+ unsigned int ee_addr = ioaddr + DLINK_EEPROM;
int read_cmd = location | (EE_READ_CMD << 8);
outb(0, ee_addr);
@@ -852,10 +852,10 @@ static int read_eeprom(kio_addr_t ioaddr, int location)
In ASIC mode, EE_ADOT is used to output the data to the ASIC.
*/
-static void write_asic(kio_addr_t ioaddr, int location, short asic_data)
+static void write_asic(unsigned int ioaddr, int location, short asic_data)
{
int i;
- kio_addr_t ee_addr = ioaddr + DLINK_EEPROM;
+ unsigned int ee_addr = ioaddr + DLINK_EEPROM;
short dataval;
int read_cmd = location | (EE_READ_CMD << 8);
@@ -897,7 +897,7 @@ static void write_asic(kio_addr_t ioaddr, int location, short asic_data)
static void set_misc_reg(struct net_device *dev)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
pcnet_dev_t *info = PRIV(dev);
u_char tmp;
@@ -936,7 +936,7 @@ static void set_misc_reg(struct net_device *dev)
static void mii_phy_probe(struct net_device *dev)
{
pcnet_dev_t *info = PRIV(dev);
- kio_addr_t mii_addr = dev->base_addr + DLINK_GPIO;
+ unsigned int mii_addr = dev->base_addr + DLINK_GPIO;
int i;
u_int tmp, phyid;
@@ -1014,7 +1014,7 @@ static int pcnet_close(struct net_device *dev)
static void pcnet_reset_8390(struct net_device *dev)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
int i;
ei_status.txing = ei_status.dmaing = 0;
@@ -1074,8 +1074,8 @@ static void ei_watchdog(u_long arg)
{
struct net_device *dev = (struct net_device *)arg;
pcnet_dev_t *info = PRIV(dev);
- kio_addr_t nic_base = dev->base_addr;
- kio_addr_t mii_addr = nic_base + DLINK_GPIO;
+ unsigned int nic_base = dev->base_addr;
+ unsigned int mii_addr = nic_base + DLINK_GPIO;
u_short link;
if (!netif_device_present(dev)) goto reschedule;
@@ -1177,7 +1177,7 @@ static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
pcnet_dev_t *info = PRIV(dev);
u16 *data = (u16 *)&rq->ifr_ifru;
- kio_addr_t mii_addr = dev->base_addr + DLINK_GPIO;
+ unsigned int mii_addr = dev->base_addr + DLINK_GPIO;
switch (cmd) {
case SIOCGMIIPHY:
data[0] = info->phy_id;
@@ -1199,7 +1199,7 @@ static void dma_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr,
int ring_page)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
if (ei_status.dmaing) {
printk(KERN_NOTICE "%s: DMAing conflict in dma_block_input."
@@ -1230,7 +1230,7 @@ static void dma_get_8390_hdr(struct net_device *dev,
static void dma_block_input(struct net_device *dev, int count,
struct sk_buff *skb, int ring_offset)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
int xfer_count = count;
char *buf = skb->data;
@@ -1285,7 +1285,7 @@ static void dma_block_input(struct net_device *dev, int count,
static void dma_block_output(struct net_device *dev, int count,
const u_char *buf, const int start_page)
{
- kio_addr_t nic_base = dev->base_addr;
+ unsigned int nic_base = dev->base_addr;
pcnet_dev_t *info = PRIV(dev);
#ifdef PCMCIA_DEBUG
int retries = 0;
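Editorial sketch (not part of the patch): in pcnet_config() the local pointer is renamed from hw_info to local_hw_info, presumably so it no longer shadows the file-scope hw_info[] table and no longer trips shadow warnings. An illustrative example of the same situation; the struct layout and probe_table() are invented for the demo:

#include <stdio.h>

struct hw_info { int flags; };

static struct hw_info hw_info[4];                /* file-scope table, as in the driver */

static struct hw_info *probe_table(void)
{
        return &hw_info[0];                      /* pretend the probe matched entry 0 */
}

int main(void)
{
        struct hw_info *local_hw_info = probe_table();  /* distinct name: no shadowing */

        printf("flags = %d\n", local_hw_info->flags);
        return 0;
}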
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index c9868e9dac4c..f18eca9831e8 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -295,7 +295,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map);
static void smc_set_xcvr(struct net_device *dev, int if_port);
static void smc_reset(struct net_device *dev);
static void media_check(u_long arg);
-static void mdio_sync(kio_addr_t addr);
+static void mdio_sync(unsigned int addr);
static int mdio_read(struct net_device *dev, int phy_id, int loc);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int value);
static int smc_link_ok(struct net_device *dev);
@@ -601,8 +601,8 @@ static void mot_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
- kio_addr_t iouart = link->io.BasePort2;
+ unsigned int ioaddr = dev->base_addr;
+ unsigned int iouart = link->io.BasePort2;
/* Set UART base address and force map with COR bit 1 */
writeb(iouart & 0xff, smc->base + MOT_UART + CISREG_IOBASE_0);
@@ -621,7 +621,7 @@ static void mot_config(struct pcmcia_device *link)
static int mot_setup(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int i, wait, loop;
u_int addr;
@@ -754,7 +754,7 @@ free_cfg_mem:
static int osi_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- static const kio_addr_t com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
+ static const unsigned int com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 };
int i, j;
link->conf.Attributes |= CONF_ENABLE_SPKR;
@@ -900,7 +900,7 @@ static int smc91c92_resume(struct pcmcia_device *link)
static int check_sig(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int width;
u_short s;
@@ -960,7 +960,7 @@ static int smc91c92_config(struct pcmcia_device *link)
struct smc_private *smc = netdev_priv(dev);
char *name;
int i, j, rev;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_long mir;
DECLARE_MAC_BUF(mac);
@@ -1136,7 +1136,7 @@ static void smc91c92_release(struct pcmcia_device *link)
#define MDIO_DATA_WRITE1 (MDIO_DIR_WRITE | MDIO_DATA_OUT)
#define MDIO_DATA_READ 0x02
-static void mdio_sync(kio_addr_t addr)
+static void mdio_sync(unsigned int addr)
{
int bits;
for (bits = 0; bits < 32; bits++) {
@@ -1147,7 +1147,7 @@ static void mdio_sync(kio_addr_t addr)
static int mdio_read(struct net_device *dev, int phy_id, int loc)
{
- kio_addr_t addr = dev->base_addr + MGMT;
+ unsigned int addr = dev->base_addr + MGMT;
u_int cmd = (0x06<<10)|(phy_id<<5)|loc;
int i, retval = 0;
@@ -1167,7 +1167,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int loc)
static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
- kio_addr_t addr = dev->base_addr + MGMT;
+ unsigned int addr = dev->base_addr + MGMT;
u_int cmd = (0x05<<28)|(phy_id<<23)|(loc<<18)|(1<<17)|value;
int i;
@@ -1193,7 +1193,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
#ifdef PCMCIA_DEBUG
static void smc_dump(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_short i, w, save;
save = inw(ioaddr + BANK_SELECT);
for (w = 0; w < 4; w++) {
@@ -1248,7 +1248,7 @@ static int smc_close(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
struct pcmcia_device *link = smc->p_dev;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
DEBUG(0, "%s: smc_close(), status %4.4x.\n",
dev->name, inw(ioaddr + BANK_SELECT));
@@ -1285,7 +1285,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
{
struct smc_private *smc = netdev_priv(dev);
struct sk_buff *skb = smc->saved_skb;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_char packet_no;
if (!skb) {
@@ -1349,7 +1349,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
static void smc_tx_timeout(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
printk(KERN_NOTICE "%s: SMC91c92 transmit timed out, "
"Tx_status %2.2x status %4.4x.\n",
@@ -1364,7 +1364,7 @@ static void smc_tx_timeout(struct net_device *dev)
static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_short num_pages;
short time_out, ir;
unsigned long flags;
@@ -1434,7 +1434,7 @@ static int smc_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void smc_tx_err(struct net_device * dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int saved_packet = inw(ioaddr + PNR_ARR) & 0xff;
int packet_no = inw(ioaddr + FIFO_PORTS) & 0x7f;
int tx_status;
@@ -1478,7 +1478,7 @@ static void smc_tx_err(struct net_device * dev)
static void smc_eph_irq(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_short card_stats, ephs;
SMC_SELECT_BANK(0);
@@ -1513,7 +1513,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_short saved_bank, saved_pointer, mask, status;
unsigned int handled = 1;
char bogus_cnt = INTR_WORK; /* Work we are willing to do. */
@@ -1633,7 +1633,7 @@ irq_done:
static void smc_rx(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int rx_status;
int packet_length; /* Caution: not frame length, rather words
to transfer from the chip. */
@@ -1738,7 +1738,7 @@ static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
static void set_rx_mode(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct smc_private *smc = netdev_priv(dev);
u_int multicast_table[ 2 ] = { 0, };
unsigned long flags;
@@ -1804,7 +1804,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map)
static void smc_set_xcvr(struct net_device *dev, int if_port)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_short saved_bank;
saved_bank = inw(ioaddr + BANK_SELECT);
@@ -1827,7 +1827,7 @@ static void smc_set_xcvr(struct net_device *dev, int if_port)
static void smc_reset(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct smc_private *smc = netdev_priv(dev);
int i;
@@ -1904,7 +1904,7 @@ static void media_check(u_long arg)
{
struct net_device *dev = (struct net_device *) arg;
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u_short i, media, saved_bank;
u_short link;
unsigned long flags;
@@ -2021,7 +2021,7 @@ reschedule:
static int smc_link_ok(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
struct smc_private *smc = netdev_priv(dev);
if (smc->cfg & CFG_MII_SELECT) {
@@ -2035,7 +2035,7 @@ static int smc_link_ok(struct net_device *dev)
static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
u16 tmp;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
ecmd->supported = (SUPPORTED_TP | SUPPORTED_AUI |
SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full);
@@ -2057,7 +2057,7 @@ static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
u16 tmp;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
if (ecmd->speed != SPEED_10)
return -EINVAL;
@@ -2100,7 +2100,7 @@ static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
@@ -2118,7 +2118,7 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int ret;
@@ -2136,7 +2136,7 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
static u32 smc_get_link(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
u32 ret;
@@ -2164,7 +2164,7 @@ static int smc_nway_reset(struct net_device *dev)
{
struct smc_private *smc = netdev_priv(dev);
if (smc->cfg & CFG_MII_SELECT) {
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 saved_bank = inw(ioaddr + BANK_SELECT);
int res;
@@ -2196,7 +2196,7 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
struct mii_ioctl_data *mii = if_mii(rq);
int rc = 0;
u16 saved_bank;
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
if (!netif_running(dev))
return -EINVAL;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 1f09bea6db5a..d041f831a18d 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -273,12 +273,12 @@ INT_MODULE_PARM(lockup_hack, 0); /* anti lockup hack */
static unsigned maxrx_bytes = 22000;
/* MII management prototypes */
-static void mii_idle(kio_addr_t ioaddr);
-static void mii_putbit(kio_addr_t ioaddr, unsigned data);
-static int mii_getbit(kio_addr_t ioaddr);
-static void mii_wbits(kio_addr_t ioaddr, unsigned data, int len);
-static unsigned mii_rd(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg);
-static void mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg,
+static void mii_idle(unsigned int ioaddr);
+static void mii_putbit(unsigned int ioaddr, unsigned data);
+static int mii_getbit(unsigned int ioaddr);
+static void mii_wbits(unsigned int ioaddr, unsigned data, int len);
+static unsigned mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg);
+static void mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg,
unsigned data, int len);
/*
@@ -403,7 +403,7 @@ next_tuple(struct pcmcia_device *handle, tuple_t *tuple, cisparse_t *parse)
static void
PrintRegisters(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
if (pc_debug > 1) {
int i, page;
@@ -439,7 +439,7 @@ PrintRegisters(struct net_device *dev)
* Turn around for read
*/
static void
-mii_idle(kio_addr_t ioaddr)
+mii_idle(unsigned int ioaddr)
{
PutByte(XIRCREG2_GPR2, 0x04|0); /* drive MDCK low */
udelay(1);
@@ -451,7 +451,7 @@ mii_idle(kio_addr_t ioaddr)
* Write a bit to MDI/O
*/
static void
-mii_putbit(kio_addr_t ioaddr, unsigned data)
+mii_putbit(unsigned int ioaddr, unsigned data)
{
#if 1
if (data) {
@@ -484,7 +484,7 @@ mii_putbit(kio_addr_t ioaddr, unsigned data)
* Get a bit from MDI/O
*/
static int
-mii_getbit(kio_addr_t ioaddr)
+mii_getbit(unsigned int ioaddr)
{
unsigned d;
@@ -497,7 +497,7 @@ mii_getbit(kio_addr_t ioaddr)
}
static void
-mii_wbits(kio_addr_t ioaddr, unsigned data, int len)
+mii_wbits(unsigned int ioaddr, unsigned data, int len)
{
unsigned m = 1 << (len-1);
for (; m; m >>= 1)
@@ -505,7 +505,7 @@ mii_wbits(kio_addr_t ioaddr, unsigned data, int len)
}
static unsigned
-mii_rd(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg)
+mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg)
{
int i;
unsigned data=0, m;
@@ -527,7 +527,8 @@ mii_rd(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg)
}
static void
-mii_wr(kio_addr_t ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len)
+mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, unsigned data,
+ int len)
{
int i;
@@ -726,7 +727,7 @@ xirc2ps_config(struct pcmcia_device * link)
local_info_t *local = netdev_priv(dev);
tuple_t tuple;
cisparse_t parse;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
int err, i;
u_char buf[64];
cistpl_lan_node_id_t *node_id = (cistpl_lan_node_id_t*)parse.funce.data;
@@ -1104,7 +1105,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_char saved_page;
unsigned bytes_rcvd;
unsigned int_status, eth_status, rx_status, tx_status;
@@ -1209,7 +1210,7 @@ xirc2ps_interrupt(int irq, void *dev_id)
unsigned i;
u_long *p = skb_put(skb, pktlen);
register u_long a;
- kio_addr_t edpreg = ioaddr+XIRCREG_EDP-2;
+ unsigned int edpreg = ioaddr+XIRCREG_EDP-2;
for (i=0; i < len ; i += 4, p++) {
a = inl(edpreg);
__asm__("rorl $16,%0\n\t"
@@ -1346,7 +1347,7 @@ static int
do_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
local_info_t *lp = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
int okay;
unsigned freespace;
unsigned pktlen = skb->len;
@@ -1415,7 +1416,7 @@ do_get_stats(struct net_device *dev)
static void
set_addresses(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
local_info_t *lp = netdev_priv(dev);
struct dev_mc_list *dmi = dev->mc_list;
unsigned char *addr;
@@ -1459,7 +1460,7 @@ set_addresses(struct net_device *dev)
static void
set_multicast_list(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
SelectPage(0x42);
if (dev->flags & IFF_PROMISC) { /* snoop */
@@ -1543,7 +1544,7 @@ static int
do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
local_info_t *local = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
u16 *data = (u16 *)&rq->ifr_ifru;
DEBUG(1, "%s: ioctl(%-.6s, %#04x) %04x %04x %04x %04x\n",
@@ -1575,7 +1576,7 @@ static void
hardreset(struct net_device *dev)
{
local_info_t *local = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
SelectPage(4);
udelay(1);
@@ -1592,7 +1593,7 @@ static void
do_reset(struct net_device *dev, int full)
{
local_info_t *local = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
unsigned value;
DEBUG(0, "%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full);
@@ -1753,7 +1754,7 @@ static int
init_mii(struct net_device *dev)
{
local_info_t *local = netdev_priv(dev);
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
unsigned control, status, linkpartner;
int i;
@@ -1826,7 +1827,7 @@ static void
do_powerdown(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
DEBUG(0, "do_powerdown(%p)\n", dev);
@@ -1838,7 +1839,7 @@ do_powerdown(struct net_device *dev)
static int
do_stop(struct net_device *dev)
{
- kio_addr_t ioaddr = dev->base_addr;
+ unsigned int ioaddr = dev->base_addr;
local_info_t *lp = netdev_priv(dev);
struct pcmcia_device *link = lp->p_dev;
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 1b51bb668d39..5aa0a8089694 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -2468,9 +2468,10 @@ static int __init pppol2tp_init(void)
out:
return err;
-
+#ifdef CONFIG_PROC_FS
out_unregister_pppox_proto:
unregister_pppox_proto(PX_PROTO_OL2TP);
+#endif
out_unregister_pppol2tp_proto:
proto_unregister(&pppol2tp_sk_proto);
goto out;
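Editorial sketch (not part of the patch): the pppol2tp hunk wraps the out_unregister_pppox_proto label in #ifdef CONFIG_PROC_FS because the only goto that targets it sits inside the same conditional; left unguarded, the label is defined but unused when CONFIG_PROC_FS is off and the compiler warns. Minimal illustration, with FEATURE standing in for the config option and init_demo() for the real init function:

#include <stdio.h>

#define FEATURE 1                        /* stands in for CONFIG_PROC_FS */

static int init_demo(void)
{
        int err = 0;

#if FEATURE
        if (err)                         /* a failure in the FEATURE-only setup step */
                goto out_feature;
#endif
        return 0;

#if FEATURE
out_feature:                             /* guarded: only defined when it can be reached */
        /* undo the FEATURE-only step here */
        return err;
#endif
}

int main(void)
{
        printf("init_demo() = %d\n", init_demo());
        return 0;
}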
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index dc062367a1c8..9a6295909e43 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -857,7 +857,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
/* On chips without ram buffer, pause is controled by MAC level */
- if (sky2_read8(hw, B2_E_0) == 0) {
+ if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
@@ -1194,7 +1194,7 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
struct sk_buff *skb;
int i;
- if (sky2->hw->flags & SKY2_HW_FIFO_HANG_CHECK) {
+ if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
unsigned char *start;
/*
* Workaround for a bug in FIFO that cause hang
@@ -1387,6 +1387,7 @@ static int sky2_up(struct net_device *dev)
if (ramsize > 0) {
u32 rxspace;
+ hw->flags |= SKY2_HW_RAM_BUFFER;
pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
if (ramsize < 16)
rxspace = ramsize / 2;
@@ -2026,7 +2027,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
synchronize_irq(hw->pdev->irq);
- if (sky2_read8(hw, B2_E_0) == 0)
+ if (!(hw->flags & SKY2_HW_RAM_BUFFER))
sky2_set_tx_stfwd(hw, port);
ctl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2566,7 +2567,7 @@ static void sky2_watchdog(unsigned long arg)
++active;
/* For chips with Rx FIFO, check if stuck */
- if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) &&
+ if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
sky2_rx_hung(dev)) {
pr_info(PFX "%s: receiver hang detected\n",
dev->name);
@@ -2722,11 +2723,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
switch(hw->chip_id) {
case CHIP_ID_YUKON_XL:
- hw->flags = SKY2_HW_GIGABIT
- | SKY2_HW_NEWER_PHY;
- if (hw->chip_rev < 3)
- hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
-
+ hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
break;
case CHIP_ID_YUKON_EC_U:
@@ -2752,7 +2749,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
return -EOPNOTSUPP;
}
- hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK;
+ hw->flags = SKY2_HW_GIGABIT;
break;
case CHIP_ID_YUKON_FE:
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 2bced1a0898f..5ab5c1c7c5aa 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2045,7 +2045,7 @@ struct sky2_hw {
#define SKY2_HW_FIBRE_PHY 0x00000002
#define SKY2_HW_GIGABIT 0x00000004
#define SKY2_HW_NEWER_PHY 0x00000008
-#define SKY2_HW_FIFO_HANG_CHECK 0x00000010
+#define SKY2_HW_RAM_BUFFER 0x00000010
#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
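Editorial sketch (not part of the patch): the sky2 changes replace the repeated sky2_read8(hw, B2_E_0) register probe (and the old FIFO_HANG_CHECK bit) with a single SKY2_HW_RAM_BUFFER capability flag, set once in sky2_up() when a RAM buffer is found and simply tested everywhere else. A generic version of that flag-caching pattern; the structure, probe function and flag value are illustrative only:

#include <stdio.h>

#define HW_RAM_BUFFER 0x00000010u        /* illustrative flag value */

struct hw { unsigned int flags; };

static unsigned int probe_ram_kb(void)
{
        return 48;                       /* pretend the chip reports a 48K buffer */
}

static void hw_up(struct hw *hw)
{
        if (probe_ram_kb() > 0)
                hw->flags |= HW_RAM_BUFFER;      /* decided once, when the port comes up */
}

int main(void)
{
        struct hw hw = { 0 };

        hw_up(&hw);
        if (hw.flags & HW_RAM_BUFFER)            /* later code just tests the cached bit */
                printf("ram buffer present: program FIFO thresholds\n");
        else
                printf("no ram buffer: pause handled at MAC level\n");
        return 0;
}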
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c99ce74a7aff..3af5b92b48c8 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -465,7 +465,7 @@ static struct pci_driver tlan_driver = {
static int __init tlan_probe(void)
{
- static int pad_allocated;
+ int rc = -ENODEV;
printk(KERN_INFO "%s", tlan_banner);
@@ -473,17 +473,22 @@ static int __init tlan_probe(void)
if (TLanPadBuffer == NULL) {
printk(KERN_ERR "TLAN: Could not allocate memory for pad buffer.\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_out;
}
memset(TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE);
- pad_allocated = 1;
TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
/* Use new style PCI probing. Now the kernel will
do most of this for us */
- pci_register_driver(&tlan_driver);
+ rc = pci_register_driver(&tlan_driver);
+
+ if (rc != 0) {
+ printk(KERN_ERR "TLAN: Could not register pci driver.\n");
+ goto err_out_pci_free;
+ }
TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
TLan_EisaProbe();
@@ -493,11 +498,17 @@ static int __init tlan_probe(void)
tlan_have_pci, tlan_have_eisa);
if (TLanDevicesInstalled == 0) {
- pci_unregister_driver(&tlan_driver);
- pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
- return -ENODEV;
+ rc = -ENODEV;
+ goto err_out_pci_unreg;
}
return 0;
+
+err_out_pci_unreg:
+ pci_unregister_driver(&tlan_driver);
+err_out_pci_free:
+ pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
+err_out:
+ return rc;
}
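Editorial sketch (not part of the patch): tlan_probe() is reshaped above into the usual goto-unwind form, where each failure jumps to a label that releases everything acquired so far, in reverse order, and every path returns through a single rc. A skeleton of the pattern in plain C, with malloc() and stub functions standing in for pci_alloc_consistent(), pci_register_driver() and the installed-device check:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *pad_buf;                            /* kept on success, freed at module exit */

static int register_driver(void)  { return 0; }  /* stand-in: registration succeeds */
static void unregister_driver(void) { }
static int devices_found(void)    { return 1; }  /* stand-in: one device was found */

static int probe(void)
{
        int rc;

        pad_buf = malloc(64);                    /* step 1: allocate the pad buffer */
        if (!pad_buf) {
                rc = -ENOMEM;
                goto err_out;
        }

        rc = register_driver();                  /* step 2: register with the bus */
        if (rc)
                goto err_out_free;

        if (!devices_found()) {                  /* step 3: anything to drive? */
                rc = -ENODEV;
                goto err_out_unreg;
        }
        return 0;

err_out_unreg:                                   /* unwind in reverse order of setup */
        unregister_driver();
err_out_free:
        free(pad_buf);
err_out:
        return rc;
}

int main(void)
{
        printf("probe() = %d\n", probe());
        return 0;
}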
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 8fc7274642eb..6b93d0169116 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -441,7 +441,7 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&card->lock,flags);
trigger_transmit(card);
- return -EIO;
+ return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 46339f6bcd00..038c1ef94d2e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -529,9 +529,13 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
if (ifr->ifr_flags & IFF_NO_PI)
tun->flags |= TUN_NO_PI;
+ else
+ tun->flags &= ~TUN_NO_PI;
if (ifr->ifr_flags & IFF_ONE_QUEUE)
tun->flags |= TUN_ONE_QUEUE;
+ else
+ tun->flags &= ~TUN_ONE_QUEUE;
file->private_data = tun;
tun->attached = 1;
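Editorial sketch (not part of the patch): the tun hunk adds else branches that clear TUN_NO_PI and TUN_ONE_QUEUE when the caller's ifr_flags do not request them, so a later TUNSETIFF on the same device cannot inherit stale mode bits. A generic request-to-state flag update, with made-up flag values:

#include <stdio.h>

#define REQ_NO_PI    0x0001u             /* bit in the caller's request, made up */
#define STATE_NO_PI  0x0010u             /* bit in the device state, made up */

static void apply_flags(unsigned int *state, unsigned int req)
{
        if (req & REQ_NO_PI)
                *state |= STATE_NO_PI;
        else
                *state &= ~STATE_NO_PI;  /* explicitly drop the bit when not requested */
}

int main(void)
{
        unsigned int state = 0;

        apply_flags(&state, REQ_NO_PI);  /* first configure: bit gets set */
        apply_flags(&state, 0);          /* reconfigure without the flag */
        printf("state = %#x\n", state);  /* prints 0: no stale bit left behind */
        return 0;
}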
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index e3ba14a19915..c69e654d539f 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -109,7 +109,7 @@ int uec_mdio_reset(struct mii_bus *bus)
struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
unsigned int timeout = PHY_INIT_TIMEOUT;
- spin_lock_bh(&bus->mdio_lock);
+ mutex_lock(&bus->mdio_lock);
/* Reset the management interface */
out_be32(&regs->miimcfg, MIIMCFG_RESET_MANAGEMENT);
@@ -121,7 +121,7 @@ int uec_mdio_reset(struct mii_bus *bus)
while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
cpu_relax();
- spin_unlock_bh(&bus->mdio_lock);
+ mutex_unlock(&bus->mdio_lock);
if (timeout <= 0) {
printk(KERN_ERR "%s: The MII Bus is stuck!\n", bus->name);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e66de0c12fc1..fdc23678117b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -302,10 +302,12 @@ static int virtnet_open(struct net_device *dev)
/* If all buffers were filled by other side before we napi_enabled, we
* won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here. */
- vi->rvq->vq_ops->disable_cb(vi->rvq);
- netif_rx_schedule(vi->dev, &vi->napi);
-
+ * now. virtnet_poll wants re-enable the queue, so we disable here.
+ * We synchronize against interrupts via NAPI_STATE_SCHED */
+ if (netif_rx_schedule_prep(dev, &vi->napi)) {
+ vi->rvq->vq_ops->disable_cb(vi->rvq);
+ __netif_rx_schedule(dev, &vi->napi);
+ }
return 0;
}
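Editorial sketch (not part of the patch): the virtio_net fix only disables the receive-queue callback and schedules NAPI if netif_rx_schedule_prep() actually wins the NAPI_STATE_SCHED bit, so it cannot race an interrupt that already scheduled polling. The shape of that prep-then-commit pattern, with a C11 atomic flag standing in for the NAPI state bit and stub functions for the device operations:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag sched = ATOMIC_FLAG_INIT;     /* stands in for NAPI_STATE_SCHED */

static void disable_cb(void)    { puts("device callbacks disabled"); }
static void schedule_poll(void) { puts("poll scheduled"); }

static void kick_poll(void)
{
        /* "prep": claim the scheduled bit; only the winner touches the device */
        if (!atomic_flag_test_and_set(&sched)) {
                disable_cb();
                schedule_poll();
        }
}

int main(void)
{
        kick_poll();     /* wins the bit: disables callbacks and schedules */
        kick_poll();     /* already scheduled elsewhere: does nothing */
        return 0;
}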
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index d553e6f32851..39951d0c34d6 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -1,7 +1,7 @@
/*
* Generic HDLC support routines for Linux
*
- * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
+ * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -39,7 +39,7 @@
#include <net/net_namespace.h>
-static const char* version = "HDLC support module revision 1.21";
+static const char* version = "HDLC support module revision 1.22";
#undef DEBUG_LINK
@@ -66,19 +66,15 @@ static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *p, struct net_device *orig_dev)
{
- struct hdlc_device_desc *desc = dev_to_desc(dev);
+ struct hdlc_device *hdlc = dev_to_hdlc(dev);
if (dev->nd_net != &init_net) {
kfree_skb(skb);
return 0;
}
- if (desc->netif_rx)
- return desc->netif_rx(skb);
-
- desc->stats.rx_dropped++; /* Shouldn't happen */
- dev_kfree_skb(skb);
- return NET_RX_DROP;
+ BUG_ON(!hdlc->proto->netif_rx);
+ return hdlc->proto->netif_rx(skb);
}
@@ -87,7 +83,7 @@ static inline void hdlc_proto_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto->start)
- return hdlc->proto->start(dev);
+ hdlc->proto->start(dev);
}
@@ -96,7 +92,7 @@ static inline void hdlc_proto_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto->stop)
- return hdlc->proto->stop(dev);
+ hdlc->proto->stop(dev);
}
@@ -263,8 +259,7 @@ static void hdlc_setup(struct net_device *dev)
struct net_device *alloc_hdlcdev(void *priv)
{
struct net_device *dev;
- dev = alloc_netdev(sizeof(struct hdlc_device_desc) +
- sizeof(hdlc_device), "hdlc%d", hdlc_setup);
+ dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d", hdlc_setup);
if (dev)
dev_to_hdlc(dev)->priv = priv;
return dev;
@@ -281,7 +276,7 @@ void unregister_hdlc_device(struct net_device *dev)
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
- int (*rx)(struct sk_buff *skb), size_t size)
+ size_t size)
{
detach_hdlc_protocol(dev);
@@ -297,7 +292,6 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
return -ENOBUFS;
}
dev_to_hdlc(dev)->proto = proto;
- dev_to_desc(dev)->netif_rx = rx;
return 0;
}
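Editorial sketch (not part of the patch): the generic HDLC rework drops the per-device rx hook (and the rx argument to attach_hdlc_protocol()) and instead dispatches through hdlc->proto->netif_rx, making the receive handler part of the protocol ops. A reduced sketch of that ops-table dispatch; the structures and the demo handler are illustrative, not the real hdlc types:

#include <stdio.h>

struct sk_buff;                                  /* opaque for this sketch */

struct proto_ops {
        int (*netif_rx)(struct sk_buff *skb);    /* filled in by the protocol module */
};

static int cisco_rx_demo(struct sk_buff *skb)
{
        (void)skb;
        puts("protocol rx handler called");
        return 0;
}

static const struct proto_ops cisco_ops = {
        .netif_rx = cisco_rx_demo,
};

static int core_rcv(const struct proto_ops *proto, struct sk_buff *skb)
{
        /* the core keeps no rx pointer of its own; it dispatches through the ops */
        return proto->netif_rx(skb);
}

int main(void)
{
        return core_rcv(&cisco_ops, NULL);
}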
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 038a6e748bbf..7133c688cf20 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -250,7 +250,7 @@ static int cisco_rx(struct sk_buff *skb)
return NET_RX_DROP;
rx_error:
- dev_to_desc(dev)->stats.rx_errors++; /* Mark error */
+ dev_to_hdlc(dev)->stats.rx_errors++; /* Mark error */
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
@@ -314,6 +314,7 @@ static struct hdlc_proto proto = {
.stop = cisco_stop,
.type_trans = cisco_type_trans,
.ioctl = cisco_ioctl,
+ .netif_rx = cisco_rx,
.module = THIS_MODULE,
};
@@ -360,7 +361,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
- result = attach_hdlc_protocol(dev, &proto, cisco_rx,
+ result = attach_hdlc_protocol(dev, &proto,
sizeof(struct cisco_state));
if (result)
return result;
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 071a64cacd5c..c4ab0326f911 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -42,7 +42,6 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>
-#include <linux/random.h>
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
@@ -136,6 +135,10 @@ typedef struct pvc_device_struct {
}state;
}pvc_device;
+struct pvc_desc {
+ struct net_device_stats stats;
+ pvc_device *pvc;
+};
struct frad_state {
fr_proto settings;
@@ -171,17 +174,20 @@ static inline void dlci_to_q922(u8 *hdr, u16 dlci)
}
-static inline struct frad_state * state(hdlc_device *hdlc)
+static inline struct frad_state* state(hdlc_device *hdlc)
{
return(struct frad_state *)(hdlc->state);
}
-
-static __inline__ pvc_device* dev_to_pvc(struct net_device *dev)
+static inline struct pvc_desc* pvcdev_to_desc(struct net_device *dev)
{
return dev->priv;
}
+static inline struct net_device_stats* pvc_get_stats(struct net_device *dev)
+{
+ return &pvcdev_to_desc(dev)->stats;
+}
static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
{
@@ -351,7 +357,7 @@ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
static int pvc_open(struct net_device *dev)
{
- pvc_device *pvc = dev_to_pvc(dev);
+ pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
if ((pvc->frad->flags & IFF_UP) == 0)
return -EIO; /* Frad must be UP in order to activate PVC */
@@ -371,7 +377,7 @@ static int pvc_open(struct net_device *dev)
static int pvc_close(struct net_device *dev)
{
- pvc_device *pvc = dev_to_pvc(dev);
+ pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
if (--pvc->open_count == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
@@ -390,7 +396,7 @@ static int pvc_close(struct net_device *dev)
static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- pvc_device *pvc = dev_to_pvc(dev);
+ pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
fr_proto_pvc_info info;
if (ifr->ifr_settings.type == IF_GET_PROTO) {
@@ -416,17 +422,9 @@ static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EINVAL;
}
-
-static inline struct net_device_stats *pvc_get_stats(struct net_device *dev)
-{
- return &dev_to_desc(dev)->stats;
-}
-
-
-
static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
- pvc_device *pvc = dev_to_pvc(dev);
+ pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
struct net_device_stats *stats = pvc_get_stats(dev);
if (pvc->state.active) {
@@ -957,7 +955,7 @@ static int fr_rx(struct sk_buff *skb)
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- dev_to_desc(frad)->stats.rx_dropped++;
+ dev_to_hdlc(frad)->stats.rx_dropped++;
return NET_RX_DROP;
}
@@ -1018,7 +1016,7 @@ static int fr_rx(struct sk_buff *skb)
}
rx_error:
- dev_to_desc(frad)->stats.rx_errors++; /* Mark error */
+ dev_to_hdlc(frad)->stats.rx_errors++; /* Mark error */
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
@@ -1109,11 +1107,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
used = pvc_is_used(pvc);
if (type == ARPHRD_ETHER)
- dev = alloc_netdev(sizeof(struct net_device_stats),
- "pvceth%d", ether_setup);
+ dev = alloc_netdev(sizeof(struct pvc_desc), "pvceth%d",
+ ether_setup);
else
- dev = alloc_netdev(sizeof(struct net_device_stats),
- "pvc%d", pvc_setup);
+ dev = alloc_netdev(sizeof(struct pvc_desc), "pvc%d", pvc_setup);
if (!dev) {
printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
@@ -1122,10 +1119,9 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
return -ENOBUFS;
}
- if (type == ARPHRD_ETHER) {
- memcpy(dev->dev_addr, "\x00\x01", 2);
- get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
- } else {
+ if (type == ARPHRD_ETHER)
+ random_ether_addr(dev->dev_addr);
+ else {
*(__be16*)dev->dev_addr = htons(dlci);
dlci_to_q922(dev->broadcast, dlci);
}
@@ -1137,7 +1133,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
dev->change_mtu = pvc_change_mtu;
dev->mtu = HDLC_MAX_MTU;
dev->tx_queue_len = 0;
- dev->priv = pvc;
+ pvcdev_to_desc(dev)->pvc = pvc;
result = dev_alloc_name(dev, dev->name);
if (result < 0) {
@@ -1219,6 +1215,7 @@ static struct hdlc_proto proto = {
.stop = fr_stop,
.detach = fr_destroy,
.ioctl = fr_ioctl,
+ .netif_rx = fr_rx,
.module = THIS_MODULE,
};
@@ -1277,7 +1274,7 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
return result;
if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
- result = attach_hdlc_protocol(dev, &proto, fr_rx,
+ result = attach_hdlc_protocol(dev, &proto,
sizeof(struct frad_state));
if (result)
return result;
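
In hdlc_fr.c the PVC devices get their own wrapper: dev->priv now points at a struct pvc_desc that carries both the per-PVC stats and the backlink to the pvc_device, replacing the earlier mix of dev_to_desc() stats and a raw pvc pointer in priv. The accessor chain, condensed into one helper (sketch, using only names from the patch):

    static struct net_device_stats *pvc_stats_example(struct net_device *dev)
    {
            struct pvc_desc *desc = pvcdev_to_desc(dev);  /* == dev->priv */

            /* desc->pvc is filled in by fr_add_pvc(); desc->stats is what
             * pvc_get_stats() hands back */
            return &desc->stats;
    }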
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 519e1550e2e7..10396d9686f4 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -122,7 +122,7 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
- result = attach_hdlc_protocol(dev, &proto, NULL,
+ result = attach_hdlc_protocol(dev, &proto,
sizeof(struct ppp_state));
if (result)
return result;
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index e23bc6656267..bbbb819d764c 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -82,7 +82,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
- result = attach_hdlc_protocol(dev, &proto, NULL,
+ result = attach_hdlc_protocol(dev, &proto,
sizeof(raw_hdlc_proto));
if (result)
return result;
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index 8895394e6006..d20c685f6711 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>
-#include <linux/random.h>
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
@@ -96,7 +95,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
- result = attach_hdlc_protocol(dev, &proto, NULL,
+ result = attach_hdlc_protocol(dev, &proto,
sizeof(raw_hdlc_proto));
if (result)
return result;
@@ -107,8 +106,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
ether_setup(dev);
dev->change_mtu = old_ch_mtu;
dev->tx_queue_len = old_qlen;
- memcpy(dev->dev_addr, "\x00\x01", 2);
- get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
+ random_ether_addr(dev->dev_addr);
netif_dormant_off(dev);
return 0;
}
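
Both this hunk and the matching one in hdlc_fr.c swap the hand-rolled MAC randomisation for random_ether_addr(). Roughly, and assuming the etherdevice helper semantics of this era (treat as approximate):

    /* before: fixed 00:01 prefix, random tail */
    memcpy(dev->dev_addr, "\x00\x01", 2);
    get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);

    /* after: random_ether_addr() fills all ETH_ALEN bytes, then clears the
     * multicast bit and sets the locally-administered bit */
    random_ether_addr(dev->dev_addr);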
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index cd7b22f50edc..c15cc11e399b 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -164,17 +164,17 @@ static void x25_close(struct net_device *dev)
static int x25_rx(struct sk_buff *skb)
{
- struct hdlc_device_desc *desc = dev_to_desc(skb->dev);
+ struct hdlc_device *hdlc = dev_to_hdlc(skb->dev);
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
- desc->stats.rx_dropped++;
+ hdlc->stats.rx_dropped++;
return NET_RX_DROP;
}
if (lapb_data_received(skb->dev, skb) == LAPB_OK)
return NET_RX_SUCCESS;
- desc->stats.rx_errors++;
+ hdlc->stats.rx_errors++;
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
@@ -184,6 +184,7 @@ static struct hdlc_proto proto = {
.open = x25_open,
.close = x25_close,
.ioctl = x25_ioctl,
+ .netif_rx = x25_rx,
.module = THIS_MODULE,
};
@@ -211,8 +212,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
if (result)
return result;
- if ((result = attach_hdlc_protocol(dev, &proto,
- x25_rx, 0)) != 0)
+ if ((result = attach_hdlc_protocol(dev, &proto, 0)))
return result;
dev->hard_start_xmit = x25_xmit;
dev->type = ARPHRD_X25;
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 32a24f5c4fa6..08a011f0834a 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -724,6 +724,7 @@ struct b43_wldev {
bool short_preamble; /* TRUE, if short preamble is enabled. */
bool short_slot; /* TRUE, if short slot timing is enabled. */
bool radio_hw_enable; /* saved state of radio hardware enabled state */
+ bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */
/* PHY/Radio device. */
struct b43_phy phy;
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 8a708b77925d..3dfb28a34be9 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -337,7 +337,7 @@ static inline int txring_to_priority(struct b43_dmaring *ring)
return idx_to_prio[index];
}
-u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
+static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
static const u16 map64[] = {
B43_MMIO_DMA64_BASE0,
@@ -356,7 +356,7 @@ u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
B43_MMIO_DMA32_BASE5,
};
- if (dma64bit) {
+ if (type == B43_DMA_64BIT) {
B43_WARN_ON(!(controller_idx >= 0 &&
controller_idx < ARRAY_SIZE(map64)));
return map64[controller_idx];
@@ -437,7 +437,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
* 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
* which accounts for the GFP_DMA flag below.
*/
- if (ring->dma64)
+ if (ring->type == B43_DMA_64BIT)
flags |= GFP_DMA;
ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
&(ring->dmabase), flags);
@@ -459,7 +459,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
}
/* Reset the RX DMA channel */
-int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
+static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
+ enum b43_dmatype type)
{
int i;
u32 value;
@@ -467,12 +468,13 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
might_sleep();
- offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
+ offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
b43_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) {
- offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS;
+ offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
+ B43_DMA32_RXSTATUS;
value = b43_read32(dev, mmio_base + offset);
- if (dma64) {
+ if (type == B43_DMA_64BIT) {
value &= B43_DMA64_RXSTAT;
if (value == B43_DMA64_RXSTAT_DISABLED) {
i = -1;
@@ -496,7 +498,8 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
}
/* Reset the TX DMA channel */
-int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
+static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
+ enum b43_dmatype type)
{
int i;
u32 value;
@@ -505,9 +508,10 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
might_sleep();
for (i = 0; i < 10; i++) {
- offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
+ offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
+ B43_DMA32_TXSTATUS;
value = b43_read32(dev, mmio_base + offset);
- if (dma64) {
+ if (type == B43_DMA_64BIT) {
value &= B43_DMA64_TXSTAT;
if (value == B43_DMA64_TXSTAT_DISABLED ||
value == B43_DMA64_TXSTAT_IDLEWAIT ||
@@ -522,12 +526,13 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
}
msleep(1);
}
- offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
+ offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
b43_write32(dev, mmio_base + offset, 0);
for (i = 0; i < 10; i++) {
- offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
+ offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
+ B43_DMA32_TXSTATUS;
value = b43_read32(dev, mmio_base + offset);
- if (dma64) {
+ if (type == B43_DMA_64BIT) {
value &= B43_DMA64_TXSTAT;
if (value == B43_DMA64_TXSTAT_DISABLED) {
i = -1;
@@ -552,6 +557,33 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
return 0;
}
+/* Check if a DMA mapping address is invalid. */
+static bool b43_dma_mapping_error(struct b43_dmaring *ring,
+ dma_addr_t addr,
+ size_t buffersize)
+{
+ if (unlikely(dma_mapping_error(addr)))
+ return 1;
+
+ switch (ring->type) {
+ case B43_DMA_30BIT:
+ if ((u64)addr + buffersize > (1ULL << 30))
+ return 1;
+ break;
+ case B43_DMA_32BIT:
+ if ((u64)addr + buffersize > (1ULL << 32))
+ return 1;
+ break;
+ case B43_DMA_64BIT:
+ /* Currently we can't have addresses beyond
+ * 64bit in the kernel. */
+ break;
+ }
+
+ /* The address is OK. */
+ return 0;
+}
+
static int setup_rx_descbuffer(struct b43_dmaring *ring,
struct b43_dmadesc_generic *desc,
struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
@@ -567,7 +599,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
if (unlikely(!skb))
return -ENOMEM;
dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
- if (dma_mapping_error(dmaaddr)) {
+ if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
/* ugh. try to realloc in zone_dma */
gfp_flags |= GFP_DMA;
@@ -580,7 +612,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
ring->rx_buffersize, 0);
}
- if (dma_mapping_error(dmaaddr)) {
+ if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
dev_kfree_skb_any(skb);
return -EIO;
}
@@ -645,7 +677,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
u32 trans = ssb_dma_translation(ring->dev->dev);
if (ring->tx) {
- if (ring->dma64) {
+ if (ring->type == B43_DMA_64BIT) {
u64 ringbase = (u64) (ring->dmabase);
addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -677,7 +709,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
err = alloc_initial_descbuffers(ring);
if (err)
goto out;
- if (ring->dma64) {
+ if (ring->type == B43_DMA_64BIT) {
u64 ringbase = (u64) (ring->dmabase);
addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -722,16 +754,16 @@ static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
if (ring->tx) {
b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
- ring->dma64);
- if (ring->dma64) {
+ ring->type);
+ if (ring->type == B43_DMA_64BIT) {
b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
} else
b43_dma_write(ring, B43_DMA32_TXRING, 0);
} else {
b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
- ring->dma64);
- if (ring->dma64) {
+ ring->type);
+ if (ring->type == B43_DMA_64BIT) {
b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
} else
@@ -786,7 +818,8 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
int controller_index,
- int for_tx, int dma64)
+ int for_tx,
+ enum b43_dmatype type)
{
struct b43_dmaring *ring;
int err;
@@ -796,6 +829,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
goto out;
+ ring->type = type;
nr_slots = B43_RXRING_SLOTS;
if (for_tx)
@@ -818,7 +852,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
b43_txhdr_size(dev),
DMA_TO_DEVICE);
- if (dma_mapping_error(dma_test)) {
+ if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) {
/* ugh realloc */
kfree(ring->txhdr_cache);
ring->txhdr_cache = kcalloc(nr_slots,
@@ -832,7 +866,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
b43_txhdr_size(dev),
DMA_TO_DEVICE);
- if (dma_mapping_error(dma_test))
+ if (b43_dma_mapping_error(ring, dma_test,
+ b43_txhdr_size(dev)))
goto err_kfree_txhdr_cache;
}
@@ -843,10 +878,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
ring->dev = dev;
ring->nr_slots = nr_slots;
- ring->mmio_base = b43_dmacontroller_base(dma64, controller_index);
+ ring->mmio_base = b43_dmacontroller_base(type, controller_index);
ring->index = controller_index;
- ring->dma64 = !!dma64;
- if (dma64)
+ if (type == B43_DMA_64BIT)
ring->ops = &dma64_ops;
else
ring->ops = &dma32_ops;
@@ -896,8 +930,8 @@ static void b43_destroy_dmaring(struct b43_dmaring *ring)
if (!ring)
return;
- b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
- (ring->dma64) ? "64" : "32",
+ b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n",
+ (unsigned int)(ring->type),
ring->mmio_base,
(ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
/* Device IRQs are disabled prior entering this function,
@@ -941,12 +975,22 @@ int b43_dma_init(struct b43_wldev *dev)
struct b43_dmaring *ring;
int err;
u64 dmamask;
- int dma64 = 0;
+ enum b43_dmatype type;
dmamask = supported_dma_mask(dev);
- if (dmamask == DMA_64BIT_MASK)
- dma64 = 1;
-
+ switch (dmamask) {
+ default:
+ B43_WARN_ON(1);
+ case DMA_30BIT_MASK:
+ type = B43_DMA_30BIT;
+ break;
+ case DMA_32BIT_MASK:
+ type = B43_DMA_32BIT;
+ break;
+ case DMA_64BIT_MASK:
+ type = B43_DMA_64BIT;
+ break;
+ }
err = ssb_dma_set_mask(dev->dev, dmamask);
if (err) {
b43err(dev->wl, "The machine/kernel does not support "
@@ -958,52 +1002,51 @@ int b43_dma_init(struct b43_wldev *dev)
err = -ENOMEM;
/* setup TX DMA channels. */
- ring = b43_setup_dmaring(dev, 0, 1, dma64);
+ ring = b43_setup_dmaring(dev, 0, 1, type);
if (!ring)
goto out;
dma->tx_ring0 = ring;
- ring = b43_setup_dmaring(dev, 1, 1, dma64);
+ ring = b43_setup_dmaring(dev, 1, 1, type);
if (!ring)
goto err_destroy_tx0;
dma->tx_ring1 = ring;
- ring = b43_setup_dmaring(dev, 2, 1, dma64);
+ ring = b43_setup_dmaring(dev, 2, 1, type);
if (!ring)
goto err_destroy_tx1;
dma->tx_ring2 = ring;
- ring = b43_setup_dmaring(dev, 3, 1, dma64);
+ ring = b43_setup_dmaring(dev, 3, 1, type);
if (!ring)
goto err_destroy_tx2;
dma->tx_ring3 = ring;
- ring = b43_setup_dmaring(dev, 4, 1, dma64);
+ ring = b43_setup_dmaring(dev, 4, 1, type);
if (!ring)
goto err_destroy_tx3;
dma->tx_ring4 = ring;
- ring = b43_setup_dmaring(dev, 5, 1, dma64);
+ ring = b43_setup_dmaring(dev, 5, 1, type);
if (!ring)
goto err_destroy_tx4;
dma->tx_ring5 = ring;
/* setup RX DMA channels. */
- ring = b43_setup_dmaring(dev, 0, 0, dma64);
+ ring = b43_setup_dmaring(dev, 0, 0, type);
if (!ring)
goto err_destroy_tx5;
dma->rx_ring0 = ring;
if (dev->dev->id.revision < 5) {
- ring = b43_setup_dmaring(dev, 3, 0, dma64);
+ ring = b43_setup_dmaring(dev, 3, 0, type);
if (!ring)
goto err_destroy_rx0;
dma->rx_ring3 = ring;
}
- b43dbg(dev->wl, "%d-bit DMA initialized\n",
- (dmamask == DMA_64BIT_MASK) ? 64 :
- (dmamask == DMA_32BIT_MASK) ? 32 : 30);
+ b43dbg(dev->wl, "%u-bit DMA initialized\n",
+ (unsigned int)type);
err = 0;
out:
return err;
@@ -1146,7 +1189,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
hdrsize, 1);
- if (dma_mapping_error(meta_hdr->dmaaddr)) {
+ if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
return -EIO;
@@ -1165,7 +1208,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
- if (dma_mapping_error(meta->dmaaddr)) {
+ if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
ring->current_slot = old_top_slot;
@@ -1179,7 +1222,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
skb = bounce_skb;
meta->skb = skb;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
- if (dma_mapping_error(meta->dmaaddr)) {
+ if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
err = -EIO;
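
The heart of the b43 DMA rework: the dma64 boolean becomes enum b43_dmatype (30, 32 or 64 bit, declared in dma.h below), and every plain dma_mapping_error() test is replaced by b43_dma_mapping_error(), which additionally rejects mappings whose buffer does not fit below the engine's address limit. The added range test in isolation (sketch; as in the patch, a 64-bit engine accepts any address):

    /* true if [addr, addr + len) spills past a 30- or 32-bit DMA window */
    static bool outside_dma_window(dma_addr_t addr, size_t len, unsigned int bits)
    {
            return ((u64)addr + len) > (1ULL << bits);
    }

    /* e.g. for ring->type == B43_DMA_30BIT: outside_dma_window(addr, len, 30) */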
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 58db03ac536e..c0d6b69e6501 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -203,6 +203,12 @@ struct b43_dma_ops {
void (*set_current_rxslot) (struct b43_dmaring * ring, int slot);
};
+enum b43_dmatype {
+ B43_DMA_30BIT = 30,
+ B43_DMA_32BIT = 32,
+ B43_DMA_64BIT = 64,
+};
+
struct b43_dmaring {
/* Lowlevel DMA ops. */
const struct b43_dma_ops *ops;
@@ -235,8 +241,8 @@ struct b43_dmaring {
int index;
/* Boolean. Is this a TX ring? */
bool tx;
- /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */
- bool dma64;
+ /* The type of DMA engine used. */
+ enum b43_dmatype type;
/* Boolean. Is this ring stopped at ieee80211 level? */
bool stopped;
/* Lock, only used for TX. */
@@ -255,8 +261,7 @@ static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)
return b43_read32(ring->dev, ring->mmio_base + offset);
}
-static inline
- void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
+static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
{
b43_write32(ring->dev, ring->mmio_base + offset, value);
}
@@ -264,13 +269,6 @@ static inline
int b43_dma_init(struct b43_wldev *dev);
void b43_dma_free(struct b43_wldev *dev);
-int b43_dmacontroller_rx_reset(struct b43_wldev *dev,
- u16 dmacontroller_mmio_base, int dma64);
-int b43_dmacontroller_tx_reset(struct b43_wldev *dev,
- u16 dmacontroller_mmio_base, int dma64);
-
-u16 b43_dmacontroller_base(int dma64bit, int dmacontroller_idx);
-
void b43_dma_tx_suspend(struct b43_wldev *dev);
void b43_dma_tx_resume(struct b43_wldev *dev);
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index 4b590d8c65ff..0aac1ff511df 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -116,7 +116,10 @@ static void b43_unregister_led(struct b43_led *led)
{
if (!led->dev)
return;
- led_classdev_unregister(&led->led_dev);
+ if (led->dev->suspend_in_progress)
+ led_classdev_unregister_suspended(&led->led_dev);
+ else
+ led_classdev_unregister(&led->led_dev);
b43_led_turn_off(led->dev, led->index, led->activelow);
led->dev = NULL;
}
@@ -144,12 +147,12 @@ static void b43_map_led(struct b43_wldev *dev,
case B43_LED_TRANSFER:
case B43_LED_APTRANSFER:
snprintf(name, sizeof(name),
- "b43-%s:tx", wiphy_name(hw->wiphy));
+ "b43-%s::tx", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->led_tx, name,
ieee80211_get_tx_led_name(hw),
led_index, activelow);
snprintf(name, sizeof(name),
- "b43-%s:rx", wiphy_name(hw->wiphy));
+ "b43-%s::rx", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->led_rx, name,
ieee80211_get_rx_led_name(hw),
led_index, activelow);
@@ -159,7 +162,7 @@ static void b43_map_led(struct b43_wldev *dev,
case B43_LED_RADIO_B:
case B43_LED_MODE_BG:
snprintf(name, sizeof(name),
- "b43-%s:radio", wiphy_name(hw->wiphy));
+ "b43-%s::radio", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->led_radio, name,
b43_rfkill_led_name(dev),
led_index, activelow);
@@ -170,7 +173,7 @@ static void b43_map_led(struct b43_wldev *dev,
case B43_LED_WEIRD:
case B43_LED_ASSOC:
snprintf(name, sizeof(name),
- "b43-%s:assoc", wiphy_name(hw->wiphy));
+ "b43-%s::assoc", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->led_assoc, name,
ieee80211_get_assoc_led_name(hw),
led_index, activelow);
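
The extra colon in the LED names follows the LED class naming scheme, devicename:colour:function, with an empty colour field; for a default mac80211 wiphy name (e.g. phy0) this yields entries like:

    snprintf(name, sizeof(name), "b43-%s::tx", wiphy_name(hw->wiphy));
    /* -> "b43-phy0::tx": devicename "b43-phy0", colour "", function "tx" */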
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 64c154d080d8..ef65c41af00f 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -38,6 +38,7 @@
#include <linux/wireless.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
+#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <asm/unaligned.h>
@@ -2554,10 +2555,10 @@ static int b43_rng_read(struct hwrng *rng, u32 * data)
return (sizeof(u16));
}
-static void b43_rng_exit(struct b43_wl *wl)
+static void b43_rng_exit(struct b43_wl *wl, bool suspended)
{
if (wl->rng_initialized)
- hwrng_unregister(&wl->rng);
+ __hwrng_unregister(&wl->rng, suspended);
}
static int b43_rng_init(struct b43_wl *wl)
@@ -3417,8 +3418,10 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
macctl |= B43_MACCTL_PSM_JMP0;
b43_write32(dev, B43_MMIO_MACCTL, macctl);
- b43_leds_exit(dev);
- b43_rng_exit(dev->wl);
+ if (!dev->suspend_in_progress) {
+ b43_leds_exit(dev);
+ b43_rng_exit(dev->wl, false);
+ }
b43_dma_free(dev);
b43_chip_exit(dev);
b43_radio_turn_off(dev, 1);
@@ -3534,11 +3537,13 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */
b43_upload_card_macaddress(dev);
b43_security_init(dev);
- b43_rng_init(wl);
+ if (!dev->suspend_in_progress)
+ b43_rng_init(wl);
b43_set_status(dev, B43_STAT_INITIALIZED);
- b43_leds_init(dev);
+ if (!dev->suspend_in_progress)
+ b43_leds_init(dev);
out:
return err;
@@ -4135,6 +4140,7 @@ static int b43_suspend(struct ssb_device *dev, pm_message_t state)
b43dbg(wl, "Suspending...\n");
mutex_lock(&wl->mutex);
+ wldev->suspend_in_progress = true;
wldev->suspend_init_status = b43_status(wldev);
if (wldev->suspend_init_status >= B43_STAT_STARTED)
b43_wireless_core_stop(wldev);
@@ -4166,15 +4172,17 @@ static int b43_resume(struct ssb_device *dev)
if (wldev->suspend_init_status >= B43_STAT_STARTED) {
err = b43_wireless_core_start(wldev);
if (err) {
+ b43_leds_exit(wldev);
+ b43_rng_exit(wldev->wl, true);
b43_wireless_core_exit(wldev);
b43err(wl, "Resume failed at core start\n");
goto out;
}
}
- mutex_unlock(&wl->mutex);
-
b43dbg(wl, "Device resumed.\n");
- out:
+ out:
+ wldev->suspend_in_progress = false;
+ mutex_unlock(&wl->mutex);
return err;
}
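
The suspend_in_progress flag added in b43.h gates the LED and hwrng setup/teardown in b43_wireless_core_init()/_exit(), so those auxiliary devices stay registered across a suspend/resume cycle; only the resume-failure path tears them down explicitly, using the suspended-unregister variants. The exit-side gating, condensed from the hunk above:

    /* b43_wireless_core_exit(), condensed: skip teardown while suspending */
    if (!dev->suspend_in_progress) {
            b43_leds_exit(dev);
            b43_rng_exit(dev->wl, false);
    }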
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 83161d9af813..6e08405e8026 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1164,7 +1164,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
{
const struct b43legacy_dma_ops *ops = ring->ops;
u8 *header;
- int slot;
+ int slot, old_top_slot, old_used_slots;
int err;
struct b43legacy_dmadesc_generic *desc;
struct b43legacy_dmadesc_meta *meta;
@@ -1174,6 +1174,9 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
#define SLOTS_PER_PACKET 2
B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
+ old_top_slot = ring->current_slot;
+ old_used_slots = ring->used_slots;
+
/* Get a slot for the header. */
slot = request_slot(ring);
desc = ops->idx2desc(ring, slot, &meta_hdr);
@@ -1181,9 +1184,14 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
header = &(ring->txhdr_cache[slot * sizeof(
struct b43legacy_txhdr_fw3)]);
- b43legacy_generate_txhdr(ring->dev, header,
+ err = b43legacy_generate_txhdr(ring->dev, header,
skb->data, skb->len, ctl,
generate_cookie(ring, slot));
+ if (unlikely(err)) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
+ return err;
+ }
meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
sizeof(struct b43legacy_txhdr_fw3), 1);
@@ -1206,6 +1214,8 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
if (dma_mapping_error(meta->dmaaddr)) {
bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
err = -ENOMEM;
goto out_unmap_hdr;
}
@@ -1216,6 +1226,8 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
meta->skb = skb;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
if (dma_mapping_error(meta->dmaaddr)) {
+ ring->current_slot = old_top_slot;
+ ring->used_slots = old_used_slots;
err = -EIO;
goto out_free_bounce;
}
@@ -1282,6 +1294,13 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
B43legacy_BUG_ON(ring->stopped);
err = dma_tx_fragment(ring, skb, ctl);
+ if (unlikely(err == -ENOKEY)) {
+ /* Drop this packet, as we don't have the encryption key
+ * anymore and must not transmit it unencrypted. */
+ dev_kfree_skb_any(skb);
+ err = 0;
+ goto out_unlock;
+ }
if (unlikely(err)) {
b43legacyerr(dev->wl, "DMA tx mapping failure\n");
goto out_unlock;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index aa20d5d56e2f..53f7f2e97615 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3160,8 +3160,6 @@ static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev)
b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0414, 0x01F4);
ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */
- memset(wl->bssid, 0, ETH_ALEN);
- memset(wl->mac_addr, 0, ETH_ALEN);
b43legacy_upload_card_macaddress(dev);
b43legacy_security_init(dev);
b43legacy_rng_init(wl);
@@ -3263,6 +3261,13 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
* LEDs that are registered later depend on it. */
b43legacy_rfkill_init(dev);
+ /* Kill all old instance specific information to make sure
+ * the card won't use it in the short timeframe between start
+ * and mac80211 reconfiguring it. */
+ memset(wl->bssid, 0, ETH_ALEN);
+ memset(wl->mac_addr, 0, ETH_ALEN);
+ wl->filter_flags = 0;
+
mutex_lock(&wl->mutex);
if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) {
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index e4f4c5c39e33..bcdd54eb2edb 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -181,7 +181,7 @@ union txhdr_union {
struct b43legacy_txhdr_fw3 txhdr_fw3;
};
-static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
+static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
struct sk_buff *skb,
struct b43legacy_pio_txpacket *packet,
size_t txhdr_size)
@@ -189,14 +189,17 @@ static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
union txhdr_union txhdr_data;
u8 *txhdr = NULL;
unsigned int octets;
+ int err;
txhdr = (u8 *)(&txhdr_data.txhdr_fw3);
B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
- b43legacy_generate_txhdr(queue->dev,
+ err = b43legacy_generate_txhdr(queue->dev,
txhdr, skb->data, skb->len,
&packet->txstat.control,
generate_cookie(queue, packet));
+ if (err)
+ return err;
tx_start(queue);
octets = skb->len + txhdr_size;
@@ -204,6 +207,8 @@ static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
octets--;
tx_data(queue, txhdr, (u8 *)skb->data, octets);
tx_complete(queue, skb);
+
+ return 0;
}
static void free_txpacket(struct b43legacy_pio_txpacket *packet,
@@ -226,6 +231,7 @@ static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
struct b43legacy_pioqueue *queue = packet->queue;
struct sk_buff *skb = packet->skb;
u16 octets;
+ int err;
octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3);
if (queue->tx_devq_size < octets) {
@@ -247,8 +253,14 @@ static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
if (queue->tx_devq_used + octets > queue->tx_devq_size)
return -EBUSY;
/* Now poke the device. */
- pio_tx_write_fragment(queue, skb, packet,
+ err = pio_tx_write_fragment(queue, skb, packet,
sizeof(struct b43legacy_txhdr_fw3));
+ if (unlikely(err == -ENOKEY)) {
+ /* Drop this packet, as we don't have the encryption key
+ * anymore and must not transmit it unencrypted. */
+ free_txpacket(packet, 1);
+ return 0;
+ }
/* Account for the packet size.
* (We must not overflow the device TX queue)
@@ -486,6 +498,9 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
queue = parse_cookie(dev, status->cookie, &packet);
B43legacy_WARN_ON(!queue);
+ if (!packet->skb)
+ return;
+
queue->tx_devq_packets--;
queue->tx_devq_used -= (packet->skb->len +
sizeof(struct b43legacy_txhdr_fw3));
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index e20c552442d5..d84408a82db9 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -181,7 +181,7 @@ static u8 b43legacy_calc_fallback_rate(u8 bitrate)
return 0;
}
-static void generate_txhdr_fw3(struct b43legacy_wldev *dev,
+static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
struct b43legacy_txhdr_fw3 *txhdr,
const unsigned char *fragment_data,
unsigned int fragment_len,
@@ -252,6 +252,13 @@ static void generate_txhdr_fw3(struct b43legacy_wldev *dev,
iv_len = min((size_t)txctl->iv_len,
ARRAY_SIZE(txhdr->iv));
memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
+ } else {
+ /* This key is invalid. This might only happen
+ * in a short timeframe after machine resume before
+ * we were able to reconfigure keys.
+ * Drop this packet completely. Do not transmit it
+ * unencrypted to avoid leaking information. */
+ return -ENOKEY;
}
}
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
@@ -345,16 +352,18 @@ static void generate_txhdr_fw3(struct b43legacy_wldev *dev,
/* Apply the bitfields */
txhdr->mac_ctl = cpu_to_le32(mac_ctl);
txhdr->phy_ctl = cpu_to_le16(phy_ctl);
+
+ return 0;
}
-void b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
+int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
u8 *txhdr,
const unsigned char *fragment_data,
unsigned int fragment_len,
const struct ieee80211_tx_control *txctl,
u16 cookie)
{
- generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr,
+ return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr,
fragment_data, fragment_len,
txctl, cookie);
}
diff --git a/drivers/net/wireless/b43legacy/xmit.h b/drivers/net/wireless/b43legacy/xmit.h
index 8a155d0a5d1f..bab47928a0c9 100644
--- a/drivers/net/wireless/b43legacy/xmit.h
+++ b/drivers/net/wireless/b43legacy/xmit.h
@@ -76,7 +76,7 @@ struct b43legacy_txhdr_fw3 {
-void b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
+int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
u8 *txhdr,
const unsigned char *fragment_data,
unsigned int fragment_len,
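
b43legacy_generate_txhdr() now reports failure, and -ENOKEY (returned by generate_txhdr_fw3() when the TX key is invalid, e.g. right after resume) makes both the DMA and PIO paths drop the frame rather than send it unencrypted, after restoring their slot accounting. The caller pattern, condensed from the dma.c hunks above:

    err = b43legacy_generate_txhdr(ring->dev, header, skb->data, skb->len,
                                   ctl, generate_cookie(ring, slot));
    if (unlikely(err)) {
            ring->current_slot = old_top_slot;      /* roll back the ring */
            ring->used_slots = old_used_slots;
            return err;
    }

    /* and in b43legacy_dma_tx(): */
    if (unlikely(err == -ENOKEY)) {
            dev_kfree_skb_any(skb);                 /* drop, do not leak plaintext */
            err = 0;
    }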
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 2ab107f45793..5bf9e00b070c 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -162,7 +162,7 @@ that only one external action is invoked at a time.
#include <linux/firmware.h>
#include <linux/acpi.h>
#include <linux/ctype.h>
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
#include "ipw2100.h"
@@ -1701,7 +1701,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
/* the ipw2100 hardware really doesn't want power management delays
* longer than 175usec
*/
- modify_acceptable_latency("ipw2100", 175);
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 175);
/* If the interrupt is enabled, turn it off... */
spin_lock_irqsave(&priv->low_lock, flags);
@@ -1856,7 +1856,8 @@ static void ipw2100_down(struct ipw2100_priv *priv)
ipw2100_disable_interrupts(priv);
spin_unlock_irqrestore(&priv->low_lock, flags);
- modify_acceptable_latency("ipw2100", INFINITE_LATENCY);
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
+ PM_QOS_DEFAULT_VALUE);
/* We have to signal any supplicant if we are disassociating */
if (associated)
@@ -6554,7 +6555,8 @@ static int __init ipw2100_init(void)
if (ret)
goto out;
- set_acceptable_latency("ipw2100", INFINITE_LATENCY);
+ pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
+ PM_QOS_DEFAULT_VALUE);
#ifdef CONFIG_IPW2100_DEBUG
ipw2100_debug_level = debug;
ret = driver_create_file(&ipw2100_pci_driver.driver,
@@ -6576,7 +6578,7 @@ static void __exit ipw2100_exit(void)
&driver_attr_debug_level);
#endif
pci_unregister_driver(&ipw2100_pci_driver);
- remove_acceptable_latency("ipw2100");
+ pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100");
}
module_init(ipw2100_init);
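
The ipw2100 hunks convert the old global accepted-latency calls to the PM-QoS interface, registering one requirement named "ipw2100" against PM_QOS_CPU_DMA_LATENCY. The four call sites side by side, as used in the patch:

    pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
                           PM_QOS_DEFAULT_VALUE);                    /* module init */
    pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 175); /* up, usec */
    pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100",
                              PM_QOS_DEFAULT_VALUE);                  /* down */
    pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100");     /* module exit */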
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index f55c75712b55..5ee1ad69898b 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -4207,13 +4207,13 @@ static u8 ratio2dB[100] = {
* Conversion assumes that levels are voltages (20*log), not powers (10*log). */
int iwl3945_calc_db_from_ratio(int sig_ratio)
{
- /* Anything above 1000:1 just report as 60 dB */
- if (sig_ratio > 1000)
+ /* 1000:1 or higher just report as 60 dB */
+ if (sig_ratio >= 1000)
return 60;
- /* Above 100:1, divide by 10 and use table,
+ /* 100:1 or higher, divide by 10 and use table,
* add 20 dB to make up for divide by 10 */
- if (sig_ratio > 100)
+ if (sig_ratio >= 100)
return (20 + (int)ratio2dB[sig_ratio/10]);
/* We shouldn't see this */
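
For reference, the adjusted thresholds follow from treating the signal ratio as a voltage ratio, so dB = 20 * log10(ratio):

    20 * log10(1000) = 60 dB                 (hence the cap at sig_ratio >= 1000)
    20 * log10(r)    = 20 * log10(r / 10) + 20
                                             (hence "divide by 10 and add 20 dB"
                                              for 100 <= r < 1000)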
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index d2fa079fbc4c..f479c1af6782 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -195,7 +195,7 @@ static int netwave_pcmcia_config(struct pcmcia_device *arg); /* Runs after card
static void netwave_detach(struct pcmcia_device *p_dev); /* Destroy instance */
/* Hardware configuration */
-static void netwave_doreset(kio_addr_t iobase, u_char __iomem *ramBase);
+static void netwave_doreset(unsigned int iobase, u_char __iomem *ramBase);
static void netwave_reset(struct net_device *dev);
/* Misc device stuff */
@@ -309,7 +309,7 @@ static inline void wait_WOC(unsigned int iobase)
}
static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase,
- kio_addr_t iobase) {
+ unsigned int iobase) {
u_short resultBuffer;
/* if time since last snapshot is > 1 sec. (100 jiffies?) then take
@@ -340,7 +340,7 @@ static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase,
static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev)
{
unsigned long flags;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
struct iw_statistics* wstats;
@@ -471,7 +471,7 @@ static int netwave_set_nwid(struct net_device *dev,
char *extra)
{
unsigned long flags;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
@@ -518,7 +518,7 @@ static int netwave_set_scramble(struct net_device *dev,
char *key)
{
unsigned long flags;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
@@ -621,7 +621,7 @@ static int netwave_get_snap(struct net_device *dev,
char *extra)
{
unsigned long flags;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
@@ -874,7 +874,7 @@ static int netwave_resume(struct pcmcia_device *link)
*
* Proper hardware reset of the card.
*/
-static void netwave_doreset(kio_addr_t ioBase, u_char __iomem *ramBase)
+static void netwave_doreset(unsigned int ioBase, u_char __iomem *ramBase)
{
/* Reset card */
wait_WOC(ioBase);
@@ -892,7 +892,7 @@ static void netwave_reset(struct net_device *dev) {
/* u_char state; */
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
DEBUG(0, "netwave_reset: Done with hardware reset\n");
@@ -973,7 +973,7 @@ static int netwave_hw_xmit(unsigned char* data, int len,
netwave_private *priv = netdev_priv(dev);
u_char __iomem * ramBase = priv->ramBase;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
/* Disable interrupts & save flags */
spin_lock_irqsave(&priv->spinlock, flags);
@@ -1065,7 +1065,7 @@ static int netwave_start_xmit(struct sk_buff *skb, struct net_device *dev) {
*/
static irqreturn_t netwave_interrupt(int irq, void* dev_id)
{
- kio_addr_t iobase;
+ unsigned int iobase;
u_char __iomem *ramBase;
struct net_device *dev = (struct net_device *)dev_id;
struct netwave_private *priv = netdev_priv(dev);
@@ -1235,7 +1235,7 @@ static int netwave_rx(struct net_device *dev)
{
netwave_private *priv = netdev_priv(dev);
u_char __iomem *ramBase = priv->ramBase;
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
u_char rxStatus;
struct sk_buff *skb = NULL;
unsigned int curBuffer,
@@ -1388,7 +1388,7 @@ module_exit(exit_netwave_cs);
*/
static void set_multicast_list(struct net_device *dev)
{
- kio_addr_t iobase = dev->base_addr;
+ unsigned int iobase = dev->base_addr;
netwave_private *priv = netdev_priv(dev);
u_char __iomem * ramBase = priv->ramBase;
u_char rcvMode = 0;
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index c2037b2a05bf..06eea6ab7bf0 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -149,7 +149,7 @@ psa_write(struct net_device * dev,
net_local *lp = netdev_priv(dev);
u_char __iomem *ptr = lp->mem + PSA_ADDR + (o << 1);
int count = 0;
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
/* As there seem to have no flag PSA_BUSY as in the ISA model, we are
* oblige to verify this address to know when the PSA is ready... */
volatile u_char __iomem *verify = lp->mem + PSA_ADDR +
@@ -708,7 +708,7 @@ static void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqua
/* Perform a handover to a new WavePoint */
static void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp)
{
- kio_addr_t base = lp->dev->base_addr;
+ unsigned int base = lp->dev->base_addr;
mm_t m;
unsigned long flags;
@@ -821,7 +821,7 @@ wv_82593_cmd(struct net_device * dev,
int cmd,
int result)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
int status;
int wait_completed;
long spin;
@@ -945,7 +945,7 @@ read_ringbuf(struct net_device * dev,
char * buf,
int len)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
int ring_ptr = addr;
int chunk_len;
char * buf_ptr = buf;
@@ -1096,7 +1096,7 @@ wv_psa_show(psa_t * p)
static void
wv_mmc_show(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
mmr_t m;
@@ -1275,7 +1275,7 @@ wv_packet_info(u_char * p, /* Packet to dump */
static inline void
wv_init_info(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
psa_t psa;
DECLARE_MAC_BUF(mac);
@@ -1294,7 +1294,7 @@ wv_init_info(struct net_device * dev)
#ifdef DEBUG_BASIC_SHOW
/* Now, let's go for the basic stuff */
- printk(KERN_NOTICE "%s: WaveLAN: port %#lx, irq %d, "
+ printk(KERN_NOTICE "%s: WaveLAN: port %#x, irq %d, "
"hw_addr %s",
dev->name, base, dev->irq,
print_mac(mac, dev->dev_addr));
@@ -1828,7 +1828,7 @@ static int wavelan_set_nwid(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
psa_t psa;
mm_t m;
@@ -1918,7 +1918,7 @@ static int wavelan_set_freq(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
unsigned long flags;
int ret;
@@ -1948,7 +1948,7 @@ static int wavelan_get_freq(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
psa_t psa;
unsigned long flags;
@@ -1994,7 +1994,7 @@ static int wavelan_set_sens(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
psa_t psa;
unsigned long flags;
@@ -2060,7 +2060,7 @@ static int wavelan_set_encode(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
unsigned long flags;
psa_t psa;
@@ -2130,7 +2130,7 @@ static int wavelan_get_encode(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
psa_t psa;
unsigned long flags;
@@ -2349,7 +2349,7 @@ static int wavelan_get_range(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
struct iw_range *range = (struct iw_range *) extra;
unsigned long flags;
@@ -2425,7 +2425,7 @@ static int wavelan_set_qthr(struct net_device *dev,
union iwreq_data *wrqu,
char *extra)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local *lp = netdev_priv(dev);
psa_t psa;
unsigned long flags;
@@ -2701,7 +2701,7 @@ static const struct iw_handler_def wavelan_handler_def =
static iw_stats *
wavelan_get_wireless_stats(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
mmr_t m;
iw_stats * wstats;
@@ -2764,7 +2764,7 @@ wv_start_of_frame(struct net_device * dev,
int rfp, /* end of frame */
int wrap) /* start of buffer */
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
int rp;
int len;
@@ -2925,7 +2925,7 @@ wv_packet_read(struct net_device * dev,
static inline void
wv_packet_rcv(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
int newrfp;
int rp;
@@ -3062,7 +3062,7 @@ wv_packet_write(struct net_device * dev,
short length)
{
net_local * lp = netdev_priv(dev);
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
unsigned long flags;
int clen = length;
register u_short xmtdata_base = TX_BASE;
@@ -3183,7 +3183,7 @@ wavelan_packet_xmit(struct sk_buff * skb,
static inline int
wv_mmc_init(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
psa_t psa;
mmw_t m;
int configured;
@@ -3377,7 +3377,7 @@ wv_mmc_init(struct net_device * dev)
static int
wv_ru_stop(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
unsigned long flags;
int status;
@@ -3440,7 +3440,7 @@ wv_ru_stop(struct net_device * dev)
static int
wv_ru_start(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
unsigned long flags;
@@ -3528,7 +3528,7 @@ wv_ru_start(struct net_device * dev)
static int
wv_82593_config(struct net_device * dev)
{
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
net_local * lp = netdev_priv(dev);
struct i82593_conf_block cfblk;
int ret = TRUE;
@@ -3765,7 +3765,7 @@ static int
wv_hw_config(struct net_device * dev)
{
net_local * lp = netdev_priv(dev);
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
unsigned long flags;
int ret = FALSE;
@@ -4047,7 +4047,7 @@ wavelan_interrupt(int irq,
{
struct net_device * dev = dev_id;
net_local * lp;
- kio_addr_t base;
+ unsigned int base;
int status0;
u_int tx_status;
@@ -4306,7 +4306,7 @@ static void
wavelan_watchdog(struct net_device * dev)
{
net_local * lp = netdev_priv(dev);
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
unsigned long flags;
int aborted = FALSE;
@@ -4382,7 +4382,7 @@ wavelan_open(struct net_device * dev)
{
net_local * lp = netdev_priv(dev);
struct pcmcia_device * link = lp->link;
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
#ifdef DEBUG_CALLBACK_TRACE
printk(KERN_DEBUG "%s: ->wavelan_open(dev=0x%x)\n", dev->name,
@@ -4436,7 +4436,7 @@ static int
wavelan_close(struct net_device * dev)
{
struct pcmcia_device * link = ((net_local *)netdev_priv(dev))->link;
- kio_addr_t base = dev->base_addr;
+ unsigned int base = dev->base_addr;
#ifdef DEBUG_CALLBACK_TRACE
printk(KERN_DEBUG "%s: ->wavelan_close(dev=0x%x)\n", dev->name,
diff --git a/drivers/nubus/Makefile b/drivers/nubus/Makefile
index f5ef03cf9879..21bda2031e7e 100644
--- a/drivers/nubus/Makefile
+++ b/drivers/nubus/Makefile
@@ -4,5 +4,4 @@
obj-y := nubus.o
-obj-$(CONFIG_MODULES) += nubus_syms.o
obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c
index f4076aeb2098..2f047e573d86 100644
--- a/drivers/nubus/nubus.c
+++ b/drivers/nubus/nubus.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/module.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/page.h>
@@ -186,6 +187,7 @@ void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent* dirent,
len--;
}
}
+EXPORT_SYMBOL(nubus_get_rsrc_mem);
void nubus_get_rsrc_str(void *dest, const struct nubus_dirent* dirent,
int len)
@@ -200,6 +202,7 @@ void nubus_get_rsrc_str(void *dest, const struct nubus_dirent* dirent,
len--;
}
}
+EXPORT_SYMBOL(nubus_get_rsrc_str);
int nubus_get_root_dir(const struct nubus_board* board,
struct nubus_dir* dir)
@@ -209,6 +212,7 @@ int nubus_get_root_dir(const struct nubus_board* board,
dir->mask = board->lanes;
return 0;
}
+EXPORT_SYMBOL(nubus_get_root_dir);
/* This is a slyly renamed version of the above */
int nubus_get_func_dir(const struct nubus_dev* dev,
@@ -219,6 +223,7 @@ int nubus_get_func_dir(const struct nubus_dev* dev,
dir->mask = dev->board->lanes;
return 0;
}
+EXPORT_SYMBOL(nubus_get_func_dir);
int nubus_get_board_dir(const struct nubus_board* board,
struct nubus_dir* dir)
@@ -237,6 +242,7 @@ int nubus_get_board_dir(const struct nubus_board* board,
return -1;
return 0;
}
+EXPORT_SYMBOL(nubus_get_board_dir);
int nubus_get_subdir(const struct nubus_dirent *ent,
struct nubus_dir *dir)
@@ -246,6 +252,7 @@ int nubus_get_subdir(const struct nubus_dirent *ent,
dir->mask = ent->mask;
return 0;
}
+EXPORT_SYMBOL(nubus_get_subdir);
int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent)
{
@@ -274,12 +281,14 @@ int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent)
ent->mask = nd->mask;
return 0;
}
+EXPORT_SYMBOL(nubus_readdir);
int nubus_rewinddir(struct nubus_dir* dir)
{
dir->ptr = dir->base;
return 0;
}
+EXPORT_SYMBOL(nubus_rewinddir);
/* Driver interface functions, more or less like in pci.c */
@@ -303,6 +312,7 @@ nubus_find_device(unsigned short category,
}
return NULL;
}
+EXPORT_SYMBOL(nubus_find_device);
struct nubus_dev*
nubus_find_type(unsigned short category,
@@ -320,6 +330,7 @@ nubus_find_type(unsigned short category,
}
return NULL;
}
+EXPORT_SYMBOL(nubus_find_type);
struct nubus_dev*
nubus_find_slot(unsigned int slot,
@@ -335,6 +346,7 @@ nubus_find_slot(unsigned int slot,
}
return NULL;
}
+EXPORT_SYMBOL(nubus_find_slot);
int
nubus_find_rsrc(struct nubus_dir* dir, unsigned char rsrc_type,
@@ -346,6 +358,7 @@ nubus_find_rsrc(struct nubus_dir* dir, unsigned char rsrc_type,
}
return -1;
}
+EXPORT_SYMBOL(nubus_find_rsrc);
/* Initialization functions - decide which slots contain stuff worth
looking at, and print out lots and lots of information from the
diff --git a/drivers/nubus/nubus_syms.c b/drivers/nubus/nubus_syms.c
deleted file mode 100644
index 9204f04fbf0b..000000000000
--- a/drivers/nubus/nubus_syms.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Exported symbols for NuBus services
-
- (c) 1999 David Huggins-Daines <dhd@debian.org> */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/nubus.h>
-
-#ifdef CONFIG_PROC_FS
-EXPORT_SYMBOL(nubus_proc_attach_device);
-EXPORT_SYMBOL(nubus_proc_detach_device);
-#endif
-
-MODULE_LICENSE("GPL");
-
-EXPORT_SYMBOL(nubus_find_device);
-EXPORT_SYMBOL(nubus_find_type);
-EXPORT_SYMBOL(nubus_find_slot);
-EXPORT_SYMBOL(nubus_get_root_dir);
-EXPORT_SYMBOL(nubus_get_board_dir);
-EXPORT_SYMBOL(nubus_get_func_dir);
-EXPORT_SYMBOL(nubus_readdir);
-EXPORT_SYMBOL(nubus_find_rsrc);
-EXPORT_SYMBOL(nubus_rewinddir);
-EXPORT_SYMBOL(nubus_get_subdir);
-EXPORT_SYMBOL(nubus_get_rsrc_mem);
-EXPORT_SYMBOL(nubus_get_rsrc_str);
-
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 5271a4a7af26..e07492be1f4a 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -22,6 +22,8 @@
#include <linux/nubus.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
+#include <linux/module.h>
+
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -140,6 +142,7 @@ int nubus_proc_attach_device(struct nubus_dev *dev)
return 0;
}
+EXPORT_SYMBOL(nubus_proc_attach_device);
/* FIXME: this is certainly broken! */
int nubus_proc_detach_device(struct nubus_dev *dev)
@@ -154,6 +157,7 @@ int nubus_proc_detach_device(struct nubus_dev *dev)
}
return 0;
}
+EXPORT_SYMBOL(nubus_proc_detach_device);
void __init proc_bus_nubus_add_devices(void)
{
diff --git a/drivers/of/base.c b/drivers/of/base.c
index b306fef1ac41..80c9deca5f35 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -138,6 +138,31 @@ struct device_node *of_get_parent(const struct device_node *node)
EXPORT_SYMBOL(of_get_parent);
/**
+ * of_get_next_parent - Iterate to a node's parent
+ * @node: Node to get parent of
+ *
+ * This is like of_get_parent() except that it drops the
+ * refcount on the passed node, making it suitable for iterating
+ * through a node's parents.
+ *
+ * Returns a node pointer with refcount incremented, use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_get_next_parent(struct device_node *node)
+{
+ struct device_node *parent;
+
+ if (!node)
+ return NULL;
+
+ read_lock(&devtree_lock);
+ parent = of_node_get(node->parent);
+ of_node_put(node);
+ read_unlock(&devtree_lock);
+ return parent;
+}
+
+/**
* of_get_next_child - Iterate a node childs
* @node: parent node
* @prev: previous child of the parent node, or NULL to get first
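
The new of_get_next_parent() takes a reference on the parent and drops the one on the node passed in, which makes a walk towards the root leak-free. Typical usage (sketch; the loop body is hypothetical):

    struct device_node *np = of_node_get(start);

    while (np) {
            /* ... inspect np ... */
            np = of_get_next_parent(np);    /* puts np, returns its parent
                                             * with the refcount raised   */
    }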
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index b47bb2d7476a..ca09a63a64db 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -85,6 +85,15 @@ static int of_platform_device_resume(struct device * dev)
return error;
}
+static void of_platform_device_shutdown(struct device *dev)
+{
+ struct of_device *of_dev = to_of_device(dev);
+ struct of_platform_driver *drv = to_of_platform_driver(dev->driver);
+
+ if (dev->driver && drv->shutdown)
+ drv->shutdown(of_dev);
+}
+
int of_bus_type_init(struct bus_type *bus, const char *name)
{
bus->name = name;
@@ -93,6 +102,7 @@ int of_bus_type_init(struct bus_type *bus, const char *name)
bus->remove = of_platform_device_remove;
bus->suspend = of_platform_device_suspend;
bus->resume = of_platform_device_resume;
+ bus->shutdown = of_platform_device_shutdown;
return bus_register(bus);
}
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index ca52307b8f40..d08b284de196 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -941,7 +941,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two pass algorithm.
*/
- coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range);
+ coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);
/*
** Program the I/O Pdir
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index 0a1f99a2e93e..97ba8286c596 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -95,12 +95,14 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
*/
static inline unsigned int
-iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
+iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+ struct scatterlist *startsg, int nents,
int (*iommu_alloc_range)(struct ioc *, size_t))
{
struct scatterlist *contig_sg; /* contig chunk head */
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
unsigned int n_mappings = 0;
+ unsigned int max_seg_size = dma_get_max_seg_size(dev);
while (nents > 0) {
@@ -142,6 +144,9 @@ iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
IOVP_SIZE) > DMA_CHUNK_SIZE))
break;
+ if (startsg->length + dma_len > max_seg_size)
+ break;
+
/*
** Next see if we can append the next chunk (i.e.
** it must end on one page and begin on another
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index e527a0e1d6c0..d06627c3f353 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -946,7 +946,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
** w/o this association, we wouldn't have coherent DMA!
** Access to the virtual address is what forces a two pass algorithm.
*/
- coalesced = iommu_coalesce_chunks(ioc, sglist, nents, sba_alloc_range);
+ coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
/*
** Program the I/O Pdir
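
Both PA-RISC IOMMUs (ccio and sba) now hand the device into iommu_coalesce_chunks() so the coalescing loop can respect that device's DMA segment-size limit; the new stop condition is simply:

    unsigned int max_seg_size = dma_get_max_seg_size(dev);

    if (startsg->length + dma_len > max_seg_size)
            break;      /* appending this SG entry would exceed the limit */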
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index e9743d3efaf6..238628d3a854 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1540,6 +1540,38 @@ static void __devinit detect_and_report_smsc (void)
smsc_check(0x3f0,0x44);
smsc_check(0x370,0x44);
}
+
+static void __devinit detect_and_report_it87(void)
+{
+ u16 dev;
+ u8 r;
+ if (verbose_probing)
+ printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n");
+ if (!request_region(0x2e, 1, __FUNCTION__))
+ return;
+ outb(0x87, 0x2e);
+ outb(0x01, 0x2e);
+ outb(0x55, 0x2e);
+ outb(0x55, 0x2e);
+ outb(0x20, 0x2e);
+ dev = inb(0x2f) << 8;
+ outb(0x21, 0x2e);
+ dev |= inb(0x2f);
+ if (dev == 0x8712 || dev == 0x8705 || dev == 0x8715 ||
+ dev == 0x8716 || dev == 0x8718 || dev == 0x8726) {
+ printk(KERN_INFO "IT%04X SuperIO detected.\n", dev);
+ outb(0x07, 0x2E); /* Parallel Port */
+ outb(0x03, 0x2F);
+ outb(0xF0, 0x2E); /* BOOT 0x80 off */
+ r = inb(0x2f);
+ outb(0xF0, 0x2E);
+ outb(r | 8, 0x2F);
+ outb(0x02, 0x2E); /* Lock */
+ outb(0x02, 0x2F);
+
+ release_region(0x2e, 1);
+ }
+}
#endif /* CONFIG_PARPORT_PC_SUPERIO */
static int get_superio_dma (struct parport *p)
@@ -2767,6 +2799,7 @@ enum parport_pc_pci_cards {
netmos_9755,
netmos_9805,
netmos_9815,
+ quatech_sppxp100,
};
@@ -2843,6 +2876,7 @@ static struct parport_pc_pci {
/* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} }, /* untested */
/* netmos_9805 */ { 1, { { 0, -1 }, } }, /* untested */
/* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, /* untested */
+ /* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
static const struct pci_device_id parport_pc_pci_tbl[] = {
@@ -2926,6 +2960,9 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9805 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9815,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
+ /* Quatech SPPXP-100 Parallel port PCI ExpressCard */
+ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
{ 0, } /* terminate list */
};
MODULE_DEVICE_TABLE(pci,parport_pc_pci_tbl);
@@ -3159,24 +3196,25 @@ static void __init parport_pc_find_ports (int autoirq, int autodma)
int count = 0, err;
#ifdef CONFIG_PARPORT_PC_SUPERIO
- detect_and_report_winbond ();
- detect_and_report_smsc ();
+ detect_and_report_it87();
+ detect_and_report_winbond();
+ detect_and_report_smsc();
#endif
/* Onboard SuperIO chipsets that show themselves on the PCI bus. */
- count += parport_pc_init_superio (autoirq, autodma);
+ count += parport_pc_init_superio(autoirq, autodma);
/* PnP ports, skip detection if SuperIO already found them */
if (!count) {
- err = pnp_register_driver (&parport_pc_pnp_driver);
+ err = pnp_register_driver(&parport_pc_pnp_driver);
if (!err)
pnp_registered_parport = 1;
}
/* ISA ports and whatever (see asm/parport.h). */
- parport_pc_find_nonpci_ports (autoirq, autodma);
+ parport_pc_find_nonpci_ports(autoirq, autodma);
- err = pci_register_driver (&parport_pc_pci_driver);
+ err = pci_register_driver(&parport_pc_pci_driver);
if (!err)
pci_registered_parport = 1;
}
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c
index bd6ad8b38168..e2e95b36a603 100644
--- a/drivers/parport/parport_serial.c
+++ b/drivers/parport/parport_serial.c
@@ -77,7 +77,7 @@ static struct parport_pc_pci cards[] __devinitdata = {
/* titan_110l */ { 1, { { 3, -1 }, } },
/* titan_210l */ { 1, { { 3, -1 }, } },
/* netmos_9xx5_combo */ { 1, { { 2, -1 }, }, netmos_parallel_init },
- /* netmos_9855 */ { 1, { { 0, -1 }, }, netmos_parallel_init },
+ /* netmos_9855 */ { 1, { { 2, -1 }, }, netmos_parallel_init },
/* avlab_1s1p */ { 1, { { 1, 2}, } },
/* avlab_1s2p */ { 2, { { 1, 2}, { 3, 4 },} },
/* avlab_2s1p */ { 1, { { 2, 3}, } },
@@ -185,7 +185,7 @@ static struct pciserial_board pci_parport_serial_boards[] __devinitdata = {
.uart_offset = 8,
},
[netmos_9855] = {
- .flags = FL_BASE2 | FL_BASE_BARS,
+ .flags = FL_BASE4 | FL_BASE_BARS,
.num_ports = 1,
.base_baud = 115200,
.uart_offset = 8,
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 91b2dc956be5..8ed26480371f 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <linux/dmar.h>
#include "iova.h"
+#include "intel-iommu.h"
#undef PREFIX
#define PREFIX "DMAR:"
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 4e01df99681a..31fa6c92aa5e 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1088,7 +1088,7 @@ static void dmar_init_reserved_ranges(void)
int i;
u64 addr, size;
- init_iova_domain(&reserved_iova_list);
+ init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
/* IOAPIC ranges shouldn't be accessed by DMA */
iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
@@ -1142,7 +1142,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
int adjust_width, agaw;
unsigned long sagaw;
- init_iova_domain(&domain->iovad);
+ init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
spin_lock_init(&domain->mapping_lock);
domain_reserve_special_ranges(domain);
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
index 459ad1f9dc54..0e4862675ad2 100644
--- a/drivers/pci/intel-iommu.h
+++ b/drivers/pci/intel-iommu.h
@@ -23,10 +23,24 @@
#include <linux/types.h>
#include <linux/msi.h>
+#include <linux/sysdev.h>
#include "iova.h"
#include <linux/io.h>
/*
+ * We need a fixed PAGE_SIZE of 4K irrespective of
+ * arch PAGE_SIZE for IOMMU page tables.
+ */
+#define PAGE_SHIFT_4K (12)
+#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
+#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
+#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+
+#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
+#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
+#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
+
+/*
* Intel IOMMU register specification per version 1.0 public spec.
*/
diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c
index a84571c29360..8de7ab6c6d0c 100644
--- a/drivers/pci/iova.c
+++ b/drivers/pci/iova.c
@@ -9,19 +9,19 @@
#include "iova.h"
void
-init_iova_domain(struct iova_domain *iovad)
+init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
{
spin_lock_init(&iovad->iova_alloc_lock);
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
iovad->cached32_node = NULL;
-
+ iovad->dma_32bit_pfn = pfn_32bit;
}
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
- if ((*limit_pfn != DMA_32BIT_PFN) ||
+ if ((*limit_pfn != iovad->dma_32bit_pfn) ||
(iovad->cached32_node == NULL))
return rb_last(&iovad->rbroot);
else {
@@ -37,7 +37,7 @@ static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
unsigned long limit_pfn, struct iova *new)
{
- if (limit_pfn != DMA_32BIT_PFN)
+ if (limit_pfn != iovad->dma_32bit_pfn)
return;
iovad->cached32_node = &new->node;
}
diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h
index ae3028d5a941..d521b5b7319c 100644
--- a/drivers/pci/iova.h
+++ b/drivers/pci/iova.h
@@ -15,22 +15,9 @@
#include <linux/rbtree.h>
#include <linux/dma-mapping.h>
-/*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
- */
-#define PAGE_SHIFT_4K (12)
-#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
-
/* IO virtual address start page frame number */
#define IOVA_START_PFN (1)
-#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
-#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
-#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
-
/* iova structure */
struct iova {
struct rb_node node;
@@ -44,6 +31,7 @@ struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
struct rb_root rbroot; /* iova domain rbtree root */
struct rb_node *cached32_node; /* Save last alloced node */
+ unsigned long dma_32bit_pfn;
};
struct iova *alloc_iova_mem(void);
@@ -56,7 +44,7 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
-void init_iova_domain(struct iova_domain *iovad);
+void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
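As a sanity check on the 4K-page macros moved into intel-iommu.h above, here is a small standalone program that evaluates them with a 32-bit DMA mask (0xffffffff, the value DMA_32BIT_MASK expands to). It only illustrates the arithmetic behind the pfn_32bit argument now passed to init_iova_domain(); it is not kernel code.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT_4K   (12)
    #define PAGE_SIZE_4K    (1UL << PAGE_SHIFT_4K)
    #define PAGE_MASK_4K    (((uint64_t)-1) << PAGE_SHIFT_4K)
    #define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
    #define IOVA_PFN(addr)  ((addr) >> PAGE_SHIFT_4K)

    int main(void)
    {
            uint64_t dma_32bit_mask = 0xffffffffULL;

            /* 0x12345 rounds up to the next 4K boundary, 0x13000 */
            printf("PAGE_ALIGN_4K(0x12345) = 0x%llx\n",
                   (unsigned long long)PAGE_ALIGN_4K(0x12345ULL));

            /* the PFN handed to init_iova_domain() as the 32-bit limit */
            printf("DMA_32BIT_PFN = 0x%llx\n",
                   (unsigned long long)IOVA_PFN(dma_32bit_mask));  /* 0xfffff */
            return 0;
    }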
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 04aac7782468..ae3df46eaabf 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1451,6 +1451,22 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
}
#endif
+#ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
+int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
+{
+ return dma_set_max_seg_size(&dev->dev, size);
+}
+EXPORT_SYMBOL(pci_set_dma_max_seg_size);
+#endif
+
+#ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY
+int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
+{
+ return dma_set_seg_boundary(&dev->dev, mask);
+}
+EXPORT_SYMBOL(pci_set_dma_seg_boundary);
+#endif
+
/**
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
* @dev: PCI device to query
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 7f5dab34d315..4d23b9fb551b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -933,8 +933,12 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
set_dev_node(&dev->dev, pcibus_to_node(bus));
dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
+ pci_set_dma_max_seg_size(dev, 65536);
+ pci_set_dma_seg_boundary(dev, 0xffffffff);
+
/* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev);
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index eb6abd3f9221..385e145e1acc 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -21,9 +21,9 @@
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/sizes.h>
+#include <asm/gpio.h>
#include <asm/arch/board.h>
-#include <asm/arch/gpio.h>
#include <asm/arch/at91rm9200_mc.h>
@@ -56,7 +56,7 @@ struct at91_cf_socket {
static inline int at91_cf_present(struct at91_cf_socket *cf)
{
- return !at91_get_gpio_value(cf->board->det_pin);
+ return !gpio_get_value(cf->board->det_pin);
}
/*--------------------------------------------------------------------------*/
@@ -100,9 +100,9 @@ static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp)
int vcc = cf->board->vcc_pin;
*sp = SS_DETECT | SS_3VCARD;
- if (!rdy || at91_get_gpio_value(rdy))
+ if (!rdy || gpio_get_value(rdy))
*sp |= SS_READY;
- if (!vcc || at91_get_gpio_value(vcc))
+ if (!vcc || gpio_get_value(vcc))
*sp |= SS_POWERON;
} else
*sp = 0;
@@ -121,10 +121,10 @@ at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
if (cf->board->vcc_pin) {
switch (s->Vcc) {
case 0:
- at91_set_gpio_value(cf->board->vcc_pin, 0);
+ gpio_set_value(cf->board->vcc_pin, 0);
break;
case 33:
- at91_set_gpio_value(cf->board->vcc_pin, 1);
+ gpio_set_value(cf->board->vcc_pin, 1);
break;
default:
return -EINVAL;
@@ -132,7 +132,7 @@ at91_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
}
/* toggle reset if needed */
- at91_set_gpio_value(cf->board->rst_pin, s->flags & SS_RESET);
+ gpio_set_value(cf->board->rst_pin, s->flags & SS_RESET);
pr_debug("%s: Vcc %d, io_irq %d, flags %04x csc %04x\n",
driver_name, s->Vcc, s->io_irq, s->flags, s->csc_mask);
@@ -239,11 +239,24 @@ static int __init at91_cf_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cf);
/* must be a GPIO; ergo must trigger on both edges */
- status = request_irq(board->det_pin, at91_cf_irq, 0, driver_name, cf);
+ status = gpio_request(board->det_pin, "cf_det");
if (status < 0)
goto fail0;
+ status = request_irq(board->det_pin, at91_cf_irq, 0, driver_name, cf);
+ if (status < 0)
+ goto fail00;
device_init_wakeup(&pdev->dev, 1);
+ status = gpio_request(board->rst_pin, "cf_rst");
+ if (status < 0)
+ goto fail0a;
+
+ if (board->vcc_pin) {
+ status = gpio_request(board->vcc_pin, "cf_vcc");
+ if (status < 0)
+ goto fail0b;
+ }
+
/*
* The card driver will request this irq later as needed.
* but it causes lots of "irqNN: nobody cared" messages
@@ -251,16 +264,20 @@ static int __init at91_cf_probe(struct platform_device *pdev)
* (Note: DK board doesn't wire the IRQ pin...)
*/
if (board->irq_pin) {
+ status = gpio_request(board->irq_pin, "cf_irq");
+ if (status < 0)
+ goto fail0c;
status = request_irq(board->irq_pin, at91_cf_irq,
IRQF_SHARED, driver_name, cf);
if (status < 0)
- goto fail0a;
+ goto fail0d;
cf->socket.pci_irq = board->irq_pin;
} else
cf->socket.pci_irq = NR_IRQS + 1;
/* pcmcia layer only remaps "real" memory not iospace */
- cf->socket.io_offset = (unsigned long) ioremap(cf->phys_baseaddr + CF_IO_PHYS, SZ_2K);
+ cf->socket.io_offset = (unsigned long)
+ ioremap(cf->phys_baseaddr + CF_IO_PHYS, SZ_2K);
if (!cf->socket.io_offset) {
status = -ENXIO;
goto fail1;
@@ -296,11 +313,21 @@ fail2:
fail1:
if (cf->socket.io_offset)
iounmap((void __iomem *) cf->socket.io_offset);
- if (board->irq_pin)
+ if (board->irq_pin) {
free_irq(board->irq_pin, cf);
+fail0d:
+ gpio_free(board->irq_pin);
+ }
+fail0c:
+ if (board->vcc_pin)
+ gpio_free(board->vcc_pin);
+fail0b:
+ gpio_free(board->rst_pin);
fail0a:
device_init_wakeup(&pdev->dev, 0);
free_irq(board->det_pin, cf);
+fail00:
+ gpio_free(board->det_pin);
fail0:
kfree(cf);
return status;
@@ -313,13 +340,18 @@ static int __exit at91_cf_remove(struct platform_device *pdev)
struct resource *io = cf->socket.io[0].res;
pcmcia_unregister_socket(&cf->socket);
- if (board->irq_pin)
+ release_mem_region(io->start, io->end + 1 - io->start);
+ iounmap((void __iomem *) cf->socket.io_offset);
+ if (board->irq_pin) {
free_irq(board->irq_pin, cf);
+ gpio_free(board->irq_pin);
+ }
+ if (board->vcc_pin)
+ gpio_free(board->vcc_pin);
+ gpio_free(board->rst_pin);
device_init_wakeup(&pdev->dev, 0);
free_irq(board->det_pin, cf);
- iounmap((void __iomem *) cf->socket.io_offset);
- release_mem_region(io->start, io->end + 1 - io->start);
-
+ gpio_free(board->det_pin);
kfree(cf);
return 0;
}
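The at91_cf probe/remove rework above follows the usual staged-acquire/goto-unwind idiom: each resource claimed in probe (gpio_request, request_irq, ioremap) gets a matching release in reverse order on any later failure. A compact standalone model of that control flow, where acquire_a/acquire_b/release_a are stand-ins rather than kernel APIs:

    #include <stdio.h>

    /* Stand-in acquire/release pairs; in the driver these are gpio_request(),
     * request_irq(), ioremap() and their matching release calls. */
    static int acquire_a(void) { puts("acquire a"); return 0; }
    static int acquire_b(void) { puts("acquire b"); return -1; /* simulated failure */ }
    static void release_a(void) { puts("release a"); }

    static int probe(void)
    {
            int err;

            err = acquire_a();
            if (err)
                    goto fail0;

            err = acquire_b();
            if (err)
                    goto fail1;        /* unwind only what was acquired */

            return 0;

    fail1:
            release_a();
    fail0:
            return err;
    }

    int main(void)
    {
            printf("probe() = %d\n", probe());
            return 0;
    }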
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index a1bd763b4e33..714baaeb6da1 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -143,7 +143,7 @@ int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void
/* Config space? */
if (space == 0) {
if (addr + len > 0x100)
- goto fail;
+ goto failput;
for (; len; addr++, ptr++, len--)
pci_read_config_byte(dev, addr, ptr);
return 0;
@@ -171,6 +171,8 @@ int read_cb_mem(struct pcmcia_socket * s, int space, u_int addr, u_int len, void
memcpy_fromio(ptr, s->cb_cis_virt + addr, len);
return 0;
+failput:
+ pci_dev_put(dev);
fail:
memset(ptr, 0xff, len);
return -1;
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 15c18f5246d6..5a85871f5ee9 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -865,11 +865,12 @@ static int pcmcia_load_firmware(struct pcmcia_device *dev, char * filename)
ds_dbg(1, "trying to load CIS file %s\n", filename);
if (strlen(filename) > 14) {
- printk(KERN_WARNING "pcmcia: CIS filename is too long\n");
+ printk(KERN_WARNING "pcmcia: CIS filename is too long [%s]\n",
+ filename);
return -EINVAL;
}
- snprintf(path, 20, "%s", filename);
+ snprintf(path, sizeof(path), "%s", filename);
if (request_firmware(&fw, path, &dev->dev) == 0) {
if (fw->size >= CISTPL_MAX_CIS_SIZE) {
@@ -1130,8 +1131,6 @@ static int runtime_suspend(struct device *dev)
down(&dev->sem);
rc = pcmcia_dev_suspend(dev, PMSG_SUSPEND);
up(&dev->sem);
- if (!rc)
- dev->power.power_state.event = PM_EVENT_SUSPEND;
return rc;
}
@@ -1142,8 +1141,6 @@ static void runtime_resume(struct device *dev)
down(&dev->sem);
rc = pcmcia_dev_resume(dev);
up(&dev->sem);
- if (!rc)
- dev->power.power_state.event = PM_EVENT_ON;
}
/************************ per-device sysfs output ***************************/
@@ -1265,6 +1262,9 @@ static int pcmcia_dev_suspend(struct device * dev, pm_message_t state)
struct pcmcia_driver *p_drv = NULL;
int ret = 0;
+ if (p_dev->suspended)
+ return 0;
+
ds_dbg(2, "suspending %s\n", dev->bus_id);
if (dev->driver)
@@ -1301,6 +1301,9 @@ static int pcmcia_dev_resume(struct device * dev)
struct pcmcia_driver *p_drv = NULL;
int ret = 0;
+ if (!p_dev->suspended)
+ return 0;
+
ds_dbg(2, "resuming %s\n", dev->bus_id);
if (dev->driver)
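The ds.c hunk above replaces a hard-coded snprintf length with sizeof(path) and logs the offending filename. A tiny standalone demonstration of why sizing snprintf from the destination buffer is the safer idiom; the 20-byte buffer and the sample name are assumptions for illustration only, chosen to match the old magic number:

    #include <stdio.h>

    int main(void)
    {
            char path[20];
            const char *filename = "cis/A-LONG-FIRMWARE-NAME.cis";

            /* snprintf never writes past sizeof(path) and always terminates */
            int n = snprintf(path, sizeof(path), "%s", filename);
            if (n >= (int)sizeof(path))
                    printf("name too long [%s], truncated to \"%s\"\n",
                           filename, path);
            return 0;
    }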
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index df21e2d16f87..749515534cc0 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -82,7 +82,7 @@ struct socket_info {
1 = empty socket,
2 = card but not initialized,
3 = operational card */
- kio_addr_t io_base; /* base io address of the socket */
+ unsigned int io_base; /* base io address of the socket */
struct pcmcia_socket socket;
struct pci_dev *dev; /* The PCI device for the socket */
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 839bb1c0db58..32a2ab119798 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -164,7 +164,7 @@ struct i82365_socket {
u_short type, flags;
struct pcmcia_socket socket;
unsigned int number;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_short psock;
u_char cs_irq, intr;
union {
@@ -238,7 +238,7 @@ static u_char i365_get(u_short sock, u_short reg)
unsigned long flags;
spin_lock_irqsave(&bus_lock,flags);
{
- kio_addr_t port = socket[sock].ioaddr;
+ unsigned int port = socket[sock].ioaddr;
u_char val;
reg = I365_REG(socket[sock].psock, reg);
outb(reg, port); val = inb(port+1);
@@ -252,7 +252,7 @@ static void i365_set(u_short sock, u_short reg, u_char data)
unsigned long flags;
spin_lock_irqsave(&bus_lock,flags);
{
- kio_addr_t port = socket[sock].ioaddr;
+ unsigned int port = socket[sock].ioaddr;
u_char val = I365_REG(socket[sock].psock, reg);
outb(val, port); outb(data, port+1);
spin_unlock_irqrestore(&bus_lock,flags);
@@ -588,7 +588,7 @@ static int to_cycles(int ns)
/*====================================================================*/
-static int __init identify(kio_addr_t port, u_short sock)
+static int __init identify(unsigned int port, u_short sock)
{
u_char val;
int type = -1;
@@ -659,7 +659,7 @@ static int __init identify(kio_addr_t port, u_short sock)
static int __init is_alive(u_short sock)
{
u_char stat;
- kio_addr_t start, stop;
+ unsigned int start, stop;
stat = i365_get(sock, I365_STATUS);
start = i365_get_pair(sock, I365_IO(0)+I365_W_START);
@@ -678,7 +678,7 @@ static int __init is_alive(u_short sock)
/*====================================================================*/
-static void __init add_socket(kio_addr_t port, int psock, int type)
+static void __init add_socket(unsigned int port, int psock, int type)
{
socket[sockets].ioaddr = port;
socket[sockets].psock = psock;
@@ -698,7 +698,7 @@ static void __init add_pcic(int ns, int type)
base = sockets-ns;
if (base == 0) printk("\n");
printk(KERN_INFO " %s", pcic[type].name);
- printk(" ISA-to-PCMCIA at port %#lx ofs 0x%02x",
+ printk(" ISA-to-PCMCIA at port %#x ofs 0x%02x",
t->ioaddr, t->psock*0x40);
printk(", %d socket%s\n", ns, ((ns > 1) ? "s" : ""));
@@ -772,7 +772,7 @@ static struct pnp_dev *i82365_pnpdev;
static void __init isa_probe(void)
{
int i, j, sock, k, ns, id;
- kio_addr_t port;
+ unsigned int port;
#ifdef CONFIG_PNP
struct isapnp_device_id *devid;
struct pnp_dev *dev;
@@ -1053,7 +1053,7 @@ static int i365_set_io_map(u_short sock, struct pccard_io_map *io)
u_char map, ioctl;
debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, "
- "%#lx-%#lx)\n", sock, io->map, io->flags,
+ "%#x-%#x)\n", sock, io->map, io->flags,
io->speed, io->start, io->stop);
map = io->map;
if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 91da15b5a81e..3616da227152 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -58,7 +58,7 @@ typedef struct pcc_socket {
u_short type, flags;
struct pcmcia_socket socket;
unsigned int number;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_long mapaddr;
u_long base; /* PCC register base */
u_char cs_irq1, cs_irq2, intr;
@@ -298,7 +298,8 @@ static int __init is_alive(u_short sock)
return 0;
}
-static void add_pcc_socket(ulong base, int irq, ulong mapaddr, kio_addr_t ioaddr)
+static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
+ unsigned int ioaddr)
{
pcc_socket_t *t = &socket[pcc_sockets];
@@ -738,7 +739,7 @@ static int __init init_m32r_pcc(void)
#else /* CONFIG_PLAT_USRV */
{
ulong base, mapaddr;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
for (i = 0 ; i < M32R_MAX_PCC ; i++) {
base = (ulong)PLD_CFRSTCR;
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index ec4c1253ebbb..2b42b7155e34 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -65,7 +65,7 @@ typedef struct pcc_socket {
u_short type, flags;
struct pcmcia_socket socket;
unsigned int number;
- kio_addr_t ioaddr;
+ unsigned int ioaddr;
u_long mapaddr;
u_long base; /* PCC register base */
u_char cs_irq, intr;
@@ -310,7 +310,8 @@ static int __init is_alive(u_short sock)
return 0;
}
-static void add_pcc_socket(ulong base, int irq, ulong mapaddr, kio_addr_t ioaddr)
+static void add_pcc_socket(ulong base, int irq, ulong mapaddr,
+ unsigned int ioaddr)
{
pcc_socket_t *t = &socket[pcc_sockets];
@@ -491,7 +492,7 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io)
u_char map;
debug(3, "m32r-pcc: SetIOMap(%d, %d, %#2.2x, %d ns, "
- "%#lx-%#lx)\n", sock, io->map, io->flags,
+ "%#x-%#x)\n", sock, io->map, io->flags,
io->speed, io->start, io->stop);
map = io->map;
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 4ea426a25909..ac70d2cb7dd4 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1174,8 +1174,10 @@ static int __init m8xx_probe(struct of_device *ofdev,
pcmcia_schlvl = irq_of_parse_and_map(np, 0);
hwirq = irq_map[pcmcia_schlvl].hwirq;
- if (pcmcia_schlvl < 0)
+ if (pcmcia_schlvl < 0) {
+ iounmap(pcmcia);
return -EINVAL;
+ }
m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra;
m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb;
@@ -1189,6 +1191,7 @@ static int __init m8xx_probe(struct of_device *ofdev,
driver_name, socket)) {
pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n",
pcmcia_schlvl);
+ iounmap(pcmcia);
return -1;
}
@@ -1284,6 +1287,7 @@ static int m8xx_remove(struct of_device *ofdev)
}
for (i = 0; i < PCMCIA_SOCKETS_NO; i++)
pcmcia_unregister_socket(&socket[i].socket);
+ iounmap(pcmcia);
free_irq(pcmcia_schlvl, NULL);
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 0ce39de834c4..1d128fbd1a92 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -65,23 +65,23 @@ extern int ds_pc_debug;
* Special stuff for managing IO windows, because they are scarce
*/
-static int alloc_io_space(struct pcmcia_socket *s, u_int attr, ioaddr_t *base,
- ioaddr_t num, u_int lines)
+static int alloc_io_space(struct pcmcia_socket *s, u_int attr,
+ unsigned int *base, unsigned int num, u_int lines)
{
int i;
- kio_addr_t try, align;
+ unsigned int try, align;
align = (*base) ? (lines ? 1<<lines : 0) : 1;
if (align && (align < num)) {
if (*base) {
- ds_dbg(s, 0, "odd IO request: num %#x align %#lx\n",
+ ds_dbg(s, 0, "odd IO request: num %#x align %#x\n",
num, align);
align = 0;
} else
while (align && (align < num)) align <<= 1;
}
if (*base & ~(align-1)) {
- ds_dbg(s, 0, "odd IO request: base %#x align %#lx\n",
+ ds_dbg(s, 0, "odd IO request: base %#x align %#x\n",
*base, align);
align = 0;
}
@@ -132,8 +132,8 @@ static int alloc_io_space(struct pcmcia_socket *s, u_int attr, ioaddr_t *base,
} /* alloc_io_space */
-static void release_io_space(struct pcmcia_socket *s, ioaddr_t base,
- ioaddr_t num)
+static void release_io_space(struct pcmcia_socket *s, unsigned int base,
+ unsigned int num)
{
int i;
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index bfcaad6021cf..a8d100707721 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -186,15 +186,16 @@ static int sub_interval(struct resource_map *map, u_long base, u_long num)
======================================================================*/
#ifdef CONFIG_PCMCIA_PROBE
-static void do_io_probe(struct pcmcia_socket *s, kio_addr_t base, kio_addr_t num)
+static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
+ unsigned int num)
{
struct resource *res;
struct socket_data *s_data = s->resource_data;
- kio_addr_t i, j, bad;
+ unsigned int i, j, bad;
int any;
u_char *b, hole, most;
- printk(KERN_INFO "cs: IO port probe %#lx-%#lx:",
+ printk(KERN_INFO "cs: IO port probe %#x-%#x:",
base, base+num-1);
/* First, what does a floating port look like? */
@@ -233,7 +234,7 @@ static void do_io_probe(struct pcmcia_socket *s, kio_addr_t base, kio_addr_t num
} else {
if (bad) {
sub_interval(&s_data->io_db, bad, i-bad);
- printk(" %#lx-%#lx", bad, i-1);
+ printk(" %#x-%#x", bad, i-1);
bad = 0;
}
}
@@ -244,7 +245,7 @@ static void do_io_probe(struct pcmcia_socket *s, kio_addr_t base, kio_addr_t num
return;
} else {
sub_interval(&s_data->io_db, bad, i-bad);
- printk(" %#lx-%#lx", bad, i-1);
+ printk(" %#x-%#x", bad, i-1);
}
}
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 749ac3710914..5792bd5c54f9 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -719,7 +719,7 @@ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
u_short base, len, ioctl;
debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, "
- "%#lx-%#lx)\n", psock, io->map, io->flags,
+ "%#x-%#x)\n", psock, io->map, io->flags,
io->speed, io->start, io->stop);
if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) ||
(io->stop < io->start)) return -EINVAL;
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index a262762c5b88..12a1645a2e43 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -161,8 +161,7 @@ static int pnp_bus_suspend(struct device *dev, pm_message_t state)
return error;
}
- if (!(pnp_drv->flags & PNP_DRIVER_RES_DO_NOT_CHANGE) &&
- pnp_can_disable(pnp_dev)) {
+ if (pnp_can_disable(pnp_dev)) {
error = pnp_stop_dev(pnp_dev);
if (error)
return error;
@@ -185,14 +184,17 @@ static int pnp_bus_resume(struct device *dev)
if (pnp_dev->protocol && pnp_dev->protocol->resume)
pnp_dev->protocol->resume(pnp_dev);
- if (!(pnp_drv->flags & PNP_DRIVER_RES_DO_NOT_CHANGE)) {
+ if (pnp_can_write(pnp_dev)) {
error = pnp_start_dev(pnp_dev);
if (error)
return error;
}
- if (pnp_drv->resume)
- return pnp_drv->resume(pnp_dev);
+ if (pnp_drv->resume) {
+ error = pnp_drv->resume(pnp_dev);
+ if (error)
+ return error;
+ }
return 0;
}
diff --git a/drivers/pnp/interface.c b/drivers/pnp/interface.c
index 31548044fdde..982658477a58 100644
--- a/drivers/pnp/interface.c
+++ b/drivers/pnp/interface.c
@@ -10,9 +10,12 @@
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/types.h>
+#include <linux/pnp.h>
#include <linux/stat.h>
#include <linux/ctype.h>
#include <linux/slab.h>
+#include <linux/mutex.h>
+
#include <asm/uaccess.h>
#include "base.h"
@@ -315,8 +318,6 @@ static ssize_t pnp_show_current_resources(struct device *dmdev,
return ret;
}
-extern struct semaphore pnp_res_mutex;
-
static ssize_t
pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
const char *ubuf, size_t count)
@@ -361,10 +362,10 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
goto done;
}
if (!strnicmp(buf, "get", 3)) {
- down(&pnp_res_mutex);
+ mutex_lock(&pnp_res_mutex);
if (pnp_can_read(dev))
dev->protocol->get(dev, &dev->res);
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
goto done;
}
if (!strnicmp(buf, "set", 3)) {
@@ -373,7 +374,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
goto done;
buf += 3;
pnp_init_resource_table(&dev->res);
- down(&pnp_res_mutex);
+ mutex_lock(&pnp_res_mutex);
while (1) {
while (isspace(*buf))
++buf;
@@ -455,7 +456,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
}
break;
}
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
goto done;
}
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index c6b3d4e63ccc..c28caf272c11 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -12,9 +12,10 @@
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/mutex.h>
#include "base.h"
-DECLARE_MUTEX(pnp_res_mutex);
+DEFINE_MUTEX(pnp_res_mutex);
static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
{
@@ -297,7 +298,7 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
if (!pnp_can_configure(dev))
return -ENODEV;
- down(&pnp_res_mutex);
+ mutex_lock(&pnp_res_mutex);
pnp_clean_resource_table(&dev->res); /* start with a fresh slate */
if (dev->independent) {
port = dev->independent->port;
@@ -366,12 +367,12 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
} else if (dev->dependent)
goto fail;
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
return 1;
fail:
pnp_clean_resource_table(&dev->res);
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
return 0;
}
@@ -396,7 +397,7 @@ int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res,
return -ENOMEM;
*bak = dev->res;
- down(&pnp_res_mutex);
+ mutex_lock(&pnp_res_mutex);
dev->res = *res;
if (!(mode & PNP_CONFIG_FORCE)) {
for (i = 0; i < PNP_MAX_PORT; i++) {
@@ -416,14 +417,14 @@ int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res,
goto fail;
}
}
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
kfree(bak);
return 0;
fail:
dev->res = *bak;
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
kfree(bak);
return -EINVAL;
}
@@ -513,7 +514,7 @@ int pnp_activate_dev(struct pnp_dev *dev)
int error;
if (dev->active)
- return 0; /* the device is already active */
+ return 0;
/* ensure resources are allocated */
if (pnp_auto_config_dev(dev))
@@ -524,7 +525,7 @@ int pnp_activate_dev(struct pnp_dev *dev)
return error;
dev->active = 1;
- return 1;
+ return 0;
}
/**
@@ -538,7 +539,7 @@ int pnp_disable_dev(struct pnp_dev *dev)
int error;
if (!dev->active)
- return 0; /* the device is already disabled */
+ return 0;
error = pnp_stop_dev(dev);
if (error)
@@ -547,11 +548,11 @@ int pnp_disable_dev(struct pnp_dev *dev)
dev->active = 0;
/* release the resources so that other devices can use them */
- down(&pnp_res_mutex);
+ mutex_lock(&pnp_res_mutex);
pnp_clean_resource_table(&dev->res);
- up(&pnp_res_mutex);
+ mutex_unlock(&pnp_res_mutex);
- return 1;
+ return 0;
}
/**
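The pnp changes above convert pnp_res_mutex from a semaphore used as a lock (DECLARE_MUTEX/down/up) to a proper struct mutex (DEFINE_MUTEX/mutex_lock/mutex_unlock); the critical-section shape is unchanged. As a rough userspace analogue of that pattern, with pthreads standing in for the kernel mutex API and a plain int standing in for the resource table:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t res_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int resource_table;             /* models the shared dev->res data */

    /* Every reader/writer of the shared table takes the same lock,
     * mirroring mutex_lock(&pnp_res_mutex)/mutex_unlock(&pnp_res_mutex). */
    static void update_resources(int value)
    {
            pthread_mutex_lock(&res_mutex);
            resource_table = value;
            pthread_mutex_unlock(&res_mutex);
    }

    int main(void)
    {
            update_resources(42);
            printf("resource_table = %d\n", resource_table);
            return 0;
    }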
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index dada89906314..662b4c279cfc 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -183,7 +183,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
if (ACPI_SUCCESS(status))
dev->capabilities |= PNP_CONFIGURABLE;
dev->capabilities |= PNP_READ;
- if (device->flags.dynamic_status)
+ if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE))
dev->capabilities |= PNP_WRITE;
if (device->flags.removable)
dev->capabilities |= PNP_REMOVABLE;
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 6b9840cce0f4..6aa231ef642d 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -391,8 +391,8 @@ acpi_status pnpacpi_parse_allocated_resource(acpi_handle handle,
pnpacpi_allocated_resource, res);
}
-static void pnpacpi_parse_dma_option(struct pnp_option *option,
- struct acpi_resource_dma *p)
+static __init void pnpacpi_parse_dma_option(struct pnp_option *option,
+ struct acpi_resource_dma *p)
{
int i;
struct pnp_dma *dma;
@@ -411,8 +411,8 @@ static void pnpacpi_parse_dma_option(struct pnp_option *option,
pnp_register_dma_resource(option, dma);
}
-static void pnpacpi_parse_irq_option(struct pnp_option *option,
- struct acpi_resource_irq *p)
+static __init void pnpacpi_parse_irq_option(struct pnp_option *option,
+ struct acpi_resource_irq *p)
{
int i;
struct pnp_irq *irq;
@@ -431,8 +431,8 @@ static void pnpacpi_parse_irq_option(struct pnp_option *option,
pnp_register_irq_resource(option, irq);
}
-static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
- struct acpi_resource_extended_irq *p)
+static __init void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
+ struct acpi_resource_extended_irq *p)
{
int i;
struct pnp_irq *irq;
@@ -451,8 +451,8 @@ static void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
pnp_register_irq_resource(option, irq);
}
-static void pnpacpi_parse_port_option(struct pnp_option *option,
- struct acpi_resource_io *io)
+static __init void pnpacpi_parse_port_option(struct pnp_option *option,
+ struct acpi_resource_io *io)
{
struct pnp_port *port;
@@ -470,8 +470,8 @@ static void pnpacpi_parse_port_option(struct pnp_option *option,
pnp_register_port_resource(option, port);
}
-static void pnpacpi_parse_fixed_port_option(struct pnp_option *option,
- struct acpi_resource_fixed_io *io)
+static __init void pnpacpi_parse_fixed_port_option(struct pnp_option *option,
+ struct acpi_resource_fixed_io *io)
{
struct pnp_port *port;
@@ -487,8 +487,8 @@ static void pnpacpi_parse_fixed_port_option(struct pnp_option *option,
pnp_register_port_resource(option, port);
}
-static void pnpacpi_parse_mem24_option(struct pnp_option *option,
- struct acpi_resource_memory24 *p)
+static __init void pnpacpi_parse_mem24_option(struct pnp_option *option,
+ struct acpi_resource_memory24 *p)
{
struct pnp_mem *mem;
@@ -508,8 +508,8 @@ static void pnpacpi_parse_mem24_option(struct pnp_option *option,
pnp_register_mem_resource(option, mem);
}
-static void pnpacpi_parse_mem32_option(struct pnp_option *option,
- struct acpi_resource_memory32 *p)
+static __init void pnpacpi_parse_mem32_option(struct pnp_option *option,
+ struct acpi_resource_memory32 *p)
{
struct pnp_mem *mem;
@@ -529,8 +529,8 @@ static void pnpacpi_parse_mem32_option(struct pnp_option *option,
pnp_register_mem_resource(option, mem);
}
-static void pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
- struct acpi_resource_fixed_memory32 *p)
+static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
+ struct acpi_resource_fixed_memory32 *p)
{
struct pnp_mem *mem;
@@ -549,8 +549,8 @@ static void pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
pnp_register_mem_resource(option, mem);
}
-static void pnpacpi_parse_address_option(struct pnp_option *option,
- struct acpi_resource *r)
+static __init void pnpacpi_parse_address_option(struct pnp_option *option,
+ struct acpi_resource *r)
{
struct acpi_resource_address64 addr, *p = &addr;
acpi_status status;
@@ -596,8 +596,8 @@ struct acpipnp_parse_option_s {
struct pnp_dev *dev;
};
-static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
- void *data)
+static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
+ void *data)
{
int priority = 0;
struct acpipnp_parse_option_s *parse_data = data;
@@ -696,8 +696,8 @@ static acpi_status pnpacpi_option_resource(struct acpi_resource *res,
return AE_OK;
}
-acpi_status pnpacpi_parse_resource_option_data(acpi_handle handle,
- struct pnp_dev * dev)
+acpi_status __init pnpacpi_parse_resource_option_data(acpi_handle handle,
+ struct pnp_dev *dev)
{
acpi_status status;
struct acpipnp_parse_option_s parse_data;
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index e33e03f71084..f7e67197a568 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -315,7 +315,7 @@ struct pnp_protocol pnpbios_protocol = {
.disable = pnpbios_disable_resources,
};
-static int insert_device(struct pnp_bios_node *node)
+static int __init insert_device(struct pnp_bios_node *node)
{
struct list_head *pos;
struct pnp_dev *dev;
diff --git a/drivers/pnp/pnpbios/rsparser.c b/drivers/pnp/pnpbios/rsparser.c
index 3fabf11b0027..caade3531416 100644
--- a/drivers/pnp/pnpbios/rsparser.c
+++ b/drivers/pnp/pnpbios/rsparser.c
@@ -262,8 +262,8 @@ len_err:
* Resource Configuration Options
*/
-static void pnpbios_parse_mem_option(unsigned char *p, int size,
- struct pnp_option *option)
+static __init void pnpbios_parse_mem_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
struct pnp_mem *mem;
@@ -278,8 +278,8 @@ static void pnpbios_parse_mem_option(unsigned char *p, int size,
pnp_register_mem_resource(option, mem);
}
-static void pnpbios_parse_mem32_option(unsigned char *p, int size,
- struct pnp_option *option)
+static __init void pnpbios_parse_mem32_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
struct pnp_mem *mem;
@@ -294,8 +294,8 @@ static void pnpbios_parse_mem32_option(unsigned char *p, int size,
pnp_register_mem_resource(option, mem);
}
-static void pnpbios_parse_fixed_mem32_option(unsigned char *p, int size,
- struct pnp_option *option)
+static __init void pnpbios_parse_fixed_mem32_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
struct pnp_mem *mem;
@@ -309,7 +309,7 @@ static void pnpbios_parse_fixed_mem32_option(unsigned char *p, int size,
pnp_register_mem_resource(option, mem);
}
-static void pnpbios_parse_irq_option(unsigned char *p, int size,
+static __init void pnpbios_parse_irq_option(unsigned char *p, int size,
struct pnp_option *option)
{
struct pnp_irq *irq;
@@ -327,7 +327,7 @@ static void pnpbios_parse_irq_option(unsigned char *p, int size,
pnp_register_irq_resource(option, irq);
}
-static void pnpbios_parse_dma_option(unsigned char *p, int size,
+static __init void pnpbios_parse_dma_option(unsigned char *p, int size,
struct pnp_option *option)
{
struct pnp_dma *dma;
@@ -340,8 +340,8 @@ static void pnpbios_parse_dma_option(unsigned char *p, int size,
pnp_register_dma_resource(option, dma);
}
-static void pnpbios_parse_port_option(unsigned char *p, int size,
- struct pnp_option *option)
+static __init void pnpbios_parse_port_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
struct pnp_port *port;
@@ -356,8 +356,8 @@ static void pnpbios_parse_port_option(unsigned char *p, int size,
pnp_register_port_resource(option, port);
}
-static void pnpbios_parse_fixed_port_option(unsigned char *p, int size,
- struct pnp_option *option)
+static __init void pnpbios_parse_fixed_port_option(unsigned char *p, int size,
+ struct pnp_option *option)
{
struct pnp_port *port;
@@ -371,9 +371,9 @@ static void pnpbios_parse_fixed_port_option(unsigned char *p, int size,
pnp_register_port_resource(option, port);
}
-static unsigned char *pnpbios_parse_resource_option_data(unsigned char *p,
- unsigned char *end,
- struct pnp_dev *dev)
+static __init unsigned char *
+pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
+ struct pnp_dev *dev)
{
unsigned int len, tag;
int priority = 0;
@@ -781,7 +781,8 @@ len_err:
* Core Parsing Functions
*/
-int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node *node)
+int __init pnpbios_parse_data_stream(struct pnp_dev *dev,
+ struct pnp_bios_node *node)
{
unsigned char *p = (char *)node->data;
unsigned char *end = (char *)(node->data + node->size);
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index e903b8c2b1fa..4065139753b6 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/pnp.h>
#include <linux/io.h>
+#include <linux/dmi.h>
#include <linux/kallsyms.h>
#include "base.h"
@@ -108,6 +109,46 @@ static void quirk_sb16audio_resources(struct pnp_dev *dev)
"pnp: SB audio device quirk - increasing port range\n");
}
+static void quirk_supermicro_h8dce_system(struct pnp_dev *dev)
+{
+ int i;
+ static struct dmi_system_id supermicro_h8dce[] = {
+ {
+ .ident = "Supermicro H8DCE",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "H8DCE"),
+ },
+ },
+ { }
+ };
+
+ if (!dmi_check_system(supermicro_h8dce))
+ return;
+
+ /*
+ * On the Supermicro H8DCE, there's a system device with resources
+ * that overlap BAR 6 of the built-in SATA PCI adapter. If the PNP
+ * system device claims them, the sata_nv driver won't be able to.
+ * More details at:
+ * https://bugzilla.redhat.com/show_bug.cgi?id=280641
+ * https://bugzilla.redhat.com/show_bug.cgi?id=313491
+ * http://lkml.org/lkml/2008/1/9/449
+ * http://thread.gmane.org/gmane.linux.acpi.devel/27312
+ */
+ for (i = 0; i < PNP_MAX_MEM; i++) {
+ if (pnp_mem_valid(dev, i) && pnp_mem_len(dev, i) &&
+ (pnp_mem_start(dev, i) & 0xdfef0000) == 0xdfef0000) {
+ dev_warn(&dev->dev, "disabling 0x%llx-0x%llx to prevent"
+ " conflict with sata_nv PCI device\n",
+ (unsigned long long) pnp_mem_start(dev, i),
+ (unsigned long long) (pnp_mem_start(dev, i) +
+ pnp_mem_len(dev, i) - 1));
+ pnp_mem_flags(dev, i) = 0;
+ }
+ }
+}
+
/*
* PnP Quirks
* Cards or devices that need some tweaking due to incomplete resource info
@@ -128,6 +169,8 @@ static struct pnp_fixup pnp_fixups[] = {
{"CTL0043", quirk_sb16audio_resources},
{"CTL0044", quirk_sb16audio_resources},
{"CTL0045", quirk_sb16audio_resources},
+ {"PNP0c01", quirk_supermicro_h8dce_system},
+ {"PNP0c02", quirk_supermicro_h8dce_system},
{""}
};
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index d4824840c5bf..c444d6b10c58 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -106,7 +106,6 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(energy_now),
POWER_SUPPLY_ATTR(energy_avg),
POWER_SUPPLY_ATTR(capacity),
- POWER_SUPPLY_ATTR(capacity_level),
POWER_SUPPLY_ATTR(temp),
POWER_SUPPLY_ATTR(temp_ambient),
POWER_SUPPLY_ATTR(time_to_empty_now),
@@ -116,6 +115,7 @@ static struct device_attribute power_supply_attrs[] = {
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(model_name),
POWER_SUPPLY_ATTR(manufacturer),
+ POWER_SUPPLY_ATTR(serial_number),
};
static ssize_t power_supply_show_static_attrs(struct device *dev,
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 87b3493d88e5..6f2f90ebb020 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -78,23 +78,21 @@ static const struct avset_video_mode {
u32 aspect;
u32 x;
u32 y;
- u32 interlace;
- u32 freq;
} video_mode_table[] = {
{ 0, }, /* auto */
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480I, A_N, 720, 480, 1, 60},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480P, A_N, 720, 480, 0, 60},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_60HZ, A_N, 1280, 720, 0, 60},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_60HZ, A_W, 1920, 1080, 1, 60},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_60HZ, A_W, 1920, 1080, 0, 60},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576I, A_N, 720, 576, 1, 50},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576P, A_N, 720, 576, 0, 50},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_50HZ, A_N, 1280, 720, 0, 50},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_50HZ, A_W, 1920, 1080, 1, 50},
- {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_50HZ, A_W, 1920, 1080, 0, 50},
- { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WXGA, A_W, 1280, 768, 0, 60},
- { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_SXGA, A_N, 1280, 1024, 0, 60},
- { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WUXGA, A_W, 1920, 1200, 0, 60},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480I, A_N, 720, 480},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_480P, A_N, 720, 480},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_60HZ, A_N, 1280, 720},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_60HZ, A_W, 1920, 1080},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_60HZ, A_W, 1920, 1080},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576I, A_N, 720, 576},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_576P, A_N, 720, 576},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_720P_50HZ, A_N, 1280, 720},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080I_50HZ, A_W, 1920, 1080},
+ {YUV444, XRGB, PS3AV_CMD_VIDEO_VID_1080P_50HZ, A_W, 1920, 1080},
+ { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WXGA, A_W, 1280, 768},
+ { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_SXGA, A_N, 1280, 1024},
+ { RGB8, XRGB, PS3AV_CMD_VIDEO_VID_WUXGA, A_W, 1920, 1200},
};
/* supported CIDs */
@@ -544,7 +542,7 @@ static void ps3av_set_videomode_packet(u32 id)
static void ps3av_set_videomode_cont(u32 id, u32 old_id)
{
- static int vesa = 0;
+ static int vesa;
int res;
/* video signal off */
@@ -554,9 +552,9 @@ static void ps3av_set_videomode_cont(u32 id, u32 old_id)
* AV backend needs non-VESA mode setting at least one time
* when VESA mode is used.
*/
- if (vesa == 0 && (id & PS3AV_MODE_MASK) >= 11) {
+ if (vesa == 0 && (id & PS3AV_MODE_MASK) >= PS3AV_MODE_WXGA) {
/* vesa mode */
- ps3av_set_videomode_packet(2); /* 480P */
+ ps3av_set_videomode_packet(PS3AV_MODE_480P);
}
vesa = 1;
@@ -596,20 +594,21 @@ static const struct {
unsigned mask : 19;
unsigned id : 4;
} ps3av_preferred_modes[] = {
- { .mask = PS3AV_RESBIT_WUXGA << SHIFT_VESA, .id = 13 },
- { .mask = PS3AV_RESBIT_1920x1080P << SHIFT_60, .id = 5 },
- { .mask = PS3AV_RESBIT_1920x1080P << SHIFT_50, .id = 10 },
- { .mask = PS3AV_RESBIT_1920x1080I << SHIFT_60, .id = 4 },
- { .mask = PS3AV_RESBIT_1920x1080I << SHIFT_50, .id = 9 },
- { .mask = PS3AV_RESBIT_SXGA << SHIFT_VESA, .id = 12 },
- { .mask = PS3AV_RESBIT_WXGA << SHIFT_VESA, .id = 11 },
- { .mask = PS3AV_RESBIT_1280x720P << SHIFT_60, .id = 3 },
- { .mask = PS3AV_RESBIT_1280x720P << SHIFT_50, .id = 8 },
- { .mask = PS3AV_RESBIT_720x480P << SHIFT_60, .id = 2 },
- { .mask = PS3AV_RESBIT_720x576P << SHIFT_50, .id = 7 },
+ { PS3AV_RESBIT_WUXGA << SHIFT_VESA, PS3AV_MODE_WUXGA },
+ { PS3AV_RESBIT_1920x1080P << SHIFT_60, PS3AV_MODE_1080P60 },
+ { PS3AV_RESBIT_1920x1080P << SHIFT_50, PS3AV_MODE_1080P50 },
+ { PS3AV_RESBIT_1920x1080I << SHIFT_60, PS3AV_MODE_1080I60 },
+ { PS3AV_RESBIT_1920x1080I << SHIFT_50, PS3AV_MODE_1080I50 },
+ { PS3AV_RESBIT_SXGA << SHIFT_VESA, PS3AV_MODE_SXGA },
+ { PS3AV_RESBIT_WXGA << SHIFT_VESA, PS3AV_MODE_WXGA },
+ { PS3AV_RESBIT_1280x720P << SHIFT_60, PS3AV_MODE_720P60 },
+ { PS3AV_RESBIT_1280x720P << SHIFT_50, PS3AV_MODE_720P50 },
+ { PS3AV_RESBIT_720x480P << SHIFT_60, PS3AV_MODE_480P },
+ { PS3AV_RESBIT_720x576P << SHIFT_50, PS3AV_MODE_576P },
};
-static int ps3av_resbit2id(u32 res_50, u32 res_60, u32 res_vesa)
+static enum ps3av_mode_num ps3av_resbit2id(u32 res_50, u32 res_60,
+ u32 res_vesa)
{
unsigned int i;
u32 res_all;
@@ -638,9 +637,9 @@ static int ps3av_resbit2id(u32 res_50, u32 res_60, u32 res_vesa)
return 0;
}
-static int ps3av_hdmi_get_id(struct ps3av_info_monitor *info)
+static enum ps3av_mode_num ps3av_hdmi_get_id(struct ps3av_info_monitor *info)
{
- int id;
+ enum ps3av_mode_num id;
if (safe_mode)
return PS3AV_DEFAULT_HDMI_MODE_ID_REG_60;
@@ -854,7 +853,7 @@ int ps3av_set_video_mode(u32 id)
/* auto mode */
option = id & ~PS3AV_MODE_MASK;
- if ((id & PS3AV_MODE_MASK) == 0) {
+ if ((id & PS3AV_MODE_MASK) == PS3AV_MODE_AUTO) {
id = ps3av_auto_videomode(&ps3av->av_hw_conf);
if (id < 1) {
printk(KERN_ERR "%s: invalid id :%d\n", __func__, id);
@@ -889,36 +888,6 @@ int ps3av_get_mode(void)
EXPORT_SYMBOL_GPL(ps3av_get_mode);
-int ps3av_get_scanmode(int id)
-{
- int size;
-
- id = id & PS3AV_MODE_MASK;
- size = ARRAY_SIZE(video_mode_table);
- if (id > size - 1 || id < 0) {
- printk(KERN_ERR "%s: invalid mode %d\n", __func__, id);
- return -EINVAL;
- }
- return video_mode_table[id].interlace;
-}
-
-EXPORT_SYMBOL_GPL(ps3av_get_scanmode);
-
-int ps3av_get_refresh_rate(int id)
-{
- int size;
-
- id = id & PS3AV_MODE_MASK;
- size = ARRAY_SIZE(video_mode_table);
- if (id > size - 1 || id < 0) {
- printk(KERN_ERR "%s: invalid mode %d\n", __func__, id);
- return -EINVAL;
- }
- return video_mode_table[id].freq;
-}
-
-EXPORT_SYMBOL_GPL(ps3av_get_refresh_rate);
-
/* get resolution by video_mode */
int ps3av_video_mode2res(u32 id, u32 *xres, u32 *yres)
{
@@ -990,7 +959,7 @@ static int ps3av_probe(struct ps3_system_bus_device *dev)
return -ENOMEM;
mutex_init(&ps3av->mutex);
- ps3av->ps3av_mode = 0;
+ ps3av->ps3av_mode = PS3AV_MODE_AUTO;
ps3av->dev = dev;
INIT_WORK(&ps3av->work, ps3avd);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 45e4b9648176..6402d699072b 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -20,6 +20,10 @@ menuconfig RTC_CLASS
if RTC_CLASS
+if GEN_RTC || RTC
+comment "Conflicting RTC option has been selected, check GEN_RTC and RTC"
+endif
+
config RTC_HCTOSYS
bool "Set system time from RTC on startup and resume"
depends on RTC_CLASS = y
@@ -49,7 +53,7 @@ config RTC_HCTOSYS_DEVICE
If the clock you specify here is not battery backed, it may still
be useful to reinitialize system time when resuming from system
- sleep states. Do not specify an RTC here unless it stays powered
+ sleep states. Do not specify an RTC here unless it stays powered
during all this system's supported sleep states.
config RTC_DEBUG
@@ -142,7 +146,7 @@ config RTC_DRV_DS1307
will be called rtc-ds1307.
config RTC_DRV_DS1374
- tristate "Maxim/Dallas Semiconductor DS1374 Real Time Clock"
+ tristate "Dallas/Maxim DS1374"
depends on RTC_CLASS && I2C
help
If you say yes here you get support for Dallas Semiconductor
@@ -162,7 +166,7 @@ config RTC_DRV_DS1672
will be called rtc-ds1672.
config RTC_DRV_MAX6900
- tristate "Maxim 6900"
+ tristate "Maxim MAX6900"
help
If you say yes here you will get support for the
Maxim MAX6900 I2C RTC chip.
@@ -180,10 +184,10 @@ config RTC_DRV_RS5C372
will be called rtc-rs5c372.
config RTC_DRV_ISL1208
- tristate "Intersil 1208"
+ tristate "Intersil ISL1208"
help
If you say yes here you get support for the
- Intersil 1208 RTC chip.
+ Intersil ISL1208 RTC chip.
This driver can also be built as a module. If so, the module
will be called rtc-isl1208.
@@ -220,7 +224,7 @@ config RTC_DRV_PCF8583
will be called rtc-pcf8583.
config RTC_DRV_M41T80
- tristate "ST M41T80 series RTC"
+ tristate "ST M41T80/81/82/83/84/85/87"
help
If you say Y here you will get support for the
ST M41T80 RTC chips series. Currently following chips are
@@ -252,23 +256,32 @@ comment "SPI RTC drivers"
if SPI_MASTER
-config RTC_DRV_RS5C348
- tristate "Ricoh RS5C348A/B"
+config RTC_DRV_MAX6902
+ tristate "Maxim MAX6902"
help
- If you say yes here you get support for the
- Ricoh RS5C348A and RS5C348B RTC chips.
+ If you say yes here you will get support for the
+ Maxim MAX6902 SPI RTC chip.
This driver can also be built as a module. If so, the module
- will be called rtc-rs5c348.
+ will be called rtc-max6902.
-config RTC_DRV_MAX6902
- tristate "Maxim 6902"
+config RTC_DRV_R9701
+ tristate "Epson RTC-9701JE"
help
If you say yes here you will get support for the
- Maxim MAX6902 SPI RTC chip.
+ Epson RTC-9701JE SPI RTC chip.
This driver can also be built as a module. If so, the module
- will be called rtc-max6902.
+ will be called rtc-r9701.
+
+config RTC_DRV_RS5C348
+ tristate "Ricoh RS5C348A/B"
+ help
+ If you say yes here you get support for the
+ Ricoh RS5C348A and RS5C348B RTC chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rs5c348.
endif # SPI_MASTER
@@ -302,34 +315,50 @@ config RTC_DRV_DS1216
help
If you say yes here you get support for the Dallas DS1216 RTC chips.
-config RTC_DRV_DS1553
- tristate "Dallas DS1553"
+config RTC_DRV_DS1302
+ tristate "Dallas DS1302"
+ depends on SH_SECUREEDGE5410
+ help
+ If you say yes here you get support for the Dallas DS1302 RTC chips.
+
+config RTC_DRV_DS1511
+ tristate "Dallas DS1511"
+ depends on RTC_CLASS
help
If you say yes here you get support for the
- Dallas DS1553 timekeeping chip.
+ Dallas DS1511 timekeeping/watchdog chip.
This driver can also be built as a module. If so, the module
- will be called rtc-ds1553.
+ will be called rtc-ds1511.
-config RTC_DRV_STK17TA8
- tristate "Simtek STK17TA8"
- depends on RTC_CLASS
+config RTC_DRV_DS1553
+ tristate "Maxim/Dallas DS1553"
help
If you say yes here you get support for the
- Simtek STK17TA8 timekeeping chip.
+ Maxim/Dallas DS1553 timekeeping chip.
This driver can also be built as a module. If so, the module
- will be called rtc-stk17ta8.
+ will be called rtc-ds1553.
config RTC_DRV_DS1742
- tristate "Dallas DS1742/1743"
+ tristate "Maxim/Dallas DS1742/1743"
help
If you say yes here you get support for the
- Dallas DS1742/1743 timekeeping chip.
+ Maxim/Dallas DS1742/1743 timekeeping chip.
This driver can also be built as a module. If so, the module
will be called rtc-ds1742.
+config RTC_DRV_STK17TA8
+ tristate "Simtek STK17TA8"
+ depends on RTC_CLASS
+ help
+ If you say yes here you get support for the
+ Simtek STK17TA8 timekeeping chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-stk17ta8.
+
config RTC_DRV_M48T86
tristate "ST M48T86/Dallas DS12887"
help
@@ -440,10 +469,47 @@ config RTC_DRV_AT32AP700X
AT32AP700x family processors.
config RTC_DRV_AT91RM9200
- tristate "AT91RM9200"
- depends on ARCH_AT91RM9200
- help
- Driver for the Atmel AT91RM9200's internal RTC (Realtime Clock).
+ tristate "AT91RM9200 or AT91SAM9RL"
+ depends on ARCH_AT91RM9200 || ARCH_AT91SAM9RL
+ help
+ Driver for the internal RTC (Realtime Clock) module found on
+ Atmel AT91RM9200's and AT91SAM9RL chips. On SAM9RL chips
+ this is powered by the backup power supply.
+
+config RTC_DRV_AT91SAM9
+ tristate "AT91SAM9x"
+ depends on ARCH_AT91 && !(ARCH_AT91RM9200 || ARCH_AT91X40)
+ help
+ RTC driver for the Atmel AT91SAM9x internal RTT (Real Time Timer).
+ These timers are powered by the backup power supply (such as a
+ small coin cell battery), but do not need to be used as RTCs.
+
+ (On AT91SAM9rl chips you probably want to use the dedicated RTC
+ module and leave the RTT available for other uses.)
+
+config RTC_DRV_AT91SAM9_RTT
+ int
+ range 0 1
+ default 0
+ prompt "RTT module Number" if ARCH_AT91SAM9263
+ depends on RTC_DRV_AT91SAM9
+ help
+ More than one RTT module is available. You can choose which
+ one will be used as an RTC. The default of zero is normally
+ OK to use, though some systems use that for non-RTC purposes.
+
+config RTC_DRV_AT91SAM9_GPBR
+ int
+ range 0 3 if !ARCH_AT91SAM9263
+ range 0 15 if ARCH_AT91SAM9263
+ default 0
+ prompt "Backup Register Number"
+ depends on RTC_DRV_AT91SAM9
+ help
+ The RTC driver needs to use one of the General Purpose Backup
+ Registers (GPBRs) as well as the RTT. You can choose which one
+ will be used. The default of zero is normally OK to use, but
+ on some systems other software needs to use that register.
config RTC_DRV_BFIN
tristate "Blackfin On-Chip RTC"
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 465db4dd50b2..ec703f34ab86 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -19,11 +19,14 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o
+obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o
obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o
obj-$(CONFIG_RTC_DRV_DS1216) += rtc-ds1216.o
+obj-$(CONFIG_RTC_DRV_DS1302) += rtc-ds1302.o
obj-$(CONFIG_RTC_DRV_DS1307) += rtc-ds1307.o
obj-$(CONFIG_RTC_DRV_DS1374) += rtc-ds1374.o
+obj-$(CONFIG_RTC_DRV_DS1511) += rtc-ds1511.o
obj-$(CONFIG_RTC_DRV_DS1553) += rtc-ds1553.o
obj-$(CONFIG_RTC_DRV_DS1672) += rtc-ds1672.o
obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o
@@ -38,6 +41,7 @@ obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
+obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o
obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
new file mode 100644
index 000000000000..bbf10ecf416c
--- /dev/null
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -0,0 +1,520 @@
+/*
+ * "RTT as Real Time Clock" driver for AT91SAM9 SoC family
+ *
+ * (C) 2007 Michel Benoit
+ *
+ * Based on rtc-at91rm9200.c by Rick Bronson
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/rtc.h>
+#include <linux/interrupt.h>
+#include <linux/ioctl.h>
+
+#include <asm/mach/time.h>
+#include <asm/arch/board.h>
+#include <asm/arch/at91_rtt.h>
+
+
+/*
+ * This driver uses two configurable hardware resources that live in the
+ * AT91SAM9 backup power domain (intended to be powered at all times)
+ * to implement the Real Time Clock interfaces
+ *
+ * - A "Real-time Timer" (RTT) counts up in seconds from a base time.
+ * We can't assign the counter value (CRTV) ... but we can reset it.
+ *
+ * - One of the "General Purpose Backup Registers" (GPBRs) holds the
+ * base time, normally an offset from the beginning of the POSIX
+ * epoch (1970-Jan-1 00:00:00 UTC). Some systems also include the
+ * local timezone's offset.
+ *
+ * The RTC's value is the RTT counter plus that offset. The RTC's alarm
+ * is likewise a base (ALMV) plus that offset.
+ *
+ * Not all RTTs will be used as RTCs; some systems have multiple RTTs to
+ * choose from, or a "real" RTC module. All systems have multiple GPBR
+ * registers available, likewise usable for more than "RTC" support.
+ */
+
+/*
+ * We store ALARM_DISABLED in ALMV to record that no alarm is set.
+ * It's also the reset value for that field.
+ */
+#define ALARM_DISABLED ((u32)~0)
+
+
+struct sam9_rtc {
+ void __iomem *rtt;
+ struct rtc_device *rtcdev;
+ u32 imr;
+};
+
+#define rtt_readl(rtc, field) \
+ __raw_readl((rtc)->rtt + AT91_RTT_ ## field)
+#define rtt_writel(rtc, field, val) \
+ __raw_writel((val), (rtc)->rtt + AT91_RTT_ ## field)
+
+#define gpbr_readl(rtc) \
+ at91_sys_read(AT91_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR)
+#define gpbr_writel(rtc, val) \
+ at91_sys_write(AT91_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR, (val))
+
+/*
+ * Read current time and date in RTC
+ */
+static int at91_rtc_readtime(struct device *dev, struct rtc_time *tm)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ u32 secs, secs2;
+ u32 offset;
+
+ /* read current time offset */
+ offset = gpbr_readl(rtc);
+ if (offset == 0)
+ return -EILSEQ;
+
+ /* reread the counter to help sync the two clock domains */
+ secs = rtt_readl(rtc, VR);
+ secs2 = rtt_readl(rtc, VR);
+ if (secs != secs2)
+ secs = rtt_readl(rtc, VR);
+
+ rtc_time_to_tm(offset + secs, tm);
+
+ dev_dbg(dev, "%s: %4d-%02d-%02d %02d:%02d:%02d\n", "readtime",
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ return 0;
+}
+
+/*
+ * Set current time and date in RTC
+ */
+static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ int err;
+ u32 offset, alarm, mr;
+ unsigned long secs;
+
+ dev_dbg(dev, "%s: %4d-%02d-%02d %02d:%02d:%02d\n", "settime",
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ err = rtc_tm_to_time(tm, &secs);
+ if (err != 0)
+ return err;
+
+ mr = rtt_readl(rtc, MR);
+
+ /* disable interrupts */
+ rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
+
+ /* read current time offset */
+ offset = gpbr_readl(rtc);
+
+ /* store the new base time in a battery backup register */
+ secs += 1;
+ gpbr_writel(rtc, secs);
+
+ /* adjust the alarm time for the new base */
+ alarm = rtt_readl(rtc, AR);
+ if (alarm != ALARM_DISABLED) {
+ if (offset > secs) {
+ /* time jumped backwards, increase time until alarm */
+ alarm += (offset - secs);
+ } else if ((alarm + offset) > secs) {
+ /* time jumped forwards, decrease time until alarm */
+ alarm -= (secs - offset);
+ } else {
+ /* time jumped past the alarm, disable alarm */
+ alarm = ALARM_DISABLED;
+ mr &= ~AT91_RTT_ALMIEN;
+ }
+ rtt_writel(rtc, AR, alarm);
+ }
+
+ /* reset the timer, and re-enable interrupts */
+ rtt_writel(rtc, MR, mr | AT91_RTT_RTTRST);
+
+ return 0;
+}
+
+static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &alrm->time;
+ u32 alarm = rtt_readl(rtc, AR);
+ u32 offset;
+
+ offset = gpbr_readl(rtc);
+ if (offset == 0)
+ return -EILSEQ;
+
+ memset(alrm, 0, sizeof(*alrm));
+ if (alarm != ALARM_DISABLED && offset != 0) {
+ rtc_time_to_tm(offset + alarm, tm);
+
+ dev_dbg(dev, "%s: %4d-%02d-%02d %02d:%02d:%02d\n", "readalarm",
+ 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ if (rtt_readl(rtc, MR) & AT91_RTT_ALMIEN)
+ alrm->enabled = 1;
+ }
+
+ return 0;
+}
+
+static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ struct rtc_time *tm = &alrm->time;
+ unsigned long secs;
+ u32 offset;
+ u32 mr;
+ int err;
+
+ err = rtc_tm_to_time(tm, &secs);
+ if (err != 0)
+ return err;
+
+ offset = gpbr_readl(rtc);
+ if (offset == 0) {
+ /* time is not set */
+ return -EILSEQ;
+ }
+ mr = rtt_readl(rtc, MR);
+ rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
+
+ /* alarm in the past? finish and leave disabled */
+ if (secs <= offset) {
+ rtt_writel(rtc, AR, ALARM_DISABLED);
+ return 0;
+ }
+
+ /* else set alarm and maybe enable it */
+ rtt_writel(rtc, AR, secs - offset);
+ if (alrm->enabled)
+ rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
+
+ dev_dbg(dev, "%s: %4d-%02d-%02d %02d:%02d:%02d\n", "setalarm",
+ tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour,
+ tm->tm_min, tm->tm_sec);
+
+ return 0;
+}
+
+/*
+ * Handle commands from user-space
+ */
+static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ int ret = 0;
+ u32 mr = rtt_readl(rtc, MR);
+
+ dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
+
+ switch (cmd) {
+ case RTC_AIE_OFF: /* alarm off */
+ rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
+ break;
+ case RTC_AIE_ON: /* alarm on */
+ rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
+ break;
+ case RTC_UIE_OFF: /* update off */
+ rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
+ break;
+ case RTC_UIE_ON: /* update on */
+ rtt_writel(rtc, MR, mr | AT91_RTT_RTTINCIEN);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Provide additional RTC information in /proc/driver/rtc
+ */
+static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ struct sam9_rtc *rtc = dev_get_drvdata(dev);
+ u32 mr = rtt_readl(rtc, MR);
+
+ seq_printf(seq, "update_IRQ\t: %s\n",
+ (mr & AT91_RTT_RTTINCIEN) ? "yes" : "no");
+ return 0;
+}
+
+/*
+ * IRQ handler for the RTC
+ */
+static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
+{
+ struct sam9_rtc *rtc = _rtc;
+ u32 sr, mr;
+ unsigned long events = 0;
+
+ /* Shared interrupt may be for another device. Note: reading
+ * SR clears it, so we must only read it in this irq handler!
+ */
+ mr = rtt_readl(rtc, MR) & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
+ sr = rtt_readl(rtc, SR) & mr;
+ if (!sr)
+ return IRQ_NONE;
+
+ /* alarm status */
+ if (sr & AT91_RTT_ALMS)
+ events |= (RTC_AF | RTC_IRQF);
+
+ /* timer update/increment */
+ if (sr & AT91_RTT_RTTINC)
+ events |= (RTC_UF | RTC_IRQF);
+
+ rtc_update_irq(rtc->rtcdev, 1, events);
+
+ pr_debug("%s: num=%ld, events=0x%02lx\n", __FUNCTION__,
+ events >> 8, events & 0x000000FF);
+
+ return IRQ_HANDLED;
+}
+
+static const struct rtc_class_ops at91_rtc_ops = {
+ .ioctl = at91_rtc_ioctl,
+ .read_time = at91_rtc_readtime,
+ .set_time = at91_rtc_settime,
+ .read_alarm = at91_rtc_readalarm,
+ .set_alarm = at91_rtc_setalarm,
+ .proc = at91_rtc_proc,
+};
+
+/*
+ * Initialize and install RTC driver
+ */
+static int __init at91_rtc_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ struct sam9_rtc *rtc;
+ int ret;
+ u32 mr;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+
+ rtc = kzalloc(sizeof *rtc, GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, rtc);
+ rtc->rtt = (void __force __iomem *) (AT91_VA_BASE_SYS - AT91_BASE_SYS);
+ rtc->rtt += r->start;
+
+ mr = rtt_readl(rtc, MR);
+
+ /* unless RTT is counting at 1 Hz, re-initialize it */
+ if ((mr & AT91_RTT_RTPRES) != AT91_SLOW_CLOCK) {
+ mr = AT91_RTT_RTTRST | (AT91_SLOW_CLOCK & AT91_RTT_RTPRES);
+ gpbr_writel(rtc, 0);
+ }
+
+ /* disable all interrupts (same as on shutdown path) */
+ mr &= ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
+ rtt_writel(rtc, MR, mr);
+
+ rtc->rtcdev = rtc_device_register(pdev->name, &pdev->dev,
+ &at91_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtcdev)) {
+ ret = PTR_ERR(rtc->rtcdev);
+ goto fail;
+ }
+
+ /* register irq handler after we know what name we'll use */
+ ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt,
+ IRQF_DISABLED | IRQF_SHARED,
+ rtc->rtcdev->dev.bus_id, rtc);
+ if (ret) {
+ dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS);
+ rtc_device_unregister(rtc->rtcdev);
+ goto fail;
+ }
+
+ /* NOTE: sam9260 rev A silicon has a ROM bug which resets the
+ * RTT on at least some reboots. If you have that chip, you must
+ * initialize the time from some external source like a GPS, wall
+ * clock, discrete RTC, etc.
+ */
+
+ if (gpbr_readl(rtc) == 0)
+ dev_warn(&pdev->dev, "%s: SET TIME!\n",
+ rtc->rtcdev->dev.bus_id);
+
+ return 0;
+
+fail:
+ platform_set_drvdata(pdev, NULL);
+ kfree(rtc);
+ return ret;
+}
+
+/*
+ * Disable and remove the RTC driver
+ */
+static int __exit at91_rtc_remove(struct platform_device *pdev)
+{
+ struct sam9_rtc *rtc = platform_get_drvdata(pdev);
+ u32 mr = rtt_readl(rtc, MR);
+
+ /* disable all interrupts */
+ rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
+ free_irq(AT91_ID_SYS, rtc);
+
+ rtc_device_unregister(rtc->rtcdev);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(rtc);
+ return 0;
+}
+
+static void at91_rtc_shutdown(struct platform_device *pdev)
+{
+ struct sam9_rtc *rtc = platform_get_drvdata(pdev);
+ u32 mr = rtt_readl(rtc, MR);
+
+ rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
+ rtt_writel(rtc, MR, mr & ~rtc->imr);
+}
+
+#ifdef CONFIG_PM
+
+/* AT91SAM9 RTC Power management control */
+
+static int at91_rtc_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct sam9_rtc *rtc = platform_get_drvdata(pdev);
+ u32 mr = rtt_readl(rtc, MR);
+
+ /*
+ * This IRQ is shared with DBGU and other hardware which isn't
+ * necessarily a wakeup event source.
+ */
+ rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
+ if (rtc->imr) {
+ if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) {
+ enable_irq_wake(AT91_ID_SYS);
+ /* don't let RTTINC cause wakeups */
+ if (mr & AT91_RTT_RTTINCIEN)
+ rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
+ } else
+ rtt_writel(rtc, MR, mr & ~rtc->imr);
+ }
+
+ return 0;
+}
+
+static int at91_rtc_resume(struct platform_device *pdev)
+{
+ struct sam9_rtc *rtc = platform_get_drvdata(pdev);
+ u32 mr;
+
+ if (rtc->imr) {
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(AT91_ID_SYS);
+ mr = rtt_readl(rtc, MR);
+ rtt_writel(rtc, MR, mr | rtc->imr);
+ }
+
+ return 0;
+}
+#else
+#define at91_rtc_suspend NULL
+#define at91_rtc_resume NULL
+#endif
+
+static struct platform_driver at91_rtc_driver = {
+ .driver.name = "rtc-at91sam9",
+ .driver.owner = THIS_MODULE,
+ .remove = __exit_p(at91_rtc_remove),
+ .shutdown = at91_rtc_shutdown,
+ .suspend = at91_rtc_suspend,
+ .resume = at91_rtc_resume,
+};
+
+/* Chips can have more than one RTT module, and they can be used for more
+ * than just RTCs. So we can't just register as "the" RTT driver.
+ *
+ * A normal approach in such cases is to create a library to allocate and
+ * free the modules. Here we just use bus_find_device() like such a
+ * library, binding directly ... no runtime "library" footprint is needed.
+ */
+static int __init at91_rtc_match(struct device *dev, void *v)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ /* continue searching if this isn't the RTT we need */
+ if (strcmp("at91_rtt", pdev->name) != 0
+ || pdev->id != CONFIG_RTC_DRV_AT91SAM9_RTT)
+ goto fail;
+
+ /* else we found it ... but fail unless we can bind to the RTC driver */
+ if (dev->driver) {
+ dev_dbg(dev, "busy, can't use as RTC!\n");
+ goto fail;
+ }
+ dev->driver = &at91_rtc_driver.driver;
+ if (device_attach(dev) == 0) {
+ dev_dbg(dev, "can't attach RTC!\n");
+ goto fail;
+ }
+ ret = at91_rtc_probe(pdev);
+ if (ret == 0)
+ return true;
+
+ dev_dbg(dev, "RTC probe err %d!\n", ret);
+fail:
+ return false;
+}
+
+static int __init at91_rtc_init(void)
+{
+ int status;
+ struct device *rtc;
+
+ status = platform_driver_register(&at91_rtc_driver);
+ if (status)
+ return status;
+ rtc = bus_find_device(&platform_bus_type, NULL,
+ NULL, at91_rtc_match);
+ if (!rtc)
+ platform_driver_unregister(&at91_rtc_driver);
+ return rtc ? 0 : -ENODEV;
+}
+module_init(at91_rtc_init);
+
+static void __exit at91_rtc_exit(void)
+{
+ platform_driver_unregister(&at91_rtc_driver);
+}
+module_exit(at91_rtc_exit);
+
+
+MODULE_AUTHOR("Michel Benoit");
+MODULE_DESCRIPTION("RTC driver for Atmel AT91SAM9x");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 1aa709dda0d6..d90ba860d216 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -1,6 +1,6 @@
/*
* Blackfin On-Chip Real Time Clock Driver
- * Supports BF53[123]/BF53[467]/BF54[2489]
+ * Supports BF52[257]/BF53[123]/BF53[467]/BF54[24789]
*
* Copyright 2004-2007 Analog Devices Inc.
*
@@ -32,26 +32,25 @@
* writes to clear status registers complete immediately.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/bcd.h>
-#include <linux/rtc.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/rtc.h>
#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
#include <asm/blackfin.h>
-#define stamp(fmt, args...) pr_debug("%s:%i: " fmt "\n", __FUNCTION__, __LINE__, ## args)
-#define stampit() stamp("here i am")
+#define dev_dbg_stamp(dev) dev_dbg(dev, "%s:%i: here i am\n", __func__, __LINE__)
struct bfin_rtc {
struct rtc_device *rtc_dev;
struct rtc_time rtc_alarm;
- spinlock_t lock;
+ u16 rtc_wrote_regs;
};
/* Bit values for the ISTAT / ICTL registers */
@@ -72,7 +71,7 @@ struct bfin_rtc {
#define SEC_BITS_OFF 0
/* Some helper functions to convert between the common RTC notion of time
- * and the internal Blackfin notion that is stored in 32bits.
+ * and the internal Blackfin notion that is encoded in 32bits.
*/
static inline u32 rtc_time_to_bfin(unsigned long now)
{
@@ -97,7 +96,10 @@ static inline void rtc_bfin_to_tm(u32 rtc_bfin, struct rtc_time *tm)
rtc_time_to_tm(rtc_bfin_to_time(rtc_bfin), tm);
}
-/* Wait for the previous write to a RTC register to complete.
+/**
+ * bfin_rtc_sync_pending - make sure pending writes have completed
+ *
+ * Wait for the previous write to a RTC register to complete.
* Unfortunately, we can't sleep here as that introduces a race condition when
* turning on interrupt events. Consider this:
* - process sets alarm
@@ -112,188 +114,202 @@ static inline void rtc_bfin_to_tm(u32 rtc_bfin, struct rtc_time *tm)
* If anyone can point out the obvious solution here, i'm listening :). This
* shouldn't be an issue on an SMP or preempt system as this function should
* only be called with the rtc lock held.
+ *
+ * Other options:
+ * - disable PREN so the sync happens at 32.768 kHz ... but this changes the
+ * inc rate for all RTC registers from 1 Hz to 32.768 kHz ...
+ * - use the write complete IRQ
*/
-static void rtc_bfin_sync_pending(void)
+/*
+static void bfin_rtc_sync_pending_polled(void)
{
- stampit();
- while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_COMPLETE)) {
+ while (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_COMPLETE))
if (!(bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING))
break;
- }
bfin_write_RTC_ISTAT(RTC_ISTAT_WRITE_COMPLETE);
}
+*/
+static DECLARE_COMPLETION(bfin_write_complete);
+static void bfin_rtc_sync_pending(struct device *dev)
+{
+ dev_dbg_stamp(dev);
+ while (bfin_read_RTC_ISTAT() & RTC_ISTAT_WRITE_PENDING)
+ wait_for_completion_timeout(&bfin_write_complete, HZ * 5);
+ dev_dbg_stamp(dev);
+}
-static void rtc_bfin_reset(struct bfin_rtc *rtc)
+/**
+ * bfin_rtc_reset - set RTC to sane/known state
+ *
+ * Initialize the RTC. Enable pre-scaler to scale RTC clock
+ * to 1Hz and clear interrupt/status registers.
+ */
+static void bfin_rtc_reset(struct device *dev)
{
- /* Initialize the RTC. Enable pre-scaler to scale RTC clock
- * to 1Hz and clear interrupt/status registers. */
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
+ struct bfin_rtc *rtc = dev_get_drvdata(dev);
+ dev_dbg_stamp(dev);
+ bfin_rtc_sync_pending(dev);
bfin_write_RTC_PREN(0x1);
- bfin_write_RTC_ICTL(0);
+ bfin_write_RTC_ICTL(RTC_ISTAT_WRITE_COMPLETE);
bfin_write_RTC_SWCNT(0);
bfin_write_RTC_ALARM(0);
bfin_write_RTC_ISTAT(0xFFFF);
- spin_unlock_irq(&rtc->lock);
+ rtc->rtc_wrote_regs = 0;
}
+/**
+ * bfin_rtc_interrupt - handle interrupt from RTC
+ *
+ * Since we handle all RTC events here, we have to make sure the requested
+ * interrupt is enabled (in RTC_ICTL) as the event status register (RTC_ISTAT)
+ * always gets updated regardless of the interrupt being enabled. So when one
+ * event we care about (e.g. stopwatch) goes off, we don't want to turn around
+ * and say that other events have happened as well (e.g. second). We do not
+ * have to worry about pending writes to the RTC_ICTL register as interrupts
+ * only fire if they are enabled in the RTC_ICTL register.
+ */
static irqreturn_t bfin_rtc_interrupt(int irq, void *dev_id)
{
- struct platform_device *pdev = to_platform_device(dev_id);
- struct bfin_rtc *rtc = platform_get_drvdata(pdev);
+ struct device *dev = dev_id;
+ struct bfin_rtc *rtc = dev_get_drvdata(dev);
unsigned long events = 0;
- u16 rtc_istat;
-
- stampit();
+ bool write_complete = false;
+ u16 rtc_istat, rtc_ictl;
- spin_lock_irq(&rtc->lock);
+ dev_dbg_stamp(dev);
rtc_istat = bfin_read_RTC_ISTAT();
+ rtc_ictl = bfin_read_RTC_ICTL();
- if (rtc_istat & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
- events |= RTC_AF | RTC_IRQF;
+ if (rtc_istat & RTC_ISTAT_WRITE_COMPLETE) {
+ bfin_write_RTC_ISTAT(RTC_ISTAT_WRITE_COMPLETE);
+ write_complete = true;
+ complete(&bfin_write_complete);
}
- if (rtc_istat & RTC_ISTAT_STOPWATCH) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_STOPWATCH);
- events |= RTC_PF | RTC_IRQF;
- bfin_write_RTC_SWCNT(rtc->rtc_dev->irq_freq);
+ if (rtc_ictl & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
+ if (rtc_istat & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY)) {
+ bfin_write_RTC_ISTAT(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY);
+ events |= RTC_AF | RTC_IRQF;
+ }
}
- if (rtc_istat & RTC_ISTAT_SEC) {
- bfin_write_RTC_ISTAT(RTC_ISTAT_SEC);
- events |= RTC_UF | RTC_IRQF;
+ if (rtc_ictl & RTC_ISTAT_STOPWATCH) {
+ if (rtc_istat & RTC_ISTAT_STOPWATCH) {
+ bfin_write_RTC_ISTAT(RTC_ISTAT_STOPWATCH);
+ events |= RTC_PF | RTC_IRQF;
+ bfin_write_RTC_SWCNT(rtc->rtc_dev->irq_freq);
+ }
}
- rtc_update_irq(rtc->rtc_dev, 1, events);
+ if (rtc_ictl & RTC_ISTAT_SEC) {
+ if (rtc_istat & RTC_ISTAT_SEC) {
+ bfin_write_RTC_ISTAT(RTC_ISTAT_SEC);
+ events |= RTC_UF | RTC_IRQF;
+ }
+ }
- spin_unlock_irq(&rtc->lock);
+ if (events)
+ rtc_update_irq(rtc->rtc_dev, 1, events);
- return IRQ_HANDLED;
+ if (write_complete || events)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
}
static int bfin_rtc_open(struct device *dev)
{
- struct bfin_rtc *rtc = dev_get_drvdata(dev);
int ret;
- stampit();
+ dev_dbg_stamp(dev);
- ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_DISABLED, "rtc-bfin", dev);
- if (unlikely(ret)) {
- dev_err(dev, "request RTC IRQ failed with %d\n", ret);
- return ret;
- }
-
- rtc_bfin_reset(rtc);
+ ret = request_irq(IRQ_RTC, bfin_rtc_interrupt, IRQF_SHARED, to_platform_device(dev)->name, dev);
+ if (!ret)
+ bfin_rtc_reset(dev);
return ret;
}
static void bfin_rtc_release(struct device *dev)
{
- struct bfin_rtc *rtc = dev_get_drvdata(dev);
- stampit();
- rtc_bfin_reset(rtc);
+ dev_dbg_stamp(dev);
+ bfin_rtc_reset(dev);
free_irq(IRQ_RTC, dev);
}
+static void bfin_rtc_int_set(struct bfin_rtc *rtc, u16 rtc_int)
+{
+ bfin_write_RTC_ISTAT(rtc_int);
+ bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | rtc_int);
+}
+static void bfin_rtc_int_clear(struct bfin_rtc *rtc, u16 rtc_int)
+{
+ bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & rtc_int);
+}
+static void bfin_rtc_int_set_alarm(struct bfin_rtc *rtc)
+{
+ /* Blackfin has different bits for whether the alarm is
+ * more than 24 hours away.
+ */
+ bfin_rtc_int_set(rtc, (rtc->rtc_alarm.tm_yday == -1 ? RTC_ISTAT_ALARM : RTC_ISTAT_ALARM_DAY));
+}
static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
{
struct bfin_rtc *rtc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ dev_dbg_stamp(dev);
- stampit();
+ bfin_rtc_sync_pending(dev);
switch (cmd) {
case RTC_PIE_ON:
- stampit();
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
- bfin_write_RTC_ISTAT(RTC_ISTAT_STOPWATCH);
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_set(rtc, RTC_ISTAT_STOPWATCH);
bfin_write_RTC_SWCNT(rtc->rtc_dev->irq_freq);
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | RTC_ISTAT_STOPWATCH);
- spin_unlock_irq(&rtc->lock);
- return 0;
+ break;
case RTC_PIE_OFF:
- stampit();
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
- bfin_write_RTC_SWCNT(0);
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & ~RTC_ISTAT_STOPWATCH);
- spin_unlock_irq(&rtc->lock);
- return 0;
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_clear(rtc, ~RTC_ISTAT_STOPWATCH);
+ break;
case RTC_UIE_ON:
- stampit();
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
- bfin_write_RTC_ISTAT(RTC_ISTAT_SEC);
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | RTC_ISTAT_SEC);
- spin_unlock_irq(&rtc->lock);
- return 0;
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_set(rtc, RTC_ISTAT_SEC);
+ break;
case RTC_UIE_OFF:
- stampit();
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & ~RTC_ISTAT_SEC);
- spin_unlock_irq(&rtc->lock);
- return 0;
-
- case RTC_AIE_ON: {
- unsigned long rtc_alarm;
- u16 which_alarm;
- int ret = 0;
-
- stampit();
-
- spin_lock_irq(&rtc->lock);
-
- rtc_bfin_sync_pending();
- if (rtc->rtc_alarm.tm_yday == -1) {
- struct rtc_time now;
- rtc_bfin_to_tm(bfin_read_RTC_STAT(), &now);
- now.tm_sec = rtc->rtc_alarm.tm_sec;
- now.tm_min = rtc->rtc_alarm.tm_min;
- now.tm_hour = rtc->rtc_alarm.tm_hour;
- ret = rtc_tm_to_time(&now, &rtc_alarm);
- which_alarm = RTC_ISTAT_ALARM;
- } else {
- ret = rtc_tm_to_time(&rtc->rtc_alarm, &rtc_alarm);
- which_alarm = RTC_ISTAT_ALARM_DAY;
- }
- if (ret == 0) {
- bfin_write_RTC_ISTAT(which_alarm);
- bfin_write_RTC_ALARM(rtc_time_to_bfin(rtc_alarm));
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() | which_alarm);
- }
-
- spin_unlock_irq(&rtc->lock);
-
- return ret;
- }
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_clear(rtc, ~RTC_ISTAT_SEC);
+ break;
+
+ case RTC_AIE_ON:
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_set_alarm(rtc);
+ break;
case RTC_AIE_OFF:
- stampit();
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
- bfin_write_RTC_ICTL(bfin_read_RTC_ICTL() & ~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
- spin_unlock_irq(&rtc->lock);
- return 0;
+ dev_dbg_stamp(dev);
+ bfin_rtc_int_clear(rtc, ~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+ break;
+
+ default:
+ dev_dbg_stamp(dev);
+ ret = -ENOIOCTLCMD;
}
- return -ENOIOCTLCMD;
+ return ret;
}
static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct bfin_rtc *rtc = dev_get_drvdata(dev);
- stampit();
+ dev_dbg_stamp(dev);
+
+ if (rtc->rtc_wrote_regs & 0x1)
+ bfin_rtc_sync_pending(dev);
- spin_lock_irq(&rtc->lock);
- rtc_bfin_sync_pending();
rtc_bfin_to_tm(bfin_read_RTC_STAT(), tm);
- spin_unlock_irq(&rtc->lock);
return 0;
}
@@ -304,64 +320,79 @@ static int bfin_rtc_set_time(struct device *dev, struct rtc_time *tm)
int ret;
unsigned long now;
- stampit();
-
- spin_lock_irq(&rtc->lock);
+ dev_dbg_stamp(dev);
ret = rtc_tm_to_time(tm, &now);
if (ret == 0) {
- rtc_bfin_sync_pending();
+ if (rtc->rtc_wrote_regs & 0x1)
+ bfin_rtc_sync_pending(dev);
bfin_write_RTC_STAT(rtc_time_to_bfin(now));
+ rtc->rtc_wrote_regs = 0x1;
}
- spin_unlock_irq(&rtc->lock);
-
return ret;
}
static int bfin_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct bfin_rtc *rtc = dev_get_drvdata(dev);
- stampit();
- memcpy(&alrm->time, &rtc->rtc_alarm, sizeof(struct rtc_time));
- alrm->pending = !!(bfin_read_RTC_ICTL() & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+ dev_dbg_stamp(dev);
+ alrm->time = rtc->rtc_alarm;
+ bfin_rtc_sync_pending(dev);
+ alrm->enabled = !!(bfin_read_RTC_ICTL() & (RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
return 0;
}
static int bfin_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct bfin_rtc *rtc = dev_get_drvdata(dev);
- stampit();
- memcpy(&rtc->rtc_alarm, &alrm->time, sizeof(struct rtc_time));
+ unsigned long rtc_alarm;
+
+ dev_dbg_stamp(dev);
+
+ if (rtc_tm_to_time(&alrm->time, &rtc_alarm))
+ return -EINVAL;
+
+ rtc->rtc_alarm = alrm->time;
+
+ bfin_rtc_sync_pending(dev);
+ bfin_write_RTC_ALARM(rtc_time_to_bfin(rtc_alarm));
+ if (alrm->enabled)
+ bfin_rtc_int_set_alarm(rtc);
+
return 0;
}
static int bfin_rtc_proc(struct device *dev, struct seq_file *seq)
{
-#define yesno(x) (x ? "yes" : "no")
+#define yesno(x) ((x) ? "yes" : "no")
u16 ictl = bfin_read_RTC_ICTL();
- stampit();
- seq_printf(seq, "alarm_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_ALARM));
- seq_printf(seq, "wkalarm_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_ALARM_DAY));
- seq_printf(seq, "seconds_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_SEC));
- seq_printf(seq, "periodic_IRQ\t: %s\n", yesno(ictl & RTC_ISTAT_STOPWATCH));
-#ifdef DEBUG
- seq_printf(seq, "RTC_STAT\t: 0x%08X\n", bfin_read_RTC_STAT());
- seq_printf(seq, "RTC_ICTL\t: 0x%04X\n", bfin_read_RTC_ICTL());
- seq_printf(seq, "RTC_ISTAT\t: 0x%04X\n", bfin_read_RTC_ISTAT());
- seq_printf(seq, "RTC_SWCNT\t: 0x%04X\n", bfin_read_RTC_SWCNT());
- seq_printf(seq, "RTC_ALARM\t: 0x%08X\n", bfin_read_RTC_ALARM());
- seq_printf(seq, "RTC_PREN\t: 0x%04X\n", bfin_read_RTC_PREN());
-#endif
+ dev_dbg_stamp(dev);
+ seq_printf(seq,
+ "alarm_IRQ\t: %s\n"
+ "wkalarm_IRQ\t: %s\n"
+ "seconds_IRQ\t: %s\n"
+ "periodic_IRQ\t: %s\n",
+ yesno(ictl & RTC_ISTAT_ALARM),
+ yesno(ictl & RTC_ISTAT_ALARM_DAY),
+ yesno(ictl & RTC_ISTAT_SEC),
+ yesno(ictl & RTC_ISTAT_STOPWATCH));
return 0;
+#undef yesno
}
+/**
+ * bfin_irq_set_freq - make sure hardware supports requested freq
+ * @dev: pointer to RTC device structure
+ * @freq: requested frequency rate
+ *
+ * The Blackfin RTC can only generate periodic events at 1 per
+ * second (1 Hz), so reject any attempt at changing it.
+ */
static int bfin_irq_set_freq(struct device *dev, int freq)
{
- struct bfin_rtc *rtc = dev_get_drvdata(dev);
- stampit();
- rtc->rtc_dev->irq_freq = freq;
- return 0;
+ dev_dbg_stamp(dev);
+ return -ENOTTY;
}
static struct rtc_class_ops bfin_rtc_ops = {
@@ -381,27 +412,24 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
struct bfin_rtc *rtc;
int ret = 0;
- stampit();
+ dev_dbg_stamp(&pdev->dev);
rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
if (unlikely(!rtc))
return -ENOMEM;
- spin_lock_init(&rtc->lock);
-
rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE);
if (unlikely(IS_ERR(rtc))) {
ret = PTR_ERR(rtc->rtc_dev);
goto err;
}
- rtc->rtc_dev->irq_freq = 0;
- rtc->rtc_dev->max_user_freq = (2 << 16); /* stopwatch is an unsigned 16 bit reg */
+ rtc->rtc_dev->irq_freq = 1;
platform_set_drvdata(pdev, rtc);
return 0;
-err:
+ err:
kfree(rtc);
return ret;
}
@@ -428,7 +456,6 @@ static struct platform_driver bfin_rtc_driver = {
static int __init bfin_rtc_init(void)
{
- stampit();
return platform_driver_register(&bfin_rtc_driver);
}
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 29cf1457ca10..e059f94c79eb 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -36,9 +36,24 @@
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
+#ifdef CONFIG_HPET_EMULATE_RTC
+#include <asm/hpet.h>
+#endif
+
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
+#ifndef CONFIG_HPET_EMULATE_RTC
+#define is_hpet_enabled() 0
+#define hpet_set_alarm_time(hrs, min, sec) do { } while (0)
+#define hpet_set_periodic_freq(arg) 0
+#define hpet_mask_rtc_irq_bit(arg) do { } while (0)
+#define hpet_set_rtc_irq_bit(arg) do { } while (0)
+#define hpet_rtc_timer_init() do { } while (0)
+#define hpet_register_irq_handler(h) 0
+#define hpet_unregister_irq_handler(h) do { } while (0)
+extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
+#endif
struct cmos_rtc {
struct rtc_device *rtc;
@@ -199,6 +214,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
sec = t->time.tm_sec;
sec = (sec < 60) ? BIN2BCD(sec) : 0xff;
+ hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, t->time.tm_sec);
spin_lock_irq(&rtc_lock);
/* next rtc irq must not be from previous alarm setting */
@@ -252,7 +268,8 @@ static int cmos_irq_set_freq(struct device *dev, int freq)
f = 16 - f;
spin_lock_irqsave(&rtc_lock, flags);
- CMOS_WRITE(RTC_REF_CLCK_32KHZ | f, RTC_FREQ_SELECT);
+ if (!hpet_set_periodic_freq(freq))
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ | f, RTC_FREQ_SELECT);
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
@@ -314,28 +331,37 @@ cmos_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RTC_AIE_OFF: /* alarm off */
rtc_control &= ~RTC_AIE;
+ hpet_mask_rtc_irq_bit(RTC_AIE);
break;
case RTC_AIE_ON: /* alarm on */
rtc_control |= RTC_AIE;
+ hpet_set_rtc_irq_bit(RTC_AIE);
break;
case RTC_UIE_OFF: /* update off */
rtc_control &= ~RTC_UIE;
+ hpet_mask_rtc_irq_bit(RTC_UIE);
break;
case RTC_UIE_ON: /* update on */
rtc_control |= RTC_UIE;
+ hpet_set_rtc_irq_bit(RTC_UIE);
break;
case RTC_PIE_OFF: /* periodic off */
rtc_control &= ~RTC_PIE;
+ hpet_mask_rtc_irq_bit(RTC_PIE);
break;
case RTC_PIE_ON: /* periodic on */
rtc_control |= RTC_PIE;
+ hpet_set_rtc_irq_bit(RTC_PIE);
break;
}
- CMOS_WRITE(rtc_control, RTC_CONTROL);
+ if (!is_hpet_enabled())
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+
rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
if (is_intr(rtc_intr))
rtc_update_irq(cmos->rtc, 1, rtc_intr);
+
spin_unlock_irqrestore(&rtc_lock, flags);
return 0;
}
@@ -393,15 +419,111 @@ static const struct rtc_class_ops cmos_rtc_ops = {
/*----------------------------------------------------------------*/
+/*
+ * All these chips have at least 64 bytes of address space, shared by
+ * RTC registers and NVRAM. Most of those bytes of NVRAM are used
+ * by boot firmware. Modern chips have 128 or 256 bytes.
+ */
+
+#define NVRAM_OFFSET (RTC_REG_D + 1)
+
+static ssize_t
+cmos_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ int retval;
+
+ if (unlikely(off >= attr->size))
+ return 0;
+ if ((off + count) > attr->size)
+ count = attr->size - off;
+
+ spin_lock_irq(&rtc_lock);
+ for (retval = 0, off += NVRAM_OFFSET; count--; retval++, off++)
+ *buf++ = CMOS_READ(off);
+ spin_unlock_irq(&rtc_lock);
+
+ return retval;
+}
+
+static ssize_t
+cmos_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct cmos_rtc *cmos;
+ int retval;
+
+ cmos = dev_get_drvdata(container_of(kobj, struct device, kobj));
+ if (unlikely(off >= attr->size))
+ return -EFBIG;
+ if ((off + count) > attr->size)
+ count = attr->size - off;
+
+ /* NOTE: on at least PCs and Ataris, the boot firmware uses a
+ * checksum on part of the NVRAM data. That's currently ignored
+ * here. If userspace is smart enough to know what fields of
+ * NVRAM to update, updating checksums is also part of its job.
+ */
+ spin_lock_irq(&rtc_lock);
+ for (retval = 0, off += NVRAM_OFFSET; count--; retval++, off++) {
+ /* don't trash RTC registers */
+ if (off == cmos->day_alrm
+ || off == cmos->mon_alrm
+ || off == cmos->century)
+ buf++;
+ else
+ CMOS_WRITE(*buf++, off);
+ }
+ spin_unlock_irq(&rtc_lock);
+
+ return retval;
+}
+
+static struct bin_attribute nvram = {
+ .attr = {
+ .name = "nvram",
+ .mode = S_IRUGO | S_IWUSR,
+ .owner = THIS_MODULE,
+ },
+
+ .read = cmos_nvram_read,
+ .write = cmos_nvram_write,
+ /* size gets set up later */
+};
+
+/*----------------------------------------------------------------*/
+
static struct cmos_rtc cmos_rtc;
static irqreturn_t cmos_interrupt(int irq, void *p)
{
u8 irqstat;
+ u8 rtc_control;
spin_lock(&rtc_lock);
- irqstat = CMOS_READ(RTC_INTR_FLAGS);
- irqstat &= (CMOS_READ(RTC_CONTROL) & RTC_IRQMASK) | RTC_IRQF;
+ /*
+ * In this case it is HPET RTC interrupt handler
+ * calling us, with the interrupt information
+ * passed as arg1, instead of irq.
+ */
+ if (is_hpet_enabled())
+ irqstat = (unsigned long)irq & 0xF0;
+ else {
+ irqstat = CMOS_READ(RTC_INTR_FLAGS);
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ irqstat &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
+ }
+
+ /* All Linux RTC alarms should be treated as if they were oneshot.
+ * Similar code may be needed in system wakeup paths, in case the
+ * alarm woke the system.
+ */
+ if (irqstat & RTC_AIE) {
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ rtc_control &= ~RTC_AIE;
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+ CMOS_READ(RTC_INTR_FLAGS);
+ }
spin_unlock(&rtc_lock);
if (is_intr(irqstat)) {
@@ -412,11 +534,9 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
}
#ifdef CONFIG_PNP
-#define is_pnp() 1
#define INITSECTION
#else
-#define is_pnp() 0
#define INITSECTION __init
#endif
@@ -426,6 +546,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
struct cmos_rtc_board_info *info = dev->platform_data;
int retval = 0;
unsigned char rtc_control;
+ unsigned address_space;
/* there can be only one ... */
if (cmos_rtc.dev)
@@ -450,15 +571,36 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
cmos_rtc.irq = rtc_irq;
cmos_rtc.iomem = ports;
+ /* Heuristic to deduce NVRAM size ... do what the legacy NVRAM
+ * driver did, but don't reject unknown configs. Old hardware
+ * won't address 128 bytes, and for now we ignore the way newer
+ * chips can address 256 bytes (using two more i/o ports).
+ */
+#if defined(CONFIG_ATARI)
+ address_space = 64;
+#elif defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+ address_space = 128;
+#else
+#warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
+ address_space = 128;
+#endif
+
/* For ACPI systems extension info comes from the FADT. On others,
* board specific setup provides it as appropriate. Systems where
* the alarm IRQ isn't automatically a wakeup IRQ (like ACPI, and
* some almost-clones) can provide hooks to make that behave.
+ *
+ * Note that ACPI doesn't preclude putting these registers into
+ * "extended" areas of the chip, including some that we won't yet
+ * expect CMOS_READ and friends to handle.
*/
if (info) {
- cmos_rtc.day_alrm = info->rtc_day_alarm;
- cmos_rtc.mon_alrm = info->rtc_mon_alarm;
- cmos_rtc.century = info->rtc_century;
+ if (info->rtc_day_alarm && info->rtc_day_alarm < 128)
+ cmos_rtc.day_alrm = info->rtc_day_alarm;
+ if (info->rtc_mon_alarm && info->rtc_mon_alarm < 128)
+ cmos_rtc.mon_alrm = info->rtc_mon_alarm;
+ if (info->rtc_century && info->rtc_century < 128)
+ cmos_rtc.century = info->rtc_century;
if (info->wake_on && info->wake_off) {
cmos_rtc.wake_on = info->wake_on;
@@ -485,8 +627,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
* doesn't use 32KHz here ... for portability we might need to
* do something about other clock frequencies.
*/
- CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT);
cmos_rtc.rtc->irq_freq = 1024;
+ if (!hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq))
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT);
/* disable irqs.
*
@@ -509,19 +652,39 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup1;
}
- if (is_valid_irq(rtc_irq))
- retval = request_irq(rtc_irq, cmos_interrupt, IRQF_DISABLED,
- cmos_rtc.rtc->dev.bus_id,
+ if (is_valid_irq(rtc_irq)) {
+ irq_handler_t rtc_cmos_int_handler;
+
+ if (is_hpet_enabled()) {
+ int err;
+
+ rtc_cmos_int_handler = hpet_rtc_interrupt;
+ err = hpet_register_irq_handler(cmos_interrupt);
+ if (err != 0) {
+ printk(KERN_WARNING "hpet_register_irq_handler "
+ " failed in rtc_init().");
+ goto cleanup1;
+ }
+ } else
+ rtc_cmos_int_handler = cmos_interrupt;
+
+ retval = request_irq(rtc_irq, rtc_cmos_int_handler,
+ IRQF_DISABLED, cmos_rtc.rtc->dev.bus_id,
cmos_rtc.rtc);
- if (retval < 0) {
- dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
- goto cleanup1;
+ if (retval < 0) {
+ dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
+ goto cleanup1;
+ }
}
+ hpet_rtc_timer_init();
- /* REVISIT optionally make 50 or 114 bytes NVRAM available,
- * like rtc-ds1553, rtc-ds1742 ... this will often include
- * registers for century, and day/month alarm.
- */
+ /* export at least the first block of NVRAM */
+ nvram.size = address_space - NVRAM_OFFSET;
+ retval = sysfs_create_bin_file(&dev->kobj, &nvram);
+ if (retval < 0) {
+ dev_dbg(dev, "can't create nvram file? %d\n", retval);
+ goto cleanup2;
+ }
pr_info("%s: alarms up to one %s%s\n",
cmos_rtc.rtc->dev.bus_id,
@@ -536,6 +699,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
return 0;
+cleanup2:
+ if (is_valid_irq(rtc_irq))
+ free_irq(rtc_irq, cmos_rtc.rtc);
cleanup1:
cmos_rtc.dev = NULL;
rtc_device_unregister(cmos_rtc.rtc);
@@ -563,8 +729,12 @@ static void __exit cmos_do_remove(struct device *dev)
cmos_do_shutdown();
- if (is_valid_irq(cmos->irq))
+ sysfs_remove_bin_file(&dev->kobj, &nvram);
+
+ if (is_valid_irq(cmos->irq)) {
free_irq(cmos->irq, cmos->rtc);
+ hpet_unregister_irq_handler(cmos_interrupt);
+ }
rtc_device_unregister(cmos->rtc);
cmos->rtc = NULL;
@@ -659,9 +829,12 @@ static int cmos_resume(struct device *dev)
/*----------------------------------------------------------------*/
-/* The "CMOS" RTC normally lives on the platform_bus. On ACPI systems,
- * the device node will always be created as a PNPACPI device. Plus
- * pre-ACPI PCs probably list it in the PNPBIOS tables.
+/* On non-x86 systems, a "CMOS" RTC lives most naturally on platform_bus.
+ * ACPI systems always list these as PNPACPI devices, and pre-ACPI PCs
+ * probably list them in similar PNPBIOS tables; so PNP is more common.
+ *
+ * We don't use legacy "poke at the hardware" probing. Ancient PCs that
+ * predate even PNPBIOS should set up platform_bus devices.
*/
#ifdef CONFIG_PNP
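
With the bin_attribute added above, the CMOS NVRAM bytes past the clock registers become readable and writable from userspace as a sysfs file named "nvram". A short read-side sketch; the path shown is an assumption (the attribute sits on the RTC's parent PNP/platform device, reachable through the rtc class link):

#include <stdio.h>

int main(void)
{
	unsigned char buf[64];
	size_t n;
	/* assumed path: "nvram" lives in the parent device directory */
	FILE *f = fopen("/sys/class/rtc/rtc0/device/nvram", "rb");

	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	printf("read %zu bytes of CMOS NVRAM\n", n);
	fclose(f);
	return 0;
}
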
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 025c60a17a4a..90dfa0df747a 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -246,6 +246,15 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file,
/* if the driver does not provide the ioctl interface
* or if that particular ioctl was not implemented
* (-ENOIOCTLCMD), we will try to emulate here.
+ *
+ * Drivers *SHOULD NOT* provide ioctl implementations
+ * for these requests. Instead, provide methods to
+ * support the following code, so that the RTC's main
+ * features are accessible without using ioctls.
+ *
+ * RTC and alarm times will be in UTC, by preference,
+ * but dual-booting with MS-Windows implies RTCs must
+ * use the local wall clock time.
*/
switch (cmd) {
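
The comment added above recommends keeping the hardware clock in UTC and leaving timezone handling to userspace. A sketch of that split: read the raw (UTC) time via RTC_RD_TIME, then convert to local time with the C library. timegm() is a glibc/BSD extension, and /dev/rtc0 is an assumed node name.

#include <stdio.h>
#include <time.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time rt;
	struct tm tm = { 0 };
	time_t t;
	int fd = open("/dev/rtc0", O_RDONLY);

	if (fd < 0 || ioctl(fd, RTC_RD_TIME, &rt) < 0)
		return 1;

	/* struct rtc_time mirrors the struct tm field names */
	tm.tm_sec  = rt.tm_sec;
	tm.tm_min  = rt.tm_min;
	tm.tm_hour = rt.tm_hour;
	tm.tm_mday = rt.tm_mday;
	tm.tm_mon  = rt.tm_mon;
	tm.tm_year = rt.tm_year;

	t = timegm(&tm);			/* interpret the RTC as UTC */
	printf("local time: %s", asctime(localtime(&t)));
	close(fd);
	return 0;
}
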
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
new file mode 100644
index 000000000000..7b002ceeaa7d
--- /dev/null
+++ b/drivers/rtc/rtc-ds1302.c
@@ -0,0 +1,262 @@
+/*
+ * Dallas DS1302 RTC Support
+ *
+ * Copyright (C) 2002 David McCullough
+ * Copyright (C) 2003 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License version 2. See the file "COPYING" in the main directory of
+ * this archive for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/rtc.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/bcd.h>
+#include <asm/rtc.h>
+
+#define DRV_NAME "rtc-ds1302"
+#define DRV_VERSION "0.1.0"
+
+#define RTC_CMD_READ 0x81 /* Read command */
+#define RTC_CMD_WRITE 0x80 /* Write command */
+
+#define RTC_ADDR_RAM0 0x20 /* Address of RAM0 */
+#define RTC_ADDR_TCR 0x08 /* Address of trickle charge register */
+#define RTC_ADDR_YEAR 0x06 /* Address of year register */
+#define RTC_ADDR_DAY 0x05 /* Address of day of week register */
+#define RTC_ADDR_MON 0x04 /* Address of month register */
+#define RTC_ADDR_DATE 0x03 /* Address of day of month register */
+#define RTC_ADDR_HOUR 0x02 /* Address of hour register */
+#define RTC_ADDR_MIN 0x01 /* Address of minute register */
+#define RTC_ADDR_SEC 0x00 /* Address of second register */
+
+#define RTC_RESET 0x1000
+#define RTC_IODATA 0x0800
+#define RTC_SCLK 0x0400
+
+#ifdef CONFIG_SH_SECUREEDGE5410
+#include <asm/snapgear.h>
+#define set_dp(x) SECUREEDGE_WRITE_IOPORT(x, 0x1c00)
+#define get_dp() SECUREEDGE_READ_IOPORT()
+#else
+#error "Add support for your platform"
+#endif
+
+struct ds1302_rtc {
+ struct rtc_device *rtc_dev;
+ spinlock_t lock;
+};
+
+static void ds1302_sendbits(unsigned int val)
+{
+ int i;
+
+ for (i = 8; (i); i--, val >>= 1) {
+ set_dp((get_dp() & ~RTC_IODATA) | ((val & 0x1) ?
+ RTC_IODATA : 0));
+ set_dp(get_dp() | RTC_SCLK); /* clock high */
+ set_dp(get_dp() & ~RTC_SCLK); /* clock low */
+ }
+}
+
+static unsigned int ds1302_recvbits(void)
+{
+ unsigned int val;
+ int i;
+
+ for (i = 0, val = 0; (i < 8); i++) {
+ val |= (((get_dp() & RTC_IODATA) ? 1 : 0) << i);
+ set_dp(get_dp() | RTC_SCLK); /* clock high */
+ set_dp(get_dp() & ~RTC_SCLK); /* clock low */
+ }
+
+ return val;
+}
+
+static unsigned int ds1302_readbyte(unsigned int addr)
+{
+ unsigned int val;
+
+ set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+
+ set_dp(get_dp() | RTC_RESET);
+ ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_READ);
+ val = ds1302_recvbits();
+ set_dp(get_dp() & ~RTC_RESET);
+
+ return val;
+}
+
+static void ds1302_writebyte(unsigned int addr, unsigned int val)
+{
+ set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+ set_dp(get_dp() | RTC_RESET);
+ ds1302_sendbits(((addr & 0x3f) << 1) | RTC_CMD_WRITE);
+ ds1302_sendbits(val);
+ set_dp(get_dp() & ~RTC_RESET);
+}
+
+static int ds1302_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ds1302_rtc *rtc = dev_get_drvdata(dev);
+
+ spin_lock_irq(&rtc->lock);
+
+ tm->tm_sec = BCD2BIN(ds1302_readbyte(RTC_ADDR_SEC));
+ tm->tm_min = BCD2BIN(ds1302_readbyte(RTC_ADDR_MIN));
+ tm->tm_hour = BCD2BIN(ds1302_readbyte(RTC_ADDR_HOUR));
+ tm->tm_wday = BCD2BIN(ds1302_readbyte(RTC_ADDR_DAY));
+ tm->tm_mday = BCD2BIN(ds1302_readbyte(RTC_ADDR_DATE));
+ tm->tm_mon = BCD2BIN(ds1302_readbyte(RTC_ADDR_MON)) - 1;
+ tm->tm_year = BCD2BIN(ds1302_readbyte(RTC_ADDR_YEAR));
+
+ if (tm->tm_year < 70)
+ tm->tm_year += 100;
+
+ spin_unlock_irq(&rtc->lock);
+
+ dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
+ "mday=%d, mon=%d, year=%d, wday=%d\n",
+ __FUNCTION__,
+ tm->tm_sec, tm->tm_min, tm->tm_hour,
+ tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);
+
+ if (rtc_valid_tm(tm) < 0)
+ dev_err(dev, "invalid date\n");
+
+ return 0;
+}
+
+static int ds1302_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct ds1302_rtc *rtc = dev_get_drvdata(dev);
+
+ spin_lock_irq(&rtc->lock);
+
+ /* Stop RTC */
+ ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) | 0x80);
+
+ ds1302_writebyte(RTC_ADDR_SEC, BIN2BCD(tm->tm_sec));
+ ds1302_writebyte(RTC_ADDR_MIN, BIN2BCD(tm->tm_min));
+ ds1302_writebyte(RTC_ADDR_HOUR, BIN2BCD(tm->tm_hour));
+ ds1302_writebyte(RTC_ADDR_DAY, BIN2BCD(tm->tm_wday));
+ ds1302_writebyte(RTC_ADDR_DATE, BIN2BCD(tm->tm_mday));
+ ds1302_writebyte(RTC_ADDR_MON, BIN2BCD(tm->tm_mon + 1));
+ ds1302_writebyte(RTC_ADDR_YEAR, BIN2BCD(tm->tm_year % 100));
+
+ /* Start RTC */
+ ds1302_writebyte(RTC_ADDR_SEC, ds1302_readbyte(RTC_ADDR_SEC) & ~0x80);
+
+ spin_unlock_irq(&rtc->lock);
+
+ return 0;
+}
+
+static int ds1302_rtc_ioctl(struct device *dev, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+#ifdef RTC_SET_CHARGE
+ case RTC_SET_CHARGE:
+ {
+ struct ds1302_rtc *rtc = dev_get_drvdata(dev);
+ int tcs_val;
+
+ if (copy_from_user(&tcs_val, (int __user *)arg, sizeof(int)))
+ return -EFAULT;
+
+ spin_lock_irq(&rtc->lock);
+ ds1302_writebyte(RTC_ADDR_TCR, (0xa0 | (tcs_val & 0xf)));
+ spin_unlock_irq(&rtc->lock);
+ return 0;
+ }
+#endif
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static struct rtc_class_ops ds1302_rtc_ops = {
+ .read_time = ds1302_rtc_read_time,
+ .set_time = ds1302_rtc_set_time,
+ .ioctl = ds1302_rtc_ioctl,
+};
+
+static int __devinit ds1302_rtc_probe(struct platform_device *pdev)
+{
+ struct ds1302_rtc *rtc;
+ int ret;
+
+ /* Reset */
+ set_dp(get_dp() & ~(RTC_RESET | RTC_IODATA | RTC_SCLK));
+
+ /* Write a magic value to the DS1302 RAM, and see if it sticks. */
+ ds1302_writebyte(RTC_ADDR_RAM0, 0x42);
+ if (ds1302_readbyte(RTC_ADDR_RAM0) != 0x42)
+ return -ENODEV;
+
+ rtc = kzalloc(sizeof(struct ds1302_rtc), GFP_KERNEL);
+ if (unlikely(!rtc))
+ return -ENOMEM;
+
+ spin_lock_init(&rtc->lock);
+ rtc->rtc_dev = rtc_device_register("ds1302", &pdev->dev,
+ &ds1302_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc_dev)) {
+ ret = PTR_ERR(rtc->rtc_dev);
+ goto out;
+ }
+
+ platform_set_drvdata(pdev, rtc);
+
+ return 0;
+out:
+ kfree(rtc);
+ return ret;
+}
+
+static int __devexit ds1302_rtc_remove(struct platform_device *pdev)
+{
+ struct ds1302_rtc *rtc = platform_get_drvdata(pdev);
+
+ if (likely(rtc->rtc_dev))
+ rtc_device_unregister(rtc->rtc_dev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(rtc);
+
+ return 0;
+}
+
+static struct platform_driver ds1302_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = ds1302_rtc_probe,
+ .remove = __devexit_p(ds1302_rtc_remove),
+};
+
+static int __init ds1302_rtc_init(void)
+{
+ return platform_driver_register(&ds1302_platform_driver);
+}
+
+static void __exit ds1302_rtc_exit(void)
+{
+ platform_driver_unregister(&ds1302_platform_driver);
+}
+
+module_init(ds1302_rtc_init);
+module_exit(ds1302_rtc_exit);
+
+MODULE_DESCRIPTION("Dallas DS1302 RTC driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Paul Mundt, David McCullough");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index bc1c7fe94ad3..f389a28720d2 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -256,7 +256,7 @@ ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr,
struct i2c_msg msg[2];
int result;
- client = to_i2c_client(container_of(kobj, struct device, kobj));
+ client = kobj_to_i2c_client(kobj);
ds1307 = i2c_get_clientdata(client);
if (unlikely(off >= NVRAM_SIZE))
@@ -294,7 +294,7 @@ ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr,
u8 buffer[NVRAM_SIZE + 1];
int ret;
- client = to_i2c_client(container_of(kobj, struct device, kobj));
+ client = kobj_to_i2c_client(kobj);
if (unlikely(off >= NVRAM_SIZE))
return -EFBIG;
@@ -412,11 +412,6 @@ read_rtc:
*/
tmp = ds1307->regs[DS1307_REG_SECS];
switch (ds1307->type) {
- case ds_1340:
- /* FIXME read register with DS1340_BIT_OSF, use that to
- * trigger the "set time" warning (*after* restarting the
- * oscillator!) instead of this weaker ds1307/m41t00 test.
- */
case ds_1307:
case m41t00:
/* clock halted? turn it on, so clock can tick. */
@@ -440,6 +435,24 @@ read_rtc:
goto read_rtc;
}
break;
+ case ds_1340:
+ /* clock halted? turn it on, so clock can tick. */
+ if (tmp & DS1340_BIT_nEOSC)
+ i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0);
+
+ tmp = i2c_smbus_read_byte_data(client, DS1340_REG_FLAG);
+ if (tmp < 0) {
+ pr_debug("read error %d\n", tmp);
+ err = -EIO;
+ goto exit_free;
+ }
+
+ /* oscillator fault? clear flag, and warn */
+ if (tmp & DS1340_BIT_OSF) {
+ i2c_smbus_write_byte_data(client, DS1340_REG_FLAG, 0);
+ dev_warn(&client->dev, "SET TIME!\n");
+ }
+ break;
case ds_1337:
case ds_1339:
break;
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
new file mode 100644
index 000000000000..d74b8086fa31
--- /dev/null
+++ b/drivers/rtc/rtc-ds1511.c
@@ -0,0 +1,656 @@
+/*
+ * An rtc driver for the Dallas DS1511
+ *
+ * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ * Copyright (C) 2007 Andrew Sharp <andy.sharp@onstor.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Real time clock driver for the Dallas 1511 chip, which also
+ * contains a watchdog timer. There is a tiny amount of code that
+ * platform code could use to mess with the watchdog device a little
+ * bit, but not a full watchdog driver.
+ */
+
+#include <linux/bcd.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#define DRV_VERSION "0.6"
+
+enum ds1511reg {
+ DS1511_SEC = 0x0,
+ DS1511_MIN = 0x1,
+ DS1511_HOUR = 0x2,
+ DS1511_DOW = 0x3,
+ DS1511_DOM = 0x4,
+ DS1511_MONTH = 0x5,
+ DS1511_YEAR = 0x6,
+ DS1511_CENTURY = 0x7,
+ DS1511_AM1_SEC = 0x8,
+ DS1511_AM2_MIN = 0x9,
+ DS1511_AM3_HOUR = 0xa,
+ DS1511_AM4_DATE = 0xb,
+ DS1511_WD_MSEC = 0xc,
+ DS1511_WD_SEC = 0xd,
+ DS1511_CONTROL_A = 0xe,
+ DS1511_CONTROL_B = 0xf,
+ DS1511_RAMADDR_LSB = 0x10,
+ DS1511_RAMDATA = 0x13
+};
+
+#define DS1511_BLF1 0x80
+#define DS1511_BLF2 0x40
+#define DS1511_PRS 0x20
+#define DS1511_PAB 0x10
+#define DS1511_TDF 0x08
+#define DS1511_KSF 0x04
+#define DS1511_WDF 0x02
+#define DS1511_IRQF 0x01
+#define DS1511_TE 0x80
+#define DS1511_CS 0x40
+#define DS1511_BME 0x20
+#define DS1511_TPE 0x10
+#define DS1511_TIE 0x08
+#define DS1511_KIE 0x04
+#define DS1511_WDE 0x02
+#define DS1511_WDS 0x01
+#define DS1511_RAM_MAX 0xff
+
+#define RTC_CMD DS1511_CONTROL_B
+#define RTC_CMD1 DS1511_CONTROL_A
+
+#define RTC_ALARM_SEC DS1511_AM1_SEC
+#define RTC_ALARM_MIN DS1511_AM2_MIN
+#define RTC_ALARM_HOUR DS1511_AM3_HOUR
+#define RTC_ALARM_DATE DS1511_AM4_DATE
+
+#define RTC_SEC DS1511_SEC
+#define RTC_MIN DS1511_MIN
+#define RTC_HOUR DS1511_HOUR
+#define RTC_DOW DS1511_DOW
+#define RTC_DOM DS1511_DOM
+#define RTC_MON DS1511_MONTH
+#define RTC_YEAR DS1511_YEAR
+#define RTC_CENTURY DS1511_CENTURY
+
+#define RTC_TIE DS1511_TIE
+#define RTC_TE DS1511_TE
+
+struct rtc_plat_data {
+ struct rtc_device *rtc;
+ void __iomem *ioaddr; /* virtual base address */
+ unsigned long baseaddr; /* physical base address */
+ int size; /* amount of memory mapped */
+ int irq;
+ unsigned int irqen;
+ int alrm_sec;
+ int alrm_min;
+ int alrm_hour;
+ int alrm_mday;
+};
+
+static DEFINE_SPINLOCK(ds1511_lock);
+
+static __iomem char *ds1511_base;
+static u32 reg_spacing = 1;
+
+ static noinline void
+rtc_write(uint8_t val, uint32_t reg)
+{
+ writeb(val, ds1511_base + (reg * reg_spacing));
+}
+
+ static inline void
+rtc_write_alarm(uint8_t val, enum ds1511reg reg)
+{
+ rtc_write((val | 0x80), reg);
+}
+
+ static noinline uint8_t
+rtc_read(enum ds1511reg reg)
+{
+ return readb(ds1511_base + (reg * reg_spacing));
+}
+
+ static inline void
+rtc_disable_update(void)
+{
+ rtc_write((rtc_read(RTC_CMD) & ~RTC_TE), RTC_CMD);
+}
+
+ static void
+rtc_enable_update(void)
+{
+ rtc_write((rtc_read(RTC_CMD) | RTC_TE), RTC_CMD);
+}
+
+/*
+ * #define DS1511_WDOG_RESET_SUPPORT
+ *
+ * Uncomment this if you want to use these routines in
+ * some platform code.
+ */
+#ifdef DS1511_WDOG_RESET_SUPPORT
+/*
+ * just enough code to set the watchdog timer so that it
+ * will reboot the system
+ */
+ void
+ds1511_wdog_set(unsigned long deciseconds)
+{
+ /*
+ * the wdog timer can take 99.99 seconds
+ */
+ deciseconds %= 10000;
+ /*
+ * set the wdog values in the wdog registers
+ */
+ rtc_write(BIN2BCD(deciseconds % 100), DS1511_WD_MSEC);
+ rtc_write(BIN2BCD(deciseconds / 100), DS1511_WD_SEC);
+ /*
+ * set wdog enable and wdog 'steering' bit to issue a reset
+ */
+ rtc_write(DS1511_WDE | DS1511_WDS, RTC_CMD);
+}
+
+ void
+ds1511_wdog_disable(void)
+{
+ /*
+ * clear wdog enable and wdog 'steering' bits
+ */
+ rtc_write(rtc_read(RTC_CMD) & ~(DS1511_WDE | DS1511_WDS), RTC_CMD);
+ /*
+ * clear the wdog counter
+ */
+ rtc_write(0, DS1511_WD_MSEC);
+ rtc_write(0, DS1511_WD_SEC);
+}
+#endif
+
+/*
+ * set the rtc chip's idea of the time.
+ * stupidly, some callers call with year unmolested;
+ * and some call with year = year - 1900. thanks.
+ */
+ int
+ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
+{
+ u8 mon, day, dow, hrs, min, sec, yrs, cen;
+ unsigned long flags;
+
+ /*
+ * won't have to change this for a while
+ */
+ if (rtc_tm->tm_year < 1900) {
+ rtc_tm->tm_year += 1900;
+ }
+
+ if (rtc_tm->tm_year < 1970) {
+ return -EINVAL;
+ }
+ yrs = rtc_tm->tm_year % 100;
+ cen = rtc_tm->tm_year / 100;
+ mon = rtc_tm->tm_mon + 1; /* tm_mon starts at zero */
+ day = rtc_tm->tm_mday;
+ dow = rtc_tm->tm_wday & 0x7; /* automatic BCD */
+ hrs = rtc_tm->tm_hour;
+ min = rtc_tm->tm_min;
+ sec = rtc_tm->tm_sec;
+
+ if ((mon > 12) || (day == 0)) {
+ return -EINVAL;
+ }
+
+ if (day > rtc_month_days(rtc_tm->tm_mon, rtc_tm->tm_year)) {
+ return -EINVAL;
+ }
+
+ if ((hrs >= 24) || (min >= 60) || (sec >= 60)) {
+ return -EINVAL;
+ }
+
+ /*
+ * each register is a different number of valid bits
+ */
+ sec = BIN2BCD(sec) & 0x7f;
+ min = BIN2BCD(min) & 0x7f;
+ hrs = BIN2BCD(hrs) & 0x3f;
+ day = BIN2BCD(day) & 0x3f;
+ mon = BIN2BCD(mon) & 0x1f;
+ yrs = BIN2BCD(yrs) & 0xff;
+ cen = BIN2BCD(cen) & 0xff;
+
+ spin_lock_irqsave(&ds1511_lock, flags);
+ rtc_disable_update();
+ rtc_write(cen, RTC_CENTURY);
+ rtc_write(yrs, RTC_YEAR);
+ rtc_write((rtc_read(RTC_MON) & 0xe0) | mon, RTC_MON);
+ rtc_write(day, RTC_DOM);
+ rtc_write(hrs, RTC_HOUR);
+ rtc_write(min, RTC_MIN);
+ rtc_write(sec, RTC_SEC);
+ rtc_write(dow, RTC_DOW);
+ rtc_enable_update();
+ spin_unlock_irqrestore(&ds1511_lock, flags);
+
+ return 0;
+}
+
+ int
+ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
+{
+ unsigned int century;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ds1511_lock, flags);
+ rtc_disable_update();
+
+ rtc_tm->tm_sec = rtc_read(RTC_SEC) & 0x7f;
+ rtc_tm->tm_min = rtc_read(RTC_MIN) & 0x7f;
+ rtc_tm->tm_hour = rtc_read(RTC_HOUR) & 0x3f;
+ rtc_tm->tm_mday = rtc_read(RTC_DOM) & 0x3f;
+ rtc_tm->tm_wday = rtc_read(RTC_DOW) & 0x7;
+ rtc_tm->tm_mon = rtc_read(RTC_MON) & 0x1f;
+ rtc_tm->tm_year = rtc_read(RTC_YEAR) & 0x7f;
+ century = rtc_read(RTC_CENTURY);
+
+ rtc_enable_update();
+ spin_unlock_irqrestore(&ds1511_lock, flags);
+
+ rtc_tm->tm_sec = BCD2BIN(rtc_tm->tm_sec);
+ rtc_tm->tm_min = BCD2BIN(rtc_tm->tm_min);
+ rtc_tm->tm_hour = BCD2BIN(rtc_tm->tm_hour);
+ rtc_tm->tm_mday = BCD2BIN(rtc_tm->tm_mday);
+ rtc_tm->tm_wday = BCD2BIN(rtc_tm->tm_wday);
+ rtc_tm->tm_mon = BCD2BIN(rtc_tm->tm_mon);
+ rtc_tm->tm_year = BCD2BIN(rtc_tm->tm_year);
+ century = BCD2BIN(century) * 100;
+
+ /*
+ * Account for differences between how the RTC uses the values
+ * and how they are defined in a struct rtc_time.
+ */
+ century += rtc_tm->tm_year;
+ rtc_tm->tm_year = century - 1900;
+
+ rtc_tm->tm_mon--;
+
+ if (rtc_valid_tm(rtc_tm) < 0) {
+ dev_err(dev, "retrieved date/time is not valid.\n");
+ rtc_time_to_tm(0, rtc_tm);
+ }
+ return 0;
+}
+
+/*
+ * write the alarm register settings
+ *
+ * we only have the use to interrupt every second, otherwise
+ * known as the update interrupt, or the interrupt if the whole
+ * date/hours/mins/secs matches. the ds1511 has many more
+ * permutations, but the kernel doesn't.
+ */
+ static void
+ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
+ rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_mday) & 0x3f,
+ RTC_ALARM_DATE);
+ rtc_write(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_hour) & 0x3f,
+ RTC_ALARM_HOUR);
+ rtc_write(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_min) & 0x7f,
+ RTC_ALARM_MIN);
+ rtc_write(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
+ 0x80 : BIN2BCD(pdata->alrm_sec) & 0x7f,
+ RTC_ALARM_SEC);
+ rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
+ rtc_read(RTC_CMD1); /* clear interrupts */
+ spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
+}
+
+ static int
+ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq < 0) {
+ return -EINVAL;
+ }
+ pdata->alrm_mday = alrm->time.tm_mday;
+ pdata->alrm_hour = alrm->time.tm_hour;
+ pdata->alrm_min = alrm->time.tm_min;
+ pdata->alrm_sec = alrm->time.tm_sec;
+ if (alrm->enabled) {
+ pdata->irqen |= RTC_AF;
+ }
+ ds1511_rtc_update_alarm(pdata);
+ return 0;
+}
+
+ static int
+ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq < 0) {
+ return -EINVAL;
+ }
+ alrm->time.tm_mday = pdata->alrm_mday < 0 ? 0 : pdata->alrm_mday;
+ alrm->time.tm_hour = pdata->alrm_hour < 0 ? 0 : pdata->alrm_hour;
+ alrm->time.tm_min = pdata->alrm_min < 0 ? 0 : pdata->alrm_min;
+ alrm->time.tm_sec = pdata->alrm_sec < 0 ? 0 : pdata->alrm_sec;
+ alrm->enabled = (pdata->irqen & RTC_AF) ? 1 : 0;
+ return 0;
+}
+
+ static irqreturn_t
+ds1511_interrupt(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ unsigned long events = RTC_IRQF;
+
+ /*
+ * read and clear interrupt
+ */
+ if (!(rtc_read(RTC_CMD1) & DS1511_IRQF)) {
+ return IRQ_NONE;
+ }
+ if (rtc_read(RTC_ALARM_SEC) & 0x80) {
+ events |= RTC_UF;
+ } else {
+ events |= RTC_AF;
+ }
+ rtc_update_irq(pdata->rtc, 1, events);
+ return IRQ_HANDLED;
+}
+
+ static void
+ds1511_rtc_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq >= 0) {
+ pdata->irqen = 0;
+ ds1511_rtc_update_alarm(pdata);
+ }
+}
+
+ static int
+ds1511_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ if (pdata->irq < 0) {
+ return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
+ }
+ switch (cmd) {
+ case RTC_AIE_OFF:
+ pdata->irqen &= ~RTC_AF;
+ ds1511_rtc_update_alarm(pdata);
+ break;
+ case RTC_AIE_ON:
+ pdata->irqen |= RTC_AF;
+ ds1511_rtc_update_alarm(pdata);
+ break;
+ case RTC_UIE_OFF:
+ pdata->irqen &= ~RTC_UF;
+ ds1511_rtc_update_alarm(pdata);
+ break;
+ case RTC_UIE_ON:
+ pdata->irqen |= RTC_UF;
+ ds1511_rtc_update_alarm(pdata);
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+static const struct rtc_class_ops ds1511_rtc_ops = {
+ .read_time = ds1511_rtc_read_time,
+ .set_time = ds1511_rtc_set_time,
+ .read_alarm = ds1511_rtc_read_alarm,
+ .set_alarm = ds1511_rtc_set_alarm,
+ .release = ds1511_rtc_release,
+ .ioctl = ds1511_rtc_ioctl,
+};
+
+ static ssize_t
+ds1511_nvram_read(struct kobject *kobj, struct bin_attribute *ba,
+ char *buf, loff_t pos, size_t size)
+{
+ ssize_t count;
+
+ /*
+ * if count is more than one, turn on "burst" mode;
+ * turn it off when you're done
+ */
+ if (size > 1) {
+ rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD);
+ }
+ if (pos > DS1511_RAM_MAX) {
+ pos = DS1511_RAM_MAX;
+ }
+ if (size + pos > DS1511_RAM_MAX + 1) {
+ size = DS1511_RAM_MAX - pos + 1;
+ }
+ rtc_write(pos, DS1511_RAMADDR_LSB);
+ for (count = 0; size > 0; count++, size--) {
+ *buf++ = rtc_read(DS1511_RAMDATA);
+ }
+ if (count > 1) {
+ rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD);
+ }
+ return count;
+}
+
+ static ssize_t
+ds1511_nvram_write(struct kobject *kobj, struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t size)
+{
+ ssize_t count;
+
+ /*
+ * if count is more than one, turn on "burst" mode;
+ * turn it off when you're done
+ */
+ if (size > 1) {
+ rtc_write((rtc_read(RTC_CMD) | DS1511_BME), RTC_CMD);
+ }
+ if (pos > DS1511_RAM_MAX) {
+ pos = DS1511_RAM_MAX;
+ }
+ if (size + pos > DS1511_RAM_MAX + 1) {
+ size = DS1511_RAM_MAX - pos + 1;
+ }
+ rtc_write(pos, DS1511_RAMADDR_LSB);
+ for (count = 0; size > 0; count++, size--) {
+ rtc_write(*buf++, DS1511_RAMDATA);
+ }
+ if (count > 1) {
+ rtc_write((rtc_read(RTC_CMD) & ~DS1511_BME), RTC_CMD);
+ }
+ return count;
+}
+
+static struct bin_attribute ds1511_nvram_attr = {
+ .attr = {
+ .name = "nvram",
+ .mode = S_IRUGO | S_IWUGO,
+ .owner = THIS_MODULE,
+ },
+ .size = DS1511_RAM_MAX,
+ .read = ds1511_nvram_read,
+ .write = ds1511_nvram_write,
+};
+
+ static int __devinit
+ds1511_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc;
+ struct resource *res;
+ struct rtc_plat_data *pdata = NULL;
+ int ret = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ return -ENODEV;
+ }
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ return -ENOMEM;
+ }
+ pdata->irq = -1;
+ pdata->size = res->end - res->start + 1;
+ if (!request_mem_region(res->start, pdata->size, pdev->name)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ pdata->baseaddr = res->start;
+ pdata->size = pdata->size;
+ ds1511_base = ioremap(pdata->baseaddr, pdata->size);
+ if (!ds1511_base) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pdata->ioaddr = ds1511_base;
+ pdata->irq = platform_get_irq(pdev, 0);
+
+ /*
+ * turn on the clock and the crystal, etc.
+ */
+ rtc_write(0, RTC_CMD);
+ rtc_write(0, RTC_CMD1);
+ /*
+ * clear the wdog counter
+ */
+ rtc_write(0, DS1511_WD_MSEC);
+ rtc_write(0, DS1511_WD_SEC);
+ /*
+ * start the clock
+ */
+ rtc_enable_update();
+
+ /*
+ * check for a dying battery
+ */
+ if (rtc_read(RTC_CMD1) & DS1511_BLF1) {
+ dev_warn(&pdev->dev, "voltage-low detected.\n");
+ }
+
+ /*
+ * if the platform has an interrupt in mind for this device,
+ * then by all means, set it
+ */
+ if (pdata->irq >= 0) {
+ rtc_read(RTC_CMD1);
+ if (request_irq(pdata->irq, ds1511_interrupt,
+ IRQF_DISABLED | IRQF_SHARED, pdev->name, pdev) < 0) {
+
+ dev_warn(&pdev->dev, "interrupt not available.\n");
+ pdata->irq = -1;
+ }
+ }
+
+ rtc = rtc_device_register(pdev->name, &pdev->dev, &ds1511_rtc_ops,
+ THIS_MODULE);
+ if (IS_ERR(rtc)) {
+ ret = PTR_ERR(rtc);
+ goto out;
+ }
+ pdata->rtc = rtc;
+ platform_set_drvdata(pdev, pdata);
+ ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
+ if (ret) {
+ goto out;
+ }
+ return 0;
+ out:
+ if (pdata->rtc) {
+ rtc_device_unregister(pdata->rtc);
+ }
+ if (pdata->irq >= 0) {
+ free_irq(pdata->irq, pdev);
+ }
+ if (ds1511_base) {
+ iounmap(ds1511_base);
+ ds1511_base = NULL;
+ }
+ if (pdata->baseaddr) {
+ release_mem_region(pdata->baseaddr, pdata->size);
+ }
+
+ kfree(pdata);
+ return ret;
+}
+
+ static int __devexit
+ds1511_rtc_remove(struct platform_device *pdev)
+{
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+
+ sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr);
+ rtc_device_unregister(pdata->rtc);
+ pdata->rtc = NULL;
+ if (pdata->irq >= 0) {
+ /*
+ * disable the alarm interrupt
+ */
+ rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD);
+ rtc_read(RTC_CMD1);
+ free_irq(pdata->irq, pdev);
+ }
+ iounmap(pdata->ioaddr);
+ ds1511_base = NULL;
+ release_mem_region(pdata->baseaddr, pdata->size);
+ kfree(pdata);
+ return 0;
+}
+
+static struct platform_driver ds1511_rtc_driver = {
+ .probe = ds1511_rtc_probe,
+ .remove = __devexit_p(ds1511_rtc_remove),
+ .driver = {
+ .name = "ds1511",
+ .owner = THIS_MODULE,
+ },
+};
+
+ static int __init
+ds1511_rtc_init(void)
+{
+ return platform_driver_register(&ds1511_rtc_driver);
+}
+
+ static void __exit
+ds1511_rtc_exit(void)
+{
+ return platform_driver_unregister(&ds1511_rtc_driver);
+}
+
+module_init(ds1511_rtc_init);
+module_exit(ds1511_rtc_exit);
+
+MODULE_AUTHOR("Andrew Sharp <andy.sharp@onstor.com>");
+MODULE_DESCRIPTION("Dallas DS1511 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
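
For readers following ds1511_rtc_update_alarm() in the new driver above: writing 0x80 to an alarm register tells the DS1511 to treat that field as "don't care", which is how the same routine serves both the exact-match alarm and the once-per-second update interrupt (all fields wildcarded). A minimal stand-alone sketch of that encoding, with a hypothetical helper name, for illustration only:

#include <stdio.h>

#define DS1511_ALARM_WILDCARD 0x80              /* "don't care" for this field */
#define BIN2BCD(v) ((((v) / 10) << 4) | ((v) % 10))

/* Mirrors the ternary logic in ds1511_rtc_update_alarm(): a negative field
 * value, or an enabled update interrupt, wildcards the field.
 */
static unsigned char alarm_field(int value, int update_irq_enabled, unsigned char mask)
{
	if (value < 0 || update_irq_enabled)
		return DS1511_ALARM_WILDCARD;
	return BIN2BCD(value) & mask;
}

int main(void)
{
	/* exact match at 07:30:00 on day 15 of the month */
	printf("mday=%02x hour=%02x min=%02x sec=%02x\n",
	       alarm_field(15, 0, 0x3f), alarm_field(7, 0, 0x3f),
	       alarm_field(30, 0, 0x7f), alarm_field(0, 0, 0x7f));
	/* update interrupt: every field wildcarded, so the alarm fires each second */
	printf("mday=%02x (update mode)\n", alarm_field(15, 1, 0x3f));
	return 0;
}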
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index c973ba94c422..8b3997007506 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -163,27 +163,17 @@ static int pcf8583_read_mem(struct i2c_client *client, struct rtc_mem *mem)
static int pcf8583_write_mem(struct i2c_client *client, struct rtc_mem *mem)
{
- unsigned char addr[1];
- struct i2c_msg msgs[2] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = addr,
- }, {
- .addr = client->addr,
- .flags = I2C_M_NOSTART,
- .len = mem->nr,
- .buf = mem->data,
- }
- };
+ unsigned char buf[9];
+ int ret;
- if (mem->loc < 8)
+ if (mem->loc < 8 || mem->nr > 8)
return -EINVAL;
- addr[0] = mem->loc;
+ buf[0] = mem->loc;
+ memcpy(buf + 1, mem->data, mem->nr);
- return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
+ ret = i2c_master_send(client, buf, mem->nr + 1);
+ return ret == mem->nr + 1 ? 0 : -EIO;
}
static int pcf8583_rtc_read_time(struct device *dev, struct rtc_time *tm)
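
The rewritten pcf8583_write_mem() above replaces the two-message i2c_transfer() (which depended on I2C_M_NOSTART support in the adapter) with a single i2c_master_send() of one combined buffer: the CMOS RAM address byte followed by up to eight data bytes. A minimal sketch of that buffer layout, using a hypothetical helper name, assuming nothing beyond what the hunk shows:

#include <stdio.h>
#include <string.h>

/* Pack a pcf8583-style memory write: byte 0 is the RAM address (>= 8, since
 * locations 0-7 hold the clock registers), bytes 1..nr carry the payload.
 * Returns the length to hand to a single i2c_master_send(), or -1 on error.
 * The out buffer must hold at least nr + 1 bytes.
 */
static int pack_pcf8583_write(unsigned char *out, unsigned char loc,
			      const unsigned char *data, size_t nr)
{
	if (loc < 8 || nr > 8)
		return -1;
	out[0] = loc;
	memcpy(out + 1, data, nr);
	return (int)(nr + 1);
}

int main(void)
{
	unsigned char out[9], data[4] = { 0xde, 0xad, 0xbe, 0xef };
	int len = pack_pcf8583_write(out, 0x10, data, sizeof(data));

	printf("send %d bytes, first byte is address 0x%02x\n", len, out[0]);
	return 0;
}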
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
new file mode 100644
index 000000000000..a64626a82d0b
--- /dev/null
+++ b/drivers/rtc/rtc-r9701.c
@@ -0,0 +1,178 @@
+/*
+ * Driver for Epson RTC-9701JE
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * Based on rtc-max6902.c
+ *
+ * Copyright (C) 2006 8D Technologies inc.
+ * Copyright (C) 2004 Compulab Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/spi/spi.h>
+#include <linux/bcd.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+
+#define RSECCNT 0x00 /* Second Counter */
+#define RMINCNT 0x01 /* Minute Counter */
+#define RHRCNT 0x02 /* Hour Counter */
+#define RWKCNT 0x03 /* Week Counter */
+#define RDAYCNT 0x04 /* Day Counter */
+#define RMONCNT 0x05 /* Month Counter */
+#define RYRCNT 0x06 /* Year Counter */
+#define R100CNT 0x07 /* Y100 Counter */
+#define RMINAR 0x08 /* Minute Alarm */
+#define RHRAR 0x09 /* Hour Alarm */
+#define RWKAR 0x0a /* Week/Day Alarm */
+#define RTIMCNT 0x0c /* Interval Timer */
+#define REXT 0x0d /* Extension Register */
+#define RFLAG 0x0e /* RTC Flag Register */
+#define RCR 0x0f /* RTC Control Register */
+
+static int write_reg(struct device *dev, int address, unsigned char data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ unsigned char buf[2];
+
+ buf[0] = address & 0x7f;
+ buf[1] = data;
+
+ return spi_write(spi, buf, ARRAY_SIZE(buf));
+}
+
+static int read_regs(struct device *dev, unsigned char *regs, int no_regs)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ u8 txbuf[1], rxbuf[1];
+ int k, ret;
+
+ ret = 0;
+
+ for (k = 0; ret == 0 && k < no_regs; k++) {
+ txbuf[0] = 0x80 | regs[k];
+ ret = spi_write_then_read(spi, txbuf, 1, rxbuf, 1);
+ regs[k] = rxbuf[0];
+ }
+
+ return ret;
+}
+
+static int r9701_get_datetime(struct device *dev, struct rtc_time *dt)
+{
+ unsigned long time;
+ int ret;
+ unsigned char buf[] = { RSECCNT, RMINCNT, RHRCNT,
+ RDAYCNT, RMONCNT, RYRCNT };
+
+ ret = read_regs(dev, buf, ARRAY_SIZE(buf));
+ if (ret)
+ return ret;
+
+ memset(dt, 0, sizeof(*dt));
+
+ dt->tm_sec = BCD2BIN(buf[0]); /* RSECCNT */
+ dt->tm_min = BCD2BIN(buf[1]); /* RMINCNT */
+ dt->tm_hour = BCD2BIN(buf[2]); /* RHRCNT */
+
+ dt->tm_mday = BCD2BIN(buf[3]); /* RDAYCNT */
+ dt->tm_mon = BCD2BIN(buf[4]) - 1; /* RMONCNT */
+ dt->tm_year = BCD2BIN(buf[5]) + 100; /* RYRCNT */
+
+ /* the rtc device may contain illegal values on power up
+ * according to the data sheet. make sure they are valid.
+ */
+
+ return rtc_valid_tm(dt);
+}
+
+static int r9701_set_datetime(struct device *dev, struct rtc_time *dt)
+{
+ int ret, year;
+
+ year = dt->tm_year + 1900;
+ if (year >= 2100 || year < 2000)
+ return -EINVAL;
+
+ ret = write_reg(dev, RHRCNT, BIN2BCD(dt->tm_hour));
+ ret = ret ? ret : write_reg(dev, RMINCNT, BIN2BCD(dt->tm_min));
+ ret = ret ? ret : write_reg(dev, RSECCNT, BIN2BCD(dt->tm_sec));
+ ret = ret ? ret : write_reg(dev, RDAYCNT, BIN2BCD(dt->tm_mday));
+ ret = ret ? ret : write_reg(dev, RMONCNT, BIN2BCD(dt->tm_mon + 1));
+ ret = ret ? ret : write_reg(dev, RYRCNT, BIN2BCD(dt->tm_year - 100));
+ ret = ret ? ret : write_reg(dev, RWKCNT, 1 << dt->tm_wday);
+
+ return ret;
+}
+
+static const struct rtc_class_ops r9701_rtc_ops = {
+ .read_time = r9701_get_datetime,
+ .set_time = r9701_set_datetime,
+};
+
+static int __devinit r9701_probe(struct spi_device *spi)
+{
+ struct rtc_device *rtc;
+ unsigned char tmp;
+ int res;
+
+ rtc = rtc_device_register("r9701",
+ &spi->dev, &r9701_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ dev_set_drvdata(&spi->dev, rtc);
+
+ tmp = R100CNT;
+ res = read_regs(&spi->dev, &tmp, 1);
+ if (res || tmp != 0x20) {
+ rtc_device_unregister(rtc);
+ return res;
+ }
+
+ return 0;
+}
+
+static int __devexit r9701_remove(struct spi_device *spi)
+{
+ struct rtc_device *rtc = dev_get_drvdata(&spi->dev);
+
+ rtc_device_unregister(rtc);
+ return 0;
+}
+
+static struct spi_driver r9701_driver = {
+ .driver = {
+ .name = "rtc-r9701",
+ .owner = THIS_MODULE,
+ },
+ .probe = r9701_probe,
+ .remove = __devexit_p(r9701_remove),
+};
+
+static __init int r9701_init(void)
+{
+ return spi_register_driver(&r9701_driver);
+}
+module_init(r9701_init);
+
+static __exit void r9701_exit(void)
+{
+ spi_unregister_driver(&r9701_driver);
+}
+module_exit(r9701_exit);
+
+MODULE_DESCRIPTION("r9701 spi RTC driver");
+MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
+MODULE_LICENSE("GPL");
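
The write_reg()/read_regs() pair in the new rtc-r9701 driver encodes the chip's SPI register convention: bit 7 of the address byte selects the direction, a write sends the address with bit 7 clear followed by one data byte, a read sends the address with bit 7 set and clocks one byte back. A small illustrative encoding sketch (helper names are not from the driver, which calls spi_write() and spi_write_then_read() directly):

#include <stdio.h>

#define R9701_READ 0x80		/* bit 7 set: read access */

static void encode_write(unsigned char buf[2], int address, unsigned char data)
{
	buf[0] = address & 0x7f;	/* write: bit 7 clear */
	buf[1] = data;			/* one data byte follows */
}

static unsigned char encode_read(int address)
{
	return R9701_READ | address;	/* read: one data byte is clocked back */
}

int main(void)
{
	unsigned char buf[2];

	encode_write(buf, 0x0f, 0x00);	/* e.g. clear the control register */
	printf("write: %02x %02x, read cmd for reg 0x0e: %02x\n",
	       buf[0], buf[1], encode_read(0x0e));
	return 0;
}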
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index e2041b4d0c85..86766f1f2496 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -20,6 +20,7 @@
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/clk.h>
+#include <linux/log2.h>
#include <asm/hardware.h>
#include <asm/uaccess.h>
@@ -309,9 +310,7 @@ static int s3c_rtc_ioctl(struct device *dev,
break;
case RTC_IRQP_SET:
- /* check for power of 2 */
-
- if ((arg & (arg-1)) != 0 || arg < 1) {
+ if (!is_power_of_2(arg)) {
ret = -EINVAL;
goto exit;
}
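
The open-coded test being removed here, (arg & (arg-1)) != 0 || arg < 1, rejects exactly the values for which is_power_of_2() from <linux/log2.h> returns false; the helper is defined the same way, so the substitution does not change behaviour. A one-line equivalent for reference, assuming an unsigned long argument as in the ioctl:

/* Same predicate as is_power_of_2(): non-zero and only one bit set. */
static inline int freq_is_valid(unsigned long arg)
{
	return arg != 0 && (arg & (arg - 1)) == 0;
}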
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 2eb38520f0c8..ee253cc45de1 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -357,23 +357,15 @@ static int sa1100_rtc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int sa1100_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
- if (pdev->dev.power.power_state.event != state.event) {
- if (state.event == PM_EVENT_SUSPEND &&
- device_may_wakeup(&pdev->dev))
- enable_irq_wake(IRQ_RTCAlrm);
-
- pdev->dev.power.power_state = state;
- }
+ if (device_may_wakeup(&pdev->dev))
+ enable_irq_wake(IRQ_RTCAlrm);
return 0;
}
static int sa1100_rtc_resume(struct platform_device *pdev)
{
- if (pdev->dev.power.power_state.event != PM_EVENT_ON) {
- if (device_may_wakeup(&pdev->dev))
- disable_irq_wake(IRQ_RTCAlrm);
- pdev->dev.power.power_state = PMSG_ON;
- }
+ if (device_may_wakeup(&pdev->dev))
+ disable_irq_wake(IRQ_RTCAlrm);
return 0;
}
#else
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 2ae0e8304d3a..4d27ccc4fc06 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -17,6 +17,13 @@
/* device attributes */
+/*
+ * NOTE: RTC times displayed in sysfs use the RTC's timezone. That's
+ * ideally UTC. However, PCs that also boot to MS-Windows normally use
+ * the local time and change it to match daylight saving time. That affects
+ * attributes including date, time, since_epoch, and wakealarm.
+ */
+
static ssize_t
rtc_sysfs_show_name(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -113,13 +120,13 @@ rtc_sysfs_show_wakealarm(struct device *dev, struct device_attribute *attr,
unsigned long alarm;
struct rtc_wkalrm alm;
- /* Don't show disabled alarms; but the RTC could leave the
- * alarm enabled after it's already triggered. Alarms are
- * conceptually one-shot, even though some common hardware
- * (PCs) doesn't actually work that way.
+ /* Don't show disabled alarms. For uniformity, RTC alarms are
+ * conceptually one-shot, even though some common RTCs (on PCs)
+ * don't actually work that way.
*
- * REVISIT maybe we should require RTC implementations to
- * disable the RTC alarm after it triggers, for uniformity.
+ * NOTE: RTC implementations where the alarm doesn't match an
+ * exact YYYY-MM-DD HH:MM[:SS] date *must* disable their RTC
+ * alarms after they trigger, to ensure one-shot semantics.
*/
retval = rtc_read_alarm(to_rtc_device(dev), &alm);
if (retval == 0 && alm.enabled) {
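
The attributes named in the note above (date, time, since_epoch, wakealarm) are plain text files under the RTC's sysfs directory. A minimal user-space sketch for reading them; the device name rtc0 and the /sys/class/rtc path are assumptions about the running system, not something this patch defines:

#include <stdio.h>

/* Read one sysfs attribute of the assumed rtc0 device and print it. */
static void show_rtc_attr(const char *attr)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/rtc/rtc0/%s", attr);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", attr, buf);	/* the value already ends in '\n' */
	fclose(f);
}

int main(void)
{
	show_rtc_attr("date");
	show_rtc_attr("time");
	show_rtc_attr("since_epoch");
	show_rtc_attr("wakealarm");	/* empty when no alarm is set */
	return 0;
}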
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d640427c74c8..d984e0fae630 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1057,12 +1057,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (device->features & DASD_FEATURE_ERPLOG) {
dasd_log_sense(cqr, irb);
}
- /* If we have no sense data, or we just don't want complex ERP
- * for this request, but if we have retries left, then just
- * reset this request and retry it in the fastpath
+ /*
+ * If we don't want complex ERP for this request, then just
+ * reset this request and retry it in the fastpath
*/
- if (!(cqr->irb.esw.esw0.erw.cons &&
- test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) &&
+ if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
cqr->retries > 0) {
DEV_MESSAGE(KERN_DEBUG, device,
"default ERP in fastpath (%i retries left)",
@@ -1707,7 +1706,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
req = (struct request *) cqr->callback_data;
dasd_profile_end(cqr->block, cqr, req);
- status = cqr->memdev->discipline->free_cp(cqr, req);
+ status = cqr->block->base->discipline->free_cp(cqr, req);
if (status <= 0)
error = status ? status : -EIO;
dasd_end_request(req, error);
@@ -1742,12 +1741,8 @@ restart:
/* Process requests that may be recovered */
if (cqr->status == DASD_CQR_NEED_ERP) {
- if (cqr->irb.esw.esw0.erw.cons &&
- test_bit(DASD_CQR_FLAGS_USE_ERP,
- &cqr->flags)) {
- erp_fn = base->discipline->erp_action(cqr);
- erp_fn(cqr);
- }
+ erp_fn = base->discipline->erp_action(cqr);
+ erp_fn(cqr);
goto restart;
}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index c361ab69ec00..f69714a0e9e7 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -164,7 +164,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
/* reset status to submit the request again... */
erp->status = DASD_CQR_FILLED;
- erp->retries = 1;
+ erp->retries = 10;
} else {
DEV_MESSAGE(KERN_ERR, device,
"No alternate channel path left (lpum=%x / "
@@ -301,8 +301,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
erp->function = dasd_3990_erp_action_4;
} else {
-
- if (sense[25] == 0x1D) { /* state change pending */
+ if (sense && (sense[25] == 0x1D)) { /* state change pending */
DEV_MESSAGE(KERN_INFO, device,
"waiting for state change pending "
@@ -311,7 +310,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
dasd_3990_erp_block_queue(erp, 30*HZ);
- } else if (sense[25] == 0x1E) { /* busy */
+ } else if (sense && (sense[25] == 0x1E)) { /* busy */
DEV_MESSAGE(KERN_INFO, device,
"busy - redriving request later, "
"%d retries left",
@@ -2120,6 +2119,34 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
*/
/*
+ * DASD_3990_ERP_CONTROL_CHECK
+ *
+ * DESCRIPTION
+ * Does a generic inspection if a control check occurred and sets up
+ * the related error recovery procedure
+ *
+ * PARAMETER
+ * erp pointer to the currently created default ERP
+ *
+ * RETURN VALUES
+ * erp_filled pointer to the erp
+ */
+
+static struct dasd_ccw_req *
+dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
+{
+ struct dasd_device *device = erp->startdev;
+
+ if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK
+ | SCHN_STAT_CHN_CTRL_CHK)) {
+ DEV_MESSAGE(KERN_DEBUG, device, "%s",
+ "channel or interface control check");
+ erp = dasd_3990_erp_action_4(erp, NULL);
+ }
+ return erp;
+}
+
+/*
* DASD_3990_ERP_INSPECT
*
* DESCRIPTION
@@ -2145,8 +2172,11 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
if (erp_new)
return erp_new;
+ /* check if no concurrent sense is available */
+ if (!erp->refers->irb.esw.esw0.erw.cons)
+ erp_new = dasd_3990_erp_control_check(erp);
/* distinguish between 24 and 32 byte sense data */
- if (sense[27] & DASD_SENSE_BIT_0) {
+ else if (sense[27] & DASD_SENSE_BIT_0) {
/* inspect the 24 byte sense data */
erp_new = dasd_3990_erp_inspect_24(erp, sense);
@@ -2285,6 +2315,17 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
// return 0; /* CCW doesn't match */
}
+ if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons)
+ return 0;
+
+ if ((cqr1->irb.esw.esw0.erw.cons == 0) &&
+ (cqr2->irb.esw.esw0.erw.cons == 0)) {
+ if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_CHN_CTRL_CHK)) ==
+ (cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_CHN_CTRL_CHK)))
+ return 1; /* match with ifcc */
+ }
/* check sense data; byte 0-2,25,27 */
if (!((memcmp (cqr1->irb.ecw, cqr2->irb.ecw, 3) == 0) &&
(cqr1->irb.ecw[27] == cqr2->irb.ecw[27]) &&
@@ -2560,17 +2601,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
return cqr;
}
- /* check if sense data are available */
- if (!cqr->irb.ecw) {
- DEV_MESSAGE(KERN_DEBUG, device,
- "ERP called witout sense data avail ..."
- "request %p - NO ERP possible", cqr);
-
- cqr->status = DASD_CQR_FAILED;
-
- return cqr;
-
- }
/* check if error happened before */
erp = dasd_3990_erp_in_erp(cqr);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 7779bfce1c31..3faf0538b328 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -415,6 +415,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->queue = dev_info->dcssblk_queue;
dev_info->gd->private_data = dev_info;
dev_info->gd->driverfs_dev = &dev_info->dev;
+ blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
+ blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
/*
* load the segment
*/
@@ -472,9 +474,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto unregister_dev;
- blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
- blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
-
add_disk(dev_info->gd);
switch (dev_info->segment_type) {
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index e3b3d390b4a3..2e616e33891d 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -332,7 +332,7 @@ sclp_tty_write_string(const unsigned char *str, int count)
if (sclp_ttybuf == NULL) {
while (list_empty(&sclp_tty_pages)) {
spin_unlock_irqrestore(&sclp_tty_lock, flags);
- if (in_interrupt())
+ if (in_atomic())
sclp_sync_wait();
else
wait_event(sclp_tty_waitq,
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 40cd21bc5cc4..68071622d4bb 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -400,7 +400,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
while (list_empty(&sclp_vt220_empty)) {
spin_unlock_irqrestore(&sclp_vt220_lock,
flags);
- if (in_interrupt())
+ if (in_atomic())
sclp_sync_wait();
else
wait_event(sclp_vt220_waitq,
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 3964056a9a47..03914fa81174 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -391,12 +391,24 @@ ccwgroup_remove (struct device *dev)
return 0;
}
+static void ccwgroup_shutdown(struct device *dev)
+{
+ struct ccwgroup_device *gdev;
+ struct ccwgroup_driver *gdrv;
+
+ gdev = to_ccwgroupdev(dev);
+ gdrv = to_ccwgroupdrv(dev->driver);
+ if (gdrv && gdrv->shutdown)
+ gdrv->shutdown(gdev);
+}
+
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.match = ccwgroup_bus_match,
.uevent = ccwgroup_uevent,
.probe = ccwgroup_probe,
.remove = ccwgroup_remove,
+ .shutdown = ccwgroup_shutdown,
};
/**
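
With ccwgroup_shutdown() wired into the bus type above, a ccwgroup driver can now supply a shutdown callback that is invoked on reboot or halt. A hedged, illustrative sketch of the driver side (driver name and callback body are hypothetical, not taken from this patch; probe/remove handlers are omitted for brevity):

/* Illustrative only: a ccwgroup driver providing a shutdown handler, which
 * the new ccwgroup_shutdown() bus callback will invoke for the grouped device.
 */
static void my_ccwgroup_shutdown(struct ccwgroup_device *gdev)
{
	/* quiesce the grouped device before the machine goes down */
}

static struct ccwgroup_driver my_ccwgroup_driver = {
	.name     = "my_ccwgroup",
	.owner    = THIS_MODULE,
	.shutdown = my_ccwgroup_shutdown,
};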
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index e7ba16a74ef7..007aaeb4f532 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -26,6 +26,25 @@
static void *sei_page;
+static int chsc_error_from_response(int response)
+{
+ switch (response) {
+ case 0x0001:
+ return 0;
+ case 0x0002:
+ case 0x0003:
+ case 0x0006:
+ case 0x0007:
+ case 0x0008:
+ case 0x000a:
+ return -EINVAL;
+ case 0x0004:
+ return -EOPNOTSUPP;
+ default:
+ return -EIO;
+ }
+}
+
struct chsc_ssd_area {
struct chsc_header request;
u16 :10;
@@ -75,11 +94,11 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
ret = (ccode == 3) ? -ENODEV : -EBUSY;
goto out_free;
}
- if (ssd_area->response.code != 0x0001) {
+ ret = chsc_error_from_response(ssd_area->response.code);
+ if (ret != 0) {
CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
schid.ssid, schid.sch_no,
ssd_area->response.code);
- ret = -EIO;
goto out_free;
}
if (!ssd_area->sch_valid) {
@@ -717,36 +736,15 @@ __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
return (ccode == 3) ? -ENODEV : -EBUSY;
switch (secm_area->response.code) {
- case 0x0001: /* Success. */
- ret = 0;
- break;
- case 0x0003: /* Invalid block. */
- case 0x0007: /* Invalid format. */
- case 0x0008: /* Other invalid block. */
- CIO_CRW_EVENT(2, "Error in chsc request block!\n");
- ret = -EINVAL;
- break;
- case 0x0004: /* Command not provided in model. */
- CIO_CRW_EVENT(2, "Model does not provide secm\n");
- ret = -EOPNOTSUPP;
- break;
- case 0x0102: /* cub adresses incorrect */
- CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
- ret = -EINVAL;
- break;
- case 0x0103: /* key error */
- CIO_CRW_EVENT(2, "Access key error in secm\n");
+ case 0x0102:
+ case 0x0103:
ret = -EINVAL;
- break;
- case 0x0105: /* error while starting */
- CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
- ret = -EIO;
- break;
default:
- CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
- secm_area->response.code);
- ret = -EIO;
+ ret = chsc_error_from_response(secm_area->response.code);
}
+ if (ret != 0)
+ CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
+ secm_area->response.code);
return ret;
}
@@ -827,27 +825,14 @@ int chsc_determine_channel_path_description(struct chp_id chpid,
goto out;
}
- switch (scpd_area->response.code) {
- case 0x0001: /* Success. */
+ ret = chsc_error_from_response(scpd_area->response.code);
+ if (ret == 0)
+ /* Success. */
memcpy(desc, &scpd_area->desc,
sizeof(struct channel_path_desc));
- ret = 0;
- break;
- case 0x0003: /* Invalid block. */
- case 0x0007: /* Invalid format. */
- case 0x0008: /* Other invalid block. */
- CIO_CRW_EVENT(2, "Error in chsc request block!\n");
- ret = -EINVAL;
- break;
- case 0x0004: /* Command not provided in model. */
- CIO_CRW_EVENT(2, "Model does not provide scpd\n");
- ret = -EOPNOTSUPP;
- break;
- default:
- CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
+ else
+ CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
scpd_area->response.code);
- ret = -EIO;
- }
out:
free_page((unsigned long)scpd_area);
return ret;
@@ -923,8 +908,9 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
goto out;
}
- switch (scmc_area->response.code) {
- case 0x0001: /* Success. */
+ ret = chsc_error_from_response(scmc_area->response.code);
+ if (ret == 0) {
+ /* Success. */
if (!scmc_area->not_valid) {
chp->cmg = scmc_area->cmg;
chp->shared = scmc_area->shared;
@@ -935,22 +921,9 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
chp->cmg = -1;
chp->shared = -1;
}
- ret = 0;
- break;
- case 0x0003: /* Invalid block. */
- case 0x0007: /* Invalid format. */
- case 0x0008: /* Invalid bit combination. */
- CIO_CRW_EVENT(2, "Error in chsc request block!\n");
- ret = -EINVAL;
- break;
- case 0x0004: /* Command not provided. */
- CIO_CRW_EVENT(2, "Model does not provide scmc\n");
- ret = -EOPNOTSUPP;
- break;
- default:
- CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
+ } else {
+ CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
scmc_area->response.code);
- ret = -EIO;
}
out:
free_page((unsigned long)scmc_area);
@@ -1002,21 +975,17 @@ chsc_enable_facility(int operation_code)
ret = (ret == 3) ? -ENODEV : -EBUSY;
goto out;
}
+
switch (sda_area->response.code) {
- case 0x0001: /* everything ok */
- ret = 0;
- break;
- case 0x0003: /* invalid request block */
- case 0x0007:
- ret = -EINVAL;
- break;
- case 0x0004: /* command not provided */
- case 0x0101: /* facility not provided */
+ case 0x0101:
ret = -EOPNOTSUPP;
break;
- default: /* something went wrong */
- ret = -EIO;
+ default:
+ ret = chsc_error_from_response(sda_area->response.code);
}
+ if (ret != 0)
+ CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
+ operation_code, sda_area->response.code);
out:
free_page((unsigned long)sda_area);
return ret;
@@ -1041,33 +1010,27 @@ chsc_determine_css_characteristics(void)
} __attribute__ ((packed)) *scsc_area;
scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!scsc_area) {
- CIO_MSG_EVENT(0, "Was not able to determine available "
- "CHSCs due to no memory.\n");
+ if (!scsc_area)
return -ENOMEM;
- }
scsc_area->request.length = 0x0010;
scsc_area->request.code = 0x0010;
result = chsc(scsc_area);
if (result) {
- CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, "
- "cc=%i.\n", result);
- result = -EIO;
+ result = (result == 3) ? -ENODEV : -EBUSY;
goto exit;
}
- if (scsc_area->response.code != 1) {
- CIO_MSG_EVENT(0, "Was not able to determine "
- "available CHSCs.\n");
- result = -EIO;
- goto exit;
- }
- memcpy(&css_general_characteristics, scsc_area->general_char,
- sizeof(css_general_characteristics));
- memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
- sizeof(css_chsc_characteristics));
+ result = chsc_error_from_response(scsc_area->response.code);
+ if (result == 0) {
+ memcpy(&css_general_characteristics, scsc_area->general_char,
+ sizeof(css_general_characteristics));
+ memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
+ sizeof(css_chsc_characteristics));
+ } else
+ CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
+ scsc_area->response.code);
exit:
free_page ((unsigned long) scsc_area);
return result;
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 918b8b89cf9a..dc4d87f77f6c 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -26,17 +26,18 @@
#include "ioasm.h"
#include "io_sch.h"
-/*
- * Input :
- * devno - device number
- * ps - pointer to sense ID data area
- * Output : none
+/**
+ * vm_vdev_to_cu_type - Convert vm virtual device into control unit type
+ * for certain devices.
+ * @class: virtual device class
+ * @type: virtual device type
+ *
+ * Returns control unit type if a match was made or %0xffff otherwise.
*/
-static void
-VM_virtual_device_info (__u16 devno, struct senseid *ps)
+static int vm_vdev_to_cu_type(int class, int type)
{
static struct {
- int vrdcvcla, vrdcvtyp, cu_type;
+ int class, type, cu_type;
} vm_devices[] = {
{ 0x08, 0x01, 0x3480 },
{ 0x08, 0x02, 0x3430 },
@@ -68,8 +69,26 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps)
{ 0x40, 0xc0, 0x5080 },
{ 0x80, 0x00, 0x3215 },
};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vm_devices); i++)
+ if (class == vm_devices[i].class && type == vm_devices[i].type)
+ return vm_devices[i].cu_type;
+
+ return 0xffff;
+}
+
+/**
+ * diag_get_dev_info - retrieve device information via DIAG X'210'
+ * @devno: device number
+ * @ps: pointer to sense ID data area
+ *
+ * Returns zero on success, non-zero otherwise.
+ */
+static int diag_get_dev_info(u16 devno, struct senseid *ps)
+{
struct diag210 diag_data;
- int ccode, i;
+ int ccode;
CIO_TRACE_EVENT (4, "VMvdinf");
@@ -79,21 +98,21 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps)
};
ccode = diag210 (&diag_data);
- ps->reserved = 0xff;
+ if ((ccode == 0) || (ccode == 2)) {
+ ps->reserved = 0xff;
- /* Special case for bloody osa devices. */
- if (diag_data.vrdcvcla == 0x02 &&
- diag_data.vrdcvtyp == 0x20) {
- ps->cu_type = 0x3088;
- ps->cu_model = 0x60;
- return;
- }
- for (i = 0; i < ARRAY_SIZE(vm_devices); i++)
- if (diag_data.vrdcvcla == vm_devices[i].vrdcvcla &&
- diag_data.vrdcvtyp == vm_devices[i].vrdcvtyp) {
- ps->cu_type = vm_devices[i].cu_type;
- return;
+ /* Special case for osa devices. */
+ if (diag_data.vrdcvcla == 0x02 && diag_data.vrdcvtyp == 0x20) {
+ ps->cu_type = 0x3088;
+ ps->cu_model = 0x60;
+ return 0;
}
+ ps->cu_type = vm_vdev_to_cu_type(diag_data.vrdcvcla,
+ diag_data.vrdcvtyp);
+ if (ps->cu_type != 0xffff)
+ return 0;
+ }
+
CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):"
"vdev class : %02X, vdev type : %04X \n ... "
"rdev class : %02X, rdev type : %04X, "
@@ -102,6 +121,8 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps)
diag_data.vrdcvcla, diag_data.vrdcvtyp,
diag_data.vrdcrccl, diag_data.vrdccrty,
diag_data.vrdccrmd);
+
+ return -ENODEV;
}
/*
@@ -130,6 +151,7 @@ __ccw_device_sense_id_start(struct ccw_device *cdev)
/* Try on every path. */
ret = -ENODEV;
while (cdev->private->imask != 0) {
+ cdev->private->senseid.cu_type = 0xFFFF;
if ((sch->opm & cdev->private->imask) != 0 &&
cdev->private->iretry > 0) {
cdev->private->iretry--;
@@ -153,7 +175,6 @@ ccw_device_sense_id_start(struct ccw_device *cdev)
int ret;
memset (&cdev->private->senseid, 0, sizeof (struct senseid));
- cdev->private->senseid.cu_type = 0xFFFF;
cdev->private->imask = 0x80;
cdev->private->iretry = 5;
ret = __ccw_device_sense_id_start(cdev);
@@ -173,13 +194,7 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
sch = to_subchannel(cdev->dev.parent);
irb = &cdev->private->irb;
- /* Did we get a proper answer ? */
- if (cdev->private->senseid.cu_type != 0xFFFF &&
- cdev->private->senseid.reserved == 0xFF) {
- if (irb->scsw.count < sizeof (struct senseid) - 8)
- cdev->private->flags.esid = 1;
- return 0; /* Success */
- }
+
/* Check the error cases. */
if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
/* Retry Sense ID if requested. */
@@ -231,6 +246,15 @@ ccw_device_check_sense_id(struct ccw_device *cdev)
sch->schid.ssid, sch->schid.sch_no);
return -EACCES;
}
+
+ /* Did we get a proper answer ? */
+ if (irb->scsw.cc == 0 && cdev->private->senseid.cu_type != 0xFFFF &&
+ cdev->private->senseid.reserved == 0xFF) {
+ if (irb->scsw.count < sizeof(struct senseid) - 8)
+ cdev->private->flags.esid = 1;
+ return 0; /* Success */
+ }
+
/* Hmm, whatever happened, try again. */
CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on "
"subchannel 0.%x.%04x returns status %02X%02X\n",
@@ -283,20 +307,17 @@ ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event)
break;
/* fall through. */
default: /* Sense ID failed. Try asking VM. */
- if (MACHINE_IS_VM) {
- VM_virtual_device_info (cdev->private->dev_id.devno,
+ if (MACHINE_IS_VM)
+ ret = diag_get_dev_info(cdev->private->dev_id.devno,
&cdev->private->senseid);
- if (cdev->private->senseid.cu_type != 0xFFFF) {
- /* Got the device information from VM. */
- ccw_device_sense_id_done(cdev, 0);
- return;
- }
- }
- /*
- * If we can't couldn't identify the device type we
- * consider the device "not operational".
- */
- ccw_device_sense_id_done(cdev, -ENODEV);
+ else
+ /*
+ * If we couldn't identify the device type we
+ * consider the device "not operational".
+ */
+ ret = -ENODEV;
+
+ ccw_device_sense_id_done(cdev, ret);
break;
}
}
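
The refactoring above splits the old VM_virtual_device_info() into a pure class/type lookup (vm_vdev_to_cu_type(), returning 0xffff on no match) and the DIAG X'210' call itself. A stand-alone illustration of the lookup, reproducing only a few table entries from the hunk plus the OSA special case handled in diag_get_dev_info():

#include <stdio.h>

/* Map a VM virtual device class/type pair to a control unit type, or 0xffff
 * when unknown; entries here are a subset of the table shown in the patch.
 */
static int cu_type_for(int class, int type)
{
	static const struct { int class, type, cu_type; } tbl[] = {
		{ 0x08, 0x01, 0x3480 },
		{ 0x40, 0xc0, 0x5080 },
		{ 0x80, 0x00, 0x3215 },
	};
	unsigned int i;

	if (class == 0x02 && type == 0x20)	/* OSA special case: 0x3088 */
		return 0x3088;
	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (class == tbl[i].class && type == tbl[i].type)
			return tbl[i].cu_type;
	return 0xffff;				/* no match */
}

int main(void)
{
	printf("0x08/0x01 -> %04x\n", cu_type_for(0x08, 0x01));
	printf("0x02/0x20 -> %04x\n", cu_type_for(0x02, 0x20));
	printf("0x7f/0x7f -> %04x\n", cu_type_for(0x7f, 0x7f));
	return 0;
}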
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c
index 19343f9675c3..291ff6235fe2 100644
--- a/drivers/s390/sysinfo.c
+++ b/drivers/s390/sysinfo.c
@@ -422,7 +422,7 @@ void s390_adjust_jiffies(void)
/*
* calibrate the delay loop
*/
-void __init calibrate_delay(void)
+void __cpuinit calibrate_delay(void)
{
s390_adjust_jiffies();
/* Print the good old Bogomips line .. */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 14fc7f39e83e..a5f0aaaf0dd4 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -179,7 +179,15 @@ config CHR_DEV_SCH
say M here and read <file:Documentation/kbuild/modules.txt> and
<file:Documentation/scsi/scsi.txt>. The module will be called ch.o.
If unsure, say N.
-
+
+config SCSI_ENCLOSURE
+ tristate "SCSI Enclosure Support"
+ depends on SCSI && ENCLOSURE_SERVICES
+ help
+ Enclosures are devices sitting on or in SCSI backplanes that
+ manage devices. If you have a disk cage, the chances are that
+ it has an enclosure device. Selecting this option will just allow
+ certain enclosure conditions to be reported and is not required.
comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
depends on SCSI
@@ -350,17 +358,6 @@ config SGIWD93_SCSI
If you have a Western Digital WD93 SCSI controller on
an SGI MIPS system, say Y. Otherwise, say N.
-config SCSI_DECNCR
- tristate "DEC NCR53C94 Scsi Driver"
- depends on MACH_DECSTATION && SCSI && TC
- help
- Say Y here to support the NCR53C94 SCSI controller chips on IOASIC
- based TURBOchannel DECstations and TURBOchannel PMAZ-A cards.
-
-config SCSI_DECSII
- tristate "DEC SII Scsi Driver"
- depends on MACH_DECSTATION && SCSI && 32BIT
-
config BLK_DEV_3W_XXXX_RAID
tristate "3ware 5/6/7/8xxx ATA-RAID support"
depends on PCI && SCSI
@@ -1263,17 +1260,6 @@ config SCSI_NCR53C8XX_NO_DISCONNECT
not allow targets to disconnect is not reasonable if there is more
than 1 device on a SCSI bus. The normal answer therefore is N.
-config SCSI_MCA_53C9X
- tristate "NCR MCA 53C9x SCSI support"
- depends on MCA_LEGACY && SCSI && BROKEN_ON_SMP
- help
- Some MicroChannel machines, notably the NCR 35xx line, use a SCSI
- controller based on the NCR 53C94. This driver will allow use of
- the controller on the 3550, and very possibly others.
-
- To compile this driver as a module, choose M here: the
- module will be called mca_53c9x.
-
config SCSI_PAS16
tristate "PAS16 SCSI support"
depends on ISA && SCSI
@@ -1600,45 +1586,6 @@ config GVP11_SCSI
To compile this driver as a module, choose M here: the
module will be called gvp11.
-config CYBERSTORM_SCSI
- tristate "CyberStorm SCSI support"
- depends on ZORRO && SCSI
- help
- If you have an Amiga with an original (MkI) Phase5 Cyberstorm
- accelerator board and the optional Cyberstorm SCSI controller,
- answer Y. Otherwise, say N.
-
-config CYBERSTORMII_SCSI
- tristate "CyberStorm Mk II SCSI support"
- depends on ZORRO && SCSI
- help
- If you have an Amiga with a Phase5 Cyberstorm MkII accelerator board
- and the optional Cyberstorm SCSI controller, say Y. Otherwise,
- answer N.
-
-config BLZ2060_SCSI
- tristate "Blizzard 2060 SCSI support"
- depends on ZORRO && SCSI
- help
- If you have an Amiga with a Phase5 Blizzard 2060 accelerator board
- and want to use the onboard SCSI controller, say Y. Otherwise,
- answer N.
-
-config BLZ1230_SCSI
- tristate "Blizzard 1230IV/1260 SCSI support"
- depends on ZORRO && SCSI
- help
- If you have an Amiga 1200 with a Phase5 Blizzard 1230IV or Blizzard
- 1260 accelerator, and the optional SCSI module, say Y. Otherwise,
- say N.
-
-config FASTLANE_SCSI
- tristate "Fastlane SCSI support"
- depends on ZORRO && SCSI
- help
- If you have the Phase5 Fastlane Z3 SCSI controller, or plan to use
- one in the near future, say Y to this question. Otherwise, say N.
-
config SCSI_A4000T
tristate "A4000T NCR53c710 SCSI support (EXPERIMENTAL)"
depends on AMIGA && SCSI && EXPERIMENTAL
@@ -1666,15 +1613,6 @@ config SCSI_ZORRO7XX
accelerator card for the Amiga 1200,
- the SCSI controller on the GVP Turbo 040/060 accelerator.
-config OKTAGON_SCSI
- tristate "BSC Oktagon SCSI support (EXPERIMENTAL)"
- depends on ZORRO && SCSI && EXPERIMENTAL
- help
- If you have the BSC Oktagon SCSI disk controller for the Amiga, say
- Y to this question. If you're in doubt about whether you have one,
- see the picture at
- <http://amiga.resource.cx/exp/search.pl?product=oktagon>.
-
config ATARI_SCSI
tristate "Atari native SCSI support"
depends on ATARI && SCSI
@@ -1727,18 +1665,6 @@ config MAC_SCSI
SCSI-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
-config SCSI_MAC_ESP
- tristate "Macintosh NCR53c9[46] SCSI"
- depends on MAC && SCSI
- help
- This is the NCR 53c9x SCSI controller found on most of the 68040
- based Macintoshes. If you have one of these say Y and read the
- SCSI-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
-
- To compile this driver as a module, choose M here: the
- module will be called mac_esp.
-
config MVME147_SCSI
bool "WD33C93 SCSI driver for MVME147"
depends on MVME147 && SCSI=y
@@ -1779,6 +1705,7 @@ config SUN3_SCSI
config SUN3X_ESP
bool "Sun3x ESP SCSI"
depends on SUN3X && SCSI=y
+ select SCSI_SPI_ATTRS
help
The ESP was an on-board SCSI controller used on Sun 3/80
machines. Say Y here to compile in support for it.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 93e1428d03fc..925c26b4fff9 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -44,15 +44,8 @@ obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
obj-$(CONFIG_MVME147_SCSI) += mvme147.o wd33c93.o
obj-$(CONFIG_SGIWD93_SCSI) += sgiwd93.o wd33c93.o
-obj-$(CONFIG_CYBERSTORM_SCSI) += NCR53C9x.o cyberstorm.o
-obj-$(CONFIG_CYBERSTORMII_SCSI) += NCR53C9x.o cyberstormII.o
-obj-$(CONFIG_BLZ2060_SCSI) += NCR53C9x.o blz2060.o
-obj-$(CONFIG_BLZ1230_SCSI) += NCR53C9x.o blz1230.o
-obj-$(CONFIG_FASTLANE_SCSI) += NCR53C9x.o fastlane.o
-obj-$(CONFIG_OKTAGON_SCSI) += NCR53C9x.o oktagon_esp_mod.o
obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
-obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o
obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
obj-$(CONFIG_MVME16x_SCSI) += 53c700.o mvme16x_scsi.o
obj-$(CONFIG_BVME6000_SCSI) += 53c700.o bvme6000_scsi.o
@@ -95,7 +88,6 @@ obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
-obj-$(CONFIG_SCSI_MCA_53C9X) += NCR53C9x.o mca_53c9x.o
obj-$(CONFIG_SCSI_IBMMCA) += ibmmca.o
obj-$(CONFIG_SCSI_EATA) += eata.o
obj-$(CONFIG_SCSI_DC395x) += dc395x.o
@@ -112,13 +104,12 @@ obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
obj-$(CONFIG_BLK_DEV_IDESCSI) += ide-scsi.o
obj-$(CONFIG_SCSI_MESH) += mesh.o
obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
-obj-$(CONFIG_SCSI_DECNCR) += NCR53C9x.o dec_esp.o
obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
obj-$(CONFIG_SCSI_PPA) += ppa.o
obj-$(CONFIG_SCSI_IMM) += imm.o
obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
-obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o
+obj-$(CONFIG_SUN3X_ESP) += esp_scsi.o sun3x_esp.o
obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o
@@ -138,6 +129,7 @@ obj-$(CONFIG_BLK_DEV_SD) += sd_mod.o
obj-$(CONFIG_BLK_DEV_SR) += sr_mod.o
obj-$(CONFIG_CHR_DEV_SG) += sg.o
obj-$(CONFIG_CHR_DEV_SCH) += ch.o
+obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o
# This goes last, so that "real" scsi devices probe earlier
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
diff --git a/drivers/scsi/NCR53C9x.c b/drivers/scsi/NCR53C9x.c
deleted file mode 100644
index 5b0efc903918..000000000000
--- a/drivers/scsi/NCR53C9x.c
+++ /dev/null
@@ -1,3654 +0,0 @@
-/* NCR53C9x.c: Generic SCSI driver code for NCR53C9x chips.
- *
- * Originally esp.c : EnhancedScsiProcessor Sun SCSI driver code.
- *
- * Copyright (C) 1995, 1998 David S. Miller (davem@caip.rutgers.edu)
- *
- * Most DMA dependencies put in driver specific files by
- * Jesper Skov (jskov@cygnus.co.uk)
- *
- * Set up to use esp_read/esp_write (preprocessor macros in NCR53c9x.h) by
- * Tymm Twillman (tymm@coe.missouri.edu)
- */
-
-/* TODO:
- *
- * 1) Maybe disable parity checking in config register one for SCSI1
- * targets. (Gilmore says parity error on the SBus can lock up
- * old sun4c's)
- * 2) Add support for DMA2 pipelining.
- * 3) Add tagged queueing.
- * 4) Maybe change use of "esp" to something more "NCR"'ish.
- */
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/init.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
-
-#include <asm/system.h>
-#include <asm/ptrace.h>
-#include <asm/pgtable.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-
-/* Command phase enumeration. */
-enum {
- not_issued = 0x00, /* Still in the issue_SC queue. */
-
- /* Various forms of selecting a target. */
-#define in_slct_mask 0x10
- in_slct_norm = 0x10, /* ESP is arbitrating, normal selection */
- in_slct_stop = 0x11, /* ESP will select, then stop with IRQ */
- in_slct_msg = 0x12, /* select, then send a message */
- in_slct_tag = 0x13, /* select and send tagged queue msg */
- in_slct_sneg = 0x14, /* select and acquire sync capabilities */
-
- /* Any post selection activity. */
-#define in_phases_mask 0x20
- in_datain = 0x20, /* Data is transferring from the bus */
- in_dataout = 0x21, /* Data is transferring to the bus */
- in_data_done = 0x22, /* Last DMA data operation done (maybe) */
- in_msgin = 0x23, /* Eating message from target */
- in_msgincont = 0x24, /* Eating more msg bytes from target */
- in_msgindone = 0x25, /* Decide what to do with what we got */
- in_msgout = 0x26, /* Sending message to target */
- in_msgoutdone = 0x27, /* Done sending msg out */
- in_cmdbegin = 0x28, /* Sending cmd after abnormal selection */
- in_cmdend = 0x29, /* Done sending slow cmd */
- in_status = 0x2a, /* Was in status phase, finishing cmd */
- in_freeing = 0x2b, /* freeing the bus for cmd cmplt or disc */
- in_the_dark = 0x2c, /* Don't know what bus phase we are in */
-
- /* Special states, ie. not normal bus transitions... */
-#define in_spec_mask 0x80
- in_abortone = 0x80, /* Aborting one command currently */
- in_abortall = 0x81, /* Blowing away all commands we have */
- in_resetdev = 0x82, /* SCSI target reset in progress */
- in_resetbus = 0x83, /* SCSI bus reset in progress */
- in_tgterror = 0x84, /* Target did something stupid */
-};
-
-enum {
- /* Zero has special meaning, see skipahead[12]. */
-/*0*/ do_never,
-
-/*1*/ do_phase_determine,
-/*2*/ do_reset_bus,
-/*3*/ do_reset_complete,
-/*4*/ do_work_bus,
-/*5*/ do_intr_end
-};
-
-/* The master ring of all esp hosts we are managing in this driver. */
-static struct NCR_ESP *espchain;
-int nesps = 0, esps_in_use = 0, esps_running = 0;
-EXPORT_SYMBOL(nesps);
-EXPORT_SYMBOL(esps_running);
-
-irqreturn_t esp_intr(int irq, void *dev_id);
-
-/* Debugging routines */
-static struct esp_cmdstrings {
- unchar cmdchar;
- char *text;
-} esp_cmd_strings[] = {
- /* Miscellaneous */
- { ESP_CMD_NULL, "ESP_NOP", },
- { ESP_CMD_FLUSH, "FIFO_FLUSH", },
- { ESP_CMD_RC, "RSTESP", },
- { ESP_CMD_RS, "RSTSCSI", },
- /* Disconnected State Group */
- { ESP_CMD_RSEL, "RESLCTSEQ", },
- { ESP_CMD_SEL, "SLCTNATN", },
- { ESP_CMD_SELA, "SLCTATN", },
- { ESP_CMD_SELAS, "SLCTATNSTOP", },
- { ESP_CMD_ESEL, "ENSLCTRESEL", },
- { ESP_CMD_DSEL, "DISSELRESEL", },
- { ESP_CMD_SA3, "SLCTATN3", },
- { ESP_CMD_RSEL3, "RESLCTSEQ", },
- /* Target State Group */
- { ESP_CMD_SMSG, "SNDMSG", },
- { ESP_CMD_SSTAT, "SNDSTATUS", },
- { ESP_CMD_SDATA, "SNDDATA", },
- { ESP_CMD_DSEQ, "DISCSEQ", },
- { ESP_CMD_TSEQ, "TERMSEQ", },
- { ESP_CMD_TCCSEQ, "TRGTCMDCOMPSEQ", },
- { ESP_CMD_DCNCT, "DISC", },
- { ESP_CMD_RMSG, "RCVMSG", },
- { ESP_CMD_RCMD, "RCVCMD", },
- { ESP_CMD_RDATA, "RCVDATA", },
- { ESP_CMD_RCSEQ, "RCVCMDSEQ", },
- /* Initiator State Group */
- { ESP_CMD_TI, "TRANSINFO", },
- { ESP_CMD_ICCSEQ, "INICMDSEQCOMP", },
- { ESP_CMD_MOK, "MSGACCEPTED", },
- { ESP_CMD_TPAD, "TPAD", },
- { ESP_CMD_SATN, "SATN", },
- { ESP_CMD_RATN, "RATN", },
-};
-#define NUM_ESP_COMMANDS ((sizeof(esp_cmd_strings)) / (sizeof(struct esp_cmdstrings)))
-
-/* Print textual representation of an ESP command */
-static inline void esp_print_cmd(unchar espcmd)
-{
- unchar dma_bit = espcmd & ESP_CMD_DMA;
- int i;
-
- espcmd &= ~dma_bit;
- for(i=0; i<NUM_ESP_COMMANDS; i++)
- if(esp_cmd_strings[i].cmdchar == espcmd)
- break;
- if(i==NUM_ESP_COMMANDS)
- printk("ESP_Unknown");
- else
- printk("%s%s", esp_cmd_strings[i].text,
- ((dma_bit) ? "+DMA" : ""));
-}
-
-/* Print the status register's value */
-static inline void esp_print_statreg(unchar statreg)
-{
- unchar phase;
-
- printk("STATUS<");
- phase = statreg & ESP_STAT_PMASK;
- printk("%s,", (phase == ESP_DOP ? "DATA-OUT" :
- (phase == ESP_DIP ? "DATA-IN" :
- (phase == ESP_CMDP ? "COMMAND" :
- (phase == ESP_STATP ? "STATUS" :
- (phase == ESP_MOP ? "MSG-OUT" :
- (phase == ESP_MIP ? "MSG_IN" :
- "unknown")))))));
- if(statreg & ESP_STAT_TDONE)
- printk("TRANS_DONE,");
- if(statreg & ESP_STAT_TCNT)
- printk("TCOUNT_ZERO,");
- if(statreg & ESP_STAT_PERR)
- printk("P_ERROR,");
- if(statreg & ESP_STAT_SPAM)
- printk("SPAM,");
- if(statreg & ESP_STAT_INTR)
- printk("IRQ,");
- printk(">");
-}
-
-/* Print the interrupt register's value */
-static inline void esp_print_ireg(unchar intreg)
-{
- printk("INTREG< ");
- if(intreg & ESP_INTR_S)
- printk("SLCT_NATN ");
- if(intreg & ESP_INTR_SATN)
- printk("SLCT_ATN ");
- if(intreg & ESP_INTR_RSEL)
- printk("RSLCT ");
- if(intreg & ESP_INTR_FDONE)
- printk("FDONE ");
- if(intreg & ESP_INTR_BSERV)
- printk("BSERV ");
- if(intreg & ESP_INTR_DC)
- printk("DISCNCT ");
- if(intreg & ESP_INTR_IC)
- printk("ILL_CMD ");
- if(intreg & ESP_INTR_SR)
- printk("SCSI_BUS_RESET ");
- printk(">");
-}
-
-/* Print the sequence step registers contents */
-static inline void esp_print_seqreg(unchar stepreg)
-{
- stepreg &= ESP_STEP_VBITS;
- printk("STEP<%s>",
- (stepreg == ESP_STEP_ASEL ? "SLCT_ARB_CMPLT" :
- (stepreg == ESP_STEP_SID ? "1BYTE_MSG_SENT" :
- (stepreg == ESP_STEP_NCMD ? "NOT_IN_CMD_PHASE" :
- (stepreg == ESP_STEP_PPC ? "CMD_BYTES_LOST" :
- (stepreg == ESP_STEP_FINI4 ? "CMD_SENT_OK" :
- "UNKNOWN"))))));
-}
-
-static char *phase_string(int phase)
-{
- switch(phase) {
- case not_issued:
- return "UNISSUED";
- case in_slct_norm:
- return "SLCTNORM";
- case in_slct_stop:
- return "SLCTSTOP";
- case in_slct_msg:
- return "SLCTMSG";
- case in_slct_tag:
- return "SLCTTAG";
- case in_slct_sneg:
- return "SLCTSNEG";
- case in_datain:
- return "DATAIN";
- case in_dataout:
- return "DATAOUT";
- case in_data_done:
- return "DATADONE";
- case in_msgin:
- return "MSGIN";
- case in_msgincont:
- return "MSGINCONT";
- case in_msgindone:
- return "MSGINDONE";
- case in_msgout:
- return "MSGOUT";
- case in_msgoutdone:
- return "MSGOUTDONE";
- case in_cmdbegin:
- return "CMDBEGIN";
- case in_cmdend:
- return "CMDEND";
- case in_status:
- return "STATUS";
- case in_freeing:
- return "FREEING";
- case in_the_dark:
- return "CLUELESS";
- case in_abortone:
- return "ABORTONE";
- case in_abortall:
- return "ABORTALL";
- case in_resetdev:
- return "RESETDEV";
- case in_resetbus:
- return "RESETBUS";
- case in_tgterror:
- return "TGTERROR";
- default:
- return "UNKNOWN";
- };
-}
-
-#ifdef DEBUG_STATE_MACHINE
-static inline void esp_advance_phase(Scsi_Cmnd *s, int newphase)
-{
- ESPLOG(("<%s>", phase_string(newphase)));
- s->SCp.sent_command = s->SCp.phase;
- s->SCp.phase = newphase;
-}
-#else
-#define esp_advance_phase(__s, __newphase) \
- (__s)->SCp.sent_command = (__s)->SCp.phase; \
- (__s)->SCp.phase = (__newphase);
-#endif
-
-#ifdef DEBUG_ESP_CMDS
-static inline void esp_cmd(struct NCR_ESP *esp, struct ESP_regs *eregs,
- unchar cmd)
-{
- esp->espcmdlog[esp->espcmdent] = cmd;
- esp->espcmdent = (esp->espcmdent + 1) & 31;
- esp_write(eregs->esp_cmnd, cmd);
-}
-#else
-#define esp_cmd(__esp, __eregs, __cmd) esp_write((__eregs)->esp_cmnd, (__cmd))
-#endif
-
-/* How we use the various Linux SCSI data structures for operation.
- *
- * struct scsi_cmnd:
- *
- * We keep track of the syncronous capabilities of a target
- * in the device member, using sync_min_period and
- * sync_max_offset. These are the values we directly write
- * into the ESP registers while running a command. If offset
- * is zero the ESP will use asynchronous transfers.
- * If the borken flag is set we assume we shouldn't even bother
- * trying to negotiate for synchronous transfer as this target
- * is really stupid. If we notice the target is dropping the
- * bus, and we have been allowing it to disconnect, we clear
- * the disconnect flag.
- */
-
-/* Manipulation of the ESP command queues. Thanks to the aha152x driver
- * and its author, Juergen E. Fischer, for the methods used here.
- * Note that these are per-ESP queues, not global queues like
- * the aha152x driver uses.
- */
-static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
-{
- Scsi_Cmnd *end;
-
- new_SC->host_scribble = (unsigned char *) NULL;
- if(!*SC)
- *SC = new_SC;
- else {
- for(end=*SC;end->host_scribble;end=(Scsi_Cmnd *)end->host_scribble)
- ;
- end->host_scribble = (unsigned char *) new_SC;
- }
-}
-
-static inline void prepend_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
-{
- new_SC->host_scribble = (unsigned char *) *SC;
- *SC = new_SC;
-}
-
-static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd **SC)
-{
- Scsi_Cmnd *ptr;
-
- ptr = *SC;
- if(ptr)
- *SC = (Scsi_Cmnd *) (*SC)->host_scribble;
- return ptr;
-}
-
-static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, int target, int lun)
-{
- Scsi_Cmnd *ptr, *prev;
-
- for(ptr = *SC, prev = NULL;
- ptr && ((ptr->device->id != target) || (ptr->device->lun != lun));
- prev = ptr, ptr = (Scsi_Cmnd *) ptr->host_scribble)
- ;
- if(ptr) {
- if(prev)
- prev->host_scribble=ptr->host_scribble;
- else
- *SC=(Scsi_Cmnd *)ptr->host_scribble;
- }
- return ptr;
-}
-
-/* Resetting various pieces of the ESP scsi driver chipset */
-
-/* Reset the ESP chip, _not_ the SCSI bus. */
-static void esp_reset_esp(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- int family_code, version, i;
- volatile int trash;
-
- /* Now reset the ESP chip */
- esp_cmd(esp, eregs, ESP_CMD_RC);
- esp_cmd(esp, eregs, ESP_CMD_NULL | ESP_CMD_DMA);
- if(esp->erev == fast)
- esp_write(eregs->esp_cfg2, ESP_CONFIG2_FENAB);
- esp_cmd(esp, eregs, ESP_CMD_NULL | ESP_CMD_DMA);
-
- /* This is the only point at which it is reliable to read
- * the ID-code for a fast ESP chip variant.
- */
- esp->max_period = ((35 * esp->ccycle) / 1000);
- if(esp->erev == fast) {
- char *erev2string[] = {
- "Emulex FAS236",
- "Emulex FPESP100A",
- "fast",
- "QLogic FAS366",
- "Emulex FAS216",
- "Symbios Logic 53CF9x-2",
- "unknown!"
- };
-
- version = esp_read(eregs->esp_uid);
- family_code = (version & 0xf8) >> 3;
- if(family_code == 0x02) {
- if ((version & 7) == 2)
- esp->erev = fas216;
- else
- esp->erev = fas236;
- } else if(family_code == 0x0a)
- esp->erev = fas366; /* Version is usually '5'. */
- else if(family_code == 0x00) {
- if ((version & 7) == 2)
- esp->erev = fas100a; /* NCR53C9X */
- else
- esp->erev = espunknown;
- } else if(family_code == 0x14) {
- if ((version & 7) == 2)
- esp->erev = fsc;
- else
- esp->erev = espunknown;
- } else if(family_code == 0x00) {
- if ((version & 7) == 2)
- esp->erev = fas100a; /* NCR53C9X */
- else
- esp->erev = espunknown;
- } else
- esp->erev = espunknown;
- ESPLOG(("esp%d: FAST chip is %s (family=%d, version=%d)\n",
- esp->esp_id, erev2string[esp->erev - fas236],
- family_code, (version & 7)));
-
- esp->min_period = ((4 * esp->ccycle) / 1000);
- } else {
- esp->min_period = ((5 * esp->ccycle) / 1000);
- }
-
- /* Reload the configuration registers */
- esp_write(eregs->esp_cfact, esp->cfact);
- esp->prev_stp = 0;
- esp_write(eregs->esp_stp, 0);
- esp->prev_soff = 0;
- esp_write(eregs->esp_soff, 0);
- esp_write(eregs->esp_timeo, esp->neg_defp);
- esp->max_period = (esp->max_period + 3)>>2;
- esp->min_period = (esp->min_period + 3)>>2;
-
- esp_write(eregs->esp_cfg1, esp->config1);
- switch(esp->erev) {
- case esp100:
- /* nothing to do */
- break;
- case esp100a:
- esp_write(eregs->esp_cfg2, esp->config2);
- break;
- case esp236:
- /* Slow 236 */
- esp_write(eregs->esp_cfg2, esp->config2);
- esp->prev_cfg3 = esp->config3[0];
- esp_write(eregs->esp_cfg3, esp->prev_cfg3);
- break;
- case fas366:
- panic("esp: FAS366 support not present, please notify "
- "jongk@cs.utwente.nl");
- break;
- case fas216:
- case fas236:
- case fsc:
- /* Fast ESP variants */
- esp_write(eregs->esp_cfg2, esp->config2);
- for(i=0; i<8; i++)
- esp->config3[i] |= ESP_CONFIG3_FCLK;
- esp->prev_cfg3 = esp->config3[0];
- esp_write(eregs->esp_cfg3, esp->prev_cfg3);
- if(esp->diff)
- esp->radelay = 0;
- else
- esp->radelay = 16;
- /* Different timeout constant for these chips */
- esp->neg_defp =
- FSC_NEG_DEFP(esp->cfreq,
- (esp->cfact == ESP_CCF_F0 ?
- ESP_CCF_F7 + 1 : esp->cfact));
- esp_write(eregs->esp_timeo, esp->neg_defp);
- /* Enable Active Negotiation if possible */
- if((esp->erev == fsc) && !esp->diff)
- esp_write(eregs->esp_cfg4, ESP_CONFIG4_EAN);
- break;
- case fas100a:
- /* Fast 100a */
- esp_write(eregs->esp_cfg2, esp->config2);
- for(i=0; i<8; i++)
- esp->config3[i] |= ESP_CONFIG3_FCLOCK;
- esp->prev_cfg3 = esp->config3[0];
- esp_write(eregs->esp_cfg3, esp->prev_cfg3);
- esp->radelay = 32;
- break;
- default:
- panic("esp: what could it be... I wonder...");
- break;
- };
-
- /* Eat any bitrot in the chip */
- trash = esp_read(eregs->esp_intrpt);
- udelay(100);
-}
-
-/* This places the ESP into a known state at boot time. */
-void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- volatile unchar trash;
-
- /* Reset the DMA */
- if(esp->dma_reset)
- esp->dma_reset(esp);
-
- /* Reset the ESP */
- esp_reset_esp(esp, eregs);
-
- /* Reset the SCSI bus, but tell ESP not to generate an irq */
- esp_write(eregs->esp_cfg1, (esp_read(eregs->esp_cfg1) | ESP_CONFIG1_SRRDISAB));
- esp_cmd(esp, eregs, ESP_CMD_RS);
- udelay(400);
- esp_write(eregs->esp_cfg1, esp->config1);
-
- /* Eat any bitrot in the chip and we are done... */
- trash = esp_read(eregs->esp_intrpt);
-}
-EXPORT_SYMBOL(esp_bootup_reset);
-
-/* Allocate structure and insert basic data such as SCSI chip frequency
- * data and a pointer to the device
- */
-struct NCR_ESP* esp_allocate(struct scsi_host_template *tpnt, void *esp_dev,
- int hotplug)
-{
- struct NCR_ESP *esp, *elink;
- struct Scsi_Host *esp_host;
-
- if (hotplug)
- esp_host = scsi_host_alloc(tpnt, sizeof(struct NCR_ESP));
- else
- esp_host = scsi_register(tpnt, sizeof(struct NCR_ESP));
- if(!esp_host)
- panic("Cannot register ESP SCSI host");
- esp = (struct NCR_ESP *) esp_host->hostdata;
- if(!esp)
- panic("No esp in hostdata");
- esp->ehost = esp_host;
- esp->edev = esp_dev;
- esp->esp_id = nesps++;
-
- /* Set bitshift value (only used on Amiga with multiple ESPs) */
- esp->shift = 2;
-
- /* Put into the chain of esp chips detected */
- if(espchain) {
- elink = espchain;
- while(elink->next) elink = elink->next;
- elink->next = esp;
- } else {
- espchain = esp;
- }
- esp->next = NULL;
-
- return esp;
-}
-
-void esp_deallocate(struct NCR_ESP *esp)
-{
- struct NCR_ESP *elink;
-
- if(espchain == esp) {
- espchain = NULL;
- } else {
- for(elink = espchain; elink && (elink->next != esp); elink = elink->next);
- if(elink)
- elink->next = esp->next;
- }
- nesps--;
-}
-
-/* Complete initialization of ESP structure and device
- * Caller must have initialized appropriate parts of the ESP structure
- * between the call to esp_allocate and this function.
- */
-void esp_initialize(struct NCR_ESP *esp)
-{
- struct ESP_regs *eregs = esp->eregs;
- unsigned int fmhz;
- unchar ccf;
- int i;
-
- /* Check out the clock properties of the chip. */
-
- /* This is getting messy but it has to be done
- * correctly or else you get weird behavior all
- * over the place. We are trying to basically
- * figure out three pieces of information.
- *
- * a) Clock Conversion Factor
- *
- * This is a representation of the input
- * crystal clock frequency going into the
- * ESP on this machine. Any operation whose
- * timing is longer than 400ns depends on this
- * value being correct. For example, you'll
- * get blips for arbitration/selection during
- * high load or with multiple targets if this
- * is not set correctly.
- *
- * b) Selection Time-Out
- *
- * The ESP isn't very bright and will arbitrate
- * for the bus and try to select a target
- * forever if you let it. This value tells
- * the ESP when it has taken too long to
- * negotiate and that it should interrupt
- * the CPU so we can see what happened.
- * The value is computed as follows (from
- * NCR/Symbios chip docs).
- *
- * (Time Out Period) * (Input Clock)
- * STO = ----------------------------------
- * (8192) * (Clock Conversion Factor)
- *
- * You usually want the time out period to be
- * around 250ms, I think we'll set it a little
- * bit higher to account for fully loaded SCSI
- * buses and slow devices that don't respond so
- * quickly to selection attempts. (yeah, I know
- * this is out of spec. but there are a lot of
- * buggy pieces of firmware out there so bite me)
- *
- * c) Empirical constants for synchronous offset
- * and transfer period register values
- *
- * This entails the smallest and largest sync
- * period we could ever handle on this ESP.
- */
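Editor's illustration (not part of the removed driver): the clock-conversion-factor and selection time-out arithmetic described above can be checked with a small standalone sketch; the 20 MHz crystal, 250 ms time-out and 5 MHz CCF step used here are assumed example values, not constants taken from this file.

#include <stdio.h>

/* Hypothetical worked example of the formula quoted above.  Working in
 * kHz and ms keeps the intermediate product within 32 bits.
 */
int main(void)
{
	unsigned int clock_khz = 20000;                 /* 20 MHz input clock */
	unsigned int timeout_ms = 250;                  /* desired selection time-out */
	unsigned int ccf = (clock_khz + 4999) / 5000;   /* round up in 5 MHz steps -> 4 */
	unsigned int sto = (timeout_ms * clock_khz) / (8192 * ccf);

	printf("CCF=%u STO=%u\n", ccf, sto);            /* prints "CCF=4 STO=152" */
	return 0;
}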
-
- fmhz = esp->cfreq;
-
- if(fmhz <= (5000000))
- ccf = 0;
- else
- ccf = (((5000000 - 1) + (fmhz))/(5000000));
- if(!ccf || ccf > 8) {
- /* If we can't find anything reasonable,
- * just assume 20MHZ. This is the clock
- * frequency of the older sun4c's where I've
- * been unable to find the clock-frequency
- * PROM property. All other machines provide
- * useful values it seems.
- */
- ccf = ESP_CCF_F4;
- fmhz = (20000000);
- }
- if(ccf==(ESP_CCF_F7+1))
- esp->cfact = ESP_CCF_F0;
- else if(ccf == ESP_CCF_NEVER)
- esp->cfact = ESP_CCF_F2;
- else
- esp->cfact = ccf;
- esp->cfreq = fmhz;
- esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
- esp->ctick = ESP_TICK(ccf, esp->ccycle);
- esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
- esp->sync_defp = SYNC_DEFP_SLOW;
-
- printk("SCSI ID %d Clk %dMHz CCF=%d TOut %d ",
- esp->scsi_id, (esp->cfreq / 1000000),
- ccf, (int) esp->neg_defp);
-
- /* Fill in ehost data */
- esp->ehost->base = (unsigned long)eregs;
- esp->ehost->this_id = esp->scsi_id;
- esp->ehost->irq = esp->irq;
-
- /* SCSI id mask */
- esp->scsi_id_mask = (1 << esp->scsi_id);
-
- /* Probe the revision of this esp */
- esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
- esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
- esp_write(eregs->esp_cfg2, esp->config2);
- if((esp_read(eregs->esp_cfg2) & ~(ESP_CONFIG2_MAGIC)) !=
- (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
- printk("NCR53C90(esp100)\n");
- esp->erev = esp100;
- } else {
- esp->config2 = 0;
- esp_write(eregs->esp_cfg2, 0);
- esp_write(eregs->esp_cfg3, 5);
- if(esp_read(eregs->esp_cfg3) != 5) {
- printk("NCR53C90A(esp100a)\n");
- esp->erev = esp100a;
- } else {
- int target;
-
- for(target=0; target<8; target++)
- esp->config3[target] = 0;
- esp->prev_cfg3 = 0;
- esp_write(eregs->esp_cfg3, 0);
- if(ccf > ESP_CCF_F5) {
- printk("NCR53C9XF(espfast)\n");
- esp->erev = fast;
- esp->sync_defp = SYNC_DEFP_FAST;
- } else {
- printk("NCR53C9x(esp236)\n");
- esp->erev = esp236;
- }
- }
- }
-
- /* Initialize the command queues */
- esp->current_SC = NULL;
- esp->disconnected_SC = NULL;
- esp->issue_SC = NULL;
-
- /* Clear the state machines. */
- esp->targets_present = 0;
- esp->resetting_bus = 0;
- esp->snip = 0;
-
- init_waitqueue_head(&esp->reset_queue);
-
- esp->fas_premature_intr_workaround = 0;
- for(i = 0; i < 32; i++)
- esp->espcmdlog[i] = 0;
- esp->espcmdent = 0;
- for(i = 0; i < 16; i++) {
- esp->cur_msgout[i] = 0;
- esp->cur_msgin[i] = 0;
- }
- esp->prevmsgout = esp->prevmsgin = 0;
- esp->msgout_len = esp->msgin_len = 0;
-
- /* Clear the one behind caches to hold unmatchable values. */
- esp->prev_soff = esp->prev_stp = esp->prev_cfg3 = 0xff;
-
- /* Reset the thing before we try anything... */
- esp_bootup_reset(esp, eregs);
-
- esps_in_use++;
-}
-
-/* The info function will return whatever useful
- * information the developer sees fit. If not provided, then
- * the name field will be used instead.
- */
-const char *esp_info(struct Scsi_Host *host)
-{
- struct NCR_ESP *esp;
-
- esp = (struct NCR_ESP *) host->hostdata;
- switch(esp->erev) {
- case esp100:
- return "ESP100 (NCR53C90)";
- case esp100a:
- return "ESP100A (NCR53C90A)";
- case esp236:
- return "ESP236 (NCR53C9x)";
- case fas216:
- return "Emulex FAS216";
- case fas236:
- return "Emulex FAS236";
- case fas366:
- return "QLogic FAS366";
- case fas100a:
- return "FPESP100A";
- case fsc:
- return "Symbios Logic 53CF9x-2";
- default:
- panic("Bogon ESP revision");
- };
-}
-EXPORT_SYMBOL(esp_info);
-
-/* From Wolfgang Stanglmeier's NCR scsi driver. */
-struct info_str
-{
- char *buffer;
- int length;
- int offset;
- int pos;
-};
-
-static void copy_mem_info(struct info_str *info, char *data, int len)
-{
- if (info->pos + len > info->length)
- len = info->length - info->pos;
-
- if (info->pos + len < info->offset) {
- info->pos += len;
- return;
- }
- if (info->pos < info->offset) {
- data += (info->offset - info->pos);
- len -= (info->offset - info->pos);
- }
-
- if (len > 0) {
- memcpy(info->buffer + info->pos, data, len);
- info->pos += len;
- }
-}
-
-static int copy_info(struct info_str *info, char *fmt, ...)
-{
- va_list args;
- char buf[81];
- int len;
-
- va_start(args, fmt);
- len = vsprintf(buf, fmt, args);
- va_end(args);
-
- copy_mem_info(info, buf, len);
- return len;
-}
-
-static int esp_host_info(struct NCR_ESP *esp, char *ptr, off_t offset, int len)
-{
- struct scsi_device *sdev;
- struct info_str info;
- int i;
-
- info.buffer = ptr;
- info.length = len;
- info.offset = offset;
- info.pos = 0;
-
- copy_info(&info, "ESP Host Adapter:\n");
- copy_info(&info, "\tESP Model\t\t");
- switch(esp->erev) {
- case esp100:
- copy_info(&info, "ESP100 (NCR53C90)\n");
- break;
- case esp100a:
- copy_info(&info, "ESP100A (NCR53C90A)\n");
- break;
- case esp236:
- copy_info(&info, "ESP236 (NCR53C9x)\n");
- break;
- case fas216:
- copy_info(&info, "Emulex FAS216\n");
- break;
- case fas236:
- copy_info(&info, "Emulex FAS236\n");
- break;
- case fas100a:
- copy_info(&info, "FPESP100A\n");
- break;
- case fast:
- copy_info(&info, "Generic FAST\n");
- break;
- case fas366:
- copy_info(&info, "QLogic FAS366\n");
- break;
- case fsc:
- copy_info(&info, "Symbios Logic 53C9x-2\n");
- break;
- case espunknown:
- default:
- copy_info(&info, "Unknown!\n");
- break;
- };
- copy_info(&info, "\tLive Targets\t\t[ ");
- for(i = 0; i < 15; i++) {
- if(esp->targets_present & (1 << i))
- copy_info(&info, "%d ", i);
- }
- copy_info(&info, "]\n\n");
-
- /* Now describe the state of each existing target. */
- copy_info(&info, "Target #\tconfig3\t\tSync Capabilities\tDisconnect\n");
-
- shost_for_each_device(sdev, esp->ehost) {
- struct esp_device *esp_dev = sdev->hostdata;
- uint id = sdev->id;
-
- if (!(esp->targets_present & (1 << id)))
- continue;
-
- copy_info(&info, "%d\t\t", id);
- copy_info(&info, "%08lx\t", esp->config3[id]);
- copy_info(&info, "[%02lx,%02lx]\t\t\t",
- esp_dev->sync_max_offset,
- esp_dev->sync_min_period);
- copy_info(&info, "%s\n", esp_dev->disconnect ? "yes" : "no");
- }
-
- return info.pos > info.offset? info.pos - info.offset : 0;
-}
-
-/* ESP proc filesystem code. */
-int esp_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, int length,
- int inout)
-{
- struct NCR_ESP *esp = (struct NCR_ESP *)shost->hostdata;
-
- if(inout)
- return -EINVAL; /* not yet */
- if(start)
- *start = buffer;
- return esp_host_info(esp, buffer, offset, length);
-}
-EXPORT_SYMBOL(esp_proc_info);
-
-static void esp_get_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- if(sp->use_sg == 0) {
- sp->SCp.this_residual = sp->request_bufflen;
- sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
- sp->SCp.buffers_residual = 0;
- if (esp->dma_mmu_get_scsi_one)
- esp->dma_mmu_get_scsi_one(esp, sp);
- else
- sp->SCp.ptr =
- (char *) virt_to_phys(sp->request_buffer);
- } else {
- sp->SCp.buffer = (struct scatterlist *) sp->request_buffer;
- sp->SCp.buffers_residual = sp->use_sg - 1;
- sp->SCp.this_residual = sp->SCp.buffer->length;
- if (esp->dma_mmu_get_scsi_sgl)
- esp->dma_mmu_get_scsi_sgl(esp, sp);
- else
- sp->SCp.ptr =
- (char *) virt_to_phys(sg_virt(sp->SCp.buffer));
- }
-}
-
-static void esp_release_dmabufs(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- if(sp->use_sg == 0) {
- if (esp->dma_mmu_release_scsi_one)
- esp->dma_mmu_release_scsi_one(esp, sp);
- } else {
- if (esp->dma_mmu_release_scsi_sgl)
- esp->dma_mmu_release_scsi_sgl(esp, sp);
- }
-}
-
-static void esp_restore_pointers(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- struct esp_pointers *ep = &esp->data_pointers[scmd_id(sp)];
-
- sp->SCp.ptr = ep->saved_ptr;
- sp->SCp.buffer = ep->saved_buffer;
- sp->SCp.this_residual = ep->saved_this_residual;
- sp->SCp.buffers_residual = ep->saved_buffers_residual;
-}
-
-static void esp_save_pointers(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- struct esp_pointers *ep = &esp->data_pointers[scmd_id(sp)];
-
- ep->saved_ptr = sp->SCp.ptr;
- ep->saved_buffer = sp->SCp.buffer;
- ep->saved_this_residual = sp->SCp.this_residual;
- ep->saved_buffers_residual = sp->SCp.buffers_residual;
-}
-
-/* Some rules:
- *
- * 1) Never ever panic while something is live on the bus.
- * If there is to be any chance of syncing the disks this
- * rule is to be obeyed.
- *
- * 2) Any target that causes a foul condition will no longer
- * have synchronous transfers done to it, no questions
- * asked.
- *
- * 3) Keep register accesses to a minimum. Think about some
- * day when we have Xbus machines this is running on and
- * the ESP chip is on the other end of the machine on a
- * different board from the cpu where this is running.
- */
-
-/* Fire off a command. We assume the bus is free and that the only
- * case where we could see an interrupt is where we have disconnected
- * commands active and they are trying to reselect us.
- */
-static inline void esp_check_cmd(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- switch(sp->cmd_len) {
- case 6:
- case 10:
- case 12:
- esp->esp_slowcmd = 0;
- break;
-
- default:
- esp->esp_slowcmd = 1;
- esp->esp_scmdleft = sp->cmd_len;
- esp->esp_scmdp = &sp->cmnd[0];
- break;
- };
-}
-
-static inline void build_sync_nego_msg(struct NCR_ESP *esp, int period, int offset)
-{
- esp->cur_msgout[0] = EXTENDED_MESSAGE;
- esp->cur_msgout[1] = 3;
- esp->cur_msgout[2] = EXTENDED_SDTR;
- esp->cur_msgout[3] = period;
- esp->cur_msgout[4] = offset;
- esp->msgout_len = 5;
-}
-
-static void esp_exec_cmd(struct NCR_ESP *esp)
-{
- struct ESP_regs *eregs = esp->eregs;
- struct esp_device *esp_dev;
- Scsi_Cmnd *SCptr;
- struct scsi_device *SDptr;
- volatile unchar *cmdp = esp->esp_command;
- unsigned char the_esp_command;
- int lun, target;
- int i;
-
- /* Hold off if we have disconnected commands and
- * an IRQ is showing...
- */
- if(esp->disconnected_SC && esp->dma_irq_p(esp))
- return;
-
- /* Grab first member of the issue queue. */
- SCptr = esp->current_SC = remove_first_SC(&esp->issue_SC);
-
- /* Safe to panic here because current_SC is null. */
- if(!SCptr)
- panic("esp: esp_exec_cmd and issue queue is NULL");
-
- SDptr = SCptr->device;
- esp_dev = SDptr->hostdata;
- lun = SCptr->device->lun;
- target = SCptr->device->id;
-
- esp->snip = 0;
- esp->msgout_len = 0;
-
- /* Send it out whole, or piece by piece? The ESP
- * only knows how to automatically send out 6, 10,
- * and 12 byte commands. I used to think that the
- * Linux SCSI code would never throw anything other
- * than that to us, but then again there is the
- * SCSI generic driver which can send us anything.
- */
- esp_check_cmd(esp, SCptr);
-
- /* If arbitration/selection is successful, the ESP will leave
- * ATN asserted, causing the target to go into message out
- * phase. The ESP will feed the target the identify and then
- * the target can only legally go to one of command,
- * datain/out, status, or message in phase, or stay in message
- * out phase (should we be trying to send a sync negotiation
- * message after the identify). It is not allowed to drop
- * BSY, but some buggy targets do and we check for this
- * condition in the selection complete code. Most of the time
- * we'll make the command bytes available to the ESP and it
- * will not interrupt us until it finishes command phase; we
- * cannot do this for command sizes the ESP does not
- * understand and in this case we'll get interrupted right
- * when the target goes into command phase.
- *
- * It is absolutely _illegal_ in the presence of SCSI-2 devices
- * to use the ESP select w/o ATN command. When SCSI-2 devices are
- * present on the bus we _must_ always go straight to message out
- * phase with an identify message for the target. Being that
- * selection attempts in SCSI-1 w/o ATN were an option, doing SCSI-2
- * selections should not confuse SCSI-1, we hope.
- */
-
- if(esp_dev->sync) {
- /* this target's sync is known */
-#ifdef CONFIG_SCSI_MAC_ESP
-do_sync_known:
-#endif
- if(esp_dev->disconnect)
- *cmdp++ = IDENTIFY(1, lun);
- else
- *cmdp++ = IDENTIFY(0, lun);
-
- if(esp->esp_slowcmd) {
- the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA);
- esp_advance_phase(SCptr, in_slct_stop);
- } else {
- the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
- esp_advance_phase(SCptr, in_slct_norm);
- }
- } else if(!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) {
- /* After the bootup SCSI code sends both the
- * TEST_UNIT_READY and INQUIRY commands we want
- * to at least attempt allowing the device to
- * disconnect.
- */
- ESPMISC(("esp: Selecting device for first time. target=%d "
- "lun=%d\n", target, SCptr->device->lun));
- if(!SDptr->borken && !esp_dev->disconnect)
- esp_dev->disconnect = 1;
-
- *cmdp++ = IDENTIFY(0, lun);
- esp->prevmsgout = NOP;
- esp_advance_phase(SCptr, in_slct_norm);
- the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA);
-
- /* Take no chances... */
- esp_dev->sync_max_offset = 0;
- esp_dev->sync_min_period = 0;
- } else {
- int toshiba_cdrom_hwbug_wkaround = 0;
-
-#ifdef CONFIG_SCSI_MAC_ESP
- /* Never allow synchronous transfers (disconnect OK) on
- * Macintosh. Well, maybe later when we figured out how to
- * do DMA on the machines that support it ...
- */
- esp_dev->disconnect = 1;
- esp_dev->sync_max_offset = 0;
- esp_dev->sync_min_period = 0;
- esp_dev->sync = 1;
- esp->snip = 0;
- goto do_sync_known;
-#endif
- /* We've talked to this guy before,
- * but never negotiated. Let's try
- * sync negotiation.
- */
- if(!SDptr->borken) {
- if((SDptr->type == TYPE_ROM) &&
- (!strncmp(SDptr->vendor, "TOSHIBA", 7))) {
- /* Nice try sucker... */
- ESPMISC(("esp%d: Disabling sync for buggy "
- "Toshiba CDROM.\n", esp->esp_id));
- toshiba_cdrom_hwbug_wkaround = 1;
- build_sync_nego_msg(esp, 0, 0);
- } else {
- build_sync_nego_msg(esp, esp->sync_defp, 15);
- }
- } else {
- build_sync_nego_msg(esp, 0, 0);
- }
- esp_dev->sync = 1;
- esp->snip = 1;
-
- /* A fix for broken SCSI1 targets, when they disconnect
- * they lock up the bus and confuse ESP. So disallow
- * disconnects for SCSI1 targets for now until we
- * find a better fix.
- *
- * Addendum: This is funny, I figured out what was going
- * on. The blotzed SCSI1 target would disconnect,
- * one of the other SCSI2 targets or both would be
- * disconnected as well. The SCSI1 target would
- * stay disconnected long enough that we start
- * up a command on one of the SCSI2 targets. As
- * the ESP is arbitrating for the bus the SCSI1
- * target begins to arbitrate as well to reselect
- * the ESP. The SCSI1 target refuses to drop its
- * ID bit on the data bus even though the ESP is
- * at ID 7 and is the obvious winner for any
- * arbitration. The ESP is a poor sport and refuses
- * to lose arbitration, it will continue indefinitely
- * trying to arbitrate for the bus and can only be
- * stopped via a chip reset or SCSI bus reset.
- * Therefore _no_ disconnects for SCSI1 targets
- * thank you very much. ;-)
- */
- if(((SDptr->scsi_level < 3) && (SDptr->type != TYPE_TAPE)) ||
- toshiba_cdrom_hwbug_wkaround || SDptr->borken) {
- ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d "
- "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun));
- esp_dev->disconnect = 0;
- *cmdp++ = IDENTIFY(0, lun);
- } else {
- *cmdp++ = IDENTIFY(1, lun);
- }
-
- /* ESP fifo is only so big...
- * Make this look like a slow command.
- */
- esp->esp_slowcmd = 1;
- esp->esp_scmdleft = SCptr->cmd_len;
- esp->esp_scmdp = &SCptr->cmnd[0];
-
- the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA);
- esp_advance_phase(SCptr, in_slct_msg);
- }
-
- if(!esp->esp_slowcmd)
- for(i = 0; i < SCptr->cmd_len; i++)
- *cmdp++ = SCptr->cmnd[i];
-
- esp_write(eregs->esp_busid, (target & 7));
- if (esp->prev_soff != esp_dev->sync_max_offset ||
- esp->prev_stp != esp_dev->sync_min_period ||
- (esp->erev > esp100a &&
- esp->prev_cfg3 != esp->config3[target])) {
- esp->prev_soff = esp_dev->sync_max_offset;
- esp_write(eregs->esp_soff, esp->prev_soff);
- esp->prev_stp = esp_dev->sync_min_period;
- esp_write(eregs->esp_stp, esp->prev_stp);
- if(esp->erev > esp100a) {
- esp->prev_cfg3 = esp->config3[target];
- esp_write(eregs->esp_cfg3, esp->prev_cfg3);
- }
- }
- i = (cmdp - esp->esp_command);
-
- /* Set up the DMA and ESP counters */
- if(esp->do_pio_cmds){
- int j = 0;
-
- /*
- * XXX MSch:
- *
- * It seems this is required, at least to clean up
- * after failed commands when using PIO mode ...
- */
- esp_cmd(esp, eregs, ESP_CMD_FLUSH);
-
- for(;j<i;j++)
- esp_write(eregs->esp_fdata, esp->esp_command[j]);
- the_esp_command &= ~ESP_CMD_DMA;
-
- /* Tell ESP to "go". */
- esp_cmd(esp, eregs, the_esp_command);
- } else {
- /* Set up the ESP counters */
- esp_write(eregs->esp_tclow, i);
- esp_write(eregs->esp_tcmed, 0);
- esp->dma_init_write(esp, esp->esp_command_dvma, i);
-
- /* Tell ESP to "go". */
- esp_cmd(esp, eregs, the_esp_command);
- }
-}
-
-/* Queue a SCSI command delivered from the mid-level Linux SCSI code. */
-int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
-{
- struct NCR_ESP *esp;
-
- /* Set up func ptr and initial driver cmd-phase. */
- SCpnt->scsi_done = done;
- SCpnt->SCp.phase = not_issued;
-
- esp = (struct NCR_ESP *) SCpnt->device->host->hostdata;
-
- if(esp->dma_led_on)
- esp->dma_led_on(esp);
-
- /* We use the scratch area. */
- ESPQUEUE(("esp_queue: target=%d lun=%d ", SCpnt->device->id, SCpnt->lun));
- ESPDISC(("N<%02x,%02x>", SCpnt->device->id, SCpnt->lun));
-
- esp_get_dmabufs(esp, SCpnt);
- esp_save_pointers(esp, SCpnt); /* FIXME for tag queueing */
-
- SCpnt->SCp.Status = CHECK_CONDITION;
- SCpnt->SCp.Message = 0xff;
- SCpnt->SCp.sent_command = 0;
-
- /* Place into our queue. */
- if(SCpnt->cmnd[0] == REQUEST_SENSE) {
- ESPQUEUE(("RQSENSE\n"));
- prepend_SC(&esp->issue_SC, SCpnt);
- } else {
- ESPQUEUE(("\n"));
- append_SC(&esp->issue_SC, SCpnt);
- }
-
- /* Run it now if we can. */
- if(!esp->current_SC && !esp->resetting_bus)
- esp_exec_cmd(esp);
-
- return 0;
-}
-
-/* Dump driver state. */
-static void esp_dump_cmd(Scsi_Cmnd *SCptr)
-{
- ESPLOG(("[tgt<%02x> lun<%02x> "
- "pphase<%s> cphase<%s>]",
- SCptr->device->id, SCptr->device->lun,
- phase_string(SCptr->SCp.sent_command),
- phase_string(SCptr->SCp.phase)));
-}
-
-static void esp_dump_state(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- Scsi_Cmnd *SCptr = esp->current_SC;
-#ifdef DEBUG_ESP_CMDS
- int i;
-#endif
-
- ESPLOG(("esp%d: dumping state\n", esp->esp_id));
-
- /* Print DMA status */
- esp->dma_dump_state(esp);
-
- ESPLOG(("esp%d: SW [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
- esp->esp_id, esp->sreg, esp->seqreg, esp->ireg));
- ESPLOG(("esp%d: HW reread [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
- esp->esp_id, esp_read(eregs->esp_status), esp_read(eregs->esp_sstep),
- esp_read(eregs->esp_intrpt)));
-#ifdef DEBUG_ESP_CMDS
- printk("esp%d: last ESP cmds [", esp->esp_id);
- i = (esp->espcmdent - 1) & 31;
- printk("<");
- esp_print_cmd(esp->espcmdlog[i]);
- printk(">");
- i = (i - 1) & 31;
- printk("<");
- esp_print_cmd(esp->espcmdlog[i]);
- printk(">");
- i = (i - 1) & 31;
- printk("<");
- esp_print_cmd(esp->espcmdlog[i]);
- printk(">");
- i = (i - 1) & 31;
- printk("<");
- esp_print_cmd(esp->espcmdlog[i]);
- printk(">");
- printk("]\n");
-#endif /* (DEBUG_ESP_CMDS) */
-
- if(SCptr) {
- ESPLOG(("esp%d: current command ", esp->esp_id));
- esp_dump_cmd(SCptr);
- }
- ESPLOG(("\n"));
- SCptr = esp->disconnected_SC;
- ESPLOG(("esp%d: disconnected ", esp->esp_id));
- while(SCptr) {
- esp_dump_cmd(SCptr);
- SCptr = (Scsi_Cmnd *) SCptr->host_scribble;
- }
- ESPLOG(("\n"));
-}
-
-/* Abort a command. The host_lock is acquired by caller. */
-int esp_abort(Scsi_Cmnd *SCptr)
-{
- struct NCR_ESP *esp = (struct NCR_ESP *) SCptr->device->host->hostdata;
- struct ESP_regs *eregs = esp->eregs;
- int don;
-
- ESPLOG(("esp%d: Aborting command\n", esp->esp_id));
- esp_dump_state(esp, eregs);
-
- /* Wheee, if this is the current command on the bus, the
- * best we can do is assert ATN and wait for msgout phase.
- * This should even fix a hung SCSI bus when we lose state
- * in the driver and timeout because the eventual phase change
- * will cause the ESP to (eventually) give an interrupt.
- */
- if(esp->current_SC == SCptr) {
- esp->cur_msgout[0] = ABORT;
- esp->msgout_len = 1;
- esp->msgout_ctr = 0;
- esp_cmd(esp, eregs, ESP_CMD_SATN);
- return SUCCESS;
- }
-
- /* If it is still in the issue queue then we can safely
- * call the completion routine and report abort success.
- */
- don = esp->dma_ports_p(esp);
- if(don) {
- esp->dma_ints_off(esp);
- synchronize_irq(esp->irq);
- }
- if(esp->issue_SC) {
- Scsi_Cmnd **prev, *this;
- for(prev = (&esp->issue_SC), this = esp->issue_SC;
- this;
- prev = (Scsi_Cmnd **) &(this->host_scribble),
- this = (Scsi_Cmnd *) this->host_scribble) {
- if(this == SCptr) {
- *prev = (Scsi_Cmnd *) this->host_scribble;
- this->host_scribble = NULL;
- esp_release_dmabufs(esp, this);
- this->result = DID_ABORT << 16;
- this->scsi_done(this);
- if(don)
- esp->dma_ints_on(esp);
- return SUCCESS;
- }
- }
- }
-
- /* Yuck, the command to abort is disconnected, it is not
- * worth trying to abort it now if something else is live
- * on the bus at this time. So, we let the SCSI code wait
- * a little bit and try again later.
- */
- if(esp->current_SC) {
- if(don)
- esp->dma_ints_on(esp);
- return FAILED;
- }
-
- /* It's disconnected, we have to reconnect to re-establish
- * the nexus and tell the device to abort. However, we really
- * cannot 'reconnect' per se. Don't try to be fancy, just
- * indicate failure, which causes our caller to reset the whole
- * bus.
- */
-
- if(don)
- esp->dma_ints_on(esp);
- return FAILED;
-}
-
-/* We've sent ESP_CMD_RS to the ESP, the interrupt had just
- * arrived indicating the end of the SCSI bus reset. Our job
- * is to clean out the command queues and begin re-execution
- * of SCSI commands once more.
- */
-static int esp_finish_reset(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- Scsi_Cmnd *sp = esp->current_SC;
-
- /* Clean up currently executing command, if any. */
- if (sp != NULL) {
- esp_release_dmabufs(esp, sp);
- sp->result = (DID_RESET << 16);
- sp->scsi_done(sp);
- esp->current_SC = NULL;
- }
-
- /* Clean up disconnected queue, they have been invalidated
- * by the bus reset.
- */
- if (esp->disconnected_SC) {
- while((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) {
- esp_release_dmabufs(esp, sp);
- sp->result = (DID_RESET << 16);
- sp->scsi_done(sp);
- }
- }
-
- /* SCSI bus reset is complete. */
- esp->resetting_bus = 0;
- wake_up(&esp->reset_queue);
-
- /* Ok, now it is safe to get commands going once more. */
- if(esp->issue_SC)
- esp_exec_cmd(esp);
-
- return do_intr_end;
-}
-
-static int esp_do_resetbus(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- ESPLOG(("esp%d: Resetting scsi bus\n", esp->esp_id));
- esp->resetting_bus = 1;
- esp_cmd(esp, eregs, ESP_CMD_RS);
-
- return do_intr_end;
-}
-
-/* Reset ESP chip, reset hanging bus, then kill active and
- * disconnected commands for targets without soft reset.
- *
- * The host_lock is acquired by caller.
- */
-int esp_reset(Scsi_Cmnd *SCptr)
-{
- struct NCR_ESP *esp = (struct NCR_ESP *) SCptr->device->host->hostdata;
-
- spin_lock_irq(esp->ehost->host_lock);
- (void) esp_do_resetbus(esp, esp->eregs);
- spin_unlock_irq(esp->ehost->host_lock);
-
- wait_event(esp->reset_queue, (esp->resetting_bus == 0));
-
- return SUCCESS;
-}
-
-/* Internal ESP done function. */
-static void esp_done(struct NCR_ESP *esp, int error)
-{
- Scsi_Cmnd *done_SC;
-
- if(esp->current_SC) {
- done_SC = esp->current_SC;
- esp->current_SC = NULL;
- esp_release_dmabufs(esp, done_SC);
- done_SC->result = error;
- done_SC->scsi_done(done_SC);
-
- /* Bus is free, issue any commands in the queue. */
- if(esp->issue_SC && !esp->current_SC)
- esp_exec_cmd(esp);
- } else {
- /* Panic is safe as current_SC is null so we may still
- * be able to accept more commands to sync disk buffers.
- */
- ESPLOG(("panicing\n"));
- panic("esp: done() called with NULL esp->current_SC");
- }
-}
-
-/* Wheee, ESP interrupt engine. */
-
-/* Forward declarations. */
-static int esp_do_phase_determine(struct NCR_ESP *esp,
- struct ESP_regs *eregs);
-static int esp_do_data_finale(struct NCR_ESP *esp, struct ESP_regs *eregs);
-static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs);
-static int esp_do_status(struct NCR_ESP *esp, struct ESP_regs *eregs);
-static int esp_do_msgin(struct NCR_ESP *esp, struct ESP_regs *eregs);
-static int esp_do_msgindone(struct NCR_ESP *esp, struct ESP_regs *eregs);
-static int esp_do_msgout(struct NCR_ESP *esp, struct ESP_regs *eregs);
-static int esp_do_cmdbegin(struct NCR_ESP *esp, struct ESP_regs *eregs);
-
-#define sreg_datainp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DIP)
-#define sreg_dataoutp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DOP)
-
-/* We try to avoid some interrupts by jumping ahead and see if the ESP
- * has gotten far enough yet. Hence the following.
- */
-static inline int skipahead1(struct NCR_ESP *esp, struct ESP_regs *eregs,
- Scsi_Cmnd *scp, int prev_phase, int new_phase)
-{
- if(scp->SCp.sent_command != prev_phase)
- return 0;
-
- if(esp->dma_irq_p(esp)) {
- /* Yes, we are able to save an interrupt. */
- esp->sreg = (esp_read(eregs->esp_status) & ~(ESP_STAT_INTR));
- esp->ireg = esp_read(eregs->esp_intrpt);
- if(!(esp->ireg & ESP_INTR_SR))
- return 0;
- else
- return do_reset_complete;
- }
- /* Ho hum, target is taking forever... */
- scp->SCp.sent_command = new_phase; /* so we don't recurse... */
- return do_intr_end;
-}
-
-static inline int skipahead2(struct NCR_ESP *esp,
- struct ESP_regs *eregs,
- Scsi_Cmnd *scp, int prev_phase1, int prev_phase2,
- int new_phase)
-{
- if(scp->SCp.sent_command != prev_phase1 &&
- scp->SCp.sent_command != prev_phase2)
- return 0;
- if(esp->dma_irq_p(esp)) {
- /* Yes, we are able to save an interrupt. */
- esp->sreg = (esp_read(eregs->esp_status) & ~(ESP_STAT_INTR));
- esp->ireg = esp_read(eregs->esp_intrpt);
- if(!(esp->ireg & ESP_INTR_SR))
- return 0;
- else
- return do_reset_complete;
- }
- /* Ho hum, target is taking forever... */
- scp->SCp.sent_command = new_phase; /* so we don't recurse... */
- return do_intr_end;
-}
-
-/* Misc. esp helper macros. */
-#define esp_setcount(__eregs, __cnt) \
- esp_write((__eregs)->esp_tclow, ((__cnt) & 0xff)); \
- esp_write((__eregs)->esp_tcmed, (((__cnt) >> 8) & 0xff))
-
-#define esp_getcount(__eregs) \
- ((esp_read((__eregs)->esp_tclow)&0xff) | \
- ((esp_read((__eregs)->esp_tcmed)&0xff) << 8))
-
-#define fcount(__esp, __eregs) \
- (esp_read((__eregs)->esp_fflags) & ESP_FF_FBYTES)
-
-#define fnzero(__esp, __eregs) \
- (esp_read((__eregs)->esp_fflags) & ESP_FF_ONOTZERO)
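Editor's illustration (not part of the driver): the transfer counter handled by the esp_setcount/esp_getcount macros above is a single 16-bit value split across the two 8-bit count registers; a standalone sketch of that arithmetic:

#include <assert.h>

/* Hypothetical example: split and reassemble a 16-bit count the way the
 * macros above program tclow/tcmed and read them back.
 */
static void counter_example(void)
{
	unsigned int cnt = 0x1234;
	unsigned char tclow = cnt & 0xff;        /* 0x34 */
	unsigned char tcmed = (cnt >> 8) & 0xff; /* 0x12 */

	assert((tclow | (tcmed << 8)) == 0x1234);
}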
-
-/* XXX speculative nops unnecessary when continuing amidst a data phase
- * XXX even on esp100!!! another case of flooding the bus with I/O reg
- * XXX writes...
- */
-#define esp_maybe_nop(__esp, __eregs) \
- if((__esp)->erev == esp100) \
- esp_cmd((__esp), (__eregs), ESP_CMD_NULL)
-
-#define sreg_to_dataphase(__sreg) \
- ((((__sreg) & ESP_STAT_PMASK) == ESP_DOP) ? in_dataout : in_datain)
-
- /* The ESP100, when in synchronous data phase, can mistake a long final
- * REQ pulse from the target as an extra byte; it places whatever is on
- * the data lines into the fifo. For now, we will assume when this
- * happens that the target is a bit quirky and we don't want to
- * be talking synchronously to it anyways. Regardless, we need to
- * tell the ESP to eat the extraneous byte so that we can proceed
- * to the next phase.
- */
-static inline int esp100_sync_hwbug(struct NCR_ESP *esp, struct ESP_regs *eregs,
- Scsi_Cmnd *sp, int fifocnt)
-{
- /* Do not touch this piece of code. */
- if((!(esp->erev == esp100)) ||
- (!(sreg_datainp((esp->sreg = esp_read(eregs->esp_status))) && !fifocnt) &&
- !(sreg_dataoutp(esp->sreg) && !fnzero(esp, eregs)))) {
- if(sp->SCp.phase == in_dataout)
- esp_cmd(esp, eregs, ESP_CMD_FLUSH);
- return 0;
- } else {
- /* Async mode for this guy. */
- build_sync_nego_msg(esp, 0, 0);
-
- /* Ack the bogus byte, but set ATN first. */
- esp_cmd(esp, eregs, ESP_CMD_SATN);
- esp_cmd(esp, eregs, ESP_CMD_MOK);
- return 1;
- }
-}
-
- /* This closes the window during a selection with a reselect pending; because
- * we use DMA for the selection process, the FIFO should hold the correct
- * contents if we get reselected during this process. So we just need to
- * ack the possible illegal cmd interrupt pending on the esp100.
- */
-static inline int esp100_reconnect_hwbug(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- volatile unchar junk;
-
- if(esp->erev != esp100)
- return 0;
- junk = esp_read(eregs->esp_intrpt);
-
- if(junk & ESP_INTR_SR)
- return 1;
- return 0;
-}
-
-/* This verifies the BUSID bits during a reselection so that we know which
- * target is talking to us.
- */
-static inline int reconnect_target(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- int it, me = esp->scsi_id_mask, targ = 0;
-
- if(2 != fcount(esp, eregs))
- return -1;
- it = esp_read(eregs->esp_fdata);
- if(!(it & me))
- return -1;
- it &= ~me;
- if(it & (it - 1))
- return -1;
- while(!(it & 1))
- targ++, it >>= 1;
- return targ;
-}
-
-/* This verifies the identify from the target so that we know which lun is
- * being reconnected.
- */
-static inline int reconnect_lun(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- int lun;
-
- if((esp->sreg & ESP_STAT_PMASK) != ESP_MIP)
- return -1;
- lun = esp_read(eregs->esp_fdata);
-
- /* Yes, you read this correctly. We report a lun of zero
- * if we see a parity error. The ESP reports a parity error for
- * the lun byte, and this is the only way to hope to recover
- * because the target is connected.
- */
- if(esp->sreg & ESP_STAT_PERR)
- return 0;
-
- /* Check for illegal bits being set in the lun. */
- if((lun & 0x40) || !(lun & 0x80))
- return -1;
-
- return lun & 7;
-}
-
-/* This puts the driver in a state where it can revitalize a command that
- * is being continued due to reselection.
- */
-static inline void esp_connect(struct NCR_ESP *esp, struct ESP_regs *eregs,
- Scsi_Cmnd *sp)
-{
- struct scsi_device *dp = sp->device;
- struct esp_device *esp_dev = dp->hostdata;
-
- if(esp->prev_soff != esp_dev->sync_max_offset ||
- esp->prev_stp != esp_dev->sync_min_period ||
- (esp->erev > esp100a &&
- esp->prev_cfg3 != esp->config3[scmd_id(sp)])) {
- esp->prev_soff = esp_dev->sync_max_offset;
- esp_write(eregs->esp_soff, esp->prev_soff);
- esp->prev_stp = esp_dev->sync_min_period;
- esp_write(eregs->esp_stp, esp->prev_stp);
- if(esp->erev > esp100a) {
- esp->prev_cfg3 = esp->config3[scmd_id(sp)];
- esp_write(eregs->esp_cfg3, esp->prev_cfg3);
- }
- }
- esp->current_SC = sp;
-}
-
-/* This will place the current working command back into the issue queue
- * if we are to receive a reselection amidst a selection attempt.
- */
-static inline void esp_reconnect(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- if(!esp->disconnected_SC)
- ESPLOG(("esp%d: Weird, being reselected but disconnected "
- "command queue is empty.\n", esp->esp_id));
- esp->snip = 0;
- esp->current_SC = NULL;
- sp->SCp.phase = not_issued;
- append_SC(&esp->issue_SC, sp);
-}
-
-/* Begin message in phase. */
-static int esp_do_msgin(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- esp_cmd(esp, eregs, ESP_CMD_FLUSH);
- esp_maybe_nop(esp, eregs);
- esp_cmd(esp, eregs, ESP_CMD_TI);
- esp->msgin_len = 1;
- esp->msgin_ctr = 0;
- esp_advance_phase(esp->current_SC, in_msgindone);
- return do_work_bus;
-}
-
-static inline void advance_sg(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- ++sp->SCp.buffer;
- --sp->SCp.buffers_residual;
- sp->SCp.this_residual = sp->SCp.buffer->length;
- if (esp->dma_advance_sg)
- esp->dma_advance_sg (sp);
- else
- sp->SCp.ptr = (char *) virt_to_phys(sg_virt(sp->SCp.buffer));
-
-}
-
-/* Please note that the way I've coded these routines is that I _always_
- * check for a disconnect during any and all information transfer
- * phases. The SCSI standard states that the target _can_ cause a BUS
- * FREE condition by dropping all MSG/CD/IO/BSY signals. Also note
- * that during information transfer phases the target controls every
- * change in phase, the only thing the initiator can do is "ask" for
- * a message out phase by driving ATN true. The target can, and sometimes
- * will, completely ignore this request so we cannot assume anything when
- * we try to force a message out phase to abort/reset a target. Most of
- * the time the target will eventually be nice and go to message out, so
- * we may have to hold on to our state about what we want to tell the target
- * for some period of time.
- */
-
-/* I think I have things working here correctly. Even partial transfers
- * within a buffer or sub-buffer should not upset us at all no matter
- * how bad the target and/or ESP fucks things up.
- */
-static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- Scsi_Cmnd *SCptr = esp->current_SC;
- int thisphase, hmuch;
-
- ESPDATA(("esp_do_data: "));
- esp_maybe_nop(esp, eregs);
- thisphase = sreg_to_dataphase(esp->sreg);
- esp_advance_phase(SCptr, thisphase);
- ESPDATA(("newphase<%s> ", (thisphase == in_datain) ? "DATAIN" : "DATAOUT"));
- hmuch = esp->dma_can_transfer(esp, SCptr);
-
- /*
- * XXX MSch: cater for PIO transfer here; PIO used if hmuch == 0
- */
- if (hmuch) { /* DMA */
- /*
- * DMA
- */
- ESPDATA(("hmuch<%d> ", hmuch));
- esp->current_transfer_size = hmuch;
- esp_setcount(eregs, (esp->fas_premature_intr_workaround ?
- (hmuch + 0x40) : hmuch));
- esp->dma_setup(esp, (__u32)((unsigned long)SCptr->SCp.ptr),
- hmuch, (thisphase == in_datain));
- ESPDATA(("DMA|TI --> do_intr_end\n"));
- esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
- return do_intr_end;
- /*
- * end DMA
- */
- } else {
- /*
- * PIO
- */
- int oldphase, i = 0; /* or where we left off last time ?? esp->current_data ?? */
- int fifocnt = 0;
- unsigned char *p = phys_to_virt((unsigned long)SCptr->SCp.ptr);
-
- oldphase = esp_read(eregs->esp_status) & ESP_STAT_PMASK;
-
- /*
- * polled transfer; ugly, can we make this happen in a DRQ
- * interrupt handler ??
- * requires keeping track of state information in host or
- * command struct!
- * Problem: I've never seen a DRQ happen on Mac, not even
- * with ESP_CMD_DMA ...
- */
-
- /* figure out how much needs to be transferred */
- hmuch = SCptr->SCp.this_residual;
- ESPDATA(("hmuch<%d> pio ", hmuch));
- esp->current_transfer_size = hmuch;
-
- /* tell the ESP ... */
- esp_setcount(eregs, hmuch);
-
- /* loop */
- while (hmuch) {
- int j, fifo_stuck = 0, newphase;
- unsigned long timeout;
-#if 0
- unsigned long flags;
-#endif
-#if 0
- if ( i % 10 )
- ESPDATA(("\r"));
- else
- ESPDATA(( /*"\n"*/ "\r"));
-#endif
-#if 0
- local_irq_save(flags);
-#endif
- if(thisphase == in_datain) {
- /* 'go' ... */
- esp_cmd(esp, eregs, ESP_CMD_TI);
-
- /* wait for data */
- timeout = 1000000;
- while (!((esp->sreg=esp_read(eregs->esp_status)) & ESP_STAT_INTR) && --timeout)
- udelay(2);
- if (timeout == 0)
- printk("DRQ datain timeout! \n");
-
- newphase = esp->sreg & ESP_STAT_PMASK;
-
- /* see how much we got ... */
- fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);
-
- if (!fifocnt)
- fifo_stuck++;
- else
- fifo_stuck = 0;
-
- ESPDATA(("\rgot %d st %x ph %x", fifocnt, esp->sreg, newphase));
-
- /* read fifo */
- for(j=0;j<fifocnt;j++)
- p[i++] = esp_read(eregs->esp_fdata);
-
- ESPDATA(("(%d) ", i));
-
- /* how many to go ?? */
- hmuch -= fifocnt;
-
- /* break if status phase !! */
- if(newphase == ESP_STATP) {
- /* clear int. */
- esp->ireg = esp_read(eregs->esp_intrpt);
- break;
- }
- } else {
-#define MAX_FIFO 8
- /* how much will fit ? */
- int this_count = MAX_FIFO - fifocnt;
- if (this_count > hmuch)
- this_count = hmuch;
-
- /* fill fifo */
- for(j=0;j<this_count;j++)
- esp_write(eregs->esp_fdata, p[i++]);
-
- /* how many left if this goes out ?? */
- hmuch -= this_count;
-
- /* 'go' ... */
- esp_cmd(esp, eregs, ESP_CMD_TI);
-
- /* wait for 'got it' */
- timeout = 1000000;
- while (!((esp->sreg=esp_read(eregs->esp_status)) & ESP_STAT_INTR) && --timeout)
- udelay(2);
- if (timeout == 0)
- printk("DRQ dataout timeout! \n");
-
- newphase = esp->sreg & ESP_STAT_PMASK;
-
- /* need to check how much was sent ?? */
- fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);
-
- ESPDATA(("\rsent %d st %x ph %x", this_count - fifocnt, esp->sreg, newphase));
-
- ESPDATA(("(%d) ", i));
-
- /* break if status phase !! */
- if(newphase == ESP_STATP) {
- /* clear int. */
- esp->ireg = esp_read(eregs->esp_intrpt);
- break;
- }
-
- }
-
- /* clear int. */
- esp->ireg = esp_read(eregs->esp_intrpt);
-
- ESPDATA(("ir %x ... ", esp->ireg));
-
- if (hmuch == 0)
- ESPDATA(("done! \n"));
-
-#if 0
- local_irq_restore(flags);
-#endif
-
- /* check new bus phase */
- if (newphase != oldphase && i < esp->current_transfer_size) {
- /* something happened; disconnect ?? */
- ESPDATA(("phase change, dropped out with %d done ... ", i));
- break;
- }
-
- /* check int. status */
- if (esp->ireg & ESP_INTR_DC) {
- /* disconnect */
- ESPDATA(("disconnect; %d transferred ... ", i));
- break;
- } else if (esp->ireg & ESP_INTR_FDONE) {
- /* function done */
- ESPDATA(("function done; %d transferred ... ", i));
- break;
- }
-
- /* XXX fixme: bail out on stall */
- if (fifo_stuck > 10) {
- /* we're stuck */
- ESPDATA(("fifo stall; %d transferred ... ", i));
- break;
- }
- }
-
- ESPDATA(("\n"));
- /* check successful completion ?? */
-
- if (thisphase == in_dataout)
- hmuch += fifocnt; /* stuck?? adjust data pointer ...*/
-
- /* tell do_data_finale how much was transferred */
- esp->current_transfer_size -= hmuch;
-
- /* still not completely sure on this one ... */
- return /*do_intr_end*/ do_work_bus /*do_phase_determine*/ ;
-
- /*
- * end PIO
- */
- }
- return do_intr_end;
-}
-
-/* See how successful the data transfer was. */
-static int esp_do_data_finale(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- Scsi_Cmnd *SCptr = esp->current_SC;
- struct esp_device *esp_dev = SCptr->device->hostdata;
- int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0;
-
- if(esp->dma_led_off)
- esp->dma_led_off(esp);
-
- ESPDATA(("esp_do_data_finale: "));
-
- if(SCptr->SCp.phase == in_datain) {
- if(esp->sreg & ESP_STAT_PERR) {
- /* Yuck, parity error. The ESP asserts ATN
- * so that we can go to message out phase
- * immediately and inform the target that
- * something bad happened.
- */
- ESPLOG(("esp%d: data bad parity detected.\n",
- esp->esp_id));
- esp->cur_msgout[0] = INITIATOR_ERROR;
- esp->msgout_len = 1;
- }
- if(esp->dma_drain)
- esp->dma_drain(esp);
- }
- if(esp->dma_invalidate)
- esp->dma_invalidate(esp);
-
- /* This could happen for the above parity error case. */
- if(!(esp->ireg == ESP_INTR_BSERV)) {
- /* Please go to msgout phase, please please please... */
- ESPLOG(("esp%d: !BSERV after data, probably to msgout\n",
- esp->esp_id));
- return esp_do_phase_determine(esp, eregs);
- }
-
- /* Check for partial transfers and other horrible events. */
- fifocnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);
- ecount = esp_getcount(eregs);
- if(esp->fas_premature_intr_workaround)
- ecount -= 0x40;
- bytes_sent = esp->current_transfer_size;
-
- ESPDATA(("trans_sz=%d, ", bytes_sent));
- if(!(esp->sreg & ESP_STAT_TCNT))
- bytes_sent -= ecount;
- if(SCptr->SCp.phase == in_dataout)
- bytes_sent -= fifocnt;
-
- ESPDATA(("bytes_sent=%d (ecount=%d, fifocnt=%d), ", bytes_sent,
- ecount, fifocnt));
-
- /* If we were in synchronous mode, check for peculiarities. */
- if(esp_dev->sync_max_offset)
- bogus_data = esp100_sync_hwbug(esp, eregs, SCptr, fifocnt);
- else
- esp_cmd(esp, eregs, ESP_CMD_FLUSH);
-
- /* Until we are sure of what has happened, we are certainly
- * in the dark.
- */
- esp_advance_phase(SCptr, in_the_dark);
-
- /* Check for premature interrupt condition. Can happen on FAS2x6
- * chips. QLogic recommends a workaround by overprogramming the
- * transfer counters, but this makes doing scatter-gather impossible.
- * Until there is a way to disable scatter-gather for a single target,
- * and not only for the entire host adapter as it is now, the workaround
- * is way too expensive performance-wise.
- * Instead, it turns out that when this happens the target has disconnected
- * already but it doesn't show in the interrupt register. Compensate for
- * that here to try and avoid a SCSI bus reset.
- */
- if(!esp->fas_premature_intr_workaround && (fifocnt == 1) &&
- sreg_dataoutp(esp->sreg)) {
- ESPLOG(("esp%d: Premature interrupt, enabling workaround\n",
- esp->esp_id));
-#if 0
- /* Disable scatter-gather operations, they are not possible
- * when using this workaround.
- */
- esp->ehost->sg_tablesize = 0;
- esp->ehost->use_clustering = ENABLE_CLUSTERING;
- esp->fas_premature_intr_workaround = 1;
- bytes_sent = 0;
- if(SCptr->use_sg) {
- ESPLOG(("esp%d: Aborting scatter-gather operation\n",
- esp->esp_id));
- esp->cur_msgout[0] = ABORT;
- esp->msgout_len = 1;
- esp->msgout_ctr = 0;
- esp_cmd(esp, eregs, ESP_CMD_SATN);
- esp_setcount(eregs, 0xffff);
- esp_cmd(esp, eregs, ESP_CMD_NULL);
- esp_cmd(esp, eregs, ESP_CMD_TPAD | ESP_CMD_DMA);
- return do_intr_end;
- }
-#else
- /* Just set the disconnected bit. That's what appears to
- * happen anyway. The state machine will pick it up when
- * we return.
- */
- esp->ireg |= ESP_INTR_DC;
-#endif
- }
-
- if(bytes_sent < 0) {
- /* I've seen this happen due to lost state in this
- * driver. No idea why it happened, but allowing
- * this value to be negative caused things to
- * lock up. This allows greater chance of recovery.
- * In fact every time I've seen this, it has been
- * a driver bug without question.
- */
- ESPLOG(("esp%d: yieee, bytes_sent < 0!\n", esp->esp_id));
- ESPLOG(("esp%d: csz=%d fifocount=%d ecount=%d\n",
- esp->esp_id,
- esp->current_transfer_size, fifocnt, ecount));
- ESPLOG(("esp%d: use_sg=%d ptr=%p this_residual=%d\n",
- esp->esp_id,
- SCptr->use_sg, SCptr->SCp.ptr, SCptr->SCp.this_residual));
- ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id,
- SCptr->device->id));
- SCptr->device->borken = 1;
- esp_dev->sync = 0;
- bytes_sent = 0;
- }
-
- /* Update the state of our transfer. */
- SCptr->SCp.ptr += bytes_sent;
- SCptr->SCp.this_residual -= bytes_sent;
- if(SCptr->SCp.this_residual < 0) {
- /* shit */
- ESPLOG(("esp%d: Data transfer overrun.\n", esp->esp_id));
- SCptr->SCp.this_residual = 0;
- }
-
- /* Maybe continue. */
- if(!bogus_data) {
- ESPDATA(("!bogus_data, "));
- /* NO MATTER WHAT, we advance the scatterlist;
- * if the target should decide to disconnect
- * in between scatter chunks (which is common)
- * we could die horribly! I used to have the sg
- * advance occur only if we are going back into
- * (or are staying in) a data phase, you can
- * imagine the hell I went through trying to
- * figure this out.
- */
- if(!SCptr->SCp.this_residual && SCptr->SCp.buffers_residual)
- advance_sg(esp, SCptr);
-#ifdef DEBUG_ESP_DATA
- if(sreg_datainp(esp->sreg) || sreg_dataoutp(esp->sreg)) {
- ESPDATA(("to more data\n"));
- } else {
- ESPDATA(("to new phase\n"));
- }
-#endif
- return esp_do_phase_determine(esp, eregs);
- }
- /* Bogus data, just wait for next interrupt. */
- ESPLOG(("esp%d: bogus_data during end of data phase\n",
- esp->esp_id));
- return do_intr_end;
-}
-
-/* We received a non-good status return at the end of
- * running a SCSI command. This is used to decide if
- * we should clear our synchronous transfer state for
- * such a device when that happens.
- *
- * The idea is that when spinning up a disk or rewinding
- * a tape, we don't want to go into a loop re-negotiating
- * synchronous capabilities over and over.
- */
-static int esp_should_clear_sync(Scsi_Cmnd *sp)
-{
- unchar cmd = sp->cmnd[0];
-
- /* These cases are for spinning up a disk and
- * waiting for that spinup to complete.
- */
- if(cmd == START_STOP)
- return 0;
-
- if(cmd == TEST_UNIT_READY)
- return 0;
-
- /* One more special case for SCSI tape drives,
- * this is what is used to probe the device for
- * completion of a rewind or tape load operation.
- */
- if(sp->device->type == TYPE_TAPE && cmd == MODE_SENSE)
- return 0;
-
- return 1;
-}
-
-/* Either a command is completing or a target is dropping off the bus
- * to continue the command in the background so we can do other work.
- */
-static int esp_do_freebus(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- Scsi_Cmnd *SCptr = esp->current_SC;
- int rval;
-
- rval = skipahead2(esp, eregs, SCptr, in_status, in_msgindone, in_freeing);
- if(rval)
- return rval;
-
- if(esp->ireg != ESP_INTR_DC) {
- ESPLOG(("esp%d: Target will not disconnect\n", esp->esp_id));
- return do_reset_bus; /* target will not drop BSY... */
- }
- esp->msgout_len = 0;
- esp->prevmsgout = NOP;
- if(esp->prevmsgin == COMMAND_COMPLETE) {
- struct esp_device *esp_dev = SCptr->device->hostdata;
- /* Normal end of nexus. */
- if(esp->disconnected_SC)
- esp_cmd(esp, eregs, ESP_CMD_ESEL);
-
- if(SCptr->SCp.Status != GOOD &&
- SCptr->SCp.Status != CONDITION_GOOD &&
- ((1<<scmd_id(SCptr)) & esp->targets_present) &&
- esp_dev->sync && esp_dev->sync_max_offset) {
- /* SCSI standard says that the synchronous capabilities
- * should be renegotiated at this point. Most likely
- * we are about to request sense from this target
- * in which case we want to avoid using sync
- * transfers until we are sure of the current target
- * state.
- */
- ESPMISC(("esp: Status <%d> for target %d lun %d\n",
- SCptr->SCp.Status, SCptr->device->id, SCptr->device->lun));
-
- /* But don't do this when spinning up a disk at
- * boot time while we poll for completion as it
- * fills up the console with messages. Also, tapes
- * can report not ready many times right after
- * loading up a tape.
- */
- if(esp_should_clear_sync(SCptr) != 0)
- esp_dev->sync = 0;
- }
- ESPDISC(("F<%02x,%02x>", SCptr->device->id, SCptr->device->lun));
- esp_done(esp, ((SCptr->SCp.Status & 0xff) |
- ((SCptr->SCp.Message & 0xff)<<8) |
- (DID_OK << 16)));
- } else if(esp->prevmsgin == DISCONNECT) {
- /* Normal disconnect. */
- esp_cmd(esp, eregs, ESP_CMD_ESEL);
- ESPDISC(("D<%02x,%02x>", SCptr->device->id, SCptr->device->lun));
- append_SC(&esp->disconnected_SC, SCptr);
- esp->current_SC = NULL;
- if(esp->issue_SC)
- esp_exec_cmd(esp);
- } else {
- /* Driver bug, we do not expect a disconnect here
- * and should not have advanced the state engine
- * to in_freeing.
- */
- ESPLOG(("esp%d: last msg not disc and not cmd cmplt.\n",
- esp->esp_id));
- return do_reset_bus;
- }
- return do_intr_end;
-}
-
-/* When a reselect occurs, and we cannot find the command to
- * reconnect to in our queues, we do this.
- */
-static int esp_bad_reconnect(struct NCR_ESP *esp)
-{
- Scsi_Cmnd *sp;
-
- ESPLOG(("esp%d: Eieeee, reconnecting unknown command!\n",
- esp->esp_id));
- ESPLOG(("QUEUE DUMP\n"));
- sp = esp->issue_SC;
- ESPLOG(("esp%d: issue_SC[", esp->esp_id));
- while(sp) {
- ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
- sp = (Scsi_Cmnd *) sp->host_scribble;
- }
- ESPLOG(("]\n"));
- sp = esp->current_SC;
- ESPLOG(("esp%d: current_SC[", esp->esp_id));
- while(sp) {
- ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
- sp = (Scsi_Cmnd *) sp->host_scribble;
- }
- ESPLOG(("]\n"));
- sp = esp->disconnected_SC;
- ESPLOG(("esp%d: disconnected_SC[", esp->esp_id));
- while(sp) {
- ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun));
- sp = (Scsi_Cmnd *) sp->host_scribble;
- }
- ESPLOG(("]\n"));
- return do_reset_bus;
-}
-
-/* Do the needy when a target tries to reconnect to us. */
-static int esp_do_reconnect(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- int lun, target;
- Scsi_Cmnd *SCptr;
-
- /* Check for all bogus conditions first. */
- target = reconnect_target(esp, eregs);
- if(target < 0) {
- ESPDISC(("bad bus bits\n"));
- return do_reset_bus;
- }
- lun = reconnect_lun(esp, eregs);
- if(lun < 0) {
- ESPDISC(("target=%2x, bad identify msg\n", target));
- return do_reset_bus;
- }
-
- /* Things look ok... */
- ESPDISC(("R<%02x,%02x>", target, lun));
-
- esp_cmd(esp, eregs, ESP_CMD_FLUSH);
- if(esp100_reconnect_hwbug(esp, eregs))
- return do_reset_bus;
- esp_cmd(esp, eregs, ESP_CMD_NULL);
-
- SCptr = remove_SC(&esp->disconnected_SC, (unchar) target, (unchar) lun);
- if(!SCptr)
- return esp_bad_reconnect(esp);
-
- esp_connect(esp, eregs, SCptr);
- esp_cmd(esp, eregs, ESP_CMD_MOK);
-
- /* Reconnect implies a restore pointers operation. */
- esp_restore_pointers(esp, SCptr);
-
- esp->snip = 0;
- esp_advance_phase(SCptr, in_the_dark);
- return do_intr_end;
-}
-
-/* End of NEXUS (hopefully), pick up status + message byte then leave if
- * all goes well.
- */
-static int esp_do_status(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- Scsi_Cmnd *SCptr = esp->current_SC;
- int intr, rval;
-
- rval = skipahead1(esp, eregs, SCptr, in_the_dark, in_status);
- if(rval)
- return rval;
-
- intr = esp->ireg;
- ESPSTAT(("esp_do_status: "));
- if(intr != ESP_INTR_DC) {
- int message_out = 0; /* for parity problems */
-
- /* Ack the message. */
- ESPSTAT(("ack msg, "));
- esp_cmd(esp, eregs, ESP_CMD_MOK);
-
- if(esp->dma_poll)
- esp->dma_poll(esp, (unsigned char *) esp->esp_command);
-
- ESPSTAT(("got something, "));
- /* ESP chimes in with one of
- *
- * 1) function done interrupt:
- * both status and message in bytes
- * are available
- *
- * 2) bus service interrupt:
- * only status byte was acquired
- *
- * 3) Anything else:
- * can't happen, but we test for it
- * anyways
- *
- * ALSO: If bad parity was detected on either
- * the status _or_ the message byte then
- * the ESP has asserted ATN on the bus
- * and we must therefore wait for the
- * next phase change.
- */
- if(intr & ESP_INTR_FDONE) {
- /* We got it all, hallelujah. */
- ESPSTAT(("got both, "));
- SCptr->SCp.Status = esp->esp_command[0];
- SCptr->SCp.Message = esp->esp_command[1];
- esp->prevmsgin = SCptr->SCp.Message;
- esp->cur_msgin[0] = SCptr->SCp.Message;
- if(esp->sreg & ESP_STAT_PERR) {
- /* There was bad parity for the
- * message byte, the status byte
- * was ok.
- */
- message_out = MSG_PARITY_ERROR;
- }
- } else if(intr == ESP_INTR_BSERV) {
- /* Only got status byte. */
- ESPLOG(("esp%d: got status only, ", esp->esp_id));
- if(!(esp->sreg & ESP_STAT_PERR)) {
- SCptr->SCp.Status = esp->esp_command[0];
- SCptr->SCp.Message = 0xff;
- } else {
- /* The status byte had bad parity.
- * we leave the scsi_pointer Status
- * field alone as we set it to a default
- * of CHECK_CONDITION in esp_queue.
- */
- message_out = INITIATOR_ERROR;
- }
- } else {
- /* This shouldn't happen ever. */
- ESPSTAT(("got bolixed\n"));
- esp_advance_phase(SCptr, in_the_dark);
- return esp_do_phase_determine(esp, eregs);
- }
-
- if(!message_out) {
- ESPSTAT(("status=%2x msg=%2x, ", SCptr->SCp.Status,
- SCptr->SCp.Message));
- if(SCptr->SCp.Message == COMMAND_COMPLETE) {
- ESPSTAT(("and was COMMAND_COMPLETE\n"));
- esp_advance_phase(SCptr, in_freeing);
- return esp_do_freebus(esp, eregs);
- } else {
- ESPLOG(("esp%d: and _not_ COMMAND_COMPLETE\n",
- esp->esp_id));
- esp->msgin_len = esp->msgin_ctr = 1;
- esp_advance_phase(SCptr, in_msgindone);
- return esp_do_msgindone(esp, eregs);
- }
- } else {
- /* With luck we'll be able to let the target
- * know that bad parity happened, it will know
- * which byte caused the problems and send it
- * again. For the case where the status byte
- * receives bad parity, I do not believe most
- * targets recover very well. We'll see.
- */
- ESPLOG(("esp%d: bad parity somewhere mout=%2x\n",
- esp->esp_id, message_out));
- esp->cur_msgout[0] = message_out;
- esp->msgout_len = esp->msgout_ctr = 1;
- esp_advance_phase(SCptr, in_the_dark);
- return esp_do_phase_determine(esp, eregs);
- }
- } else {
- /* If we disconnect now, all hell breaks loose. */
- ESPLOG(("esp%d: whoops, disconnect\n", esp->esp_id));
- esp_advance_phase(SCptr, in_the_dark);
- return esp_do_phase_determine(esp, eregs);
- }
-}
-
-static int esp_enter_status(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- unchar thecmd = ESP_CMD_ICCSEQ;
-
- esp_cmd(esp, eregs, ESP_CMD_FLUSH);
-
- if(esp->do_pio_cmds) {
- esp_advance_phase(esp->current_SC, in_status);
- esp_cmd(esp, eregs, thecmd);
- while(!(esp_read(esp->eregs->esp_status) & ESP_STAT_INTR));
- esp->esp_command[0] = esp_read(eregs->esp_fdata);
- while(!(esp_read(esp->eregs->esp_status) & ESP_STAT_INTR));
- esp->esp_command[1] = esp_read(eregs->esp_fdata);
- } else {
- esp->esp_command[0] = esp->esp_command[1] = 0xff;
- esp_write(eregs->esp_tclow, 2);
- esp_write(eregs->esp_tcmed, 0);
- esp->dma_init_read(esp, esp->esp_command_dvma, 2);
- thecmd |= ESP_CMD_DMA;
- esp_cmd(esp, eregs, thecmd);
- esp_advance_phase(esp->current_SC, in_status);
- }
-
- return esp_do_status(esp, eregs);
-}
-
-static int esp_disconnect_amidst_phases(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- Scsi_Cmnd *sp = esp->current_SC;
- struct esp_device *esp_dev = sp->device->hostdata;
-
- /* This means real problems if we see this
- * here. Unless we were actually trying
- * to force the device to abort/reset.
- */
- ESPLOG(("esp%d: Disconnect amidst phases, ", esp->esp_id));
- ESPLOG(("pphase<%s> cphase<%s>, ",
- phase_string(sp->SCp.phase),
- phase_string(sp->SCp.sent_command)));
-
- if(esp->disconnected_SC)
- esp_cmd(esp, eregs, ESP_CMD_ESEL);
-
- switch(esp->cur_msgout[0]) {
- default:
- /* We didn't expect this to happen at all. */
- ESPLOG(("device is bolixed\n"));
- esp_advance_phase(sp, in_tgterror);
- esp_done(esp, (DID_ERROR << 16));
- break;
-
- case BUS_DEVICE_RESET:
- ESPLOG(("device reset successful\n"));
- esp_dev->sync_max_offset = 0;
- esp_dev->sync_min_period = 0;
- esp_dev->sync = 0;
- esp_advance_phase(sp, in_resetdev);
- esp_done(esp, (DID_RESET << 16));
- break;
-
- case ABORT:
- ESPLOG(("device abort successful\n"));
- esp_advance_phase(sp, in_abortone);
- esp_done(esp, (DID_ABORT << 16));
- break;
-
- };
- return do_intr_end;
-}
-
-static int esp_enter_msgout(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- esp_advance_phase(esp->current_SC, in_msgout);
- return esp_do_msgout(esp, eregs);
-}
-
-static int esp_enter_msgin(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- esp_advance_phase(esp->current_SC, in_msgin);
- return esp_do_msgin(esp, eregs);
-}
-
-static int esp_enter_cmd(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- esp_advance_phase(esp->current_SC, in_cmdbegin);
- return esp_do_cmdbegin(esp, eregs);
-}
-
-static int esp_enter_badphase(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- ESPLOG(("esp%d: Bizarre bus phase %2x.\n", esp->esp_id,
- esp->sreg & ESP_STAT_PMASK));
- return do_reset_bus;
-}
-
-typedef int (*espfunc_t)(struct NCR_ESP *,
- struct ESP_regs *);
-
-static espfunc_t phase_vector[] = {
- esp_do_data, /* ESP_DOP */
- esp_do_data, /* ESP_DIP */
- esp_enter_cmd, /* ESP_CMDP */
- esp_enter_status, /* ESP_STATP */
- esp_enter_badphase, /* ESP_STAT_PMSG */
- esp_enter_badphase, /* ESP_STAT_PMSG | ESP_STAT_PIO */
- esp_enter_msgout, /* ESP_MOP */
- esp_enter_msgin, /* ESP_MIP */
-};
-
-/* The target has control of the bus and we have to see where it has
- * taken us.
- */
-static int esp_do_phase_determine(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- if ((esp->ireg & ESP_INTR_DC) != 0)
- return esp_disconnect_amidst_phases(esp, eregs);
- return phase_vector[esp->sreg & ESP_STAT_PMASK](esp, eregs);
-}
-
-/* First interrupt after exec'ing a cmd comes here. */
-static int esp_select_complete(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- Scsi_Cmnd *SCptr = esp->current_SC;
- struct esp_device *esp_dev = SCptr->device->hostdata;
- int cmd_bytes_sent, fcnt;
-
- fcnt = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES);
- cmd_bytes_sent = esp->dma_bytes_sent(esp, fcnt);
- if(esp->dma_invalidate)
- esp->dma_invalidate(esp);
-
- /* Let's check to see if a reselect happened
-	 * while we were trying to select.  This must
- * be checked first.
- */
- if(esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
- esp_reconnect(esp, SCptr);
- return esp_do_reconnect(esp, eregs);
- }
-
- /* Looks like things worked, we should see a bus service &
- * a function complete interrupt at this point. Note we
- * are doing a direct comparison because we don't want to
- * be fooled into thinking selection was successful if
- * ESP_INTR_DC is set, see below.
- */
- if(esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
- /* target speaks... */
- esp->targets_present |= (1<<scmd_id(SCptr));
-
- /* What if the target ignores the sdtr? */
- if(esp->snip)
- esp_dev->sync = 1;
-
- /* See how far, if at all, we got in getting
- * the information out to the target.
- */
- switch(esp->seqreg) {
- default:
-
- case ESP_STEP_ASEL:
- /* Arbitration won, target selected, but
- * we are in some phase which is not command
- * phase nor is it message out phase.
- *
- * XXX We've confused the target, obviously.
-	 * XXX So clear its state, but we also end
-	 * XXX up clearing everyone else's.  That isn't
-	 * XXX so nice.  I'd like to just reset this
-	 * XXX target, but if I cannot even get its
- * XXX attention and finish selection to talk
- * XXX to it, there is not much more I can do.
- * XXX If we have a loaded bus we're going to
- * XXX spend the next second or so renegotiating
- * XXX for synchronous transfers.
- */
- ESPLOG(("esp%d: STEP_ASEL for tgt %d\n",
- esp->esp_id, SCptr->device->id));
-
- case ESP_STEP_SID:
- /* Arbitration won, target selected, went
- * to message out phase, sent one message
- * byte, then we stopped. ATN is asserted
- * on the SCSI bus and the target is still
- * there hanging on. This is a legal
- * sequence step if we gave the ESP a select
- * and stop command.
- *
- * XXX See above, I could set the borken flag
- * XXX in the device struct and retry the
- * XXX command. But would that help for
- * XXX tagged capable targets?
- */
-
- case ESP_STEP_NCMD:
- /* Arbitration won, target selected, maybe
- * sent the one message byte in message out
- * phase, but we did not go to command phase
- * in the end. Actually, we could have sent
- * only some of the message bytes if we tried
- * to send out the entire identify and tag
- * message using ESP_CMD_SA3.
- */
- cmd_bytes_sent = 0;
- break;
-
- case ESP_STEP_PPC:
- /* No, not the powerPC pinhead. Arbitration
- * won, all message bytes sent if we went to
- * message out phase, went to command phase
- * but only part of the command was sent.
- *
- * XXX I've seen this, but usually in conjunction
- * XXX with a gross error which appears to have
- * XXX occurred between the time I told the
- * XXX ESP to arbitrate and when I got the
- * XXX interrupt. Could I have misloaded the
- * XXX command bytes into the fifo? Actually,
- * XXX I most likely missed a phase, and therefore
- * XXX went into never never land and didn't even
- * XXX know it. That was the old driver though.
- * XXX What is even more peculiar is that the ESP
- * XXX showed the proper function complete and
- * XXX bus service bits in the interrupt register.
- */
-
- case ESP_STEP_FINI4:
- case ESP_STEP_FINI5:
- case ESP_STEP_FINI6:
- case ESP_STEP_FINI7:
- /* Account for the identify message */
- if(SCptr->SCp.phase == in_slct_norm)
- cmd_bytes_sent -= 1;
- };
- esp_cmd(esp, eregs, ESP_CMD_NULL);
-
-		/* Be careful, we could really get burned during synchronous
- * data transfers if we try to flush the fifo now.
- */
- if(!fcnt && /* Fifo is empty and... */
- /* either we are not doing synchronous transfers or... */
- (!esp_dev->sync_max_offset ||
- /* We are not going into data in phase. */
- ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
- esp_cmd(esp, eregs, ESP_CMD_FLUSH); /* flush is safe */
-
- /* See how far we got if this is not a slow command. */
- if(!esp->esp_slowcmd) {
- if(cmd_bytes_sent < 0)
- cmd_bytes_sent = 0;
- if(cmd_bytes_sent != SCptr->cmd_len) {
- /* Crapola, mark it as a slowcmd
- * so that we have some chance of
- * keeping the command alive with
- * good luck.
- *
- * XXX Actually, if we didn't send it all
- * XXX this means either we didn't set things
- * XXX up properly (driver bug) or the target
- * XXX or the ESP detected parity on one of
- * XXX the command bytes. This makes much
- * XXX more sense, and therefore this code
- * XXX should be changed to send out a
- * XXX parity error message or if the status
- * XXX register shows no parity error then
- * XXX just expect the target to bring the
- * XXX bus into message in phase so that it
- * XXX can send us the parity error message.
- * XXX SCSI sucks...
- */
- esp->esp_slowcmd = 1;
- esp->esp_scmdp = &(SCptr->cmnd[cmd_bytes_sent]);
- esp->esp_scmdleft = (SCptr->cmd_len - cmd_bytes_sent);
- }
- }
-
- /* Now figure out where we went. */
- esp_advance_phase(SCptr, in_the_dark);
- return esp_do_phase_determine(esp, eregs);
- }
-
- /* Did the target even make it? */
- if(esp->ireg == ESP_INTR_DC) {
- /* wheee... nobody there or they didn't like
- * what we told it to do, clean up.
- */
-
- /* If anyone is off the bus, but working on
- * a command in the background for us, tell
- * the ESP to listen for them.
- */
- if(esp->disconnected_SC)
- esp_cmd(esp, eregs, ESP_CMD_ESEL);
-
- if(((1<<SCptr->device->id) & esp->targets_present) &&
- esp->seqreg && esp->cur_msgout[0] == EXTENDED_MESSAGE &&
- (SCptr->SCp.phase == in_slct_msg ||
- SCptr->SCp.phase == in_slct_stop)) {
-		/* Sync negotiation failed. */
- esp->snip = 0;
- ESPLOG(("esp%d: Failed synchronous negotiation for target %d "
- "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun));
- esp_dev->sync_max_offset = 0;
- esp_dev->sync_min_period = 0;
- esp_dev->sync = 1; /* so we don't negotiate again */
-
- /* Run the command again, this time though we
- * won't try to negotiate for synchronous transfers.
- *
- * XXX I'd like to do something like send an
- * XXX INITIATOR_ERROR or ABORT message to the
- * XXX target to tell it, "Sorry I confused you,
- * XXX please come back and I will be nicer next
- * XXX time". But that requires having the target
- * XXX on the bus, and it has dropped BSY on us.
- */
- esp->current_SC = NULL;
- esp_advance_phase(SCptr, not_issued);
- prepend_SC(&esp->issue_SC, SCptr);
- esp_exec_cmd(esp);
- return do_intr_end;
- }
-
- /* Ok, this is normal, this is what we see during boot
-	 * or whenever we are scanning the bus for targets.
- * But first make sure that is really what is happening.
- */
- if(((1<<SCptr->device->id) & esp->targets_present)) {
- ESPLOG(("esp%d: Warning, live target %d not responding to "
- "selection.\n", esp->esp_id, SCptr->device->id));
-
- /* This _CAN_ happen. The SCSI standard states that
- * the target is to _not_ respond to selection if
- * _it_ detects bad parity on the bus for any reason.
- * Therefore, we assume that if we've talked successfully
- * to this target before, bad parity is the problem.
- */
- esp_done(esp, (DID_PARITY << 16));
- } else {
- /* Else, there really isn't anyone there. */
- ESPMISC(("esp: selection failure, maybe nobody there?\n"));
- ESPMISC(("esp: target %d lun %d\n",
- SCptr->device->id, SCptr->device->lun));
- esp_done(esp, (DID_BAD_TARGET << 16));
- }
- return do_intr_end;
- }
-
-
- ESPLOG(("esp%d: Selection failure.\n", esp->esp_id));
- printk("esp%d: Currently -- ", esp->esp_id);
- esp_print_ireg(esp->ireg);
- printk(" ");
- esp_print_statreg(esp->sreg);
- printk(" ");
- esp_print_seqreg(esp->seqreg);
- printk("\n");
- printk("esp%d: New -- ", esp->esp_id);
- esp->sreg = esp_read(eregs->esp_status);
- esp->seqreg = esp_read(eregs->esp_sstep);
- esp->ireg = esp_read(eregs->esp_intrpt);
- esp_print_ireg(esp->ireg);
- printk(" ");
- esp_print_statreg(esp->sreg);
- printk(" ");
- esp_print_seqreg(esp->seqreg);
- printk("\n");
- ESPLOG(("esp%d: resetting bus\n", esp->esp_id));
- return do_reset_bus; /* ugh... */
-}
-
-/* Continue reading bytes for msgin phase. */
-static int esp_do_msgincont(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- if(esp->ireg & ESP_INTR_BSERV) {
- /* in the right phase too? */
- if((esp->sreg & ESP_STAT_PMASK) == ESP_MIP) {
- /* phew... */
- esp_cmd(esp, eregs, ESP_CMD_TI);
- esp_advance_phase(esp->current_SC, in_msgindone);
- return do_intr_end;
- }
-
- /* We changed phase but ESP shows bus service,
- * in this case it is most likely that we, the
- * hacker who has been up for 20hrs straight
- * staring at the screen, drowned in coffee
-		 * smelling like wretched cigarette ashes,
- * have miscoded something..... so, try to
- * recover as best we can.
- */
- ESPLOG(("esp%d: message in mis-carriage.\n", esp->esp_id));
- }
- esp_advance_phase(esp->current_SC, in_the_dark);
- return do_phase_determine;
-}
-
-static int check_singlebyte_msg(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- esp->prevmsgin = esp->cur_msgin[0];
- if(esp->cur_msgin[0] & 0x80) {
- /* wheee... */
- ESPLOG(("esp%d: target sends identify amidst phases\n",
- esp->esp_id));
- esp_advance_phase(esp->current_SC, in_the_dark);
- return 0;
- } else if(((esp->cur_msgin[0] & 0xf0) == 0x20) ||
- (esp->cur_msgin[0] == EXTENDED_MESSAGE)) {
- esp->msgin_len = 2;
- esp_advance_phase(esp->current_SC, in_msgincont);
- return 0;
- }
- esp_advance_phase(esp->current_SC, in_the_dark);
- switch(esp->cur_msgin[0]) {
- default:
- /* We don't want to hear about it. */
- ESPLOG(("esp%d: msg %02x which we don't know about\n", esp->esp_id,
- esp->cur_msgin[0]));
- return MESSAGE_REJECT;
-
- case NOP:
- ESPLOG(("esp%d: target %d sends a nop\n", esp->esp_id,
- esp->current_SC->device->id));
- return 0;
-
- case RESTORE_POINTERS:
-		/* In this case we might also have to back up the
- * "slow command" pointer. It is rare to get such
- * a save/restore pointer sequence so early in the
- * bus transition sequences, but cover it.
- */
- if(esp->esp_slowcmd) {
- esp->esp_scmdleft = esp->current_SC->cmd_len;
- esp->esp_scmdp = &esp->current_SC->cmnd[0];
- }
- esp_restore_pointers(esp, esp->current_SC);
- return 0;
-
- case SAVE_POINTERS:
- esp_save_pointers(esp, esp->current_SC);
- return 0;
-
- case COMMAND_COMPLETE:
- case DISCONNECT:
- /* Freeing the bus, let it go. */
- esp->current_SC->SCp.phase = in_freeing;
- return 0;
-
- case MESSAGE_REJECT:
- ESPMISC(("msg reject, "));
- if(esp->prevmsgout == EXTENDED_MESSAGE) {
- struct esp_device *esp_dev = esp->current_SC->device->hostdata;
-
- /* Doesn't look like this target can
- * do synchronous or WIDE transfers.
- */
- ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n"));
- esp_dev->sync = 1;
- esp_dev->wide = 1;
- esp_dev->sync_min_period = 0;
- esp_dev->sync_max_offset = 0;
- return 0;
- } else {
- ESPMISC(("not sync nego, sending ABORT\n"));
- return ABORT;
- }
- };
-}
-
-/* Target negotiates for synchronous transfers before we do, this
- * is legal although very strange. What is even funnier is that
- * the SCSI2 standard specifically recommends against targets doing
- * this because so many initiators cannot cope with this occurring.
- */
-static int target_with_ants_in_pants(struct NCR_ESP *esp,
- Scsi_Cmnd *SCptr,
- struct esp_device *esp_dev)
-{
- if(esp_dev->sync || SCptr->device->borken) {
- /* sorry, no can do */
- ESPSDTR(("forcing to async, "));
- build_sync_nego_msg(esp, 0, 0);
- esp_dev->sync = 1;
- esp->snip = 1;
- ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id));
- esp_advance_phase(SCptr, in_the_dark);
- return EXTENDED_MESSAGE;
- }
-
- /* Ok, we'll check them out... */
- return 0;
-}
-
-static void sync_report(struct NCR_ESP *esp)
-{
- int msg3, msg4;
- char *type;
-
- msg3 = esp->cur_msgin[3];
- msg4 = esp->cur_msgin[4];
- if(msg4) {
- int hz = 1000000000 / (msg3 * 4);
- int integer = hz / 1000000;
- int fraction = (hz - (integer * 1000000)) / 10000;
- if((msg3 * 4) < 200) {
- type = "FAST";
- } else {
- type = "synchronous";
- }
-
- /* Do not transform this back into one big printk
- * again, it triggers a bug in our sparc64-gcc272
- * sibling call optimization. -DaveM
- */
- ESPLOG((KERN_INFO "esp%d: target %d ",
- esp->esp_id, esp->current_SC->device->id));
- ESPLOG(("[period %dns offset %d %d.%02dMHz ",
- (int) msg3 * 4, (int) msg4,
- integer, fraction));
- ESPLOG(("%s SCSI%s]\n", type,
- (((msg3 * 4) < 200) ? "-II" : "")));
- } else {
- ESPLOG((KERN_INFO "esp%d: target %d asynchronous\n",
- esp->esp_id, esp->current_SC->device->id));
- }
-}
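As a cross-check of the arithmetic in sync_report() above: the SDTR period factor (cur_msgin[3]) is in units of 4ns, so the reported rate is 10^9 / (period_factor * 4) Hz, and anything under 200ns counts as FAST SCSI. A minimal standalone sketch of the same calculation; the helper name and sample values below are illustrative only, not part of the driver:

#include <stdio.h>

/* Hypothetical helper mirroring the arithmetic in sync_report():
 * the SDTR transfer period factor is in units of 4ns, so the
 * synchronous rate is 10^9 / (period_factor * 4) Hz, and periods
 * under 200ns qualify as FAST SCSI.
 */
static void report_sync(int period_factor, int offset)
{
	if (offset) {
		int hz = 1000000000 / (period_factor * 4);
		int integer = hz / 1000000;
		int fraction = (hz - integer * 1000000) / 10000;

		printf("period %dns offset %d -> %d.%02d MHz (%s)\n",
		       period_factor * 4, offset, integer, fraction,
		       (period_factor * 4) < 200 ? "FAST" : "synchronous");
	} else {
		printf("asynchronous\n");
	}
}

int main(void)
{
	report_sync(25, 15);	/* 100ns period -> 10.00 MHz, FAST */
	report_sync(50, 8);	/* 200ns period ->  5.00 MHz       */
	report_sync(50, 0);	/* zero offset  -> asynchronous    */
	return 0;
}

Compiled on its own, this prints 10.00 MHz FAST for a period factor of 25 and 5.00 MHz for a factor of 50.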
-
-static int check_multibyte_msg(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- Scsi_Cmnd *SCptr = esp->current_SC;
- struct esp_device *esp_dev = SCptr->device->hostdata;
- unchar regval = 0;
- int message_out = 0;
-
- ESPSDTR(("chk multibyte msg: "));
- if(esp->cur_msgin[2] == EXTENDED_SDTR) {
- int period = esp->cur_msgin[3];
- int offset = esp->cur_msgin[4];
-
- ESPSDTR(("is sync nego response, "));
- if(!esp->snip) {
- int rval;
-
- /* Target negotiates first! */
- ESPSDTR(("target jumps the gun, "));
- message_out = EXTENDED_MESSAGE; /* we must respond */
- rval = target_with_ants_in_pants(esp, SCptr, esp_dev);
- if(rval)
- return rval;
- }
-
- ESPSDTR(("examining sdtr, "));
-
- /* Offset cannot be larger than ESP fifo size. */
- if(offset > 15) {
- ESPSDTR(("offset too big %2x, ", offset));
- offset = 15;
- ESPSDTR(("sending back new offset\n"));
- build_sync_nego_msg(esp, period, offset);
- return EXTENDED_MESSAGE;
- }
-
- if(offset && period > esp->max_period) {
- /* Yeee, async for this slow device. */
- ESPSDTR(("period too long %2x, ", period));
- build_sync_nego_msg(esp, 0, 0);
- ESPSDTR(("hoping for msgout\n"));
- esp_advance_phase(esp->current_SC, in_the_dark);
- return EXTENDED_MESSAGE;
- } else if (offset && period < esp->min_period) {
- ESPSDTR(("period too short %2x, ", period));
- period = esp->min_period;
- if(esp->erev > esp236)
- regval = 4;
- else
- regval = 5;
- } else if(offset) {
- int tmp;
-
- ESPSDTR(("period is ok, "));
- tmp = esp->ccycle / 1000;
- regval = (((period << 2) + tmp - 1) / tmp);
- if(regval && (esp->erev > esp236)) {
- if(period >= 50)
- regval--;
- }
- }
-
- if(offset) {
- unchar bit;
-
- esp_dev->sync_min_period = (regval & 0x1f);
- esp_dev->sync_max_offset = (offset | esp->radelay);
- if(esp->erev > esp236) {
- if(esp->erev == fas100a)
- bit = ESP_CONFIG3_FAST;
- else
- bit = ESP_CONFIG3_FSCSI;
- if(period < 50)
- esp->config3[SCptr->device->id] |= bit;
- else
- esp->config3[SCptr->device->id] &= ~bit;
- esp->prev_cfg3 = esp->config3[SCptr->device->id];
- esp_write(eregs->esp_cfg3, esp->prev_cfg3);
- }
- esp->prev_soff = esp_dev->sync_min_period;
- esp_write(eregs->esp_soff, esp->prev_soff);
- esp->prev_stp = esp_dev->sync_max_offset;
- esp_write(eregs->esp_stp, esp->prev_stp);
-
- ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n",
- esp_dev->sync_max_offset,
- esp_dev->sync_min_period,
- esp->config3[scmd_id(SCptr)]));
-
- esp->snip = 0;
- } else if(esp_dev->sync_max_offset) {
- unchar bit;
-
- /* back to async mode */
-			ESPSDTR(("unacceptable sync nego, forcing async\n"));
- esp_dev->sync_max_offset = 0;
- esp_dev->sync_min_period = 0;
- esp->prev_soff = 0;
- esp_write(eregs->esp_soff, 0);
- esp->prev_stp = 0;
- esp_write(eregs->esp_stp, 0);
- if(esp->erev > esp236) {
- if(esp->erev == fas100a)
- bit = ESP_CONFIG3_FAST;
- else
- bit = ESP_CONFIG3_FSCSI;
- esp->config3[SCptr->device->id] &= ~bit;
- esp->prev_cfg3 = esp->config3[SCptr->device->id];
- esp_write(eregs->esp_cfg3, esp->prev_cfg3);
- }
- }
-
- sync_report(esp);
-
- ESPSDTR(("chk multibyte msg: sync is known, "));
- esp_dev->sync = 1;
-
- if(message_out) {
- ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n",
- esp->esp_id));
- build_sync_nego_msg(esp, period, offset);
- esp_advance_phase(SCptr, in_the_dark);
- return EXTENDED_MESSAGE;
- }
-
- ESPSDTR(("returning zero\n"));
- esp_advance_phase(SCptr, in_the_dark); /* ...or else! */
- return 0;
- } else if(esp->cur_msgin[2] == EXTENDED_WDTR) {
- ESPLOG(("esp%d: AIEEE wide msg received\n", esp->esp_id));
- message_out = MESSAGE_REJECT;
- } else if(esp->cur_msgin[2] == EXTENDED_MODIFY_DATA_POINTER) {
- ESPLOG(("esp%d: rejecting modify data ptr msg\n", esp->esp_id));
- message_out = MESSAGE_REJECT;
- }
- esp_advance_phase(SCptr, in_the_dark);
- return message_out;
-}
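The regval computation in check_multibyte_msg() above is a ceiling division: the negotiated period (in 4ns units) is turned into whole ESP clock cycles for the synchronous transfer period register, with ccycle assumed to be the chip clock period in picoseconds as the driver keeps it. A minimal sketch of just that base conversion, leaving out the FAST-chip regval adjustment; the function name and sample values are hypothetical:

#include <stdio.h>

/* Sketch of the base conversion in check_multibyte_msg(): the
 * negotiated SDTR period factor is in 4ns units and the ESP wants
 * the synchronous transfer period expressed in whole chip clock
 * cycles, rounded up.  ccycle is assumed to be the chip clock
 * period in picoseconds; the FAST-chip adjustment is left out.
 */
static int period_to_regval(int period_factor, unsigned int ccycle)
{
	int clock_ns = ccycle / 1000;		/* chip clock period in ns */

	/* ceiling of (period_factor * 4ns) / clock_ns */
	return ((period_factor << 2) + clock_ns - 1) / clock_ns;
}

int main(void)
{
	/* 40MHz clock -> 25ns cycle; 100ns SDTR period -> 4 cycles */
	printf("%d\n", period_to_regval(25, 25000));
	/* 25MHz clock -> 40ns cycle; 200ns SDTR period -> 5 cycles */
	printf("%d\n", period_to_regval(50, 40000));
	return 0;
}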
-
-static int esp_do_msgindone(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- Scsi_Cmnd *SCptr = esp->current_SC;
- int message_out = 0, it = 0, rval;
-
- rval = skipahead1(esp, eregs, SCptr, in_msgin, in_msgindone);
- if(rval)
- return rval;
- if(SCptr->SCp.sent_command != in_status) {
- if(!(esp->ireg & ESP_INTR_DC)) {
- if(esp->msgin_len && (esp->sreg & ESP_STAT_PERR)) {
- message_out = MSG_PARITY_ERROR;
- esp_cmd(esp, eregs, ESP_CMD_FLUSH);
- } else if((it = (esp_read(eregs->esp_fflags) & ESP_FF_FBYTES))!=1) {
- /* We certainly dropped the ball somewhere. */
- message_out = INITIATOR_ERROR;
- esp_cmd(esp, eregs, ESP_CMD_FLUSH);
- } else if(!esp->msgin_len) {
- it = esp_read(eregs->esp_fdata);
- esp_advance_phase(SCptr, in_msgincont);
- } else {
- /* it is ok and we want it */
- it = esp->cur_msgin[esp->msgin_ctr] =
- esp_read(eregs->esp_fdata);
- esp->msgin_ctr++;
- }
- } else {
- esp_advance_phase(SCptr, in_the_dark);
- return do_work_bus;
- }
- } else {
- it = esp->cur_msgin[0];
- }
- if(!message_out && esp->msgin_len) {
- if(esp->msgin_ctr < esp->msgin_len) {
- esp_advance_phase(SCptr, in_msgincont);
- } else if(esp->msgin_len == 1) {
- message_out = check_singlebyte_msg(esp, eregs);
- } else if(esp->msgin_len == 2) {
- if(esp->cur_msgin[0] == EXTENDED_MESSAGE) {
- if((it+2) >= 15) {
- message_out = MESSAGE_REJECT;
- } else {
- esp->msgin_len = (it + 2);
- esp_advance_phase(SCptr, in_msgincont);
- }
- } else {
- message_out = MESSAGE_REJECT; /* foo on you */
- }
- } else {
- message_out = check_multibyte_msg(esp, eregs);
- }
- }
- if(message_out < 0) {
- return -message_out;
- } else if(message_out) {
- if(((message_out != 1) &&
- ((message_out < 0x20) || (message_out & 0x80))))
- esp->msgout_len = 1;
- esp->cur_msgout[0] = message_out;
- esp_cmd(esp, eregs, ESP_CMD_SATN);
- esp_advance_phase(SCptr, in_the_dark);
- esp->msgin_len = 0;
- }
- esp->sreg = esp_read(eregs->esp_status);
- esp->sreg &= ~(ESP_STAT_INTR);
- if((esp->sreg & (ESP_STAT_PMSG|ESP_STAT_PCD)) == (ESP_STAT_PMSG|ESP_STAT_PCD))
- esp_cmd(esp, eregs, ESP_CMD_MOK);
- if((SCptr->SCp.sent_command == in_msgindone) &&
- (SCptr->SCp.phase == in_freeing))
- return esp_do_freebus(esp, eregs);
- return do_intr_end;
-}
-
-static int esp_do_cmdbegin(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- unsigned char tmp;
- Scsi_Cmnd *SCptr = esp->current_SC;
-
- esp_advance_phase(SCptr, in_cmdend);
- esp_cmd(esp, eregs, ESP_CMD_FLUSH);
- tmp = *esp->esp_scmdp++;
- esp->esp_scmdleft--;
- esp_write(eregs->esp_fdata, tmp);
- esp_cmd(esp, eregs, ESP_CMD_TI);
- return do_intr_end;
-}
-
-static int esp_do_cmddone(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- esp_cmd(esp, eregs, ESP_CMD_NULL);
- if(esp->ireg & ESP_INTR_BSERV) {
- esp_advance_phase(esp->current_SC, in_the_dark);
- return esp_do_phase_determine(esp, eregs);
- }
- ESPLOG(("esp%d: in do_cmddone() but didn't get BSERV interrupt.\n",
- esp->esp_id));
- return do_reset_bus;
-}
-
-static int esp_do_msgout(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- esp_cmd(esp, eregs, ESP_CMD_FLUSH);
- switch(esp->msgout_len) {
- case 1:
- esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
- esp_cmd(esp, eregs, ESP_CMD_TI);
- break;
-
- case 2:
- if(esp->do_pio_cmds){
- esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
- esp_write(eregs->esp_fdata, esp->cur_msgout[1]);
- esp_cmd(esp, eregs, ESP_CMD_TI);
- } else {
- esp->esp_command[0] = esp->cur_msgout[0];
- esp->esp_command[1] = esp->cur_msgout[1];
- esp->dma_setup(esp, esp->esp_command_dvma, 2, 0);
- esp_setcount(eregs, 2);
- esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
- }
- break;
-
- case 4:
- esp->snip = 1;
- if(esp->do_pio_cmds){
- esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
- esp_write(eregs->esp_fdata, esp->cur_msgout[1]);
- esp_write(eregs->esp_fdata, esp->cur_msgout[2]);
- esp_write(eregs->esp_fdata, esp->cur_msgout[3]);
- esp_cmd(esp, eregs, ESP_CMD_TI);
- } else {
- esp->esp_command[0] = esp->cur_msgout[0];
- esp->esp_command[1] = esp->cur_msgout[1];
- esp->esp_command[2] = esp->cur_msgout[2];
- esp->esp_command[3] = esp->cur_msgout[3];
- esp->dma_setup(esp, esp->esp_command_dvma, 4, 0);
- esp_setcount(eregs, 4);
- esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
- }
- break;
-
- case 5:
- esp->snip = 1;
- if(esp->do_pio_cmds){
- esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
- esp_write(eregs->esp_fdata, esp->cur_msgout[1]);
- esp_write(eregs->esp_fdata, esp->cur_msgout[2]);
- esp_write(eregs->esp_fdata, esp->cur_msgout[3]);
- esp_write(eregs->esp_fdata, esp->cur_msgout[4]);
- esp_cmd(esp, eregs, ESP_CMD_TI);
- } else {
- esp->esp_command[0] = esp->cur_msgout[0];
- esp->esp_command[1] = esp->cur_msgout[1];
- esp->esp_command[2] = esp->cur_msgout[2];
- esp->esp_command[3] = esp->cur_msgout[3];
- esp->esp_command[4] = esp->cur_msgout[4];
- esp->dma_setup(esp, esp->esp_command_dvma, 5, 0);
- esp_setcount(eregs, 5);
- esp_cmd(esp, eregs, ESP_CMD_DMA | ESP_CMD_TI);
- }
- break;
-
- default:
- /* whoops */
- ESPMISC(("bogus msgout sending NOP\n"));
- esp->cur_msgout[0] = NOP;
- esp_write(eregs->esp_fdata, esp->cur_msgout[0]);
- esp->msgout_len = 1;
- esp_cmd(esp, eregs, ESP_CMD_TI);
- break;
- }
- esp_advance_phase(esp->current_SC, in_msgoutdone);
- return do_intr_end;
-}
-
-static int esp_do_msgoutdone(struct NCR_ESP *esp,
- struct ESP_regs *eregs)
-{
- if((esp->msgout_len > 1) && esp->dma_barrier)
- esp->dma_barrier(esp);
-
- if(!(esp->ireg & ESP_INTR_DC)) {
- esp_cmd(esp, eregs, ESP_CMD_NULL);
- switch(esp->sreg & ESP_STAT_PMASK) {
- case ESP_MOP:
- /* whoops, parity error */
- ESPLOG(("esp%d: still in msgout, parity error assumed\n",
- esp->esp_id));
- if(esp->msgout_len > 1)
- esp_cmd(esp, eregs, ESP_CMD_SATN);
- esp_advance_phase(esp->current_SC, in_msgout);
- return do_work_bus;
-
- case ESP_DIP:
- break;
-
- default:
- if(!fcount(esp, eregs) &&
- !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset))
- esp_cmd(esp, eregs, ESP_CMD_FLUSH);
- break;
-
- };
- }
-
- /* If we sent out a synchronous negotiation message, update
- * our state.
- */
- if(esp->cur_msgout[2] == EXTENDED_MESSAGE &&
- esp->cur_msgout[4] == EXTENDED_SDTR) {
- esp->snip = 1; /* anal retentiveness... */
- }
-
- esp->prevmsgout = esp->cur_msgout[0];
- esp->msgout_len = 0;
- esp_advance_phase(esp->current_SC, in_the_dark);
- return esp_do_phase_determine(esp, eregs);
-}
-
-static int esp_bus_unexpected(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- ESPLOG(("esp%d: command in weird state %2x\n",
- esp->esp_id, esp->current_SC->SCp.phase));
- return do_reset_bus;
-}
-
-static espfunc_t bus_vector[] = {
- esp_do_data_finale,
- esp_do_data_finale,
- esp_bus_unexpected,
- esp_do_msgin,
- esp_do_msgincont,
- esp_do_msgindone,
- esp_do_msgout,
- esp_do_msgoutdone,
- esp_do_cmdbegin,
- esp_do_cmddone,
- esp_do_status,
- esp_do_freebus,
- esp_do_phase_determine,
- esp_bus_unexpected,
- esp_bus_unexpected,
- esp_bus_unexpected,
-};
-
-/* This is the second tier in our dual-level SCSI state machine. */
-static int esp_work_bus(struct NCR_ESP *esp, struct ESP_regs *eregs)
-{
- Scsi_Cmnd *SCptr = esp->current_SC;
- unsigned int phase;
-
- ESPBUS(("esp_work_bus: "));
- if(!SCptr) {
- ESPBUS(("reconnect\n"));
- return esp_do_reconnect(esp, eregs);
- }
- phase = SCptr->SCp.phase;
- if ((phase & 0xf0) == in_phases_mask)
- return bus_vector[(phase & 0x0f)](esp, eregs);
- else if((phase & 0xf0) == in_slct_mask)
- return esp_select_complete(esp, eregs);
- else
- return esp_bus_unexpected(esp, eregs);
-}
-
-static espfunc_t isvc_vector[] = {
- NULL,
- esp_do_phase_determine,
- esp_do_resetbus,
- esp_finish_reset,
- esp_work_bus
-};
-
-/* Main interrupt handler for an esp adapter. */
-void esp_handle(struct NCR_ESP *esp)
-{
- struct ESP_regs *eregs;
- Scsi_Cmnd *SCptr;
- int what_next = do_intr_end;
- eregs = esp->eregs;
- SCptr = esp->current_SC;
-
- if(esp->dma_irq_entry)
- esp->dma_irq_entry(esp);
-
- /* Check for errors. */
- esp->sreg = esp_read(eregs->esp_status);
- esp->sreg &= (~ESP_STAT_INTR);
- esp->seqreg = (esp_read(eregs->esp_sstep) & ESP_STEP_VBITS);
- esp->ireg = esp_read(eregs->esp_intrpt); /* Unlatch intr and stat regs */
- ESPIRQ(("handle_irq: [sreg<%02x> sstep<%02x> ireg<%02x>]\n",
- esp->sreg, esp->seqreg, esp->ireg));
- if(esp->sreg & (ESP_STAT_SPAM)) {
- /* Gross error, could be due to one of:
- *
- * - top of fifo overwritten, could be because
- * we tried to do a synchronous transfer with
- * an offset greater than ESP fifo size
- *
- * - top of command register overwritten
- *
- * - DMA setup to go in one direction, SCSI
- * bus points in the other, whoops
- *
- * - weird phase change during asynchronous
- * data phase while we are initiator
- */
- ESPLOG(("esp%d: Gross error sreg=%2x\n", esp->esp_id, esp->sreg));
-
- /* If a command is live on the bus we cannot safely
- * reset the bus, so we'll just let the pieces fall
- * where they may. Here we are hoping that the
- * target will be able to cleanly go away soon
- * so we can safely reset things.
- */
- if(!SCptr) {
- ESPLOG(("esp%d: No current cmd during gross error, "
- "resetting bus\n", esp->esp_id));
- what_next = do_reset_bus;
- goto state_machine;
- }
- }
-
-	/* Having no current cmd is only valid at this point when there are
- * commands off the bus or we are trying a reset.
- */
- if(!SCptr && !esp->disconnected_SC && !(esp->ireg & ESP_INTR_SR)) {
- /* Panic is safe, since current_SC is null. */
- ESPLOG(("esp%d: no command in esp_handle()\n", esp->esp_id));
- panic("esp_handle: current_SC == penguin within interrupt!");
- }
-
- if(esp->ireg & (ESP_INTR_IC)) {
- /* Illegal command fed to ESP. Outside of obvious
- * software bugs that could cause this, there is
- * a condition with ESP100 where we can confuse the
- * ESP into an erroneous illegal command interrupt
- * because it does not scrape the FIFO properly
- * for reselection. See esp100_reconnect_hwbug()
- * to see how we try very hard to avoid this.
- */
- ESPLOG(("esp%d: invalid command\n", esp->esp_id));
-
- esp_dump_state(esp, eregs);
-
- if(SCptr) {
- /* Devices with very buggy firmware can drop BSY
- * during a scatter list interrupt when using sync
- * mode transfers. We continue the transfer as
- * expected, the target drops the bus, the ESP
-			 * gets confused, and we get an illegal command
- * interrupt because the bus is in the disconnected
- * state now and ESP_CMD_TI is only allowed when
- * a nexus is alive on the bus.
- */
- ESPLOG(("esp%d: Forcing async and disabling disconnect for "
- "target %d\n", esp->esp_id, SCptr->device->id));
- SCptr->device->borken = 1; /* foo on you */
- }
-
- what_next = do_reset_bus;
- } else if(!(esp->ireg & ~(ESP_INTR_FDONE | ESP_INTR_BSERV | ESP_INTR_DC))) {
- int phase;
-
- if(SCptr) {
- phase = SCptr->SCp.phase;
- if(phase & in_phases_mask) {
- what_next = esp_work_bus(esp, eregs);
- } else if(phase & in_slct_mask) {
- what_next = esp_select_complete(esp, eregs);
- } else {
- ESPLOG(("esp%d: interrupt for no good reason...\n",
- esp->esp_id));
- what_next = do_intr_end;
- }
- } else {
- ESPLOG(("esp%d: BSERV or FDONE or DC while SCptr==NULL\n",
- esp->esp_id));
- what_next = do_reset_bus;
- }
- } else if(esp->ireg & ESP_INTR_SR) {
- ESPLOG(("esp%d: SCSI bus reset interrupt\n", esp->esp_id));
- what_next = do_reset_complete;
- } else if(esp->ireg & (ESP_INTR_S | ESP_INTR_SATN)) {
- ESPLOG(("esp%d: AIEEE we have been selected by another initiator!\n",
- esp->esp_id));
- what_next = do_reset_bus;
- } else if(esp->ireg & ESP_INTR_RSEL) {
- if(!SCptr) {
- /* This is ok. */
- what_next = esp_do_reconnect(esp, eregs);
- } else if(SCptr->SCp.phase & in_slct_mask) {
- /* Only selection code knows how to clean
- * up properly.
- */
- ESPDISC(("Reselected during selection attempt\n"));
- what_next = esp_select_complete(esp, eregs);
- } else {
- ESPLOG(("esp%d: Reselected while bus is busy\n",
- esp->esp_id));
- what_next = do_reset_bus;
- }
- }
-
- /* This is tier-one in our dual level SCSI state machine. */
-state_machine:
- while(what_next != do_intr_end) {
- if (what_next >= do_phase_determine &&
- what_next < do_intr_end)
- what_next = isvc_vector[what_next](esp, eregs);
- else {
- /* state is completely lost ;-( */
- ESPLOG(("esp%d: interrupt engine loses state, resetting bus\n",
- esp->esp_id));
- what_next = do_reset_bus;
- }
- }
- if(esp->dma_irq_exit)
- esp->dma_irq_exit(esp);
-}
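esp_handle() above is the top tier of the driver's dual-level state machine: a coarse what_next code indexes isvc_vector, and the bus-level handlers in turn index bus_vector or phase_vector by the current command phase. A stripped-down sketch of the same table-dispatch idiom; all names and handlers here are invented, only the control structure mirrors the driver:

#include <stdio.h>

/* A coarse "what next" code indexes one table of handlers, and a
 * handler may return another code, looping until service is done.
 */
enum what_next { DO_END, DO_PHASE, DO_BUS, NR_WHAT };

typedef int (*handler_t)(void);

static int handle_phase(void) { printf("phase determine\n"); return DO_END; }
static int handle_bus(void)   { printf("work bus\n");        return DO_PHASE; }

static handler_t top_level[NR_WHAT] = {
	NULL,		/* DO_END is never dispatched */
	handle_phase,	/* DO_PHASE */
	handle_bus,	/* DO_BUS */
};

int main(void)
{
	int next = DO_BUS;

	/* Loop until some handler says interrupt service is finished. */
	while (next != DO_END)
		next = top_level[next]();
	return 0;
}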
-EXPORT_SYMBOL(esp_handle);
-
-#ifndef CONFIG_SMP
-irqreturn_t esp_intr(int irq, void *dev_id)
-{
- struct NCR_ESP *esp;
- unsigned long flags;
- int again;
- struct Scsi_Host *dev = dev_id;
-
- /* Handle all ESP interrupts showing at this IRQ level. */
- spin_lock_irqsave(dev->host_lock, flags);
-repeat:
- again = 0;
- for_each_esp(esp) {
-#ifndef __mips__
- if(((esp)->irq & 0xff) == irq) {
-#endif
- if(esp->dma_irq_p(esp)) {
- again = 1;
-
- esp->dma_ints_off(esp);
-
- ESPIRQ(("I%d(", esp->esp_id));
- esp_handle(esp);
- ESPIRQ((")"));
-
- esp->dma_ints_on(esp);
- }
-#ifndef __mips__
- }
-#endif
- }
- if(again)
- goto repeat;
- spin_unlock_irqrestore(dev->host_lock, flags);
- return IRQ_HANDLED;
-}
-#else
-/* For SMP we only service one ESP on the list at our IRQ level! */
-irqreturn_t esp_intr(int irq, void *dev_id)
-{
- struct NCR_ESP *esp;
- unsigned long flags;
- struct Scsi_Host *dev = dev_id;
-
- /* Handle all ESP interrupts showing at this IRQ level. */
- spin_lock_irqsave(dev->host_lock, flags);
- for_each_esp(esp) {
- if(((esp)->irq & 0xf) == irq) {
- if(esp->dma_irq_p(esp)) {
- esp->dma_ints_off(esp);
-
- ESPIRQ(("I[%d:%d](",
- smp_processor_id(), esp->esp_id));
- esp_handle(esp);
- ESPIRQ((")"));
-
- esp->dma_ints_on(esp);
- goto out;
- }
- }
- }
-out:
- spin_unlock_irqrestore(dev->host_lock, flags);
- return IRQ_HANDLED;
-}
-#endif
-
-int esp_slave_alloc(struct scsi_device *SDptr)
-{
- struct esp_device *esp_dev =
- kzalloc(sizeof(struct esp_device), GFP_ATOMIC);
-
- if (!esp_dev)
- return -ENOMEM;
- SDptr->hostdata = esp_dev;
- return 0;
-}
-
-void esp_slave_destroy(struct scsi_device *SDptr)
-{
- struct NCR_ESP *esp = (struct NCR_ESP *) SDptr->host->hostdata;
-
- esp->targets_present &= ~(1 << sdev_id(SDptr));
- kfree(SDptr->hostdata);
- SDptr->hostdata = NULL;
-}
-
-#ifdef MODULE
-int init_module(void) { return 0; }
-void cleanup_module(void) {}
-void esp_release(void)
-{
- esps_in_use--;
- esps_running = esps_in_use;
-}
-EXPORT_SYMBOL(esp_release);
-#endif
-
-EXPORT_SYMBOL(esp_abort);
-EXPORT_SYMBOL(esp_allocate);
-EXPORT_SYMBOL(esp_deallocate);
-EXPORT_SYMBOL(esp_initialize);
-EXPORT_SYMBOL(esp_intr);
-EXPORT_SYMBOL(esp_queue);
-EXPORT_SYMBOL(esp_reset);
-EXPORT_SYMBOL(esp_slave_alloc);
-EXPORT_SYMBOL(esp_slave_destroy);
-EXPORT_SYMBOL(esps_in_use);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/NCR53C9x.h b/drivers/scsi/NCR53C9x.h
deleted file mode 100644
index 00a0ba040dba..000000000000
--- a/drivers/scsi/NCR53C9x.h
+++ /dev/null
@@ -1,668 +0,0 @@
-/* NCR53C9x.c: Defines and structures for the NCR53C9x generic driver.
- *
- * Originally esp.h: Defines and structures for the Sparc ESP
- * (Enhanced SCSI Processor) driver under Linux.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- *
- * Generalization by Jesper Skov (jskov@cygnus.co.uk)
- *
- * More generalization (for i386 stuff) by Tymm Twillman (tymm@computer.org)
- */
-
-#ifndef NCR53C9X_H
-#define NCR53C9X_H
-
-#include <linux/interrupt.h>
-
-/* djweis for mac driver */
-#if defined(CONFIG_MAC)
-#define PAD_SIZE 15
-#else
-#define PAD_SIZE 3
-#endif
-
-/* Handle multiple hostadapters on Amiga
- * generally PAD_SIZE = 3
- * but there is one exception: Oktagon (PAD_SIZE = 1) */
-#if defined(CONFIG_OKTAGON_SCSI) || defined(CONFIG_OKTAGON_SCSI_MODULE)
-#undef PAD_SIZE
-#if defined(CONFIG_BLZ1230_SCSI) || defined(CONFIG_BLZ1230_SCSI_MODULE) || \
- defined(CONFIG_BLZ2060_SCSI) || defined(CONFIG_BLZ2060_SCSI_MODULE) || \
- defined(CONFIG_CYBERSTORM_SCSI) || defined(CONFIG_CYBERSTORM_SCSI_MODULE) || \
- defined(CONFIG_CYBERSTORMII_SCSI) || defined(CONFIG_CYBERSTORMII_SCSI_MODULE) || \
- defined(CONFIG_FASTLANE_SCSI) || defined(CONFIG_FASTLANE_SCSI_MODULE)
-#define MULTIPLE_PAD_SIZES
-#else
-#define PAD_SIZE 1
-#endif
-#endif
-
-/* Macros for debugging messages */
-
-#define DEBUG_ESP
-/* #define DEBUG_ESP_DATA */
-/* #define DEBUG_ESP_QUEUE */
-/* #define DEBUG_ESP_DISCONNECT */
-/* #define DEBUG_ESP_STATUS */
-/* #define DEBUG_ESP_PHASES */
-/* #define DEBUG_ESP_WORKBUS */
-/* #define DEBUG_STATE_MACHINE */
-/* #define DEBUG_ESP_CMDS */
-/* #define DEBUG_ESP_IRQS */
-/* #define DEBUG_SDTR */
-/* #define DEBUG_ESP_SG */
-
-/* Use the following to sprinkle debugging messages in a way which
- * suits you if combinations of the above become too verbose when
- * trying to track down a specific problem.
- */
-/* #define DEBUG_ESP_MISC */
-
-#if defined(DEBUG_ESP)
-#define ESPLOG(foo) printk foo
-#else
-#define ESPLOG(foo)
-#endif /* (DEBUG_ESP) */
-
-#if defined(DEBUG_ESP_DATA)
-#define ESPDATA(foo) printk foo
-#else
-#define ESPDATA(foo)
-#endif
-
-#if defined(DEBUG_ESP_QUEUE)
-#define ESPQUEUE(foo) printk foo
-#else
-#define ESPQUEUE(foo)
-#endif
-
-#if defined(DEBUG_ESP_DISCONNECT)
-#define ESPDISC(foo) printk foo
-#else
-#define ESPDISC(foo)
-#endif
-
-#if defined(DEBUG_ESP_STATUS)
-#define ESPSTAT(foo) printk foo
-#else
-#define ESPSTAT(foo)
-#endif
-
-#if defined(DEBUG_ESP_PHASES)
-#define ESPPHASE(foo) printk foo
-#else
-#define ESPPHASE(foo)
-#endif
-
-#if defined(DEBUG_ESP_WORKBUS)
-#define ESPBUS(foo) printk foo
-#else
-#define ESPBUS(foo)
-#endif
-
-#if defined(DEBUG_ESP_IRQS)
-#define ESPIRQ(foo) printk foo
-#else
-#define ESPIRQ(foo)
-#endif
-
-#if defined(DEBUG_SDTR)
-#define ESPSDTR(foo) printk foo
-#else
-#define ESPSDTR(foo)
-#endif
-
-#if defined(DEBUG_ESP_MISC)
-#define ESPMISC(foo) printk foo
-#else
-#define ESPMISC(foo)
-#endif
-
-/*
- * padding for register structure
- */
-#ifdef CONFIG_JAZZ_ESP
-#define EREGS_PAD(n)
-#else
-#ifndef MULTIPLE_PAD_SIZES
-#define EREGS_PAD(n) unchar n[PAD_SIZE];
-#endif
-#endif
-
-/* The ESP SCSI controllers have their register sets in three
- * "classes":
- *
- * 1) Registers which are both read and write.
- * 2) Registers which are read only.
- * 3) Registers which are write only.
- *
- * Yet, they all live within the same IO space.
- */
-
-#if !defined(__i386__) && !defined(__x86_64__)
-
-#ifndef MULTIPLE_PAD_SIZES
-
-#ifdef CONFIG_CPU_HAS_WB
-#include <asm/wbflush.h>
-#define esp_write(__reg, __val) do{(__reg) = (__val); wbflush();} while(0)
-#else
-#define esp_write(__reg, __val) ((__reg) = (__val))
-#endif
-#define esp_read(__reg) (__reg)
-
-struct ESP_regs {
- /* Access Description Offset */
- volatile unchar esp_tclow; /* rw Low bits of the transfer count 0x00 */
- EREGS_PAD(tlpad1);
- volatile unchar esp_tcmed; /* rw Mid bits of the transfer count 0x04 */
- EREGS_PAD(fdpad);
- volatile unchar esp_fdata; /* rw FIFO data bits 0x08 */
- EREGS_PAD(cbpad);
- volatile unchar esp_cmnd; /* rw SCSI command bits 0x0c */
- EREGS_PAD(stpad);
- volatile unchar esp_status; /* ro ESP status register 0x10 */
-#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
- EREGS_PAD(irqpd);
- volatile unchar esp_intrpt; /* ro Kind of interrupt 0x14 */
-#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
- EREGS_PAD(sspad);
- volatile unchar esp_sstep; /* ro Sequence step register 0x18 */
-#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
- EREGS_PAD(ffpad);
- volatile unchar esp_fflags; /* ro Bits of current FIFO info 0x1c */
-#define esp_soff esp_fflags /* wo Sync offset 0x1c */
- EREGS_PAD(cf1pd);
- volatile unchar esp_cfg1; /* rw First configuration register 0x20 */
- EREGS_PAD(cfpad);
- volatile unchar esp_cfact; /* wo Clock conversion factor 0x24 */
- EREGS_PAD(ctpad);
- volatile unchar esp_ctest; /* wo Chip test register 0x28 */
- EREGS_PAD(cf2pd);
- volatile unchar esp_cfg2; /* rw Second configuration register 0x2c */
- EREGS_PAD(cf3pd);
-
- /* The following is only found on the 53C9X series SCSI chips */
- volatile unchar esp_cfg3; /* rw Third configuration register 0x30 */
- EREGS_PAD(cf4pd);
- volatile unchar esp_cfg4; /* rw Fourth configuration register 0x34 */
- EREGS_PAD(thpd);
- /* The following is found on all chips except the NCR53C90 (ESP100) */
- volatile unchar esp_tchi; /* rw High bits of transfer count 0x38 */
-#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
- EREGS_PAD(fgpad);
- volatile unchar esp_fgrnd; /* rw Data base for fifo 0x3c */
-};
-
-#else /* MULTIPLE_PAD_SIZES */
-
-#define esp_write(__reg, __val) (*(__reg) = (__val))
-#define esp_read(__reg) (*(__reg))
-
-struct ESP_regs {
- unsigned char io_addr[64]; /* dummy */
- /* Access Description Offset */
-#define esp_tclow io_addr /* rw Low bits of the transfer count 0x00 */
-#define esp_tcmed io_addr + (1<<(esp->shift)) /* rw Mid bits of the transfer count 0x04 */
-#define esp_fdata io_addr + (2<<(esp->shift)) /* rw FIFO data bits 0x08 */
-#define esp_cmnd io_addr + (3<<(esp->shift)) /* rw SCSI command bits 0x0c */
-#define esp_status io_addr + (4<<(esp->shift)) /* ro ESP status register 0x10 */
-#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
-#define esp_intrpt io_addr + (5<<(esp->shift)) /* ro Kind of interrupt 0x14 */
-#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
-#define esp_sstep io_addr + (6<<(esp->shift)) /* ro Sequence step register 0x18 */
-#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
-#define esp_fflags io_addr + (7<<(esp->shift)) /* ro Bits of current FIFO info 0x1c */
-#define esp_soff esp_fflags /* wo Sync offset 0x1c */
-#define esp_cfg1 io_addr + (8<<(esp->shift)) /* rw First configuration register 0x20 */
-#define esp_cfact io_addr + (9<<(esp->shift)) /* wo Clock conversion factor 0x24 */
-#define esp_ctest io_addr + (10<<(esp->shift)) /* wo Chip test register 0x28 */
-#define esp_cfg2 io_addr + (11<<(esp->shift)) /* rw Second configuration register 0x2c */
-
- /* The following is only found on the 53C9X series SCSI chips */
-#define esp_cfg3 io_addr + (12<<(esp->shift)) /* rw Third configuration register 0x30 */
-#define esp_cfg4 io_addr + (13<<(esp->shift)) /* rw Fourth configuration register 0x34 */
-
- /* The following is found on all chips except the NCR53C90 (ESP100) */
-#define esp_tchi io_addr + (14<<(esp->shift)) /* rw High bits of transfer count 0x38 */
-#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
-#define esp_fgrnd io_addr + (15<<(esp->shift)) /* rw Data base for fifo 0x3c */
-};
-
-#endif
-
-#else /* !defined(__i386__) && !defined(__x86_64__) */
-
-#define esp_write(__reg, __val) outb((__val), (__reg))
-#define esp_read(__reg) inb((__reg))
-
-struct ESP_regs {
- unsigned int io_addr;
- /* Access Description Offset */
-#define esp_tclow io_addr /* rw Low bits of the transfer count 0x00 */
-#define esp_tcmed io_addr + 1 /* rw Mid bits of the transfer count 0x04 */
-#define esp_fdata io_addr + 2 /* rw FIFO data bits 0x08 */
-#define esp_cmnd io_addr + 3 /* rw SCSI command bits 0x0c */
-#define esp_status io_addr + 4 /* ro ESP status register 0x10 */
-#define esp_busid esp_status /* wo Bus ID for select/reselect 0x10 */
-#define esp_intrpt io_addr + 5 /* ro Kind of interrupt 0x14 */
-#define esp_timeo esp_intrpt /* wo Timeout value for select/resel 0x14 */
-#define esp_sstep io_addr + 6 /* ro Sequence step register 0x18 */
-#define esp_stp esp_sstep /* wo Transfer period per sync 0x18 */
-#define esp_fflags io_addr + 7 /* ro Bits of current FIFO info 0x1c */
-#define esp_soff esp_fflags /* wo Sync offset 0x1c */
-#define esp_cfg1 io_addr + 8 /* rw First configuration register 0x20 */
-#define esp_cfact io_addr + 9 /* wo Clock conversion factor 0x24 */
-#define esp_ctest io_addr + 10 /* wo Chip test register 0x28 */
-#define esp_cfg2 io_addr + 11 /* rw Second configuration register 0x2c */
-
- /* The following is only found on the 53C9X series SCSI chips */
-#define esp_cfg3 io_addr + 12 /* rw Third configuration register 0x30 */
-#define esp_cfg4 io_addr + 13 /* rw Fourth configuration register 0x34 */
-
- /* The following is found on all chips except the NCR53C90 (ESP100) */
-#define esp_tchi io_addr + 14 /* rw High bits of transfer count 0x38 */
-#define esp_uid esp_tchi /* ro Unique ID code 0x38 */
-#define esp_fgrnd io_addr + 15 /* rw Data base for fifo 0x3c */
-};
-
-#endif /* !defined(__i386__) && !defined(__x86_64__) */
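The three register-map variants above all hide behind the same esp_read()/esp_write() accessors; in the MULTIPLE_PAD_SIZES case register n sits at io_addr + (n << shift), with shift set per board. A small standalone illustration of that offset arithmetic; the base address and shift value below are made up, not taken from any real board:

#include <stdio.h>

/* Register n lives at io_addr + (n << shift), so a board that spaces
 * registers 16 bytes apart uses shift = 4.  Purely illustrative.
 */
static unsigned long esp_reg_addr(unsigned long io_addr,
				  unsigned int regnum,
				  unsigned int shift)
{
	return io_addr + ((unsigned long)regnum << shift);
}

int main(void)
{
	unsigned long base = 0xdd0000UL;	/* made-up base address */

	/* FIFO data is register 2, the status register is register 4 */
	printf("fdata  @ %#lx\n", esp_reg_addr(base, 2, 4));	/* 0xdd0020 */
	printf("status @ %#lx\n", esp_reg_addr(base, 4, 4));	/* 0xdd0040 */
	return 0;
}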
-
-/* Various revisions of the ESP board. */
-enum esp_rev {
- esp100 = 0x00, /* NCR53C90 - very broken */
- esp100a = 0x01, /* NCR53C90A */
- esp236 = 0x02,
- fas236 = 0x03,
- fas100a = 0x04,
- fast = 0x05,
- fas366 = 0x06,
- fas216 = 0x07,
- fsc = 0x08, /* SYM53C94-2 */
- espunknown = 0x09
-};
-
-/* We allocate one of these for each scsi device and attach it to
- * SDptr->hostdata for use in the driver
- */
-struct esp_device {
- unsigned char sync_min_period;
- unsigned char sync_max_offset;
- unsigned sync:1;
- unsigned wide:1;
- unsigned disconnect:1;
-};
-
-/* We get one of these for each ESP probed. */
-struct NCR_ESP {
- struct NCR_ESP *next; /* Next ESP on probed or NULL */
- struct ESP_regs *eregs; /* All esp registers */
- int dma; /* Who I do transfers with. */
- void *dregs; /* And his registers. */
- struct Scsi_Host *ehost; /* Backpointer to SCSI Host */
-
- void *edev; /* Pointer to controller base/SBus */
- int esp_id; /* Unique per-ESP ID number */
-
- /* ESP Configuration Registers */
- unsigned char config1; /* Copy of the 1st config register */
- unsigned char config2; /* Copy of the 2nd config register */
- unsigned char config3[16]; /* Copy of the 3rd config register */
-
- /* The current command we are sending to the ESP chip. This esp_command
- * ptr needs to be mapped in DVMA area so we can send commands and read
- * from the ESP fifo without burning precious CPU cycles. Programmed I/O
- * sucks when we have the DVMA to do it for us. The ESP is stupid and will
- * only send out 6, 10, and 12 byte SCSI commands, others we need to send
- * one byte at a time. esp_slowcmd being set says that we are doing one
- * of the command types ESP doesn't understand, esp_scmdp keeps track of
- * which byte we are sending, esp_scmdleft says how many bytes to go.
- */
- volatile unchar *esp_command; /* Location of command (CPU view) */
- __u32 esp_command_dvma; /* Location of command (DVMA view) */
- unsigned char esp_clen; /* Length of this command */
- unsigned char esp_slowcmd;
- unsigned char *esp_scmdp;
- unsigned char esp_scmdleft;
-
- /* The following are used to determine the cause of an IRQ. Upon every
- * IRQ entry we synchronize these with the hardware registers.
- */
- unchar ireg; /* Copy of ESP interrupt register */
- unchar sreg; /* Same for ESP status register */
- unchar seqreg; /* The ESP sequence register */
-
- /* The following is set when a premature interrupt condition is detected
- * in some FAS revisions.
- */
- unchar fas_premature_intr_workaround;
-
- /* To save register writes to the ESP, which can be expensive, we
- * keep track of the previous value that various registers had for
- * the last target we connected to. If they are the same for the
- * current target, we skip the register writes as they are not needed.
- */
- unchar prev_soff, prev_stp, prev_cfg3;
-
- /* For each target we keep track of save/restore data
- * pointer information. This needs to be updated majorly
- * when we add support for tagged queueing. -DaveM
- */
- struct esp_pointers {
- char *saved_ptr;
- struct scatterlist *saved_buffer;
- int saved_this_residual;
- int saved_buffers_residual;
- } data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/;
-
- /* Clock periods, frequencies, synchronization, etc. */
- unsigned int cfreq; /* Clock frequency in HZ */
- unsigned int cfact; /* Clock conversion factor */
- unsigned int ccycle; /* One ESP clock cycle */
- unsigned int ctick; /* One ESP clock time */
- unsigned int radelay; /* FAST chip req/ack delay */
- unsigned int neg_defp; /* Default negotiation period */
- unsigned int sync_defp; /* Default sync transfer period */
- unsigned int max_period; /* longest our period can be */
- unsigned int min_period; /* shortest period we can withstand */
- /* For slow to medium speed input clock rates we shoot for 5mb/s,
- * but for high input clock rates we try to do 10mb/s although I
- * don't think a transfer can even run that fast with an ESP even
- * with DMA2 scatter gather pipelining.
- */
-#define SYNC_DEFP_SLOW 0x32 /* 5mb/s */
-#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
-
- unsigned int snip; /* Sync. negotiation in progress */
- unsigned int wnip; /* WIDE negotiation in progress */
- unsigned int targets_present; /* targets spoken to before */
-
- int current_transfer_size; /* Set at beginning of data dma */
-
- unchar espcmdlog[32]; /* Log of current esp cmds sent. */
- unchar espcmdent; /* Current entry in esp cmd log. */
-
- /* Misc. info about this ESP */
- enum esp_rev erev; /* ESP revision */
- int irq; /* IRQ for this ESP */
- int scsi_id; /* Who am I as initiator? */
- int scsi_id_mask; /* Bitmask of 'me'. */
- int diff; /* Differential SCSI bus? */
- int slot; /* Slot the adapter occupies */
-
- /* Our command queues, only one cmd lives in the current_SC queue. */
- Scsi_Cmnd *issue_SC; /* Commands to be issued */
- Scsi_Cmnd *current_SC; /* Who is currently working the bus */
- Scsi_Cmnd *disconnected_SC; /* Commands disconnected from the bus */
-
- /* Message goo */
- unchar cur_msgout[16];
- unchar cur_msgin[16];
- unchar prevmsgout, prevmsgin;
- unchar msgout_len, msgin_len;
- unchar msgout_ctr, msgin_ctr;
-
- /* States that we cannot keep in the per cmd structure because they
- * cannot be associated with any specific command.
- */
- unchar resetting_bus;
- wait_queue_head_t reset_queue;
-
- unchar do_pio_cmds; /* Do command transfer with pio */
-
-	/* How many bits do we have to shift the registers */
- unsigned char shift;
-
- /* Functions handling DMA
- */
- /* Required functions */
- int (*dma_bytes_sent)(struct NCR_ESP *, int);
- int (*dma_can_transfer)(struct NCR_ESP *, Scsi_Cmnd *);
- void (*dma_dump_state)(struct NCR_ESP *);
- void (*dma_init_read)(struct NCR_ESP *, __u32, int);
- void (*dma_init_write)(struct NCR_ESP *, __u32, int);
- void (*dma_ints_off)(struct NCR_ESP *);
- void (*dma_ints_on)(struct NCR_ESP *);
- int (*dma_irq_p)(struct NCR_ESP *);
- int (*dma_ports_p)(struct NCR_ESP *);
- void (*dma_setup)(struct NCR_ESP *, __u32, int, int);
-
- /* Optional functions (i.e. may be initialized to 0) */
- void (*dma_barrier)(struct NCR_ESP *);
- void (*dma_drain)(struct NCR_ESP *);
- void (*dma_invalidate)(struct NCR_ESP *);
- void (*dma_irq_entry)(struct NCR_ESP *);
- void (*dma_irq_exit)(struct NCR_ESP *);
- void (*dma_led_off)(struct NCR_ESP *);
- void (*dma_led_on)(struct NCR_ESP *);
- void (*dma_poll)(struct NCR_ESP *, unsigned char *);
- void (*dma_reset)(struct NCR_ESP *);
-
- /* Optional virtual DMA functions */
- void (*dma_mmu_get_scsi_one)(struct NCR_ESP *, Scsi_Cmnd *);
- void (*dma_mmu_get_scsi_sgl)(struct NCR_ESP *, Scsi_Cmnd *);
- void (*dma_mmu_release_scsi_one)(struct NCR_ESP *, Scsi_Cmnd *);
- void (*dma_mmu_release_scsi_sgl)(struct NCR_ESP *, Scsi_Cmnd *);
- void (*dma_advance_sg)(Scsi_Cmnd *);
-};
-
-/* Bitfield meanings for the above registers. */
-
-/* ESP config reg 1, read-write, found on all ESP chips */
-#define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */
-#define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */
-#define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */
-#define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */
-#define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */
-#define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */
-
-/* ESP config reg 2, read-write, found only on esp100a+esp200+esp236+fsc chips */
-#define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236,fsc) */
-#define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236,fsc) */
-#define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */
-#define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tmode only) */
-#define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */
-#define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */
-#define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236,fsc) */
-#define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216,fsc) */
-#define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */
-#define ESP_CONFIG2_RFB 0x80 /* Reserve FIFO byte (fsc) */
-#define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */
-
-/* ESP config register 3 read-write, found only esp236+fas236+fas100a+fsc chips */
-#define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/fas366) */
-#define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236/fsc) */
-#define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a) */
-#define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236/fsc) */
-#define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a) */
-#define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236/fsc) */
-#define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a) */
-#define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236/fsc) */
-#define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a) */
-#define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236/fsc) */
-#define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236/fsc) */
-#define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236/fsc) */
-#define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236/fsc) */
-
-/* ESP config register 4 read-write, found only on fsc chips */
-#define ESP_CONFIG4_BBTE 0x01 /* Back-to-Back transfer enable */
-#define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode */
-#define ESP_CONFIG4_EAN 0x04 /* Enable Active Negotiation */
-
-/* ESP command register read-write */
-/* Group 1 commands: These may be sent at any point in time to the ESP
- *                    chip.  None of them can generate interrupts except
- * the "SCSI bus reset" command if you have not disabled
- * SCSI reset interrupts in the config1 ESP register.
- */
-#define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */
-#define ESP_CMD_FLUSH 0x01 /* FIFO Flush */
-#define ESP_CMD_RC 0x02 /* Chip reset */
-#define ESP_CMD_RS 0x03 /* SCSI bus reset */
-
-/* Group 2 commands: ESP must be an initiator and connected to a target
- * for these commands to work.
- */
-#define ESP_CMD_TI 0x10 /* Transfer Information */
-#define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */
-#define ESP_CMD_MOK 0x12 /* Message okie-dokie */
-#define ESP_CMD_TPAD 0x18 /* Transfer Pad */
-#define ESP_CMD_SATN 0x1a /* Set ATN */
-#define ESP_CMD_RATN 0x1b /* De-assert ATN */
-
-/* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected
- * to a target as the initiator for these commands to work.
- */
-#define ESP_CMD_SMSG 0x20 /* Send message */
-#define ESP_CMD_SSTAT 0x21 /* Send status */
-#define ESP_CMD_SDATA 0x22 /* Send data */
-#define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */
-#define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */
-#define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */
-#define ESP_CMD_DCNCT 0x27 /* Disconnect */
-#define ESP_CMD_RMSG 0x28 /* Receive Message */
-#define ESP_CMD_RCMD 0x29 /* Receive Command */
-#define ESP_CMD_RDATA 0x2a /* Receive Data */
-#define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */
-
-/* Group 4 commands: The ESP must be in the disconnected state and must
- * not be connected to any targets as initiator for
- * these commands to work.
- */
-#define ESP_CMD_RSEL 0x40 /* Reselect */
-#define ESP_CMD_SEL 0x41 /* Select w/o ATN */
-#define ESP_CMD_SELA 0x42 /* Select w/ATN */
-#define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */
-#define ESP_CMD_ESEL 0x44 /* Enable selection */
-#define ESP_CMD_DSEL 0x45 /* Disable selections */
-#define ESP_CMD_SA3 0x46 /* Select w/ATN3 */
-#define ESP_CMD_RSEL3 0x47 /* Reselect3 */
-
-/* This bit enables the ESP's DMA */
-#define ESP_CMD_DMA 0x80 /* Do DMA? */
-
-/* ESP status register read-only */
-#define ESP_STAT_PIO 0x01 /* IO phase bit */
-#define ESP_STAT_PCD 0x02 /* CD phase bit */
-#define ESP_STAT_PMSG 0x04 /* MSG phase bit */
-#define ESP_STAT_PMASK 0x07 /* Mask of phase bits */
-#define ESP_STAT_TDONE 0x08 /* Transfer Completed */
-#define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */
-#define ESP_STAT_PERR 0x20 /* Parity error */
-#define ESP_STAT_SPAM 0x40 /* Real bad error */
-/* This indicates the 'interrupt pending' condition, it is a reserved
- * bit on old revs of the ESP (ESP100, ESP100A, FAS100A).
- */
-#define ESP_STAT_INTR 0x80 /* Interrupt */
-
-/* The status register can be masked with ESP_STAT_PMASK and compared
- * with the following values to determine the current phase the ESP
- * (at least thinks it) is in. For our purposes we also add our own
- * software 'done' bit for our phase management engine.
- */
-#define ESP_DOP (0) /* Data Out */
-#define ESP_DIP (ESP_STAT_PIO) /* Data In */
-#define ESP_CMDP (ESP_STAT_PCD) /* Command */
-#define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */
-#define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */
-#define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */
-
-/* ESP interrupt register read-only */
-#define ESP_INTR_S 0x01 /* Select w/o ATN */
-#define ESP_INTR_SATN 0x02 /* Select w/ATN */
-#define ESP_INTR_RSEL 0x04 /* Reselected */
-#define ESP_INTR_FDONE 0x08 /* Function done */
-#define ESP_INTR_BSERV 0x10 /* Bus service */
-#define ESP_INTR_DC 0x20 /* Disconnect */
-#define ESP_INTR_IC 0x40 /* Illegal command given */
-#define ESP_INTR_SR 0x80 /* SCSI bus reset detected */
-
-/* Interrupt status macros */
-#define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR))
-#define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC))
-#define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN))
-#define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S))
-#define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \
- (ESP_SELECT_WITHOUT_ATN_IRQ(esp)))
-#define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL))
-
-/* ESP sequence step register read-only */
-#define ESP_STEP_VBITS 0x07 /* Valid bits */
-#define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */
-#define ESP_STEP_SID 0x01 /* One msg byte sent */
-#define ESP_STEP_NCMD 0x02 /* Was not in command phase */
-#define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd
- * bytes to be lost
- */
-#define ESP_STEP_FINI4 0x04 /* Command was sent ok */
-
-/* Ho hum, some ESPs set the step register to this as well... */
-#define ESP_STEP_FINI5 0x05
-#define ESP_STEP_FINI6 0x06
-#define ESP_STEP_FINI7 0x07
-#define ESP_STEP_SOM 0x08 /* Synchronous Offset Max */
-
-/* ESP chip-test register read-write */
-#define ESP_TEST_TARG 0x01 /* Target test mode */
-#define ESP_TEST_INI 0x02 /* Initiator test mode */
-#define ESP_TEST_TS 0x04 /* Tristate test mode */
-
-/* ESP unique ID register read-only, found on fas236+fas100a+fsc only */
-#define ESP_UID_F100A 0x00 /* FAS100A */
-#define ESP_UID_F236 0x02 /* FAS236 */
-#define ESP_UID_FSC 0xa2 /* NCR53CF9x-2 */
-#define ESP_UID_REV 0x07 /* ESP revision */
-#define ESP_UID_FAM 0xf8 /* ESP family */
-
-/* ESP fifo flags register read-only */
-/* Note that the following implies a 16 byte FIFO on the ESP. */
-#define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */
-#define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100,fsc) */
-#define ESP_FF_SSTEP 0xe0 /* Sequence step */
-
-/* ESP clock conversion factor register write-only */
-#define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */
-#define ESP_CCF_NEVER 0x01 /* Set it to this and die */
-#define ESP_CCF_F2 0x02 /* 10MHz */
-#define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */
-#define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */
-#define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */
-#define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */
-#define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */
-
-#define ESP_BUS_TIMEOUT 275 /* In milli-seconds */
-#define ESP_TIMEO_CONST 8192
-#define FSC_TIMEO_CONST 7668
-#define ESP_NEG_DEFP(mhz, cfact) \
- ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
-#define FSC_NEG_DEFP(mhz, cfact) \
- ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (7668 * (cfact)))
-#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))
-#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))
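To make the timing macros above concrete, here is a worked evaluation for one plausible configuration, a 40MHz input clock with clock conversion factor 8; the numbers are only an illustration of the arithmetic, not values taken from any particular board, and the two macros are restated verbatim so the snippet compiles on its own:

#include <stdio.h>

#define ESP_BUS_TIMEOUT 275	/* In milliseconds */
#define ESP_NEG_DEFP(mhz, cfact) \
	((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))

int main(void)
{
	unsigned int hz = 40000000;	/* assumed 40MHz input clock */

	/* clock period in picoseconds: 1000000000 / 40000 = 25000 (25ns) */
	printf("ccycle   = %u\n", ESP_MHZ_TO_CYCLE(hz));

	/* default selection timeout register value with ccf = 8:
	 * 275 * 40000 / (8192 * 8) = 167
	 */
	printf("neg_defp = %u\n", ESP_NEG_DEFP(hz, 8));
	return 0;
}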
-
-
-/* UGLY, UGLY, UGLY! */
-extern int nesps, esps_in_use, esps_running;
-
-/* For our interrupt engine. */
-#define for_each_esp(esp) \
- for((esp) = espchain; (esp); (esp) = (esp)->next)
-
-
-/* External functions */
-extern void esp_bootup_reset(struct NCR_ESP *esp, struct ESP_regs *eregs);
-extern struct NCR_ESP *esp_allocate(struct scsi_host_template *, void *, int);
-extern void esp_deallocate(struct NCR_ESP *);
-extern void esp_release(void);
-extern void esp_initialize(struct NCR_ESP *);
-extern irqreturn_t esp_intr(int, void *);
-extern const char *esp_info(struct Scsi_Host *);
-extern int esp_queue(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
-extern int esp_abort(Scsi_Cmnd *);
-extern int esp_reset(Scsi_Cmnd *);
-extern int esp_proc_info(struct Scsi_Host *shost, char *buffer, char **start, off_t offset, int length,
- int inout);
-extern int esp_slave_alloc(struct scsi_device *);
-extern void esp_slave_destroy(struct scsi_device *);
-#endif /* !(NCR53C9X_H) */
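
The timing macros removed just above derive the chip's 275 ms selection-timeout register value from the clock rate and the clock conversion factor. A minimal user-space sketch of that arithmetic, assuming the clock is supplied in Hz (as the Amiga front-ends removed later in this patch pass it, e.g. esp->cfreq = 40000000) and an illustrative conversion factor of 8:

#include <stdio.h>

#define ESP_BUS_TIMEOUT 275	/* in milliseconds */
#define ESP_NEG_DEFP(mhz, cfact) \
	((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
#define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000))
#define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000))

int main(void)
{
	unsigned long cfreq = 40000000;	/* 40 MHz, as the Amiga hosts use */
	unsigned long cfact = 8;	/* illustrative conversion factor */
	unsigned long cycle = ESP_MHZ_TO_CYCLE(cfreq);

	printf("selection timeout register value: %lu\n",
	       ESP_NEG_DEFP(cfreq, cfact));
	printf("clock cycle: %lu ps, tick: %lu\n",
	       cycle, ESP_TICK(cfact, cycle));
	return 0;
}

For a 40 MHz part this evaluates to a timeout value of 167; the FSC variant applies the same formula with its 7668 constant.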
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 23f27c9c9895..5ac3a3e8dfaf 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -46,8 +46,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
struct Scsi_Host *instance = cmd->device->host;
/* don't allow DMA if the physical address is bad */
- if (addr & A2091_XFER_MASK ||
- (!dir_in && mm_end_of_chunk (addr, cmd->SCp.this_residual)))
+ if (addr & A2091_XFER_MASK)
{
HDATA(instance)->dma_bounce_len = (cmd->SCp.this_residual + 511)
& ~0x1ff;
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index d7255c8bf281..3aeec963940b 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -54,8 +54,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
* end of a physical memory chunk, then allocate a bounce
* buffer
*/
- if (addr & A3000_XFER_MASK ||
- (!dir_in && mm_end_of_chunk (addr, cmd->SCp.this_residual)))
+ if (addr & A3000_XFER_MASK)
{
HDATA(a3000_host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
& ~0x1ff;
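
Both the A2091 and A3000 hunks above keep the bounce-buffer fallback, whose size is the remaining transfer rounded up to a 512-byte boundary with (len + 511) & ~0x1ff. A tiny user-space check of that rounding, detached from any driver state:

#include <stdio.h>

/* Round a transfer length up to the next 512-byte boundary, as the
 * bounce-buffer setup above does. */
static unsigned long round_up_512(unsigned long len)
{
	return (len + 511) & ~0x1ffUL;
}

int main(void)
{
	unsigned long lens[] = { 1, 512, 513, 4096, 4097 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("%lu -> %lu\n", lens[i], round_up_512(lens[i]));
	return 0;
}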
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index d7235f42cf5f..bfd0e64964ac 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -859,44 +859,31 @@ static int setinqserial(struct aac_dev *dev, void *data, int cid)
le32_to_cpu(dev->adapter_info.serial[0]), cid);
}
-static void set_sense(u8 *sense_buf, u8 sense_key, u8 sense_code,
- u8 a_sense_code, u8 incorrect_length,
- u8 bit_pointer, u16 field_pointer,
- u32 residue)
+static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
+ u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
{
- sense_buf[0] = 0xF0; /* Sense data valid, err code 70h (current error) */
+ u8 *sense_buf = (u8 *)sense_data;
+ /* Sense data valid, err code 70h */
+ sense_buf[0] = 0x70; /* No info field */
sense_buf[1] = 0; /* Segment number, always zero */
- if (incorrect_length) {
- sense_buf[2] = sense_key | 0x20;/* Set ILI bit | sense key */
- sense_buf[3] = BYTE3(residue);
- sense_buf[4] = BYTE2(residue);
- sense_buf[5] = BYTE1(residue);
- sense_buf[6] = BYTE0(residue);
- } else
- sense_buf[2] = sense_key; /* Sense key */
-
- if (sense_key == ILLEGAL_REQUEST)
- sense_buf[7] = 10; /* Additional sense length */
- else
- sense_buf[7] = 6; /* Additional sense length */
+ sense_buf[2] = sense_key; /* Sense key */
sense_buf[12] = sense_code; /* Additional sense code */
sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
+
if (sense_key == ILLEGAL_REQUEST) {
- sense_buf[15] = 0;
+ sense_buf[7] = 10; /* Additional sense length */
- if (sense_code == SENCODE_INVALID_PARAM_FIELD)
- sense_buf[15] = 0x80;/* Std sense key specific field */
+ sense_buf[15] = bit_pointer;
/* Illegal parameter is in the parameter block */
-
if (sense_code == SENCODE_INVALID_CDB_FIELD)
- sense_buf[15] = 0xc0;/* Std sense key specific field */
+ sense_buf[15] |= 0xc0;/* Std sense key specific field */
/* Illegal parameter is in the CDB block */
- sense_buf[15] |= bit_pointer;
sense_buf[16] = field_pointer >> 8; /* MSB */
sense_buf[17] = field_pointer; /* LSB */
- }
+ } else
+ sense_buf[7] = 6; /* Additional sense length */
}
static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
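
The reworked set_sense() above always emits fixed-format (response code 70h) sense data: sense key at byte 2, additional length at byte 7, ASC/ASCQ at bytes 12 and 13, and for ILLEGAL REQUEST a sense-key-specific field pointer in bytes 15-17. A small user-space sketch of that layout; it simplifies the driver's logic, and the ASC value fed in here is just an illustrative "invalid field in CDB" example:

#include <stdio.h>
#include <string.h>

#define ILLEGAL_REQUEST	0x05	/* SCSI sense key */

/* Fill an 18-byte fixed-format sense buffer the way the reworked
 * set_sense() does: key at byte 2, additional length at byte 7,
 * ASC/ASCQ at bytes 12/13, and for ILLEGAL REQUEST a sense-key
 * specific field pointer in bytes 15..17. */
static void fill_sense(unsigned char *buf, unsigned char key,
		       unsigned char asc, unsigned char ascq,
		       int in_cdb, unsigned char bit_ptr,
		       unsigned short field_ptr)
{
	memset(buf, 0, 18);
	buf[0] = 0x70;			/* current error, fixed format */
	buf[2] = key;
	buf[12] = asc;
	buf[13] = ascq;
	if (key == ILLEGAL_REQUEST) {
		buf[7] = 10;		/* additional sense length */
		buf[15] = bit_ptr;
		if (in_cdb)
			buf[15] |= 0xc0; /* SKSV + C/D: error is in the CDB */
		buf[16] = field_ptr >> 8;
		buf[17] = field_ptr & 0xff;
	} else {
		buf[7] = 6;
	}
}

int main(void)
{
	unsigned char sense[18];
	int i;

	/* ASC 0x24/0x00 ("invalid field in CDB"), bit 7 of CDB byte 2 */
	fill_sense(sense, ILLEGAL_REQUEST, 0x24, 0x00, 1, 7, 2);
	for (i = 0; i < 18; i++)
		printf("%02x ", sense[i]);
	printf("\n");
	return 0;
}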
@@ -906,11 +893,9 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
- set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR,
- SENCODE_INTERNAL_TARGET_FAILURE,
- ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
- 0, 0);
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
@@ -1520,11 +1505,9 @@ static void io_callback(void *context, struct fib * fibptr)
le32_to_cpu(readreply->status));
#endif
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
- set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR,
- SENCODE_INTERNAL_TARGET_FAILURE,
- ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
- 0, 0);
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
@@ -1733,11 +1716,9 @@ static void synchronize_callback(void *context, struct fib *fibptr)
le32_to_cpu(synchronizereply->status));
cmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
- set_sense((u8 *)&dev->fsa_dev[cid].sense_data,
- HARDWARE_ERROR,
- SENCODE_INTERNAL_TARGET_FAILURE,
- ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0,
- 0, 0);
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
@@ -1945,10 +1926,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
{
dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
- set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
- ILLEGAL_REQUEST,
- SENCODE_INVALID_COMMAND,
- ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
+ ASENCODE_INVALID_COMMAND, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
SCSI_SENSE_BUFFERSIZE));
@@ -1995,10 +1975,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsicmd->result = DID_OK << 16 |
COMMAND_COMPLETE << 8 |
SAM_STAT_CHECK_CONDITION;
- set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
- ILLEGAL_REQUEST,
- SENCODE_INVALID_CDB_FIELD,
- ASENCODE_NO_SENSE, 0, 7, 2, 0);
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
+ ASENCODE_NO_SENSE, 7, 2);
memcpy(scsicmd->sense_buffer,
&dev->fsa_dev[cid].sense_data,
min_t(size_t,
@@ -2254,9 +2233,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
*/
dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
- set_sense((u8 *) &dev->fsa_dev[cid].sense_data,
- ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
- ASENCODE_INVALID_COMMAND, 0, 0, 0, 0);
+ set_sense(&dev->fsa_dev[cid].sense_data,
+ ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
+ ASENCODE_INVALID_COMMAND, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
min_t(size_t,
sizeof(dev->fsa_dev[cid].sense_data),
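
Each error path above reports CHECK CONDITION by packing a status byte, a message byte and a host byte into scsicmd->result before copying the sense data across. A small user-space sketch of how that 32-bit result word is assembled and unpacked (byte values as conventionally defined in the SCSI headers of this era):

#include <stdio.h>

#define DID_OK			0x00	/* host byte: no host-side error */
#define COMMAND_COMPLETE	0x00	/* message byte */
#define SAM_STAT_CHECK_CONDITION 0x02	/* status byte */

static unsigned int make_result(unsigned char host, unsigned char msg,
				unsigned char status)
{
	return host << 16 | msg << 8 | status;
}

int main(void)
{
	unsigned int result = make_result(DID_OK, COMMAND_COMPLETE,
					  SAM_STAT_CHECK_CONDITION);

	printf("result = 0x%08x (host 0x%02x, msg 0x%02x, status 0x%02x)\n",
	       result, (result >> 16) & 0xff, (result >> 8) & 0xff,
	       result & 0xff);
	return 0;
}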
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index f8afa358b6b6..abef05146d75 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -243,6 +243,7 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
* Search the list of AdapterFibContext addresses on the adapter
* to be sure this is a valid address
*/
+ spin_lock_irqsave(&dev->fib_lock, flags);
entry = dev->fib_list.next;
fibctx = NULL;
@@ -251,24 +252,25 @@ static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
/*
* Extract the AdapterFibContext from the Input parameters.
*/
- if (fibctx->unique == f.fibctx) { /* We found a winner */
+ if (fibctx->unique == f.fibctx) { /* We found a winner */
break;
}
entry = entry->next;
fibctx = NULL;
}
if (!fibctx) {
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context not found\n"));
return -EINVAL;
}
if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
(fibctx->size != sizeof(struct aac_fib_context))) {
+ spin_unlock_irqrestore(&dev->fib_lock, flags);
dprintk ((KERN_INFO "Fib Context corrupt?\n"));
return -EINVAL;
}
status = 0;
- spin_lock_irqsave(&dev->fib_lock, flags);
/*
* If there are no fibs to send back, then either wait or return
* -EAGAIN
@@ -414,8 +416,8 @@ static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
* @arg: ioctl arguments
*
* This routine returns the driver version.
- * Under Linux, there have been no version incompatibilities, so this is
- * simple!
+ * Under Linux, there have been no version incompatibilities, so this is
+ * simple!
*/
static int check_revision(struct aac_dev *dev, void __user *arg)
@@ -463,7 +465,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
u32 data_dir;
void __user *sg_user[32];
void *sg_list[32];
- u32 sg_indx = 0;
+ u32 sg_indx = 0;
u32 byte_count = 0;
u32 actual_fibsize64, actual_fibsize = 0;
int i;
@@ -517,11 +519,11 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
// Fix up srb for endian and force some values
srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
- srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
+ srbcmd->channel = cpu_to_le32(user_srbcmd->channel);
srbcmd->id = cpu_to_le32(user_srbcmd->id);
- srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
- srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
- srbcmd->flags = cpu_to_le32(flags);
+ srbcmd->lun = cpu_to_le32(user_srbcmd->lun);
+ srbcmd->timeout = cpu_to_le32(user_srbcmd->timeout);
+ srbcmd->flags = cpu_to_le32(flags);
srbcmd->retry_limit = 0; // Obsolete parameter
srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
@@ -786,9 +788,9 @@ static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
pci_info.bus = dev->pdev->bus->number;
pci_info.slot = PCI_SLOT(dev->pdev->devfn);
- if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
- dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
- return -EFAULT;
+ if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
+ dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
+ return -EFAULT;
}
return 0;
}
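
The commctrl.c hunks above close a race in next_getadapter_fib(): the AdapterFibContext list used to be searched before dev->fib_lock was taken, and once the lock moves earlier every early-exit path has to drop it. A minimal sketch of that lock-before-traverse discipline, assuming kernel context and a hypothetical ctx structure; it illustrates the pattern rather than the driver's exact code:

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct ctx {
	struct list_head entry;
	u32 unique;
};

/* Search @head for @unique with @lock held across both the walk and
 * the validation of whatever is found; every early exit drops it. */
static int find_and_use_ctx(spinlock_t *lock, struct list_head *head,
			    u32 unique)
{
	struct ctx *c, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(c, head, entry) {
		if (c->unique == unique) {
			found = c;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(lock, flags);
		return -EINVAL;
	}
	/* ... validate and act on 'found' while still holding the lock ... */
	spin_unlock_irqrestore(lock, flags);
	return 0;
}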
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0e8267c1e915..e80d2a0c46af 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -449,9 +449,6 @@ static int aac_slave_configure(struct scsi_device *sdev)
else if (depth < 2)
depth = 2;
scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, depth);
- if (!(((struct aac_dev *)host->hostdata)->adapter_info.options &
- AAC_OPT_NEW_COMM))
- blk_queue_max_segment_size(sdev->request_queue, 65536);
} else
scsi_adjust_queue_depth(sdev, 0, 1);
@@ -1137,21 +1134,25 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
* Lets override negotiations and drop the maximum SG limit to 34
*/
if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
- (aac->scsi_host_ptr->sg_tablesize > 34)) {
- aac->scsi_host_ptr->sg_tablesize = 34;
- aac->scsi_host_ptr->max_sectors
- = (aac->scsi_host_ptr->sg_tablesize * 8) + 112;
+ (shost->sg_tablesize > 34)) {
+ shost->sg_tablesize = 34;
+ shost->max_sectors = (shost->sg_tablesize * 8) + 112;
}
if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
- (aac->scsi_host_ptr->sg_tablesize > 17)) {
- aac->scsi_host_ptr->sg_tablesize = 17;
- aac->scsi_host_ptr->max_sectors
- = (aac->scsi_host_ptr->sg_tablesize * 8) + 112;
+ (shost->sg_tablesize > 17)) {
+ shost->sg_tablesize = 17;
+ shost->max_sectors = (shost->sg_tablesize * 8) + 112;
}
+ error = pci_set_dma_max_seg_size(pdev,
+ (aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
+ (shost->max_sectors << 9) : 65536);
+ if (error)
+ goto out_deinit;
+
/*
- * Firware printf works only with older firmware.
+ * Firmware printf works only with older firmware.
*/
if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
aac->printf_enabled = 1;
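
The linit.c change above swaps the per-request-queue segment cap for a per-device DMA cap: adapters with the new comm interface get shost->max_sectors worth of bytes per segment, older ones stay at 64 KiB, and the quirky boards first shrink sg_tablesize and recompute max_sectors as sg_tablesize * 8 + 112. A small user-space check of that arithmetic:

#include <stdio.h>

/* Mirror the quirk handling above: clamp the S/G table, derive
 * max_sectors, then pick the DMA segment-size cap in bytes. */
static void show(unsigned int sg_tablesize, int new_comm)
{
	unsigned int max_sectors = sg_tablesize * 8 + 112;
	unsigned int max_seg = new_comm ? max_sectors << 9 : 65536;

	printf("sg_tablesize=%u max_sectors=%u max_seg_size=%u bytes\n",
	       sg_tablesize, max_sectors, max_seg);
}

int main(void)
{
	show(34, 1);	/* AAC_QUIRK_34SG board with the new comm interface */
	show(17, 0);	/* AAC_QUIRK_17SG board without it */
	return 0;
}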
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 374ed025dc5a..ccef891d642f 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -12261,7 +12261,7 @@ static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
/*
* Write the EEPROM from 'cfg_buf'.
*/
-void __devinit
+static void __devinit
AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
{
ushort *wbuf;
@@ -12328,7 +12328,7 @@ AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
/*
* Write the EEPROM from 'cfg_buf'.
*/
-void __devinit
+static void __devinit
AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
{
ushort *wbuf;
@@ -12395,7 +12395,7 @@ AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
/*
* Write the EEPROM from 'cfg_buf'.
*/
-void __devinit
+static void __devinit
AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
{
ushort *wbuf;
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index 3bfd9296bbfa..93984c9dfe14 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -6472,7 +6472,7 @@ do_aic7xxx_isr(int irq, void *dev_id)
unsigned long cpu_flags;
struct aic7xxx_host *p;
- p = (struct aic7xxx_host *)dev_id;
+ p = dev_id;
if(!p)
return IRQ_NONE;
spin_lock_irqsave(p->host->host_lock, cpu_flags);
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index a67e29f83ae5..57786502e3ec 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,7 +48,7 @@ struct class_device_attribute;
/*The limit of outstanding scsi command that firmware can handle*/
#define ARCMSR_MAX_OUTSTANDING_CMD 256
#define ARCMSR_MAX_FREECCB_NUM 320
-#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/08/30"
+#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -248,6 +248,7 @@ struct FIRMWARE_INFO
#define ARCMSR_MESSAGE_START_BGRB 0x00060008
#define ARCMSR_MESSAGE_START_DRIVER_MODE 0x000E0008
#define ARCMSR_MESSAGE_SET_POST_WINDOW 0x000F0008
+#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE 0x00100008
/* ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK */
#define ARCMSR_MESSAGE_FIRMWARE_OK 0x80000000
/* ioctl transfer */
@@ -256,6 +257,7 @@ struct FIRMWARE_INFO
#define ARCMSR_DRV2IOP_DATA_READ_OK 0x00000002
#define ARCMSR_DRV2IOP_CDB_POSTED 0x00000004
#define ARCMSR_DRV2IOP_MESSAGE_CMD_POSTED 0x00000008
+#define ARCMSR_DRV2IOP_END_OF_INTERRUPT 0x00000010
/* data tunnel buffer between user space program and its firmware */
/* user space data to iop 128bytes */
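
The two defines added above (ARCMSR_MESSAGE_ACTIVE_EOI_MODE and ARCMSR_DRV2IOP_END_OF_INTERRUPT) are exercised by the type-B adapter paths in arcmsr_hba.c further down. A condensed sketch of that doorbell handshake, assuming kernel context; the poll count and the way the doorbell is cleared here are illustrative, the driver uses its own clear pattern:

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define ARCMSR_MESSAGE_ACTIVE_EOI_MODE		0x00100008
#define ARCMSR_DRV2IOP_END_OF_INTERRUPT		0x00000010
#define ARCMSR_IOP2DRV_MESSAGE_CMD_DONE		0x00000002

/* Post a message to the IOP, wait for it to signal completion, clear
 * the doorbell, then acknowledge with end-of-interrupt. */
static int post_message_and_ack(void __iomem *drv2iop, void __iomem *iop2drv,
				u32 message)
{
	int i;

	writel(message, drv2iop);
	for (i = 0; i < 2000; i++) {
		if (readl(iop2drv) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
			/* clear the doorbell (the driver writes its own
			 * clear pattern here), then acknowledge it */
			writel(ARCMSR_IOP2DRV_MESSAGE_CMD_DONE, iop2drv);
			writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, drv2iop);
			return 0;
		}
		msleep(10);
	}
	return -ETIMEDOUT;
}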
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f4a202e8df26..4f9ff32cfed0 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -315,9 +315,6 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
(0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
}
- reg = (struct MessageUnit_B *)(dma_coherent +
- ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
-
dma_addr = dma_coherent_handle;
ccb_tmp = (struct CommandControlBlock *)dma_coherent;
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
@@ -371,8 +368,8 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
out:
dma_free_coherent(&acb->pdev->dev,
- ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20,
- acb->dma_coherent, acb->dma_coherent_handle);
+ (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
+ sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
return -ENOMEM;
}
@@ -509,6 +506,7 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
, reg->iop2drv_doorbell_reg);
+ writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
return 0x00;
}
msleep(10);
@@ -748,6 +746,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t fla
, ccb->startdone
, atomic_read(&acb->ccboutstandingcount));
}
+ else
arcmsr_report_ccb_state(acb, ccb, flag_ccb);
}
@@ -886,7 +885,7 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
}
}
-static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
+static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
@@ -906,6 +905,8 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
nseg = scsi_dma_map(pcmd);
+ if (nseg > ARCMSR_MAX_SG_ENTRIES)
+ return FAILED;
BUG_ON(nseg < 0);
if (nseg) {
@@ -946,6 +947,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
ccb->ccb_flags |= CCB_FLAG_WRITE;
}
+ return SUCCESS;
}
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
@@ -1036,18 +1038,22 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
iounmap(acb->pmuA);
+ dma_free_coherent(&acb->pdev->dev,
+ ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
+ acb->dma_coherent,
+ acb->dma_coherent_handle);
break;
}
case ACB_ADAPTER_TYPE_B: {
struct MessageUnit_B *reg = acb->pmuB;
iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
+ dma_free_coherent(&acb->pdev->dev,
+ (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
+ sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
}
}
- dma_free_coherent(&acb->pdev->dev,
- ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
- acb->dma_coherent,
- acb->dma_coherent_handle);
+
}
void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
@@ -1273,7 +1279,9 @@ static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
return 1;
writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
-
+ /* Flush the doorbell-clear write with a dummy read so the hardware really latches the clear bit before we signal end of interrupt. */
+ readl(reg->iop2drv_doorbell_reg);
+ writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
arcmsr_iop2drv_data_wrote_handle(acb);
}
@@ -1380,12 +1388,13 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
case ARCMSR_MESSAGE_READ_RQBUFFER: {
unsigned long *ver_addr;
- dma_addr_t buf_handle;
uint8_t *pQbuffer, *ptmpQbuffer;
int32_t allxfer_len = 0;
+ void *tmp;
- ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
- if (!ver_addr) {
+ tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
+ ver_addr = (unsigned long *)tmp;
+ if (!tmp) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
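
The buffer handling above moves from pci_alloc_consistent() to a plain kmalloc(GFP_KERNEL | GFP_DMA): the 1032-byte staging area is only ever copied by the CPU, so coherent DMA memory is unnecessary (keeping GFP_DMA is read here as preserving the original addressing constraint, which is an assumption). A minimal sketch of the resulting allocate/copy/free pattern, assuming kernel context:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Stage @len bytes from @src in a kmalloc'd buffer, use them, and
 * free the buffer again; the failure path returns before any use. */
static int stage_and_use(const void *src, size_t len)
{
	void *tmp = kmalloc(len, GFP_KERNEL | GFP_DMA);

	if (!tmp)
		return -ENOMEM;
	memcpy(tmp, src, len);
	/* ... hand the staged bytes to the message path ... */
	kfree(tmp);
	return 0;
}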
@@ -1421,18 +1430,19 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
memcpy(pcmdmessagefld->messagedatabuffer, (uint8_t *)ver_addr, allxfer_len);
pcmdmessagefld->cmdmessage.Length = allxfer_len;
pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
- pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
+ kfree(tmp);
}
break;
case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
unsigned long *ver_addr;
- dma_addr_t buf_handle;
int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
uint8_t *pQbuffer, *ptmpuserbuffer;
+ void *tmp;
- ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
- if (!ver_addr) {
+ tmp = kmalloc(1032, GFP_KERNEL|GFP_DMA);
+ ver_addr = (unsigned long *)tmp;
+ if (!tmp) {
retvalue = ARCMSR_MESSAGE_FAIL;
goto message_out;
}
@@ -1482,7 +1492,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
retvalue = ARCMSR_MESSAGE_FAIL;
}
}
- pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
+ kfree(tmp);
}
break;
@@ -1682,8 +1692,11 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd,
ccb = arcmsr_get_freeccb(acb);
if (!ccb)
return SCSI_MLQUEUE_HOST_BUSY;
-
- arcmsr_build_ccb(acb, ccb, cmd);
+ if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
+ cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
+ cmd->scsi_done(cmd);
+ return 0;
+ }
arcmsr_post_ccb(acb, ccb);
return 0;
}
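
arcmsr_build_ccb() now reports failure when scsi_dma_map() yields more segments than the firmware accepts, and arcmsr_queue_command() completes the command with an error instead of posting it. A sketch of that guard, assuming kernel context; the segment limit and the simplified result code here are illustrative (the patch uses ARCMSR_MAX_SG_ENTRIES and its own result encoding):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#define MAX_FW_SG_ENTRIES 38	/* illustrative limit, not the driver's define */

/* If the mapped scatter-gather list is longer than the firmware can
 * take, complete the command with an error instead of posting it. */
static int sketch_queuecommand(struct scsi_cmnd *cmd)
{
	int nseg = scsi_dma_map(cmd);

	if (nseg < 0)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (nseg > MAX_FW_SG_ENTRIES) {
		scsi_dma_unmap(cmd);
		cmd->result = DID_ERROR << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	/* ... build the command control block and post it ... */
	return 0;
}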
@@ -1844,7 +1857,7 @@ static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
}
}
-static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
+static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
struct MessageUnit_B *reg = acb->pmuB;
@@ -1878,7 +1891,7 @@ static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
poll_ccb_done = (ccb == poll_ccb) ? 1:0;
if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
- if (ccb->startdone == ARCMSR_CCB_ABORTED) {
+ if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
printk(KERN_NOTICE "arcmsr%d: \
scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n"
,acb->host->host_no
@@ -1901,7 +1914,7 @@ static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb, \
} /*drain reply FIFO*/
}
-static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, \
+static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
struct CommandControlBlock *poll_ccb)
{
switch (acb->adapter_type) {
@@ -2026,6 +2039,7 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
do {
firmware_state = readl(reg->iop2drv_doorbell_reg);
} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
+ writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
}
break;
}
@@ -2090,19 +2104,39 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
}
}
+static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A:
+ return;
+ case ACB_ADAPTER_TYPE_B:
+ {
+ struct MessageUnit_B *reg = acb->pmuB;
+ writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg);
+ if(arcmsr_hbb_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE "arcmsr: timeout waiting for the IOP to enable EOI mode\n");
+ return;
+ }
+ }
+ break;
+ }
+ return;
+}
+
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
uint32_t intmask_org;
- arcmsr_wait_firmware_ready(acb);
- arcmsr_iop_confirm(acb);
/* disable all outbound interrupt */
intmask_org = arcmsr_disable_outbound_ints(acb);
+ arcmsr_wait_firmware_ready(acb);
+ arcmsr_iop_confirm(acb);
arcmsr_get_firmware_spec(acb);
/*start background rebuild*/
arcmsr_start_adapter_bgrb(acb);
/* empty doorbell Qbuffer if door bell ringed */
arcmsr_clear_doorbell_queue_buffer(acb);
+ arcmsr_enable_eoi_mode(acb);
/* enable outbound Post Queue,outbound doorbell Interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
acb->acb_flags |= ACB_F_IOP_INITED;
@@ -2275,6 +2309,7 @@ static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
arcmsr_start_adapter_bgrb(acb);
/* empty doorbell Qbuffer if door bell ringed */
arcmsr_clear_doorbell_queue_buffer(acb);
+ arcmsr_enable_eoi_mode(acb);
/* enable outbound Post Queue,outbound doorbell Interrupt */
arcmsr_enable_outbound_ints(acb, intmask_org);
acb->acb_flags |= ACB_F_IOP_INITED;
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index eceacf6d49ea..3bedf2466bd1 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -1790,7 +1790,7 @@ int acornscsi_starttransfer(AS_Host *host)
return 0;
}
- residual = host->SCpnt->request_bufflen - host->scsi.SCp.scsi_xferred;
+ residual = scsi_bufflen(host->SCpnt) - host->scsi.SCp.scsi_xferred;
sbic_arm_write(host->scsi.io_port, SBIC_SYNCHTRANSFER, host->device[host->SCpnt->device->id].sync_xfer);
sbic_arm_writenext(host->scsi.io_port, residual >> 16);
@@ -2270,7 +2270,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4b: /* -> PHASE_STATUSIN */
case 0x8b: /* -> PHASE_STATUSIN */
/* DATA IN -> STATUS */
- host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_readstatusbyte(host);
@@ -2281,7 +2281,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4e: /* -> PHASE_MSGOUT */
case 0x8e: /* -> PHASE_MSGOUT */
/* DATA IN -> MESSAGE OUT */
- host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_sendmessage(host);
@@ -2291,7 +2291,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4f: /* message in */
case 0x8f: /* message in */
/* DATA IN -> MESSAGE IN */
- host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_message(host); /* -> PHASE_MSGIN, PHASE_DISCONNECT */
@@ -2319,7 +2319,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4b: /* -> PHASE_STATUSIN */
case 0x8b: /* -> PHASE_STATUSIN */
/* DATA OUT -> STATUS */
- host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_dma_adjust(host);
@@ -2331,7 +2331,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4e: /* -> PHASE_MSGOUT */
case 0x8e: /* -> PHASE_MSGOUT */
/* DATA OUT -> MESSAGE OUT */
- host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_dma_adjust(host);
@@ -2342,7 +2342,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x4f: /* message in */
case 0x8f: /* message in */
/* DATA OUT -> MESSAGE IN */
- host->scsi.SCp.scsi_xferred = host->SCpnt->request_bufflen -
+ host->scsi.SCp.scsi_xferred = scsi_bufflen(host->SCpnt) -
acornscsi_sbic_xfcount(host);
acornscsi_dma_stop(host);
acornscsi_dma_adjust(host);
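
The acornscsi.c hunks replace direct reads of the removed request_bufflen field with the scsi_bufflen() accessor when working out how much of the transfer remains. A one-line sketch of that bookkeeping, assuming kernel context:

#include <scsi/scsi_cmnd.h>

/* Bytes still to move for @cmd once @xferred bytes have been
 * transferred; the total now comes from the accessor, not a field. */
static unsigned int bytes_left(struct scsi_cmnd *cmd, unsigned int xferred)
{
	return scsi_bufflen(cmd) - xferred;
}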
diff --git a/drivers/scsi/arm/scsi.h b/drivers/scsi/arm/scsi.h
index bb6550e31926..138a521ba1a8 100644
--- a/drivers/scsi/arm/scsi.h
+++ b/drivers/scsi/arm/scsi.h
@@ -18,17 +18,32 @@
* The scatter-gather list handling. This contains all
* the yucky stuff that needs to be fixed properly.
*/
+
+/*
+ * copy_SCp_to_sg() assumes a contiguous allocation at @sg of at most @max
+ * entries of uninitialized memory. @SCp comes from the SCSI midlayer and
+ * carries a valid (possibly chained) sg list.
+ */
static inline int copy_SCp_to_sg(struct scatterlist *sg, struct scsi_pointer *SCp, int max)
{
int bufs = SCp->buffers_residual;
+ /* FIXME: It should be easy for drivers to loop on copy_SCp_to_sg()
+ * and to remove this BUG_ON; use min() in its place.
+ */
BUG_ON(bufs + 1 > max);
sg_set_buf(sg, SCp->ptr, SCp->this_residual);
- if (bufs)
- memcpy(sg + 1, SCp->buffer + 1,
- sizeof(struct scatterlist) * bufs);
+ if (bufs) {
+ struct scatterlist *src_sg;
+ unsigned i;
+
+ for_each_sg(sg_next(SCp->buffer), src_sg, bufs, i)
+ *(++sg) = *src_sg;
+ sg_mark_end(sg);
+ }
+
return bufs + 1;
}
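
The copy_SCp_to_sg() rewrite above is driven by chained scatterlists: once entries can be linked with sg_next(), neighbouring elements are no longer adjacent in memory, so the old memcpy() over sg arrays (and the SCp->buffer++ replaced in next_SCp() below) stops being valid. A minimal sketch of a chain-safe walk, assuming kernel context:

#include <linux/scatterlist.h>

/* With chained scatterlists the next element must be reached through
 * sg_next() (here via for_each_sg); pointer arithmetic over what used
 * to be a flat array is no longer valid. */
static unsigned int sg_total_len(struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	unsigned int total = 0;
	int i;

	for_each_sg(sgl, sg, nents, i)
		total += sg->length;
	return total;
}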
@@ -36,7 +51,7 @@ static inline int next_SCp(struct scsi_pointer *SCp)
{
int ret = SCp->buffers_residual;
if (ret) {
- SCp->buffer++;
+ SCp->buffer = sg_next(SCp->buffer);
SCp->buffers_residual--;
SCp->ptr = sg_virt(SCp->buffer);
SCp->this_residual = SCp->buffer->length;
@@ -68,46 +83,46 @@ static inline void init_SCp(struct scsi_cmnd *SCpnt)
{
memset(&SCpnt->SCp, 0, sizeof(struct scsi_pointer));
- if (SCpnt->use_sg) {
+ if (scsi_bufflen(SCpnt)) {
unsigned long len = 0;
- int buf;
- SCpnt->SCp.buffer = (struct scatterlist *) SCpnt->request_buffer;
- SCpnt->SCp.buffers_residual = SCpnt->use_sg - 1;
+ SCpnt->SCp.buffer = scsi_sglist(SCpnt);
+ SCpnt->SCp.buffers_residual = scsi_sg_count(SCpnt) - 1;
SCpnt->SCp.ptr = sg_virt(SCpnt->SCp.buffer);
SCpnt->SCp.this_residual = SCpnt->SCp.buffer->length;
- SCpnt->SCp.phase = SCpnt->request_bufflen;
+ SCpnt->SCp.phase = scsi_bufflen(SCpnt);
#ifdef BELT_AND_BRACES
- /*
- * Calculate correct buffer length. Some commands
- * come in with the wrong request_bufflen.
- */
- for (buf = 0; buf <= SCpnt->SCp.buffers_residual; buf++)
- len += SCpnt->SCp.buffer[buf].length;
-
- if (SCpnt->request_bufflen != len)
- printk(KERN_WARNING "scsi%d.%c: bad request buffer "
- "length %d, should be %ld\n", SCpnt->device->host->host_no,
- '0' + SCpnt->device->id, SCpnt->request_bufflen, len);
- SCpnt->request_bufflen = len;
+ { /*
+ * Calculate correct buffer length. Some commands
+ * come in with the wrong scsi_bufflen.
+ */
+ struct scatterlist *sg;
+ unsigned i, sg_count = scsi_sg_count(SCpnt);
+
+ scsi_for_each_sg(SCpnt, sg, sg_count, i)
+ len += sg->length;
+
+ if (scsi_bufflen(SCpnt) != len) {
+ printk(KERN_WARNING
+ "scsi%d.%c: bad request buffer "
+ "length %d, should be %ld\n",
+ SCpnt->device->host->host_no,
+ '0' + SCpnt->device->id,
+ scsi_bufflen(SCpnt), len);
+ /*
+ * FIXME: Totally naive fixup; we should abort
+ * with an error.
+ */
+ SCpnt->SCp.phase =
+ min_t(unsigned long, len,
+ scsi_bufflen(SCpnt));
+ }
+ }
#endif
} else {
- SCpnt->SCp.ptr = (unsigned char *)SCpnt->request_buffer;
- SCpnt->SCp.this_residual = SCpnt->request_bufflen;
- SCpnt->SCp.phase = SCpnt->request_bufflen;
- }
-
- /*
- * If the upper SCSI layers pass a buffer, but zero length,
- * we aren't interested in the buffer pointer.
- */
- if (SCpnt->SCp.this_residual == 0 && SCpnt->SCp.ptr) {
-#if 0 //def BELT_AND_BRACES
- printk(KERN_WARNING "scsi%d.%c: zero length buffer passed for "
- "command ", SCpnt->host->host_no, '0' + SCpnt->target);
- __scsi_print_command(SCpnt->cmnd);
-#endif
SCpnt->SCp.ptr = NULL;
+ SCpnt->SCp.this_residual = 0;
+ SCpnt->SCp.phase = 0;
}
}
diff --git a/drivers/scsi/blz1230.c b/drivers/scsi/blz1230.c
deleted file mode 100644
index 23f7c24ab809..000000000000
--- a/drivers/scsi/blz1230.c
+++ /dev/null
@@ -1,353 +0,0 @@
-/* blz1230.c: Driver for Blizzard 1230 SCSI IV Controller.
- *
- * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
- *
- * This driver is based on the CyberStorm driver, hence the occasional
- * reference to CyberStorm.
- */
-
-/* TODO:
- *
- * 1) Figure out how to make a cleaner merge with the sparc driver with regard
- * to the caches and the Sparc MMU mapping.
- * 2) Make as few routines required outside the generic driver. A lot of the
- * routines in this file used to be inline!
- */
-
-#include <linux/module.h>
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/interrupt.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
-
-#include <linux/zorro.h>
-#include <asm/irq.h>
-#include <asm/amigaints.h>
-#include <asm/amigahw.h>
-
-#include <asm/pgtable.h>
-
-#define MKIV 1
-
-/* The controller registers can be found in the Z2 config area at these
- * offsets:
- */
-#define BLZ1230_ESP_ADDR 0x8000
-#define BLZ1230_DMA_ADDR 0x10000
-#define BLZ1230II_ESP_ADDR 0x10000
-#define BLZ1230II_DMA_ADDR 0x10021
-
-
-/* The Blizzard 1230 DMA interface
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * Only two things can be programmed in the Blizzard DMA:
- * 1) The data direction is controlled by the status of bit 31 (1 = write)
- * 2) The source/dest address (word aligned, shifted one right) in bits 30-0
- *
- * Program DMA by first latching the highest byte of the address/direction
- * (i.e. bits 31-24 of the long word constructed as described in steps 1+2
- * above). Then write each byte of the address/direction (starting with the
- * top byte, working down) to the DMA address register.
- *
- * Figure out interrupt status by reading the ESP status byte.
- */
-struct blz1230_dma_registers {
- volatile unsigned char dma_addr; /* DMA address [0x0000] */
- unsigned char dmapad2[0x7fff];
- volatile unsigned char dma_latch; /* DMA latch [0x8000] */
-};
-
-struct blz1230II_dma_registers {
- volatile unsigned char dma_addr; /* DMA address [0x0000] */
- unsigned char dmapad2[0xf];
- volatile unsigned char dma_latch; /* DMA latch [0x0010] */
-};
-
-#define BLZ1230_DMA_WRITE 0x80000000
-
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static void dma_dump_state(struct NCR_ESP *esp);
-static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
-static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
-static void dma_ints_off(struct NCR_ESP *esp);
-static void dma_ints_on(struct NCR_ESP *esp);
-static int dma_irq_p(struct NCR_ESP *esp);
-static int dma_ports_p(struct NCR_ESP *esp);
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
-
-static volatile unsigned char cmd_buffer[16];
- /* This is where all commands are put
- * before they are transferred to the ESP chip
- * via PIO.
- */
-
-/***************************************************************** Detection */
-int __init blz1230_esp_detect(struct scsi_host_template *tpnt)
-{
- struct NCR_ESP *esp;
- struct zorro_dev *z = NULL;
- unsigned long address;
- struct ESP_regs *eregs;
- unsigned long board;
-
-#if MKIV
-#define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_IV_1260
-#define REAL_BLZ1230_ESP_ADDR BLZ1230_ESP_ADDR
-#define REAL_BLZ1230_DMA_ADDR BLZ1230_DMA_ADDR
-#else
-#define REAL_BLZ1230_ID ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060
-#define REAL_BLZ1230_ESP_ADDR BLZ1230II_ESP_ADDR
-#define REAL_BLZ1230_DMA_ADDR BLZ1230II_DMA_ADDR
-#endif
-
- if ((z = zorro_find_device(REAL_BLZ1230_ID, z))) {
- board = z->resource.start;
- if (request_mem_region(board+REAL_BLZ1230_ESP_ADDR,
- sizeof(struct ESP_regs), "NCR53C9x")) {
- /* Do some magic to figure out if the blizzard is
- * equipped with a SCSI controller
- */
- address = ZTWO_VADDR(board);
- eregs = (struct ESP_regs *)(address + REAL_BLZ1230_ESP_ADDR);
- esp = esp_allocate(tpnt, (void *)board + REAL_BLZ1230_ESP_ADDR,
- 0);
-
- esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
- udelay(5);
- if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7))
- goto err_out;
-
- /* Do command transfer with programmed I/O */
- esp->do_pio_cmds = 1;
-
- /* Required functions */
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = &dma_init_read;
- esp->dma_init_write = &dma_init_write;
- esp->dma_ints_off = &dma_ints_off;
- esp->dma_ints_on = &dma_ints_on;
- esp->dma_irq_p = &dma_irq_p;
- esp->dma_ports_p = &dma_ports_p;
- esp->dma_setup = &dma_setup;
-
- /* Optional functions */
- esp->dma_barrier = 0;
- esp->dma_drain = 0;
- esp->dma_invalidate = 0;
- esp->dma_irq_entry = 0;
- esp->dma_irq_exit = 0;
- esp->dma_led_on = 0;
- esp->dma_led_off = 0;
- esp->dma_poll = 0;
- esp->dma_reset = 0;
-
- /* SCSI chip speed */
- esp->cfreq = 40000000;
-
- /* The DMA registers on the Blizzard are mapped
- * relative to the device (i.e. in the same Zorro
- * I/O block).
- */
- esp->dregs = (void *)(address + REAL_BLZ1230_DMA_ADDR);
-
- /* ESP register base */
- esp->eregs = eregs;
-
- /* Set the command buffer */
- esp->esp_command = cmd_buffer;
- esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
-
- esp->irq = IRQ_AMIGA_PORTS;
- esp->slot = board+REAL_BLZ1230_ESP_ADDR;
- if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
- "Blizzard 1230 SCSI IV", esp->ehost))
- goto err_out;
-
- /* Figure out our scsi ID on the bus */
- esp->scsi_id = 7;
-
- /* We don't have a differential SCSI-bus. */
- esp->diff = 0;
-
- esp_initialize(esp);
-
- printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
- esps_running = esps_in_use;
- return esps_in_use;
- }
- }
- return 0;
-
- err_out:
- scsi_unregister(esp->ehost);
- esp_deallocate(esp);
- release_mem_region(board+REAL_BLZ1230_ESP_ADDR,
- sizeof(struct ESP_regs));
- return 0;
-}
-
-/************************************************************* DMA Functions */
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
-{
- /* Since the Blizzard DMA is fully dedicated to the ESP chip,
- * the number of bytes sent (to the ESP chip) equals the number
- * of bytes in the FIFO - there is no buffering in the DMA controller.
- * XXXX Do I read this right? It is from host to ESP, right?
- */
- return fifo_count;
-}
-
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- /* I don't think there's any limit on the Blizzard DMA. So we use what
- * the ESP chip can handle (24 bit).
- */
- unsigned long sz = sp->SCp.this_residual;
- if(sz > 0x1000000)
- sz = 0x1000000;
- return sz;
-}
-
-static void dma_dump_state(struct NCR_ESP *esp)
-{
- ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
- amiga_custom.intreqr, amiga_custom.intenar));
-}
-
-void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
-{
-#if MKIV
- struct blz1230_dma_registers *dregs =
- (struct blz1230_dma_registers *) (esp->dregs);
-#else
- struct blz1230II_dma_registers *dregs =
- (struct blz1230II_dma_registers *) (esp->dregs);
-#endif
-
- cache_clear(addr, length);
-
- addr >>= 1;
- addr &= ~(BLZ1230_DMA_WRITE);
-
- /* First set latch */
- dregs->dma_latch = (addr >> 24) & 0xff;
-
- /* Then pump the address to the DMA address register */
-#if MKIV
- dregs->dma_addr = (addr >> 24) & 0xff;
-#endif
- dregs->dma_addr = (addr >> 16) & 0xff;
- dregs->dma_addr = (addr >> 8) & 0xff;
- dregs->dma_addr = (addr ) & 0xff;
-}
-
-void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
-{
-#if MKIV
- struct blz1230_dma_registers *dregs =
- (struct blz1230_dma_registers *) (esp->dregs);
-#else
- struct blz1230II_dma_registers *dregs =
- (struct blz1230II_dma_registers *) (esp->dregs);
-#endif
-
- cache_push(addr, length);
-
- addr >>= 1;
- addr |= BLZ1230_DMA_WRITE;
-
- /* First set latch */
- dregs->dma_latch = (addr >> 24) & 0xff;
-
- /* Then pump the address to the DMA address register */
-#if MKIV
- dregs->dma_addr = (addr >> 24) & 0xff;
-#endif
- dregs->dma_addr = (addr >> 16) & 0xff;
- dregs->dma_addr = (addr >> 8) & 0xff;
- dregs->dma_addr = (addr ) & 0xff;
-}
-
-static void dma_ints_off(struct NCR_ESP *esp)
-{
- disable_irq(esp->irq);
-}
-
-static void dma_ints_on(struct NCR_ESP *esp)
-{
- enable_irq(esp->irq);
-}
-
-static int dma_irq_p(struct NCR_ESP *esp)
-{
- return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
-}
-
-static int dma_ports_p(struct NCR_ESP *esp)
-{
- return ((amiga_custom.intenar) & IF_PORTS);
-}
-
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
-{
- /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
- * so when (write) is true, it actually means READ!
- */
- if(write){
- dma_init_read(esp, addr, count);
- } else {
- dma_init_write(esp, addr, count);
- }
-}
-
-#define HOSTS_C
-
-int blz1230_esp_release(struct Scsi_Host *instance)
-{
-#ifdef MODULE
- unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
- esp_deallocate((struct NCR_ESP *)instance->hostdata);
- esp_release();
- release_mem_region(address, sizeof(struct ESP_regs));
- free_irq(IRQ_AMIGA_PORTS, esp_intr);
-#endif
- return 1;
-}
-
-
-static struct scsi_host_template driver_template = {
- .proc_name = "esp-blz1230",
- .proc_info = esp_proc_info,
- .name = "Blizzard1230 SCSI IV",
- .detect = blz1230_esp_detect,
- .slave_alloc = esp_slave_alloc,
- .slave_destroy = esp_slave_destroy,
- .release = blz1230_esp_release,
- .queuecommand = esp_queue,
- .eh_abort_handler = esp_abort,
- .eh_bus_reset_handler = esp_reset,
- .can_queue = 7,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/blz2060.c b/drivers/scsi/blz2060.c
deleted file mode 100644
index b6203ec00961..000000000000
--- a/drivers/scsi/blz2060.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/* blz2060.c: Driver for Blizzard 2060 SCSI Controller.
- *
- * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
- *
- * This driver is based on the CyberStorm driver, hence the occasional
- * reference to CyberStorm.
- */
-
-/* TODO:
- *
- * 1) Figure out how to make a cleaner merge with the sparc driver with regard
- * to the caches and the Sparc MMU mapping.
- * 2) Make as few routines required outside the generic driver. A lot of the
- * routines in this file used to be inline!
- */
-
-#include <linux/module.h>
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/interrupt.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
-
-#include <linux/zorro.h>
-#include <asm/irq.h>
-#include <asm/amigaints.h>
-#include <asm/amigahw.h>
-
-#include <asm/pgtable.h>
-
-/* The controller registers can be found in the Z2 config area at these
- * offsets:
- */
-#define BLZ2060_ESP_ADDR 0x1ff00
-#define BLZ2060_DMA_ADDR 0x1ffe0
-
-
-/* The Blizzard 2060 DMA interface
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * Only two things can be programmed in the Blizzard DMA:
- * 1) The data direction is controlled by the status of bit 31 (1 = write)
- * 2) The source/dest address (word aligned, shifted one right) in bits 30-0
- *
- * Figure out interrupt status by reading the ESP status byte.
- */
-struct blz2060_dma_registers {
- volatile unsigned char dma_led_ctrl; /* DMA led control [0x000] */
- unsigned char dmapad1[0x0f];
- volatile unsigned char dma_addr0; /* DMA address (MSB) [0x010] */
- unsigned char dmapad2[0x03];
- volatile unsigned char dma_addr1; /* DMA address [0x014] */
- unsigned char dmapad3[0x03];
- volatile unsigned char dma_addr2; /* DMA address [0x018] */
- unsigned char dmapad4[0x03];
- volatile unsigned char dma_addr3; /* DMA address (LSB) [0x01c] */
-};
-
-#define BLZ2060_DMA_WRITE 0x80000000
-
-/* DMA control bits */
-#define BLZ2060_DMA_LED 0x02 /* HD led control 1 = off */
-
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static void dma_dump_state(struct NCR_ESP *esp);
-static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
-static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
-static void dma_ints_off(struct NCR_ESP *esp);
-static void dma_ints_on(struct NCR_ESP *esp);
-static int dma_irq_p(struct NCR_ESP *esp);
-static void dma_led_off(struct NCR_ESP *esp);
-static void dma_led_on(struct NCR_ESP *esp);
-static int dma_ports_p(struct NCR_ESP *esp);
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
-
-static volatile unsigned char cmd_buffer[16];
- /* This is where all commands are put
- * before they are transferred to the ESP chip
- * via PIO.
- */
-
-/***************************************************************** Detection */
-int __init blz2060_esp_detect(struct scsi_host_template *tpnt)
-{
- struct NCR_ESP *esp;
- struct zorro_dev *z = NULL;
- unsigned long address;
-
- if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_2060, z))) {
- unsigned long board = z->resource.start;
- if (request_mem_region(board+BLZ2060_ESP_ADDR,
- sizeof(struct ESP_regs), "NCR53C9x")) {
- esp = esp_allocate(tpnt, (void *)board + BLZ2060_ESP_ADDR, 0);
-
- /* Do command transfer with programmed I/O */
- esp->do_pio_cmds = 1;
-
- /* Required functions */
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = &dma_init_read;
- esp->dma_init_write = &dma_init_write;
- esp->dma_ints_off = &dma_ints_off;
- esp->dma_ints_on = &dma_ints_on;
- esp->dma_irq_p = &dma_irq_p;
- esp->dma_ports_p = &dma_ports_p;
- esp->dma_setup = &dma_setup;
-
- /* Optional functions */
- esp->dma_barrier = 0;
- esp->dma_drain = 0;
- esp->dma_invalidate = 0;
- esp->dma_irq_entry = 0;
- esp->dma_irq_exit = 0;
- esp->dma_led_on = &dma_led_on;
- esp->dma_led_off = &dma_led_off;
- esp->dma_poll = 0;
- esp->dma_reset = 0;
-
- /* SCSI chip speed */
- esp->cfreq = 40000000;
-
- /* The DMA registers on the Blizzard are mapped
- * relative to the device (i.e. in the same Zorro
- * I/O block).
- */
- address = (unsigned long)ZTWO_VADDR(board);
- esp->dregs = (void *)(address + BLZ2060_DMA_ADDR);
-
- /* ESP register base */
- esp->eregs = (struct ESP_regs *)(address + BLZ2060_ESP_ADDR);
-
- /* Set the command buffer */
- esp->esp_command = cmd_buffer;
- esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
-
- esp->irq = IRQ_AMIGA_PORTS;
- request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
- "Blizzard 2060 SCSI", esp->ehost);
-
- /* Figure out our scsi ID on the bus */
- esp->scsi_id = 7;
-
- /* We don't have a differential SCSI-bus. */
- esp->diff = 0;
-
- esp_initialize(esp);
-
- printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
- esps_running = esps_in_use;
- return esps_in_use;
- }
- }
- return 0;
-}
-
-/************************************************************* DMA Functions */
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
-{
- /* Since the Blizzard DMA is fully dedicated to the ESP chip,
- * the number of bytes sent (to the ESP chip) equals the number
- * of bytes in the FIFO - there is no buffering in the DMA controller.
- * XXXX Do I read this right? It is from host to ESP, right?
- */
- return fifo_count;
-}
-
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- /* I don't think there's any limit on the Blizzard DMA. So we use what
- * the ESP chip can handle (24 bit).
- */
- unsigned long sz = sp->SCp.this_residual;
- if(sz > 0x1000000)
- sz = 0x1000000;
- return sz;
-}
-
-static void dma_dump_state(struct NCR_ESP *esp)
-{
- ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
- amiga_custom.intreqr, amiga_custom.intenar));
-}
-
-static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
-{
- struct blz2060_dma_registers *dregs =
- (struct blz2060_dma_registers *) (esp->dregs);
-
- cache_clear(addr, length);
-
- addr >>= 1;
- addr &= ~(BLZ2060_DMA_WRITE);
- dregs->dma_addr3 = (addr ) & 0xff;
- dregs->dma_addr2 = (addr >> 8) & 0xff;
- dregs->dma_addr1 = (addr >> 16) & 0xff;
- dregs->dma_addr0 = (addr >> 24) & 0xff;
-}
-
-static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
-{
- struct blz2060_dma_registers *dregs =
- (struct blz2060_dma_registers *) (esp->dregs);
-
- cache_push(addr, length);
-
- addr >>= 1;
- addr |= BLZ2060_DMA_WRITE;
- dregs->dma_addr3 = (addr ) & 0xff;
- dregs->dma_addr2 = (addr >> 8) & 0xff;
- dregs->dma_addr1 = (addr >> 16) & 0xff;
- dregs->dma_addr0 = (addr >> 24) & 0xff;
-}
-
-static void dma_ints_off(struct NCR_ESP *esp)
-{
- disable_irq(esp->irq);
-}
-
-static void dma_ints_on(struct NCR_ESP *esp)
-{
- enable_irq(esp->irq);
-}
-
-static int dma_irq_p(struct NCR_ESP *esp)
-{
- return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
-}
-
-static void dma_led_off(struct NCR_ESP *esp)
-{
- ((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl =
- BLZ2060_DMA_LED;
-}
-
-static void dma_led_on(struct NCR_ESP *esp)
-{
- ((struct blz2060_dma_registers *) (esp->dregs))->dma_led_ctrl = 0;
-}
-
-static int dma_ports_p(struct NCR_ESP *esp)
-{
- return ((amiga_custom.intenar) & IF_PORTS);
-}
-
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
-{
- /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
- * so when (write) is true, it actually means READ!
- */
- if(write){
- dma_init_read(esp, addr, count);
- } else {
- dma_init_write(esp, addr, count);
- }
-}
-
-#define HOSTS_C
-
-int blz2060_esp_release(struct Scsi_Host *instance)
-{
-#ifdef MODULE
- unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
-
- esp_deallocate((struct NCR_ESP *)instance->hostdata);
- esp_release();
- release_mem_region(address, sizeof(struct ESP_regs));
- free_irq(IRQ_AMIGA_PORTS, esp_intr);
-#endif
- return 1;
-}
-
-
-static struct scsi_host_template driver_template = {
- .proc_name = "esp-blz2060",
- .proc_info = esp_proc_info,
- .name = "Blizzard2060 SCSI",
- .detect = blz2060_esp_detect,
- .slave_alloc = esp_slave_alloc,
- .slave_destroy = esp_slave_destroy,
- .release = blz2060_esp_release,
- .queuecommand = esp_queue,
- .eh_abort_handler = esp_abort,
- .eh_bus_reset_handler = esp_reset,
- .can_queue = 7,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/cyberstorm.c b/drivers/scsi/cyberstorm.c
deleted file mode 100644
index c6b98a42e89d..000000000000
--- a/drivers/scsi/cyberstorm.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/* cyberstorm.c: Driver for CyberStorm SCSI Controller.
- *
- * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
- *
- * The CyberStorm SCSI driver is based on David S. Miller's ESP driver
- * for the Sparc computers.
- *
- * This work was made possible by Phase5 who willingly (and most generously)
- * supported me with hardware and all the information I needed.
- */
-
-/* TODO:
- *
- * 1) Figure out how to make a cleaner merge with the sparc driver with regard
- * to the caches and the Sparc MMU mapping.
- * 2) Make as few routines required outside the generic driver. A lot of the
- * routines in this file used to be inline!
- */
-
-#include <linux/module.h>
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/interrupt.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
-
-#include <linux/zorro.h>
-#include <asm/irq.h>
-#include <asm/amigaints.h>
-#include <asm/amigahw.h>
-
-#include <asm/pgtable.h>
-
-/* The controller registers can be found in the Z2 config area at these
- * offsets:
- */
-#define CYBER_ESP_ADDR 0xf400
-#define CYBER_DMA_ADDR 0xf800
-
-
-/* The CyberStorm DMA interface */
-struct cyber_dma_registers {
- volatile unsigned char dma_addr0; /* DMA address (MSB) [0x000] */
- unsigned char dmapad1[1];
- volatile unsigned char dma_addr1; /* DMA address [0x002] */
- unsigned char dmapad2[1];
- volatile unsigned char dma_addr2; /* DMA address [0x004] */
- unsigned char dmapad3[1];
- volatile unsigned char dma_addr3; /* DMA address (LSB) [0x006] */
- unsigned char dmapad4[0x3fb];
- volatile unsigned char cond_reg; /* DMA cond (ro) [0x402] */
-#define ctrl_reg cond_reg /* DMA control (wo) [0x402] */
-};
-
-/* DMA control bits */
-#define CYBER_DMA_LED 0x80 /* HD led control 1 = on */
-#define CYBER_DMA_WRITE 0x40 /* DMA direction. 1 = write */
-#define CYBER_DMA_Z3 0x20 /* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */
-
-/* DMA status bits */
-#define CYBER_DMA_HNDL_INTR 0x80 /* DMA IRQ pending? */
-
-/* The bits below appears to be Phase5 Debug bits only; they were not
- * described by Phase5 so using them may seem a bit stupid...
- */
-#define CYBER_HOST_ID 0x02 /* If set, host ID should be 7, otherwise
- * it should be 6.
- */
-#define CYBER_SLOW_CABLE 0x08 /* If *not* set, assume SLOW_CABLE */
-
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static void dma_dump_state(struct NCR_ESP *esp);
-static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
-static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
-static void dma_ints_off(struct NCR_ESP *esp);
-static void dma_ints_on(struct NCR_ESP *esp);
-static int dma_irq_p(struct NCR_ESP *esp);
-static void dma_led_off(struct NCR_ESP *esp);
-static void dma_led_on(struct NCR_ESP *esp);
-static int dma_ports_p(struct NCR_ESP *esp);
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
-
-static unsigned char ctrl_data = 0; /* Keep backup of the stuff written
- * to ctrl_reg. Always write a copy
- * to this register when writing to
- * the hardware register!
- */
-
-static volatile unsigned char cmd_buffer[16];
- /* This is where all commands are put
- * before they are transferred to the ESP chip
- * via PIO.
- */
-
-/***************************************************************** Detection */
-int __init cyber_esp_detect(struct scsi_host_template *tpnt)
-{
- struct NCR_ESP *esp;
- struct zorro_dev *z = NULL;
- unsigned long address;
-
- while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
- unsigned long board = z->resource.start;
- if ((z->id == ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM ||
- z->id == ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060) &&
- request_mem_region(board+CYBER_ESP_ADDR,
- sizeof(struct ESP_regs), "NCR53C9x")) {
- /* Figure out if this is a CyberStorm or really a
- * Fastlane/Blizzard Mk II by looking at the board size.
- * CyberStorm maps 64kB
- * (ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM does anyway)
- */
- if(z->resource.end-board != 0xffff) {
- release_mem_region(board+CYBER_ESP_ADDR,
- sizeof(struct ESP_regs));
- return 0;
- }
- esp = esp_allocate(tpnt, (void *)board + CYBER_ESP_ADDR, 0);
-
- /* Do command transfer with programmed I/O */
- esp->do_pio_cmds = 1;
-
- /* Required functions */
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = &dma_init_read;
- esp->dma_init_write = &dma_init_write;
- esp->dma_ints_off = &dma_ints_off;
- esp->dma_ints_on = &dma_ints_on;
- esp->dma_irq_p = &dma_irq_p;
- esp->dma_ports_p = &dma_ports_p;
- esp->dma_setup = &dma_setup;
-
- /* Optional functions */
- esp->dma_barrier = 0;
- esp->dma_drain = 0;
- esp->dma_invalidate = 0;
- esp->dma_irq_entry = 0;
- esp->dma_irq_exit = 0;
- esp->dma_led_on = &dma_led_on;
- esp->dma_led_off = &dma_led_off;
- esp->dma_poll = 0;
- esp->dma_reset = 0;
-
- /* SCSI chip speed */
- esp->cfreq = 40000000;
-
- /* The DMA registers on the CyberStorm are mapped
- * relative to the device (i.e. in the same Zorro
- * I/O block).
- */
- address = (unsigned long)ZTWO_VADDR(board);
- esp->dregs = (void *)(address + CYBER_DMA_ADDR);
-
- /* ESP register base */
- esp->eregs = (struct ESP_regs *)(address + CYBER_ESP_ADDR);
-
- /* Set the command buffer */
- esp->esp_command = cmd_buffer;
- esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
-
- esp->irq = IRQ_AMIGA_PORTS;
- request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
- "CyberStorm SCSI", esp->ehost);
- /* Figure out our scsi ID on the bus */
- /* The DMA cond flag contains a hardcoded jumper bit
- * which can be used to select host number 6 or 7.
- * However, even though it may change, we use a hardcoded
- * value of 7.
- */
- esp->scsi_id = 7;
-
- /* We don't have a differential SCSI-bus. */
- esp->diff = 0;
-
- esp_initialize(esp);
-
- printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
- esps_running = esps_in_use;
- return esps_in_use;
- }
- }
- return 0;
-}
-
-/************************************************************* DMA Functions */
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
-{
- /* Since the CyberStorm DMA is fully dedicated to the ESP chip,
- * the number of bytes sent (to the ESP chip) equals the number
- * of bytes in the FIFO - there is no buffering in the DMA controller.
- * XXXX Do I read this right? It is from host to ESP, right?
- */
- return fifo_count;
-}
-
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- /* I don't think there's any limit on the CyberDMA. So we use what
- * the ESP chip can handle (24 bit).
- */
- unsigned long sz = sp->SCp.this_residual;
- if(sz > 0x1000000)
- sz = 0x1000000;
- return sz;
-}
-
-static void dma_dump_state(struct NCR_ESP *esp)
-{
- ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
- esp->esp_id, ((struct cyber_dma_registers *)
- (esp->dregs))->cond_reg));
- ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
- amiga_custom.intreqr, amiga_custom.intenar));
-}
-
-static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
-{
- struct cyber_dma_registers *dregs =
- (struct cyber_dma_registers *) esp->dregs;
-
- cache_clear(addr, length);
-
- addr &= ~(1);
- dregs->dma_addr0 = (addr >> 24) & 0xff;
- dregs->dma_addr1 = (addr >> 16) & 0xff;
- dregs->dma_addr2 = (addr >> 8) & 0xff;
- dregs->dma_addr3 = (addr ) & 0xff;
- ctrl_data &= ~(CYBER_DMA_WRITE);
-
-	/* Check if the physical address is outside Z2 space and the
-	 * transfer is block-length/block-aligned in memory. If this is
-	 * the case, enable 32 bit transfer. In all other cases, fall back
-	 * to 16 bit transfer.
- * Obviously 32 bit transfer should be enabled if the DMA address
- * and length are 32 bit aligned. However, this leads to some
- * strange behavior. Even 64 bit aligned addr/length fails.
- * Until I've found a reason for this, 32 bit transfer is only
- * used for full-block transfers (1kB).
- * -jskov
- */
-#if 0
- if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) &&
- (addr < 0xff0000)))
- ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
- else
- ctrl_data |= CYBER_DMA_Z3; /* CHIP/Z3, do 32 bit DMA */
-#else
- ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
-#endif
- dregs->ctrl_reg = ctrl_data;
-}
-
-static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
-{
- struct cyber_dma_registers *dregs =
- (struct cyber_dma_registers *) esp->dregs;
-
- cache_push(addr, length);
-
- addr |= 1;
- dregs->dma_addr0 = (addr >> 24) & 0xff;
- dregs->dma_addr1 = (addr >> 16) & 0xff;
- dregs->dma_addr2 = (addr >> 8) & 0xff;
- dregs->dma_addr3 = (addr ) & 0xff;
- ctrl_data |= CYBER_DMA_WRITE;
-
- /* See comment above */
-#if 0
- if((addr & 0x3fc) || length & 0x3ff || ((addr > 0x200000) &&
- (addr < 0xff0000)))
- ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
- else
- ctrl_data |= CYBER_DMA_Z3; /* CHIP/Z3, do 32 bit DMA */
-#else
- ctrl_data &= ~(CYBER_DMA_Z3); /* Z2, do 16 bit DMA */
-#endif
- dregs->ctrl_reg = ctrl_data;
-}
-
-static void dma_ints_off(struct NCR_ESP *esp)
-{
- disable_irq(esp->irq);
-}
-
-static void dma_ints_on(struct NCR_ESP *esp)
-{
- enable_irq(esp->irq);
-}
-
-static int dma_irq_p(struct NCR_ESP *esp)
-{
- /* It's important to check the DMA IRQ bit in the correct way! */
- return ((esp_read(esp->eregs->esp_status) & ESP_STAT_INTR) &&
- ((((struct cyber_dma_registers *)(esp->dregs))->cond_reg) &
- CYBER_DMA_HNDL_INTR));
-}
-
-static void dma_led_off(struct NCR_ESP *esp)
-{
- ctrl_data &= ~CYBER_DMA_LED;
- ((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
-}
-
-static void dma_led_on(struct NCR_ESP *esp)
-{
- ctrl_data |= CYBER_DMA_LED;
- ((struct cyber_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
-}
-
-static int dma_ports_p(struct NCR_ESP *esp)
-{
- return ((amiga_custom.intenar) & IF_PORTS);
-}
-
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
-{
- /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
- * so when (write) is true, it actually means READ!
- */
- if(write){
- dma_init_read(esp, addr, count);
- } else {
- dma_init_write(esp, addr, count);
- }
-}
-
-#define HOSTS_C
-
-int cyber_esp_release(struct Scsi_Host *instance)
-{
-#ifdef MODULE
- unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
-
- esp_deallocate((struct NCR_ESP *)instance->hostdata);
- esp_release();
- release_mem_region(address, sizeof(struct ESP_regs));
- free_irq(IRQ_AMIGA_PORTS, esp_intr);
-#endif
- return 1;
-}
-
-
-static struct scsi_host_template driver_template = {
- .proc_name = "esp-cyberstorm",
- .proc_info = esp_proc_info,
- .name = "CyberStorm SCSI",
- .detect = cyber_esp_detect,
- .slave_alloc = esp_slave_alloc,
- .slave_destroy = esp_slave_destroy,
- .release = cyber_esp_release,
- .queuecommand = esp_queue,
- .eh_abort_handler = esp_abort,
- .eh_bus_reset_handler = esp_reset,
- .can_queue = 7,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
-
-MODULE_LICENSE("GPL");
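The dma_init_read()/dma_init_write() routines above program the board's DMA address one byte at a time, most significant byte first, and keep a software shadow (ctrl_data) of a control register that appears to be write-only. A minimal, self-contained sketch of that pattern follows; the register layout, bit names, and helper are illustrative stand-ins, not the real CyberStorm map.

#include <stdint.h>

/* Illustrative byte-wide DMA address registers (MSB first). */
struct byte_dma_regs {
	volatile uint8_t dma_addr0;	/* bits 31..24 */
	volatile uint8_t dma_addr1;	/* bits 23..16 */
	volatile uint8_t dma_addr2;	/* bits 15..8  */
	volatile uint8_t dma_addr3;	/* bits 7..0   */
	volatile uint8_t ctrl_reg;	/* write-only control */
};

#define DMA_WRITE	0x01		/* hypothetical direction bit */

static uint8_t ctrl_shadow;		/* last value written to the write-only reg */

static void load_dma_address(struct byte_dma_regs *regs, uint32_t addr, int to_memory)
{
	/* Split the 32-bit bus address into four byte-wide writes. */
	regs->dma_addr0 = (addr >> 24) & 0xff;
	regs->dma_addr1 = (addr >> 16) & 0xff;
	regs->dma_addr2 = (addr >> 8) & 0xff;
	regs->dma_addr3 = addr & 0xff;

	/* The control register cannot be read back, so update a shadow
	 * copy and write the whole shadow every time. */
	if (to_memory)
		ctrl_shadow |= DMA_WRITE;
	else
		ctrl_shadow &= ~DMA_WRITE;
	regs->ctrl_reg = ctrl_shadow;
}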
diff --git a/drivers/scsi/cyberstormII.c b/drivers/scsi/cyberstormII.c
deleted file mode 100644
index e336e853e66f..000000000000
--- a/drivers/scsi/cyberstormII.c
+++ /dev/null
@@ -1,314 +0,0 @@
-/* cyberstormII.c: Driver for CyberStorm SCSI Mk II
- *
- * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
- *
- * This driver is based on cyberstorm.c
- */
-
-/* TODO:
- *
- * 1) Figure out how to make a cleaner merge with the sparc driver with regard
- * to the caches and the Sparc MMU mapping.
- * 2) Make as few routines required outside the generic driver. A lot of the
- * routines in this file used to be inline!
- */
-
-#include <linux/module.h>
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/interrupt.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
-
-#include <linux/zorro.h>
-#include <asm/irq.h>
-#include <asm/amigaints.h>
-#include <asm/amigahw.h>
-
-#include <asm/pgtable.h>
-
-/* The controller registers can be found in the Z2 config area at these
- * offsets:
- */
-#define CYBERII_ESP_ADDR 0x1ff03
-#define CYBERII_DMA_ADDR 0x1ff43
-
-
-/* The CyberStorm II DMA interface */
-struct cyberII_dma_registers {
- volatile unsigned char cond_reg; /* DMA cond (ro) [0x000] */
-#define ctrl_reg cond_reg /* DMA control (wo) [0x000] */
- unsigned char dmapad4[0x3f];
- volatile unsigned char dma_addr0; /* DMA address (MSB) [0x040] */
- unsigned char dmapad1[3];
- volatile unsigned char dma_addr1; /* DMA address [0x044] */
- unsigned char dmapad2[3];
- volatile unsigned char dma_addr2; /* DMA address [0x048] */
- unsigned char dmapad3[3];
- volatile unsigned char dma_addr3; /* DMA address (LSB) [0x04c] */
-};
-
-/* DMA control bits */
-#define CYBERII_DMA_LED 0x02 /* HD led control 1 = on */
-
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static void dma_dump_state(struct NCR_ESP *esp);
-static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
-static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length);
-static void dma_ints_off(struct NCR_ESP *esp);
-static void dma_ints_on(struct NCR_ESP *esp);
-static int dma_irq_p(struct NCR_ESP *esp);
-static void dma_led_off(struct NCR_ESP *esp);
-static void dma_led_on(struct NCR_ESP *esp);
-static int dma_ports_p(struct NCR_ESP *esp);
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
-
-static volatile unsigned char cmd_buffer[16];
- /* This is where all commands are put
- * before they are transferred to the ESP chip
- * via PIO.
- */
-
-/***************************************************************** Detection */
-int __init cyberII_esp_detect(struct scsi_host_template *tpnt)
-{
- struct NCR_ESP *esp;
- struct zorro_dev *z = NULL;
- unsigned long address;
- struct ESP_regs *eregs;
-
- if ((z = zorro_find_device(ZORRO_PROD_PHASE5_CYBERSTORM_MK_II, z))) {
- unsigned long board = z->resource.start;
- if (request_mem_region(board+CYBERII_ESP_ADDR,
- sizeof(struct ESP_regs), "NCR53C9x")) {
- /* Do some magic to figure out if the CyberStorm Mk II
- * is equipped with a SCSI controller
- */
- address = (unsigned long)ZTWO_VADDR(board);
- eregs = (struct ESP_regs *)(address + CYBERII_ESP_ADDR);
-
- esp = esp_allocate(tpnt, (void *)board + CYBERII_ESP_ADDR, 0);
-
- esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
- udelay(5);
- if(esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7)) {
- esp_deallocate(esp);
- scsi_unregister(esp->ehost);
- release_mem_region(board+CYBERII_ESP_ADDR,
- sizeof(struct ESP_regs));
- return 0; /* Bail out if address did not hold data */
- }
-
- /* Do command transfer with programmed I/O */
- esp->do_pio_cmds = 1;
-
- /* Required functions */
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = &dma_init_read;
- esp->dma_init_write = &dma_init_write;
- esp->dma_ints_off = &dma_ints_off;
- esp->dma_ints_on = &dma_ints_on;
- esp->dma_irq_p = &dma_irq_p;
- esp->dma_ports_p = &dma_ports_p;
- esp->dma_setup = &dma_setup;
-
- /* Optional functions */
- esp->dma_barrier = 0;
- esp->dma_drain = 0;
- esp->dma_invalidate = 0;
- esp->dma_irq_entry = 0;
- esp->dma_irq_exit = 0;
- esp->dma_led_on = &dma_led_on;
- esp->dma_led_off = &dma_led_off;
- esp->dma_poll = 0;
- esp->dma_reset = 0;
-
- /* SCSI chip speed */
- esp->cfreq = 40000000;
-
- /* The DMA registers on the CyberStorm are mapped
- * relative to the device (i.e. in the same Zorro
- * I/O block).
- */
- esp->dregs = (void *)(address + CYBERII_DMA_ADDR);
-
- /* ESP register base */
- esp->eregs = eregs;
-
- /* Set the command buffer */
- esp->esp_command = cmd_buffer;
- esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
-
- esp->irq = IRQ_AMIGA_PORTS;
- request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
- "CyberStorm SCSI Mk II", esp->ehost);
-
- /* Figure out our scsi ID on the bus */
- esp->scsi_id = 7;
-
- /* We don't have a differential SCSI-bus. */
- esp->diff = 0;
-
- esp_initialize(esp);
-
- printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
- esps_running = esps_in_use;
- return esps_in_use;
- }
- }
- return 0;
-}
-
-/************************************************************* DMA Functions */
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
-{
- /* Since the CyberStorm DMA is fully dedicated to the ESP chip,
- * the number of bytes sent (to the ESP chip) equals the number
- * of bytes in the FIFO - there is no buffering in the DMA controller.
- * XXXX Do I read this right? It is from host to ESP, right?
- */
- return fifo_count;
-}
-
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- /* I don't think there's any limit on the CyberDMA. So we use what
- * the ESP chip can handle (24 bit).
- */
- unsigned long sz = sp->SCp.this_residual;
- if(sz > 0x1000000)
- sz = 0x1000000;
- return sz;
-}
-
-static void dma_dump_state(struct NCR_ESP *esp)
-{
- ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
- esp->esp_id, ((struct cyberII_dma_registers *)
- (esp->dregs))->cond_reg));
- ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
- amiga_custom.intreqr, amiga_custom.intenar));
-}
-
-static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
-{
- struct cyberII_dma_registers *dregs =
- (struct cyberII_dma_registers *) esp->dregs;
-
- cache_clear(addr, length);
-
- addr &= ~(1);
- dregs->dma_addr0 = (addr >> 24) & 0xff;
- dregs->dma_addr1 = (addr >> 16) & 0xff;
- dregs->dma_addr2 = (addr >> 8) & 0xff;
- dregs->dma_addr3 = (addr ) & 0xff;
-}
-
-static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
-{
- struct cyberII_dma_registers *dregs =
- (struct cyberII_dma_registers *) esp->dregs;
-
- cache_push(addr, length);
-
- addr |= 1;
- dregs->dma_addr0 = (addr >> 24) & 0xff;
- dregs->dma_addr1 = (addr >> 16) & 0xff;
- dregs->dma_addr2 = (addr >> 8) & 0xff;
- dregs->dma_addr3 = (addr ) & 0xff;
-}
-
-static void dma_ints_off(struct NCR_ESP *esp)
-{
- disable_irq(esp->irq);
-}
-
-static void dma_ints_on(struct NCR_ESP *esp)
-{
- enable_irq(esp->irq);
-}
-
-static int dma_irq_p(struct NCR_ESP *esp)
-{
- /* It's important to check the DMA IRQ bit in the correct way! */
- return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
-}
-
-static void dma_led_off(struct NCR_ESP *esp)
-{
- ((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg &= ~CYBERII_DMA_LED;
-}
-
-static void dma_led_on(struct NCR_ESP *esp)
-{
- ((struct cyberII_dma_registers *)(esp->dregs))->ctrl_reg |= CYBERII_DMA_LED;
-}
-
-static int dma_ports_p(struct NCR_ESP *esp)
-{
- return ((amiga_custom.intenar) & IF_PORTS);
-}
-
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
-{
- /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
- * so when (write) is true, it actually means READ!
- */
- if(write){
- dma_init_read(esp, addr, count);
- } else {
- dma_init_write(esp, addr, count);
- }
-}
-
-#define HOSTS_C
-
-int cyberII_esp_release(struct Scsi_Host *instance)
-{
-#ifdef MODULE
- unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
-
- esp_deallocate((struct NCR_ESP *)instance->hostdata);
- esp_release();
- release_mem_region(address, sizeof(struct ESP_regs));
- free_irq(IRQ_AMIGA_PORTS, esp_intr);
-#endif
- return 1;
-}
-
-
-static struct scsi_host_template driver_template = {
- .proc_name = "esp-cyberstormII",
- .proc_info = esp_proc_info,
- .name = "CyberStorm Mk II SCSI",
- .detect = cyberII_esp_detect,
- .slave_alloc = esp_slave_alloc,
- .slave_destroy = esp_slave_destroy,
- .release = cyberII_esp_release,
- .queuecommand = esp_queue,
- .eh_abort_handler = esp_abort,
- .eh_bus_reset_handler = esp_reset,
- .can_queue = 7,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
-
-MODULE_LICENSE("GPL");
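cyberII_esp_detect() above decides whether the Mk II actually carries the SCSI option by writing a known pattern to a chip register and reading it back after a short delay. Below is a hedged sketch of that probe idiom; the accessors and config bits are stand-ins for the sketch, not the NCR53C9x API.

#include <stdint.h>

/* Stand-in MMIO accessors for the sketch. */
static inline void reg_write8(volatile uint8_t *reg, uint8_t val) { *reg = val; }
static inline uint8_t reg_read8(volatile uint8_t *reg) { return *reg; }

/* Hypothetical config bits: enable parity and set host ID 7. */
#define CFG1_PARITY_ENABLE	0x10
#define CFG1_HOST_ID		0x07

/*
 * Returns 1 if a chip responds at @cfg1_reg, 0 if the address is
 * floating (reads do not return what was written).
 */
static int probe_chip_present(volatile uint8_t *cfg1_reg,
			      void (*short_delay)(void))
{
	uint8_t pattern = CFG1_PARITY_ENABLE | CFG1_HOST_ID;

	reg_write8(cfg1_reg, pattern);
	short_delay();			/* let the write settle, as udelay(5) does above */
	return reg_read8(cfg1_reg) == pattern;
}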
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 22ef3716e786..e351db6c0077 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -4267,7 +4267,7 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
int srb_idx = 0;
unsigned i = 0;
- struct SGentry *ptr;
+ struct SGentry *uninitialized_var(ptr);
for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
acb->srb_array[i].segment_x = NULL;
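The dc395x hunk switches the loop cursor to uninitialized_var() purely to silence a false-positive "may be used uninitialized" warning; the macro (roughly `x = x` in kernels of this era) does not change what the variable holds. A short sketch of the idea, with a made-up loop just to show the pattern:

/* Self-initialization silences the compiler's false positive without
 * changing the variable's semantics. */
#define uninitialized_var(x) x = x

struct SGentry;	/* only the pointer type is needed for the sketch */

static void walk_pages(struct SGentry *pages[], int npages)
{
	struct SGentry *uninitialized_var(ptr);
	int i;

	for (i = 0; i < npages; i++) {
		if (i % 4 == 0)
			ptr = pages[i];	/* ptr is always set before first use... */
		/* ...but the compiler cannot always prove it. */
		(void)ptr;
	}
}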
diff --git a/drivers/scsi/dec_esp.c b/drivers/scsi/dec_esp.c
deleted file mode 100644
index d42ad663ffee..000000000000
--- a/drivers/scsi/dec_esp.c
+++ /dev/null
@@ -1,687 +0,0 @@
-/*
- * dec_esp.c: Driver for SCSI chips on IOASIC based TURBOchannel DECstations
- * and TURBOchannel PMAZ-A cards
- *
- * TURBOchannel changes by Harald Koerfgen
- * PMAZ-A support by David Airlie
- *
- * based on jazz_esp.c:
- * Copyright (C) 1997 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
- *
- * jazz_esp is based on David S. Miller's ESP driver and cyber_esp
- *
- * 20000819 - Small PMAZ-AA fixes by Florian Lohoff <flo@rfc822.org>
- * Be warned: the PMAZ-AA currently works only as a single card.
- * Don't try to put multiple cards in one machine - they are
- * both detected, but it may crash under high load, garbling your
- * data.
- * 20001005 - Initialization fixes for 2.4.0-test9
- * Florian Lohoff <flo@rfc822.org>
- *
- * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
- */
-
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/spinlock.h>
-#include <linux/stat.h>
-#include <linux/tc.h>
-
-#include <asm/dma.h>
-#include <asm/irq.h>
-#include <asm/pgtable.h>
-#include <asm/system.h>
-
-#include <asm/dec/interrupts.h>
-#include <asm/dec/ioasic.h>
-#include <asm/dec/ioasic_addrs.h>
-#include <asm/dec/ioasic_ints.h>
-#include <asm/dec/machtype.h>
-#include <asm/dec/system.h>
-
-#define DEC_SCSI_SREG 0
-#define DEC_SCSI_DMAREG 0x40000
-#define DEC_SCSI_SRAM 0x80000
-#define DEC_SCSI_DIAG 0xC0000
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
-
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
-static void dma_drain(struct NCR_ESP *esp);
-static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd *sp);
-static void dma_dump_state(struct NCR_ESP *esp);
-static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
-static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
-static void dma_ints_off(struct NCR_ESP *esp);
-static void dma_ints_on(struct NCR_ESP *esp);
-static int dma_irq_p(struct NCR_ESP *esp);
-static int dma_ports_p(struct NCR_ESP *esp);
-static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
-static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp);
-static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp);
-static void dma_advance_sg(struct scsi_cmnd * sp);
-
-static void pmaz_dma_drain(struct NCR_ESP *esp);
-static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length);
-static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length);
-static void pmaz_dma_ints_off(struct NCR_ESP *esp);
-static void pmaz_dma_ints_on(struct NCR_ESP *esp);
-static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write);
-static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp);
-
-#define TC_ESP_RAM_SIZE 0x20000
-#define ESP_TGT_DMA_SIZE ((TC_ESP_RAM_SIZE/7) & ~(sizeof(int)-1))
-#define ESP_NCMD 7
-
-#define TC_ESP_DMAR_MASK 0x1ffff
-#define TC_ESP_DMAR_WRITE 0x80000000
-#define TC_ESP_DMA_ADDR(x) ((unsigned)(x) & TC_ESP_DMAR_MASK)
-
-u32 esp_virt_buffer;
-int scsi_current_length;
-
-volatile unsigned char cmd_buffer[16];
-volatile unsigned char pmaz_cmd_buffer[16];
- /* This is where all commands are put
- * before they are transferred to the ESP chip
- * via PIO.
- */
-
-static irqreturn_t scsi_dma_merr_int(int, void *);
-static irqreturn_t scsi_dma_err_int(int, void *);
-static irqreturn_t scsi_dma_int(int, void *);
-
-static struct scsi_host_template dec_esp_template = {
- .module = THIS_MODULE,
- .name = "NCR53C94",
- .info = esp_info,
- .queuecommand = esp_queue,
- .eh_abort_handler = esp_abort,
- .eh_bus_reset_handler = esp_reset,
- .slave_alloc = esp_slave_alloc,
- .slave_destroy = esp_slave_destroy,
- .proc_info = esp_proc_info,
- .proc_name = "dec_esp",
- .can_queue = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = DISABLE_CLUSTERING,
-};
-
-static struct NCR_ESP *dec_esp_platform;
-
-/***************************************************************** Detection */
-static int dec_esp_platform_probe(void)
-{
- struct NCR_ESP *esp;
- int err = 0;
-
- if (IOASIC) {
- esp = esp_allocate(&dec_esp_template, NULL, 1);
-
- /* Do command transfer with programmed I/O */
- esp->do_pio_cmds = 1;
-
- /* Required functions */
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = &dma_init_read;
- esp->dma_init_write = &dma_init_write;
- esp->dma_ints_off = &dma_ints_off;
- esp->dma_ints_on = &dma_ints_on;
- esp->dma_irq_p = &dma_irq_p;
- esp->dma_ports_p = &dma_ports_p;
- esp->dma_setup = &dma_setup;
-
- /* Optional functions */
- esp->dma_barrier = 0;
- esp->dma_drain = &dma_drain;
- esp->dma_invalidate = 0;
- esp->dma_irq_entry = 0;
- esp->dma_irq_exit = 0;
- esp->dma_poll = 0;
- esp->dma_reset = 0;
- esp->dma_led_off = 0;
- esp->dma_led_on = 0;
-
- /* virtual DMA functions */
- esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
- esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
- esp->dma_mmu_release_scsi_one = 0;
- esp->dma_mmu_release_scsi_sgl = 0;
- esp->dma_advance_sg = &dma_advance_sg;
-
-
- /* SCSI chip speed */
- esp->cfreq = 25000000;
-
- esp->dregs = 0;
-
- /* ESP register base */
- esp->eregs = (void *)CKSEG1ADDR(dec_kn_slot_base +
- IOASIC_SCSI);
-
- /* Set the command buffer */
- esp->esp_command = (volatile unsigned char *) cmd_buffer;
-
- /* get virtual dma address for command buffer */
- esp->esp_command_dvma = virt_to_phys(cmd_buffer);
-
- esp->irq = dec_interrupt[DEC_IRQ_ASC];
-
- esp->scsi_id = 7;
-
- /* Check for differential SCSI-bus */
- esp->diff = 0;
-
- err = request_irq(esp->irq, esp_intr, IRQF_DISABLED,
- "ncr53c94", esp->ehost);
- if (err)
- goto err_alloc;
- err = request_irq(dec_interrupt[DEC_IRQ_ASC_MERR],
- scsi_dma_merr_int, IRQF_DISABLED,
- "ncr53c94 error", esp->ehost);
- if (err)
- goto err_irq;
- err = request_irq(dec_interrupt[DEC_IRQ_ASC_ERR],
- scsi_dma_err_int, IRQF_DISABLED,
- "ncr53c94 overrun", esp->ehost);
- if (err)
- goto err_irq_merr;
- err = request_irq(dec_interrupt[DEC_IRQ_ASC_DMA], scsi_dma_int,
- IRQF_DISABLED, "ncr53c94 dma", esp->ehost);
- if (err)
- goto err_irq_err;
-
- esp_initialize(esp);
-
- err = scsi_add_host(esp->ehost, NULL);
- if (err) {
- printk(KERN_ERR "ESP: Unable to register adapter\n");
- goto err_irq_dma;
- }
-
- scsi_scan_host(esp->ehost);
-
- dec_esp_platform = esp;
- }
-
- return 0;
-
-err_irq_dma:
- free_irq(dec_interrupt[DEC_IRQ_ASC_DMA], esp->ehost);
-err_irq_err:
- free_irq(dec_interrupt[DEC_IRQ_ASC_ERR], esp->ehost);
-err_irq_merr:
- free_irq(dec_interrupt[DEC_IRQ_ASC_MERR], esp->ehost);
-err_irq:
- free_irq(esp->irq, esp->ehost);
-err_alloc:
- esp_deallocate(esp);
- scsi_host_put(esp->ehost);
- return err;
-}
-
-static int __init dec_esp_probe(struct device *dev)
-{
- struct NCR_ESP *esp;
- resource_size_t start, len;
- int err;
-
- esp = esp_allocate(&dec_esp_template, NULL, 1);
-
- dev_set_drvdata(dev, esp);
-
- start = to_tc_dev(dev)->resource.start;
- len = to_tc_dev(dev)->resource.end - start + 1;
-
- if (!request_mem_region(start, len, dev->bus_id)) {
- printk(KERN_ERR "%s: Unable to reserve MMIO resource\n",
- dev->bus_id);
- err = -EBUSY;
- goto err_alloc;
- }
-
- /* Store base addr into esp struct. */
- esp->slot = start;
-
- esp->dregs = 0;
- esp->eregs = (void *)CKSEG1ADDR(start + DEC_SCSI_SREG);
- esp->do_pio_cmds = 1;
-
- /* Set the command buffer. */
- esp->esp_command = (volatile unsigned char *)pmaz_cmd_buffer;
-
- /* Get virtual dma address for command buffer. */
- esp->esp_command_dvma = virt_to_phys(pmaz_cmd_buffer);
-
- esp->cfreq = tc_get_speed(to_tc_dev(dev)->bus);
-
- esp->irq = to_tc_dev(dev)->interrupt;
-
- /* Required functions. */
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = &pmaz_dma_init_read;
- esp->dma_init_write = &pmaz_dma_init_write;
- esp->dma_ints_off = &pmaz_dma_ints_off;
- esp->dma_ints_on = &pmaz_dma_ints_on;
- esp->dma_irq_p = &dma_irq_p;
- esp->dma_ports_p = &dma_ports_p;
- esp->dma_setup = &pmaz_dma_setup;
-
- /* Optional functions. */
- esp->dma_barrier = 0;
- esp->dma_drain = &pmaz_dma_drain;
- esp->dma_invalidate = 0;
- esp->dma_irq_entry = 0;
- esp->dma_irq_exit = 0;
- esp->dma_poll = 0;
- esp->dma_reset = 0;
- esp->dma_led_off = 0;
- esp->dma_led_on = 0;
-
- esp->dma_mmu_get_scsi_one = pmaz_dma_mmu_get_scsi_one;
- esp->dma_mmu_get_scsi_sgl = 0;
- esp->dma_mmu_release_scsi_one = 0;
- esp->dma_mmu_release_scsi_sgl = 0;
- esp->dma_advance_sg = 0;
-
- err = request_irq(esp->irq, esp_intr, IRQF_DISABLED, "PMAZ_AA",
- esp->ehost);
- if (err) {
- printk(KERN_ERR "%s: Unable to get IRQ %d\n",
- dev->bus_id, esp->irq);
- goto err_resource;
- }
-
- esp->scsi_id = 7;
- esp->diff = 0;
- esp_initialize(esp);
-
- err = scsi_add_host(esp->ehost, dev);
- if (err) {
- printk(KERN_ERR "%s: Unable to register adapter\n",
- dev->bus_id);
- goto err_irq;
- }
-
- scsi_scan_host(esp->ehost);
-
- return 0;
-
-err_irq:
- free_irq(esp->irq, esp->ehost);
-
-err_resource:
- release_mem_region(start, len);
-
-err_alloc:
- esp_deallocate(esp);
- scsi_host_put(esp->ehost);
- return err;
-}
-
-static void __exit dec_esp_platform_remove(void)
-{
- struct NCR_ESP *esp = dec_esp_platform;
-
- free_irq(esp->irq, esp->ehost);
- esp_deallocate(esp);
- scsi_host_put(esp->ehost);
- dec_esp_platform = NULL;
-}
-
-static void __exit dec_esp_remove(struct device *dev)
-{
- struct NCR_ESP *esp = dev_get_drvdata(dev);
-
- free_irq(esp->irq, esp->ehost);
- esp_deallocate(esp);
- scsi_host_put(esp->ehost);
-}
-
-
-/************************************************************* DMA Functions */
-static irqreturn_t scsi_dma_merr_int(int irq, void *dev_id)
-{
- printk("Got unexpected SCSI DMA Interrupt! < ");
- printk("SCSI_DMA_MEMRDERR ");
- printk(">\n");
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t scsi_dma_err_int(int irq, void *dev_id)
-{
- /* empty */
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t scsi_dma_int(int irq, void *dev_id)
-{
- u32 scsi_next_ptr;
-
- scsi_next_ptr = ioasic_read(IO_REG_SCSI_DMA_P);
-
- /* next page */
- scsi_next_ptr = (((scsi_next_ptr >> 3) + PAGE_SIZE) & PAGE_MASK) << 3;
- ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
- fast_iob();
-
- return IRQ_HANDLED;
-}
-
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
-{
- return fifo_count;
-}
-
-static void dma_drain(struct NCR_ESP *esp)
-{
- u32 nw, data0, data1, scsi_data_ptr;
- u16 *p;
-
- nw = ioasic_read(IO_REG_SCSI_SCR);
-
- /*
- * Is there something in the dma buffers left?
- */
- if (nw) {
- scsi_data_ptr = ioasic_read(IO_REG_SCSI_DMA_P) >> 3;
- p = phys_to_virt(scsi_data_ptr);
- switch (nw) {
- case 1:
- data0 = ioasic_read(IO_REG_SCSI_SDR0);
- p[0] = data0 & 0xffff;
- break;
- case 2:
- data0 = ioasic_read(IO_REG_SCSI_SDR0);
- p[0] = data0 & 0xffff;
- p[1] = (data0 >> 16) & 0xffff;
- break;
- case 3:
- data0 = ioasic_read(IO_REG_SCSI_SDR0);
- data1 = ioasic_read(IO_REG_SCSI_SDR1);
- p[0] = data0 & 0xffff;
- p[1] = (data0 >> 16) & 0xffff;
- p[2] = data1 & 0xffff;
- break;
- default:
- printk("Strange: %d words in dma buffer left\n", nw);
- break;
- }
- }
-}
-
-static int dma_can_transfer(struct NCR_ESP *esp, struct scsi_cmnd * sp)
-{
- return sp->SCp.this_residual;
-}
-
-static void dma_dump_state(struct NCR_ESP *esp)
-{
-}
-
-static void dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
-{
- u32 scsi_next_ptr, ioasic_ssr;
- unsigned long flags;
-
- if (vaddress & 3)
- panic("dec_esp.c: unable to handle partial word transfers, yet...");
-
- dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);
-
- spin_lock_irqsave(&ioasic_ssr_lock, flags);
-
- fast_mb();
- ioasic_ssr = ioasic_read(IO_REG_SSR);
-
- ioasic_ssr &= ~IO_SSR_SCSI_DMA_EN;
- ioasic_write(IO_REG_SSR, ioasic_ssr);
-
- fast_wmb();
- ioasic_write(IO_REG_SCSI_SCR, 0);
- ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);
-
- /* prepare for next page */
- scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
- ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
-
- ioasic_ssr |= (IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
- fast_wmb();
- ioasic_write(IO_REG_SSR, ioasic_ssr);
-
- fast_iob();
- spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
-}
-
-static void dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
-{
- u32 scsi_next_ptr, ioasic_ssr;
- unsigned long flags;
-
- if (vaddress & 3)
- panic("dec_esp.c: unable to handle partial word transfers, yet...");
-
- dma_cache_wback_inv((unsigned long) phys_to_virt(vaddress), length);
-
- spin_lock_irqsave(&ioasic_ssr_lock, flags);
-
- fast_mb();
- ioasic_ssr = ioasic_read(IO_REG_SSR);
-
- ioasic_ssr &= ~(IO_SSR_SCSI_DMA_DIR | IO_SSR_SCSI_DMA_EN);
- ioasic_write(IO_REG_SSR, ioasic_ssr);
-
- fast_wmb();
- ioasic_write(IO_REG_SCSI_SCR, 0);
- ioasic_write(IO_REG_SCSI_DMA_P, vaddress << 3);
-
- /* prepare for next page */
- scsi_next_ptr = ((vaddress + PAGE_SIZE) & PAGE_MASK) << 3;
- ioasic_write(IO_REG_SCSI_DMA_BP, scsi_next_ptr);
-
- ioasic_ssr |= IO_SSR_SCSI_DMA_EN;
- fast_wmb();
- ioasic_write(IO_REG_SSR, ioasic_ssr);
-
- fast_iob();
- spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
-}
-
-static void dma_ints_off(struct NCR_ESP *esp)
-{
- disable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
-}
-
-static void dma_ints_on(struct NCR_ESP *esp)
-{
- enable_irq(dec_interrupt[DEC_IRQ_ASC_DMA]);
-}
-
-static int dma_irq_p(struct NCR_ESP *esp)
-{
- return (esp->eregs->esp_status & ESP_STAT_INTR);
-}
-
-static int dma_ports_p(struct NCR_ESP *esp)
-{
- /*
- * FIXME: what's this good for?
- */
- return 1;
-}
-
-static void dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
-{
- /*
- * DMA_ST_WRITE means "move data from device to memory"
- * so when (write) is true, it actually means READ!
- */
- if (write)
- dma_init_read(esp, addr, count);
- else
- dma_init_write(esp, addr, count);
-}
-
-static void dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp)
-{
- sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
-}
-
-static void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, struct scsi_cmnd * sp)
-{
- int sz = sp->SCp.buffers_residual;
- struct scatterlist *sg = sp->SCp.buffer;
-
- while (sz >= 0) {
- sg[sz].dma_address = page_to_phys(sg[sz].page) + sg[sz].offset;
- sz--;
- }
- sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
-}
-
-static void dma_advance_sg(struct scsi_cmnd * sp)
-{
- sp->SCp.ptr = (char *)(sp->SCp.buffer->dma_address);
-}
-
-static void pmaz_dma_drain(struct NCR_ESP *esp)
-{
- memcpy(phys_to_virt(esp_virt_buffer),
- (void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM +
- ESP_TGT_DMA_SIZE),
- scsi_current_length);
-}
-
-static void pmaz_dma_init_read(struct NCR_ESP *esp, u32 vaddress, int length)
-{
- volatile u32 *dmareg =
- (volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);
-
- if (length > ESP_TGT_DMA_SIZE)
- length = ESP_TGT_DMA_SIZE;
-
- *dmareg = TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);
-
- iob();
-
- esp_virt_buffer = vaddress;
- scsi_current_length = length;
-}
-
-static void pmaz_dma_init_write(struct NCR_ESP *esp, u32 vaddress, int length)
-{
- volatile u32 *dmareg =
- (volatile u32 *)CKSEG1ADDR(esp->slot + DEC_SCSI_DMAREG);
-
- memcpy((void *)CKSEG1ADDR(esp->slot + DEC_SCSI_SRAM +
- ESP_TGT_DMA_SIZE),
- phys_to_virt(vaddress), length);
-
- wmb();
- *dmareg = TC_ESP_DMAR_WRITE | TC_ESP_DMA_ADDR(ESP_TGT_DMA_SIZE);
-
- iob();
-}
-
-static void pmaz_dma_ints_off(struct NCR_ESP *esp)
-{
-}
-
-static void pmaz_dma_ints_on(struct NCR_ESP *esp)
-{
-}
-
-static void pmaz_dma_setup(struct NCR_ESP *esp, u32 addr, int count, int write)
-{
- /*
- * DMA_ST_WRITE means "move data from device to memory"
- * so when (write) is true, it actually means READ!
- */
- if (write)
- pmaz_dma_init_read(esp, addr, count);
- else
- pmaz_dma_init_write(esp, addr, count);
-}
-
-static void pmaz_dma_mmu_get_scsi_one(struct NCR_ESP *esp, struct scsi_cmnd * sp)
-{
- sp->SCp.ptr = (char *)virt_to_phys(sp->request_buffer);
-}
-
-
-#ifdef CONFIG_TC
-static int __init dec_esp_tc_probe(struct device *dev);
-static int __exit dec_esp_tc_remove(struct device *dev);
-
-static const struct tc_device_id dec_esp_tc_table[] = {
- { "DEC ", "PMAZ-AA " },
- { }
-};
-MODULE_DEVICE_TABLE(tc, dec_esp_tc_table);
-
-static struct tc_driver dec_esp_tc_driver = {
- .id_table = dec_esp_tc_table,
- .driver = {
- .name = "dec_esp",
- .bus = &tc_bus_type,
- .probe = dec_esp_tc_probe,
- .remove = __exit_p(dec_esp_tc_remove),
- },
-};
-
-static int __init dec_esp_tc_probe(struct device *dev)
-{
- int status = dec_esp_probe(dev);
- if (!status)
- get_device(dev);
- return status;
-}
-
-static int __exit dec_esp_tc_remove(struct device *dev)
-{
- put_device(dev);
- dec_esp_remove(dev);
- return 0;
-}
-#endif
-
-static int __init dec_esp_init(void)
-{
- int status;
-
- status = tc_register_driver(&dec_esp_tc_driver);
- if (!status)
- dec_esp_platform_probe();
-
- if (nesps) {
- pr_info("ESP: Total of %d ESP hosts found, "
- "%d actually in use.\n", nesps, esps_in_use);
- esps_running = esps_in_use;
- }
-
- return status;
-}
-
-static void __exit dec_esp_exit(void)
-{
- dec_esp_platform_remove();
- tc_unregister_driver(&dec_esp_tc_driver);
-}
-
-
-module_init(dec_esp_init);
-module_exit(dec_esp_exit);
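The IOASIC code above stores the SCSI DMA pointer shifted left by three bits and refills IO_REG_SCSI_DMA_BP with the start of the following page whenever the DMA interrupt fires. Here is a small sketch of that pointer arithmetic, using a hypothetical 4 KiB page purely for the worked numbers.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096UL
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))

/* The register holds (physical address << 3); decode, advance to the
 * start of the following page, and re-encode. */
static uint32_t next_page_ptr(uint32_t dma_p_reg)
{
	uint32_t addr = dma_p_reg >> 3;

	addr = (addr + SKETCH_PAGE_SIZE) & SKETCH_PAGE_MASK;
	return addr << 3;
}

int main(void)
{
	uint32_t reg = 0x00012345UL << 3;	/* address 0x12345 */

	/* 0x12345 advances to the next page boundary 0x13000,
	 * re-encoded as 0x13000 << 3. */
	printf("next bp = %#x\n", (unsigned)next_page_ptr(reg));
	return 0;
}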
diff --git a/drivers/scsi/fastlane.c b/drivers/scsi/fastlane.c
deleted file mode 100644
index 4266a2139b5f..000000000000
--- a/drivers/scsi/fastlane.c
+++ /dev/null
@@ -1,421 +0,0 @@
-/* fastlane.c: Driver for Phase5's Fastlane SCSI Controller.
- *
- * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
- *
- * This driver is based on the CyberStorm driver, hence the occasional
- * reference to CyberStorm.
- *
- * Betatesting & crucial adjustments by
- * Patrik Rak (prak3264@ss1000.ms.mff.cuni.cz)
- *
- */
-
-/* TODO:
- *
- * o According to the doc from laire, it is required to reset the DMA when
- * the transfer is done. ATM we reset DMA just before every new
- * dma_init_(read|write).
- *
- * 1) Figure out how to make a cleaner merge with the sparc driver with regard
- * to the caches and the Sparc MMU mapping.
- * 2) Make as few routines required outside the generic driver. A lot of the
- * routines in this file used to be inline!
- */
-
-#include <linux/module.h>
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/interrupt.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
-
-#include <linux/zorro.h>
-#include <asm/irq.h>
-
-#include <asm/amigaints.h>
-#include <asm/amigahw.h>
-
-#include <asm/pgtable.h>
-
-/* Such day has just come... */
-#if 0
-/* Leave this defined unless you really need to enable DMA IRQ one day */
-#define NODMAIRQ
-#endif
-
-/* The controller registers can be found in the Z2 config area at these
- * offsets:
- */
-#define FASTLANE_ESP_ADDR 0x1000001
-#define FASTLANE_DMA_ADDR 0x1000041
-
-
-/* The Fastlane DMA interface */
-struct fastlane_dma_registers {
- volatile unsigned char cond_reg; /* DMA status (ro) [0x0000] */
-#define ctrl_reg cond_reg /* DMA control (wo) [0x0000] */
- unsigned char dmapad1[0x3f];
- volatile unsigned char clear_strobe; /* DMA clear (wo) [0x0040] */
-};
-
-
-/* DMA status bits */
-#define FASTLANE_DMA_MINT 0x80
-#define FASTLANE_DMA_IACT 0x40
-#define FASTLANE_DMA_CREQ 0x20
-
-/* DMA control bits */
-#define FASTLANE_DMA_FCODE 0xa0
-#define FASTLANE_DMA_MASK 0xf3
-#define FASTLANE_DMA_LED 0x10 /* HD led control 1 = on */
-#define FASTLANE_DMA_WRITE 0x08 /* 1 = write */
-#define FASTLANE_DMA_ENABLE 0x04 /* Enable DMA */
-#define FASTLANE_DMA_EDI 0x02 /* Enable DMA IRQ ? */
-#define FASTLANE_DMA_ESI 0x01 /* Enable SCSI IRQ */
-
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static void dma_dump_state(struct NCR_ESP *esp);
-static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length);
-static void dma_init_write(struct NCR_ESP *esp, __u32 vaddr, int length);
-static void dma_ints_off(struct NCR_ESP *esp);
-static void dma_ints_on(struct NCR_ESP *esp);
-static int dma_irq_p(struct NCR_ESP *esp);
-static void dma_irq_exit(struct NCR_ESP *esp);
-static void dma_led_off(struct NCR_ESP *esp);
-static void dma_led_on(struct NCR_ESP *esp);
-static int dma_ports_p(struct NCR_ESP *esp);
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
-
-static unsigned char ctrl_data = 0; /* Keep backup of the stuff written
- * to ctrl_reg. Always write a copy
- * to this register when writing to
- * the hardware register!
- */
-
-static volatile unsigned char cmd_buffer[16];
- /* This is where all commands are put
- * before they are transferred to the ESP chip
- * via PIO.
- */
-
-static inline void dma_clear(struct NCR_ESP *esp)
-{
- struct fastlane_dma_registers *dregs =
- (struct fastlane_dma_registers *) (esp->dregs);
- unsigned long *t;
-
- ctrl_data = (ctrl_data & FASTLANE_DMA_MASK);
- dregs->ctrl_reg = ctrl_data;
-
- t = (unsigned long *)(esp->edev);
-
- dregs->clear_strobe = 0;
- *t = 0 ;
-}
-
-/***************************************************************** Detection */
-int __init fastlane_esp_detect(struct scsi_host_template *tpnt)
-{
- struct NCR_ESP *esp;
- struct zorro_dev *z = NULL;
- unsigned long address;
-
- if ((z = zorro_find_device(ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060, z))) {
- unsigned long board = z->resource.start;
- if (request_mem_region(board+FASTLANE_ESP_ADDR,
- sizeof(struct ESP_regs), "NCR53C9x")) {
- /* Check if this is really a Fastlane controller. The problem
- * is that the CyberStorm and Blizzard controllers also use
- * this ID value. Fortunately, only the Fastlane maps in Z3 space
- */
- if (board < 0x1000000) {
- goto err_release;
- }
- esp = esp_allocate(tpnt, (void *)board + FASTLANE_ESP_ADDR, 0);
-
- /* Do command transfer with programmed I/O */
- esp->do_pio_cmds = 1;
-
- /* Required functions */
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = &dma_init_read;
- esp->dma_init_write = &dma_init_write;
- esp->dma_ints_off = &dma_ints_off;
- esp->dma_ints_on = &dma_ints_on;
- esp->dma_irq_p = &dma_irq_p;
- esp->dma_ports_p = &dma_ports_p;
- esp->dma_setup = &dma_setup;
-
- /* Optional functions */
- esp->dma_barrier = 0;
- esp->dma_drain = 0;
- esp->dma_invalidate = 0;
- esp->dma_irq_entry = 0;
- esp->dma_irq_exit = &dma_irq_exit;
- esp->dma_led_on = &dma_led_on;
- esp->dma_led_off = &dma_led_off;
- esp->dma_poll = 0;
- esp->dma_reset = 0;
-
- /* Initialize the portBits (enable IRQs) */
- ctrl_data = (FASTLANE_DMA_FCODE |
-#ifndef NODMAIRQ
- FASTLANE_DMA_EDI |
-#endif
- FASTLANE_DMA_ESI);
-
-
- /* SCSI chip clock */
- esp->cfreq = 40000000;
-
-
- /* Map the physical address space into virtual kernel space */
- address = (unsigned long)
- z_ioremap(board, z->resource.end-board+1);
-
- if(!address){
- printk("Could not remap Fastlane controller memory!");
- goto err_unregister;
- }
-
-
- /* The DMA registers on the Fastlane are mapped
- * relative to the device (i.e. in the same Zorro
- * I/O block).
- */
- esp->dregs = (void *)(address + FASTLANE_DMA_ADDR);
-
- /* ESP register base */
- esp->eregs = (struct ESP_regs *)(address + FASTLANE_ESP_ADDR);
-
- /* Board base */
- esp->edev = (void *) address;
-
- /* Set the command buffer */
- esp->esp_command = cmd_buffer;
- esp->esp_command_dvma = virt_to_bus((void *)cmd_buffer);
-
- esp->irq = IRQ_AMIGA_PORTS;
- esp->slot = board+FASTLANE_ESP_ADDR;
- if (request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
- "Fastlane SCSI", esp->ehost)) {
- printk(KERN_WARNING "Fastlane: Could not get IRQ%d, aborting.\n", IRQ_AMIGA_PORTS);
- goto err_unmap;
- }
-
- /* Controller ID */
- esp->scsi_id = 7;
-
- /* We don't have a differential SCSI-bus. */
- esp->diff = 0;
-
- dma_clear(esp);
- esp_initialize(esp);
-
- printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps, esps_in_use);
- esps_running = esps_in_use;
- return esps_in_use;
- }
- }
- return 0;
-
- err_unmap:
- z_iounmap((void *)address);
- err_unregister:
- scsi_unregister (esp->ehost);
- err_release:
- release_mem_region(z->resource.start+FASTLANE_ESP_ADDR,
- sizeof(struct ESP_regs));
- return 0;
-}
-
-
-/************************************************************* DMA Functions */
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
-{
- /* Since the Fastlane DMA is fully dedicated to the ESP chip,
- * the number of bytes sent (to the ESP chip) equals the number
- * of bytes in the FIFO - there is no buffering in the DMA controller.
- * XXXX Do I read this right? It is from host to ESP, right?
- */
- return fifo_count;
-}
-
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- unsigned long sz = sp->SCp.this_residual;
- if(sz > 0xfffc)
- sz = 0xfffc;
- return sz;
-}
-
-static void dma_dump_state(struct NCR_ESP *esp)
-{
- ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
- esp->esp_id, ((struct fastlane_dma_registers *)
- (esp->dregs))->cond_reg));
- ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
- amiga_custom.intreqr, amiga_custom.intenar));
-}
-
-static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
-{
- struct fastlane_dma_registers *dregs =
- (struct fastlane_dma_registers *) (esp->dregs);
- unsigned long *t;
-
- cache_clear(addr, length);
-
- dma_clear(esp);
-
- t = (unsigned long *)((addr & 0x00ffffff) + esp->edev);
-
- dregs->clear_strobe = 0;
- *t = addr;
-
- ctrl_data = (ctrl_data & FASTLANE_DMA_MASK) | FASTLANE_DMA_ENABLE;
- dregs->ctrl_reg = ctrl_data;
-}
-
-static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
-{
- struct fastlane_dma_registers *dregs =
- (struct fastlane_dma_registers *) (esp->dregs);
- unsigned long *t;
-
- cache_push(addr, length);
-
- dma_clear(esp);
-
- t = (unsigned long *)((addr & 0x00ffffff) + (esp->edev));
-
- dregs->clear_strobe = 0;
- *t = addr;
-
- ctrl_data = ((ctrl_data & FASTLANE_DMA_MASK) |
- FASTLANE_DMA_ENABLE |
- FASTLANE_DMA_WRITE);
- dregs->ctrl_reg = ctrl_data;
-}
-
-
-static void dma_ints_off(struct NCR_ESP *esp)
-{
- disable_irq(esp->irq);
-}
-
-static void dma_ints_on(struct NCR_ESP *esp)
-{
- enable_irq(esp->irq);
-}
-
-static void dma_irq_exit(struct NCR_ESP *esp)
-{
- struct fastlane_dma_registers *dregs =
- (struct fastlane_dma_registers *) (esp->dregs);
-
- dregs->ctrl_reg = ctrl_data & ~(FASTLANE_DMA_EDI|FASTLANE_DMA_ESI);
-#ifdef __mc68000__
- nop();
-#endif
- dregs->ctrl_reg = ctrl_data;
-}
-
-static int dma_irq_p(struct NCR_ESP *esp)
-{
- struct fastlane_dma_registers *dregs =
- (struct fastlane_dma_registers *) (esp->dregs);
- unsigned char dma_status;
-
- dma_status = dregs->cond_reg;
-
- if(dma_status & FASTLANE_DMA_IACT)
- return 0; /* not our IRQ */
-
- /* Return non-zero if ESP requested IRQ */
- return (
-#ifndef NODMAIRQ
- (dma_status & FASTLANE_DMA_CREQ) &&
-#endif
- (!(dma_status & FASTLANE_DMA_MINT)) &&
- (esp_read(((struct ESP_regs *) (esp->eregs))->esp_status) & ESP_STAT_INTR));
-}
-
-static void dma_led_off(struct NCR_ESP *esp)
-{
- ctrl_data &= ~FASTLANE_DMA_LED;
- ((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
-}
-
-static void dma_led_on(struct NCR_ESP *esp)
-{
- ctrl_data |= FASTLANE_DMA_LED;
- ((struct fastlane_dma_registers *)(esp->dregs))->ctrl_reg = ctrl_data;
-}
-
-static int dma_ports_p(struct NCR_ESP *esp)
-{
- return ((amiga_custom.intenar) & IF_PORTS);
-}
-
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
-{
- /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
- * so when (write) is true, it actually means READ!
- */
- if(write){
- dma_init_read(esp, addr, count);
- } else {
- dma_init_write(esp, addr, count);
- }
-}
-
-#define HOSTS_C
-
-int fastlane_esp_release(struct Scsi_Host *instance)
-{
-#ifdef MODULE
- unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
- esp_deallocate((struct NCR_ESP *)instance->hostdata);
- esp_release();
- release_mem_region(address, sizeof(struct ESP_regs));
- free_irq(IRQ_AMIGA_PORTS, esp_intr);
-#endif
- return 1;
-}
-
-
-static struct scsi_host_template driver_template = {
- .proc_name = "esp-fastlane",
- .proc_info = esp_proc_info,
- .name = "Fastlane SCSI",
- .detect = fastlane_esp_detect,
- .slave_alloc = esp_slave_alloc,
- .slave_destroy = esp_slave_destroy,
- .release = fastlane_esp_release,
- .queuecommand = esp_queue,
- .eh_abort_handler = esp_abort,
- .eh_bus_reset_handler = esp_reset,
- .can_queue = 7,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING
-};
-
-#include "scsi_module.c"
-
-MODULE_LICENSE("GPL");
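Because the Fastlane shares IRQ_AMIGA_PORTS with other boards, dma_irq_p() above has to decide whether a given interrupt belongs to this controller before the core driver services it. A compact sketch of that kind of shared-interrupt filter is below; the status bit values are illustrative, not the real Fastlane definitions.

#include <stdint.h>

/* Illustrative status bits for the sketch. */
#define DMA_IRQ_INACTIVE	0x40	/* board is not asserting the line */
#define DMA_CHIP_REQUEST	0x20	/* SCSI chip wants service */
#define DMA_MEM_FAULT		0x80	/* DMA error, handled elsewhere */
#define CHIP_STAT_INTR		0x80	/* interrupt pending in the chip */

/*
 * Returns nonzero only if this board raised the (shared) interrupt and
 * the SCSI chip itself has something to report.
 */
static int irq_is_ours(uint8_t dma_status, uint8_t chip_status)
{
	if (dma_status & DMA_IRQ_INACTIVE)
		return 0;		/* some other device on the line */

	return (dma_status & DMA_CHIP_REQUEST) &&
	       !(dma_status & DMA_MEM_FAULT) &&
	       (chip_status & CHIP_STAT_INTR);
}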
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 37741e9b5c3b..91f85226d08f 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -54,8 +54,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
static int scsi_alloc_out_of_range = 0;
/* use bounce buffer if the physical address is bad */
- if (addr & HDATA(cmd->device->host)->dma_xfer_mask ||
- (!dir_in && mm_end_of_chunk (addr, cmd->SCp.this_residual)))
+ if (addr & HDATA(cmd->device->host)->dma_xfer_mask)
{
HDATA(cmd->device->host)->dma_bounce_len = (cmd->SCp.this_residual + 511)
& ~0x1ff;
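The gvp11 hunk keeps the bounce-buffer path for addresses the board cannot reach (addr & dma_xfer_mask) and sizes the bounce buffer by rounding the residual up to a 512-byte multiple with `(len + 511) & ~0x1ff`. The same arithmetic spelled out as a standalone sketch; the mask value is just an example, not the GVP hardware's.

#include <stdio.h>

#define EXAMPLE_XFER_MASK 0xff000000UL	/* pretend the board only reaches the low 16 MB */

/* Round a transfer length up to the next multiple of 512 bytes. */
static unsigned long round_up_512(unsigned long len)
{
	return (len + 511) & ~0x1ffUL;
}

static int needs_bounce(unsigned long phys_addr)
{
	return (phys_addr & EXAMPLE_XFER_MASK) != 0;
}

int main(void)
{
	/* 700 bytes -> 1024-byte bounce buffer; 512 stays 512. */
	printf("%lu %lu\n", round_up_512(700), round_up_512(512));
	printf("bounce? %d\n", needs_bounce(0x01234567UL));
	return 0;
}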
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index d63f11e95abf..bd62131b97a1 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -539,9 +539,9 @@ out:
srp_iu_put(iue);
}
-static irqreturn_t ibmvstgt_interrupt(int irq, void *data)
+static irqreturn_t ibmvstgt_interrupt(int dummy, void *data)
{
- struct srp_target *target = (struct srp_target *) data;
+ struct srp_target *target = data;
struct vio_port *vport = target_to_port(target);
vio_disable_interrupts(vport->dma_dev);
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index 6c4f0f081785..68e5c632c5d5 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -287,7 +287,7 @@ static int idescsi_end_request(ide_drive_t *, int, int);
static ide_startstop_t
idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
{
- if (HWIF(drive)->INB(IDE_STATUS_REG) & (BUSY_STAT|DRQ_STAT))
+ if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
/* force an abort */
HWIF(drive)->OUTB(WIN_IDLEIMMEDIATE,IDE_COMMAND_REG);
@@ -423,7 +423,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
}
/* Clear the interrupt */
- stat = drive->hwif->INB(IDE_STATUS_REG);
+ stat = ide_read_status(drive);
if ((stat & DRQ_STAT) == 0) {
/* No more interrupts */
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b6f99dfbb038..8a178674cb18 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -629,8 +629,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
int rc;
if (tcp_conn->in.datalen) {
- printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
- tcp_conn->in.datalen);
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2t with datalen %d\n",
+ tcp_conn->in.datalen);
return ISCSI_ERR_DATALEN;
}
@@ -644,8 +645,9 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
if (!ctask->sc || session->state != ISCSI_STATE_LOGGED_IN) {
- printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
- "recovery...\n", ctask->itt);
+ iscsi_conn_printk(KERN_INFO, conn,
+ "dropping R2T itt %d in recovery.\n",
+ ctask->itt);
return 0;
}
@@ -655,7 +657,8 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->exp_statsn = rhdr->statsn;
r2t->data_length = be32_to_cpu(rhdr->data_length);
if (r2t->data_length == 0) {
- printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with zero data len\n");
__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
@@ -668,9 +671,10 @@ iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
r2t->data_offset = be32_to_cpu(rhdr->data_offset);
if (r2t->data_offset + r2t->data_length > scsi_bufflen(ctask->sc)) {
- printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
- "offset %u and total length %d\n", r2t->data_length,
- r2t->data_offset, scsi_bufflen(ctask->sc));
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid R2T with data len %u at offset %u "
+ "and total length %d\n", r2t->data_length,
+ r2t->data_offset, scsi_bufflen(ctask->sc));
__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
@@ -736,8 +740,9 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
/* verify PDU length */
tcp_conn->in.datalen = ntoh24(hdr->dlength);
if (tcp_conn->in.datalen > conn->max_recv_dlength) {
- printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
- tcp_conn->in.datalen, conn->max_recv_dlength);
+ iscsi_conn_printk(KERN_ERR, conn,
+ "iscsi_tcp: datalen %d > %d\n",
+ tcp_conn->in.datalen, conn->max_recv_dlength);
return ISCSI_ERR_DATALEN;
}
@@ -819,10 +824,12 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
* For now we fail until we find a vendor that needs it
*/
if (ISCSI_DEF_MAX_RECV_SEG_LEN < tcp_conn->in.datalen) {
- printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
- "but conn buffer is only %u (opcode %0x)\n",
- tcp_conn->in.datalen,
- ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
+ iscsi_conn_printk(KERN_ERR, conn,
+ "iscsi_tcp: received buffer of "
+ "len %u but conn buffer is only %u "
+ "(opcode %0x)\n",
+ tcp_conn->in.datalen,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
rc = ISCSI_ERR_PROTO;
break;
}
@@ -1496,30 +1503,25 @@ iscsi_tcp_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
tcp_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
tcp_conn->tx_hash.flags = 0;
- if (IS_ERR(tcp_conn->tx_hash.tfm)) {
- printk(KERN_ERR "Could not create connection due to crc32c "
- "loading error %ld. Make sure the crc32c module is "
- "built as a module or into the kernel\n",
- PTR_ERR(tcp_conn->tx_hash.tfm));
+ if (IS_ERR(tcp_conn->tx_hash.tfm))
goto free_tcp_conn;
- }
tcp_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
CRYPTO_ALG_ASYNC);
tcp_conn->rx_hash.flags = 0;
- if (IS_ERR(tcp_conn->rx_hash.tfm)) {
- printk(KERN_ERR "Could not create connection due to crc32c "
- "loading error %ld. Make sure the crc32c module is "
- "built as a module or into the kernel\n",
- PTR_ERR(tcp_conn->rx_hash.tfm));
+ if (IS_ERR(tcp_conn->rx_hash.tfm))
goto free_tx_tfm;
- }
return cls_conn;
free_tx_tfm:
crypto_free_hash(tcp_conn->tx_hash.tfm);
free_tcp_conn:
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Could not create connection due to crc32c "
+ "loading error. Make sure the crc32c "
+ "module is built as a module or into the "
+ "kernel\n");
kfree(tcp_conn);
tcp_conn_alloc_fail:
iscsi_conn_teardown(cls_conn);
@@ -1627,7 +1629,8 @@ iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
/* lookup for existing socket */
sock = sockfd_lookup((int)transport_eph, &err);
if (!sock) {
- printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
+ iscsi_conn_printk(KERN_ERR, conn,
+ "sockfd_lookup failed %d\n", err);
return -EEXIST;
}
/*
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 553168ae44f1..59f8445eab0d 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -160,7 +160,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
hdr->opcode = ISCSI_OP_SCSI_CMD;
hdr->flags = ISCSI_ATTR_SIMPLE;
int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
- hdr->itt = build_itt(ctask->itt, conn->id, session->age);
+ hdr->itt = build_itt(ctask->itt, session->age);
hdr->data_length = cpu_to_be32(scsi_bufflen(sc));
hdr->cmdsn = cpu_to_be32(session->cmdsn);
session->cmdsn++;
@@ -416,8 +416,9 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (datalen < 2) {
invalid_datalen:
- printk(KERN_ERR "iscsi: Got CHECK_CONDITION but "
- "invalid data buffer size of %d\n", datalen);
+ iscsi_conn_printk(KERN_ERR, conn,
+ "Got CHECK_CONDITION but invalid data "
+ "buffer size of %d\n", datalen);
sc->result = DID_BAD_TARGET << 16;
goto out;
}
@@ -494,7 +495,7 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
if (!mtask) {
- printk(KERN_ERR "Could not send nopout\n");
+ iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
return;
}
@@ -522,9 +523,10 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
itt = get_itt(rejected_pdu.itt);
- printk(KERN_ERR "itt 0x%x had pdu (op 0x%x) rejected "
- "due to DataDigest error.\n", itt,
- rejected_pdu.opcode);
+ iscsi_conn_printk(KERN_ERR, conn,
+ "itt 0x%x had pdu (op 0x%x) rejected "
+ "due to DataDigest error.\n", itt,
+ rejected_pdu.opcode);
}
}
return 0;
@@ -541,8 +543,8 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
* queuecommand or send generic. session lock must be held and verify
* itt must have been called.
*/
-int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- char *data, int datalen)
+static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *data, int datalen)
{
struct iscsi_session *session = conn->session;
int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
@@ -672,7 +674,6 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
return rc;
}
-EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *data, int datalen)
@@ -697,18 +698,13 @@ int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (hdr->itt != RESERVED_ITT) {
if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
(session->age << ISCSI_AGE_SHIFT)) {
- printk(KERN_ERR "iscsi: received itt %x expected "
- "session age (%x)\n", (__force u32)hdr->itt,
- session->age & ISCSI_AGE_MASK);
+ iscsi_conn_printk(KERN_ERR, conn,
+ "received itt %x expected session "
+ "age (%x)\n", (__force u32)hdr->itt,
+ session->age & ISCSI_AGE_MASK);
return ISCSI_ERR_BAD_ITT;
}
- if (((__force u32)hdr->itt & ISCSI_CID_MASK) !=
- (conn->id << ISCSI_CID_SHIFT)) {
- printk(KERN_ERR "iscsi: received itt %x, expected "
- "CID (%x)\n", (__force u32)hdr->itt, conn->id);
- return ISCSI_ERR_BAD_ITT;
- }
itt = get_itt(hdr->itt);
} else
itt = ~0U;
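This hunk drops the connection-ID bits from the initiator task tag: after the change the tag carries only the command index plus the session "age" in its top bits, so the verification path above checks the age and nothing else. A hedged sketch of that packing follows; the shift and mask values are illustrative, not necessarily the kernel's.

#include <stdint.h>

#define AGE_SHIFT	28
#define AGE_MASK	0xf0000000u
#define TASK_MASK	0x0fffffffu

/* Pack the per-session command index and the session age into one tag. */
static uint32_t build_tag(uint32_t task_idx, uint32_t age)
{
	return (task_idx & TASK_MASK) | ((age & 0xf) << AGE_SHIFT);
}

static uint32_t tag_to_task(uint32_t tag)
{
	return tag & TASK_MASK;
}

/* Reject tags minted by an older incarnation of the session. */
static int tag_age_ok(uint32_t tag, uint32_t session_age)
{
	return (tag & AGE_MASK) == ((session_age & 0xf) << AGE_SHIFT);
}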
@@ -717,16 +713,17 @@ int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
ctask = session->cmds[itt];
if (!ctask->sc) {
- printk(KERN_INFO "iscsi: dropping ctask with "
- "itt 0x%x\n", ctask->itt);
+ iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
+ "with itt 0x%x\n", ctask->itt);
/* force drop */
return ISCSI_ERR_NO_SCSI_CMD;
}
if (ctask->sc->SCp.phase != session->age) {
- printk(KERN_ERR "iscsi: ctask's session age %d, "
- "expected %d\n", ctask->sc->SCp.phase,
- session->age);
+ iscsi_conn_printk(KERN_ERR, conn,
+ "iscsi: ctask's session age %d, "
+ "expected %d\n", ctask->sc->SCp.phase,
+ session->age);
return ISCSI_ERR_SESSION_FAILED;
}
}
@@ -771,7 +768,7 @@ static void iscsi_prep_mtask(struct iscsi_conn *conn,
*/
nop->cmdsn = cpu_to_be32(session->cmdsn);
if (hdr->itt != RESERVED_ITT) {
- hdr->itt = build_itt(mtask->itt, conn->id, session->age);
+ hdr->itt = build_itt(mtask->itt, session->age);
/*
* TODO: We always use immediate, so we never hit this.
* If we start to send tmfs or nops as non-immediate then
@@ -997,6 +994,7 @@ enum {
FAILURE_SESSION_IN_RECOVERY,
FAILURE_SESSION_RECOVERY_TIMEOUT,
FAILURE_SESSION_LOGGING_OUT,
+ FAILURE_SESSION_NOT_READY,
};
int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
@@ -1017,6 +1015,12 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
session = iscsi_hostdata(host->hostdata);
spin_lock(&session->lock);
+ reason = iscsi_session_chkready(session_to_cls(session));
+ if (reason) {
+ sc->result = reason;
+ goto fault;
+ }
+
/*
* ISCSI_STATE_FAILED is a temp. state. The recovery
* code will decide what is best to do with command queued
@@ -1033,18 +1037,23 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
switch (session->state) {
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
- goto reject;
+ sc->result = DID_IMM_RETRY << 16;
+ break;
case ISCSI_STATE_LOGGING_OUT:
reason = FAILURE_SESSION_LOGGING_OUT;
- goto reject;
+ sc->result = DID_IMM_RETRY << 16;
+ break;
case ISCSI_STATE_RECOVERY_FAILED:
reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
+ sc->result = DID_NO_CONNECT << 16;
break;
case ISCSI_STATE_TERMINATE:
reason = FAILURE_SESSION_TERMINATE;
+ sc->result = DID_NO_CONNECT << 16;
break;
default:
reason = FAILURE_SESSION_FREED;
+ sc->result = DID_NO_CONNECT << 16;
}
goto fault;
}
@@ -1052,6 +1061,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
conn = session->leadconn;
if (!conn) {
reason = FAILURE_SESSION_FREED;
+ sc->result = DID_NO_CONNECT << 16;
goto fault;
}
@@ -1091,9 +1101,7 @@ reject:
fault:
spin_unlock(&session->lock);
- printk(KERN_ERR "iscsi: cmd 0x%x is not queued (%d)\n",
- sc->cmnd[0], reason);
- sc->result = (DID_NO_CONNECT << 16);
+ debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
scsi_set_resid(sc, scsi_bufflen(sc));
sc->scsi_done(sc);
spin_lock(host->host_lock);
@@ -1160,7 +1168,8 @@ failed:
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
if (session->state == ISCSI_STATE_LOGGED_IN)
- printk(KERN_INFO "iscsi: host reset succeeded\n");
+ iscsi_session_printk(KERN_INFO, session,
+ "host reset succeeded\n");
else
goto failed;
spin_unlock_bh(&session->lock);
@@ -1239,7 +1248,8 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
* Fail commands. session lock held and recv side suspended and xmit
* thread flushed
*/
-static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
+static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
+ int error)
{
struct iscsi_cmd_task *ctask, *tmp;
@@ -1251,7 +1261,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
if (lun == ctask->sc->device->lun || lun == -1) {
debug_scsi("failing pending sc %p itt 0x%x\n",
ctask->sc, ctask->itt);
- fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ fail_command(conn, ctask, error << 16);
}
}
@@ -1259,7 +1269,7 @@ static void fail_all_commands(struct iscsi_conn *conn, unsigned lun)
if (lun == ctask->sc->device->lun || lun == -1) {
debug_scsi("failing requeued sc %p itt 0x%x\n",
ctask->sc, ctask->itt);
- fail_command(conn, ctask, DID_BUS_BUSY << 16);
+ fail_command(conn, ctask, error << 16);
}
}
@@ -1357,10 +1367,10 @@ static void iscsi_check_transport_timeouts(unsigned long data)
last_recv = conn->last_recv;
if (time_before_eq(last_recv + timeout + (conn->ping_timeout * HZ),
jiffies)) {
- printk(KERN_ERR "ping timeout of %d secs expired, "
- "last rx %lu, last ping %lu, now %lu\n",
- conn->ping_timeout, last_recv,
- conn->last_ping, jiffies);
+ iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
+ "expired, last rx %lu, last ping %lu, "
+ "now %lu\n", conn->ping_timeout, last_recv,
+ conn->last_ping, jiffies);
spin_unlock(&session->lock);
iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return;
@@ -1373,14 +1383,11 @@ static void iscsi_check_transport_timeouts(unsigned long data)
iscsi_send_nopout(conn, NULL);
}
next_timeout = last_recv + timeout + (conn->ping_timeout * HZ);
- } else {
+ } else
next_timeout = last_recv + timeout;
- }
- if (next_timeout) {
- debug_scsi("Setting next tmo %lu\n", next_timeout);
- mod_timer(&conn->transport_timer, next_timeout);
- }
+ debug_scsi("Setting next tmo %lu\n", next_timeout);
+ mod_timer(&conn->transport_timer, next_timeout);
done:
spin_unlock(&session->lock);
}
@@ -1573,7 +1580,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
/* need to grab the recv lock then session lock */
write_lock_bh(conn->recv_lock);
spin_lock(&session->lock);
- fail_all_commands(conn, sc->device->lun);
+ fail_all_commands(conn, sc->device->lun, DID_ERROR);
conn->tmf_state = TMF_INITIAL;
spin_unlock(&session->lock);
write_unlock_bh(conn->recv_lock);
@@ -1944,9 +1951,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_irqrestore(session->host->host_lock, flags);
msleep_interruptible(500);
- printk(KERN_INFO "iscsi: scsi conn_destroy(): host_busy %d "
- "host_failed %d\n", session->host->host_busy,
- session->host->host_failed);
+ iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
+ "host_busy %d host_failed %d\n",
+ session->host->host_busy,
+ session->host->host_failed);
/*
* force eh_abort() to unblock
*/
@@ -1975,27 +1983,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
struct iscsi_session *session = conn->session;
if (!session) {
- printk(KERN_ERR "iscsi: can't start unbound connection\n");
+ iscsi_conn_printk(KERN_ERR, conn,
+ "can't start unbound connection\n");
return -EPERM;
}
if ((session->imm_data_en || !session->initial_r2t_en) &&
session->first_burst > session->max_burst) {
- printk("iscsi: invalid burst lengths: "
- "first_burst %d max_burst %d\n",
- session->first_burst, session->max_burst);
+ iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
+ "first_burst %d max_burst %d\n",
+ session->first_burst, session->max_burst);
return -EINVAL;
}
if (conn->ping_timeout && !conn->recv_timeout) {
- printk(KERN_ERR "iscsi: invalid recv timeout of zero "
- "Using 5 seconds\n.");
+ iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
+ "zero. Using 5 seconds\n.");
conn->recv_timeout = 5;
}
if (conn->recv_timeout && !conn->ping_timeout) {
- printk(KERN_ERR "iscsi: invalid ping timeout of zero "
- "Using 5 seconds.\n");
+ iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
+ "zero. Using 5 seconds.\n");
conn->ping_timeout = 5;
}
@@ -2019,11 +2028,9 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
conn->stop_stage = 0;
conn->tmf_state = TMF_INITIAL;
session->age++;
- spin_unlock_bh(&session->lock);
-
- iscsi_unblock_session(session_to_cls(session));
- wake_up(&conn->ehwait);
- return 0;
+ if (session->age == 16)
+ session->age = 0;
+ break;
case STOP_CONN_TERM:
conn->stop_stage = 0;
break;
@@ -2032,6 +2039,8 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_bh(&session->lock);
+ iscsi_unblock_session(session_to_cls(session));
+ wake_up(&conn->ehwait);
return 0;
}
EXPORT_SYMBOL_GPL(iscsi_conn_start);
@@ -2123,7 +2132,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
* flush queues.
*/
spin_lock_bh(&session->lock);
- fail_all_commands(conn, -1);
+ fail_all_commands(conn, -1,
+ flag == STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
flush_control_queues(session, conn);
spin_unlock_bh(&session->lock);
mutex_unlock(&session->eh_mutex);
@@ -2140,7 +2150,8 @@ void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
iscsi_start_session_recovery(session, conn, flag);
break;
default:
- printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
+ iscsi_conn_printk(KERN_ERR, conn,
+ "invalid stop flag %d\n", flag);
}
}
EXPORT_SYMBOL_GPL(iscsi_conn_stop);
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
deleted file mode 100644
index bcb49021b7e2..000000000000
--- a/drivers/scsi/mac_esp.c
+++ /dev/null
@@ -1,751 +0,0 @@
-/*
- * 68k mac 53c9[46] scsi driver
- *
- * copyright (c) 1998, David Weis weisd3458@uni.edu
- *
- * debugging on Quadra 800 and 660AV Michael Schmitz, Dave Kilzer 7/98
- *
- * based loosely on cyber_esp.c
- */
-
-/* these are unused for now */
-#define myreadl(addr) (*(volatile unsigned int *) (addr))
-#define mywritel(b, addr) ((*(volatile unsigned int *) (addr)) = (b))
-
-
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
-
-#include <asm/io.h>
-
-#include <asm/setup.h>
-#include <asm/irq.h>
-#include <asm/macints.h>
-#include <asm/machw.h>
-#include <asm/mac_via.h>
-
-#include <asm/pgtable.h>
-
-#include <asm/macintosh.h>
-
-/* #define DEBUG_MAC_ESP */
-
-extern void esp_handle(struct NCR_ESP *esp);
-extern void mac_esp_intr(int irq, void *dev_id);
-
-static int dma_bytes_sent(struct NCR_ESP * esp, int fifo_count);
-static int dma_can_transfer(struct NCR_ESP * esp, Scsi_Cmnd *sp);
-static void dma_dump_state(struct NCR_ESP * esp);
-static void dma_init_read(struct NCR_ESP * esp, char * vaddress, int length);
-static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length);
-static void dma_ints_off(struct NCR_ESP * esp);
-static void dma_ints_on(struct NCR_ESP * esp);
-static int dma_irq_p(struct NCR_ESP * esp);
-static int dma_irq_p_quick(struct NCR_ESP * esp);
-static void dma_led_off(struct NCR_ESP * esp);
-static void dma_led_on(struct NCR_ESP *esp);
-static int dma_ports_p(struct NCR_ESP *esp);
-static void dma_setup(struct NCR_ESP * esp, __u32 addr, int count, int write);
-static void dma_setup_quick(struct NCR_ESP * esp, __u32 addr, int count, int write);
-
-static int esp_dafb_dma_irq_p(struct NCR_ESP * espdev);
-static int esp_iosb_dma_irq_p(struct NCR_ESP * espdev);
-
-static volatile unsigned char cmd_buffer[16];
- /* This is where all commands are put
- * before they are transferred to the ESP chip
- * via PIO.
- */
-
-static int esp_initialized = 0;
-
-static int setup_num_esps = -1;
-static int setup_disconnect = -1;
-static int setup_nosync = -1;
-static int setup_can_queue = -1;
-static int setup_cmd_per_lun = -1;
-static int setup_sg_tablesize = -1;
-#ifdef SUPPORT_TAGS
-static int setup_use_tagged_queuing = -1;
-#endif
-static int setup_hostid = -1;
-
-/*
- * Experimental ESP inthandler; check macints.c to make sure dev_id is
- * set up properly!
- */
-
-void mac_esp_intr(int irq, void *dev_id)
-{
- struct NCR_ESP *esp = (struct NCR_ESP *) dev_id;
- int irq_p = 0;
-
- /* Handle the one ESP interrupt showing at this IRQ level. */
- if(((esp)->irq & 0xff) == irq) {
- /*
- * Debug ..
- */
- irq_p = esp->dma_irq_p(esp);
- printk("mac_esp: irq_p %x current %p disconnected %p\n",
- irq_p, esp->current_SC, esp->disconnected_SC);
-
- /*
- * Mac: if we're here, it's an ESP interrupt for sure!
- */
- if((esp->current_SC || esp->disconnected_SC)) {
- esp->dma_ints_off(esp);
-
- ESPIRQ(("I%d(", esp->esp_id));
- esp_handle(esp);
- ESPIRQ((")"));
-
- esp->dma_ints_on(esp);
- }
- }
-}
-
-/*
- * Debug hooks; use for playing with the interrupt flag testing and interrupt
- * acknowledge on the various machines
- */
-
-void scsi_esp_polled(int irq, void *dev_id)
-{
- if (esp_initialized == 0)
- return;
-
- mac_esp_intr(irq, dev_id);
-}
-
-void fake_intr(int irq, void *dev_id)
-{
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: got irq\n");
-#endif
-
- mac_esp_intr(irq, dev_id);
-}
-
-irqreturn_t fake_drq(int irq, void *dev_id)
-{
- printk("mac_esp: got drq\n");
- return IRQ_HANDLED;
-}
-
-#define DRIVER_SETUP
-
-/*
- * Function : mac_esp_setup(char *str)
- *
- * Purpose : booter command line initialization of the overrides array,
- *
- * Inputs : str - parameters, separated by commas.
- *
- * Currently unused in the new driver; need to add settable parameters to the
- * detect function.
- *
- */
-
-static int __init mac_esp_setup(char *str) {
-#ifdef DRIVER_SETUP
- /* Format of mac53c9x parameter is:
- * mac53c9x=<num_esps>,<disconnect>,<nosync>,<can_queue>,<cmd_per_lun>,<sg_tablesize>,<hostid>,<use_tags>
- * Negative values mean don't change.
- */
-
- char *this_opt;
- long opt;
-
- this_opt = strsep (&str, ",");
- if(this_opt) {
- opt = simple_strtol( this_opt, NULL, 0 );
-
- if (opt >= 0 && opt <= 2)
- setup_num_esps = opt;
- else if (opt > 2)
- printk( "mac_esp_setup: invalid number of hosts %ld !\n", opt );
-
- this_opt = strsep (&str, ",");
- }
- if(this_opt) {
- opt = simple_strtol( this_opt, NULL, 0 );
-
- if (opt > 0)
- setup_disconnect = opt;
-
- this_opt = strsep (&str, ",");
- }
- if(this_opt) {
- opt = simple_strtol( this_opt, NULL, 0 );
-
- if (opt >= 0)
- setup_nosync = opt;
-
- this_opt = strsep (&str, ",");
- }
- if(this_opt) {
- opt = simple_strtol( this_opt, NULL, 0 );
-
- if (opt > 0)
- setup_can_queue = opt;
-
- this_opt = strsep (&str, ",");
- }
- if(this_opt) {
- opt = simple_strtol( this_opt, NULL, 0 );
-
- if (opt > 0)
- setup_cmd_per_lun = opt;
-
- this_opt = strsep (&str, ",");
- }
- if(this_opt) {
- opt = simple_strtol( this_opt, NULL, 0 );
-
- if (opt >= 0) {
- setup_sg_tablesize = opt;
- /* Must be <= SG_ALL (255) */
- if (setup_sg_tablesize > SG_ALL)
- setup_sg_tablesize = SG_ALL;
- }
-
- this_opt = strsep (&str, ",");
- }
- if(this_opt) {
- opt = simple_strtol( this_opt, NULL, 0 );
-
- /* Must be between 0 and 7 */
- if (opt >= 0 && opt <= 7)
- setup_hostid = opt;
- else if (opt > 7)
- printk( "mac_esp_setup: invalid host ID %ld !\n", opt);
-
- this_opt = strsep (&str, ",");
- }
-#ifdef SUPPORT_TAGS
- if(this_opt) {
- opt = simple_strtol( this_opt, NULL, 0 );
- if (opt >= 0)
- setup_use_tagged_queuing = !!opt;
- }
-#endif
-#endif
- return 1;
-}
-
-__setup("mac53c9x=", mac_esp_setup);
-
-
-/*
- * ESP address 'detection'
- */
-
-unsigned long get_base(int chip_num)
-{
- /*
- * using the chip_num and mac model, figure out where the
- * chips are mapped
- */
-
- unsigned long io_base = 0x50f00000;
- unsigned int second_offset = 0x402;
- unsigned long scsi_loc = 0;
-
- switch (macintosh_config->scsi_type) {
-
- /* 950, 900, 700 */
- case MAC_SCSI_QUADRA2:
- scsi_loc = io_base + 0xf000 + ((chip_num == 0) ? 0 : second_offset);
- break;
-
- /* av's */
- case MAC_SCSI_QUADRA3:
- scsi_loc = io_base + 0x18000 + ((chip_num == 0) ? 0 : second_offset);
- break;
-
- /* most quadra/centris models are like this */
- case MAC_SCSI_QUADRA:
- scsi_loc = io_base + 0x10000;
- break;
-
- default:
- printk("mac_esp: get_base: hit default!\n");
- scsi_loc = io_base + 0x10000;
- break;
-
- } /* switch */
-
- printk("mac_esp: io base at 0x%lx\n", scsi_loc);
-
- return scsi_loc;
-}
-
-/*
- * Model dependent ESP setup
- */
-
-int mac_esp_detect(struct scsi_host_template * tpnt)
-{
- int quick = 0;
- int chipnum, chipspresent = 0;
-#if 0
- unsigned long timeout;
-#endif
-
- if (esp_initialized > 0)
- return -ENODEV;
-
- /* what do we have in this machine... */
- if (MACHW_PRESENT(MAC_SCSI_96)) {
- chipspresent ++;
- }
-
- if (MACHW_PRESENT(MAC_SCSI_96_2)) {
- chipspresent ++;
- }
-
- /* number of ESPs present ? */
- if (setup_num_esps >= 0) {
- if (chipspresent >= setup_num_esps)
- chipspresent = setup_num_esps;
- else
- printk("mac_esp_detect: num_hosts detected %d setup %d \n",
- chipspresent, setup_num_esps);
- }
-
- /* TODO: add disconnect / nosync flags */
-
- /* setup variables */
- tpnt->can_queue =
- (setup_can_queue > 0) ? setup_can_queue : 7;
- tpnt->cmd_per_lun =
- (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : 1;
- tpnt->sg_tablesize =
- (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_ALL;
-
- if (setup_hostid >= 0)
- tpnt->this_id = setup_hostid;
- else {
- /* use 7 as default */
- tpnt->this_id = 7;
- }
-
-#ifdef SUPPORT_TAGS
- if (setup_use_tagged_queuing < 0)
- setup_use_tagged_queuing = DEFAULT_USE_TAGGED_QUEUING;
-#endif
-
- for (chipnum = 0; chipnum < chipspresent; chipnum ++) {
- struct NCR_ESP * esp;
-
- esp = esp_allocate(tpnt, NULL, 0);
- esp->eregs = (struct ESP_regs *) get_base(chipnum);
-
- esp->dma_irq_p = &esp_dafb_dma_irq_p;
- if (chipnum == 0) {
-
- if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) {
- /* most machines except those below :-) */
- quick = 1;
- esp->dma_irq_p = &esp_iosb_dma_irq_p;
- } else if (macintosh_config->scsi_type == MAC_SCSI_QUADRA3) {
- /* mostly av's */
- quick = 0;
- } else {
- /* q950, 900, 700 */
- quick = 1;
- out_be32(0xf9800024, 0x1d1);
- esp->dregs = (void *) 0xf9800024;
- }
-
- } else { /* chipnum */
-
- quick = 1;
- out_be32(0xf9800028, 0x1d1);
- esp->dregs = (void *) 0xf9800028;
-
- } /* chipnum == 0 */
-
- /* use pio for command bytes; pio for message/data: TBI */
- esp->do_pio_cmds = 1;
-
- /* Set the command buffer */
- esp->esp_command = (volatile unsigned char*) cmd_buffer;
- esp->esp_command_dvma = (__u32) cmd_buffer;
-
- /* various functions */
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = NULL;
- esp->dma_init_write = NULL;
- esp->dma_ints_off = &dma_ints_off;
- esp->dma_ints_on = &dma_ints_on;
-
- esp->dma_ports_p = &dma_ports_p;
-
-
- /* Optional functions */
- esp->dma_barrier = NULL;
- esp->dma_drain = NULL;
- esp->dma_invalidate = NULL;
- esp->dma_irq_entry = NULL;
- esp->dma_irq_exit = NULL;
- esp->dma_led_on = NULL;
- esp->dma_led_off = NULL;
- esp->dma_poll = NULL;
- esp->dma_reset = NULL;
-
- /* SCSI chip speed */
- /* below esp->cfreq = 40000000; */
-
-
- if (quick) {
- /* 'quick' means there's handshake glue logic like in the 5380 case */
- esp->dma_setup = &dma_setup_quick;
- } else {
- esp->dma_setup = &dma_setup;
- }
-
- if (chipnum == 0) {
-
- esp->irq = IRQ_MAC_SCSI;
-
- request_irq(IRQ_MAC_SCSI, esp_intr, 0, "Mac ESP SCSI", esp->ehost);
-#if 0 /* conflicts with IOP ADB */
- request_irq(IRQ_MAC_SCSIDRQ, fake_drq, 0, "Mac ESP DRQ", esp->ehost);
-#endif
-
- if (macintosh_config->scsi_type == MAC_SCSI_QUADRA) {
- esp->cfreq = 16500000;
- } else {
- esp->cfreq = 25000000;
- }
-
-
- } else { /* chipnum == 1 */
-
- esp->irq = IRQ_MAC_SCSIDRQ;
-#if 0 /* conflicts with IOP ADB */
- request_irq(IRQ_MAC_SCSIDRQ, esp_intr, 0, "Mac ESP SCSI 2", esp->ehost);
-#endif
-
- esp->cfreq = 25000000;
-
- }
-
- if (quick) {
- printk("esp: using quick version\n");
- }
-
- printk("esp: addr at 0x%p\n", esp->eregs);
-
- esp->scsi_id = 7;
- esp->diff = 0;
-
- esp_initialize(esp);
-
- } /* for chipnum */
-
- if (chipspresent)
- printk("\nmac_esp: %d esp controllers found\n", chipspresent);
-
- esp_initialized = chipspresent;
-
- return chipspresent;
-}
-
-static int mac_esp_release(struct Scsi_Host *shost)
-{
- if (shost->irq)
- free_irq(shost->irq, NULL);
- if (shost->io_port && shost->n_io_port)
- release_region(shost->io_port, shost->n_io_port);
- scsi_unregister(shost);
- return 0;
-}
-
-/*
- * I've been wondering what this is supposed to do, for some time. Talking
- * to Allen Briggs: These machines have an extra register someplace where the
- * DRQ pin of the ESP can be monitored. That isn't useful for determining
- * anything else (such as reselect interrupt or other magic) though.
- * Maybe the semantics should be changed to something like
- * if (esp->current_SC)
- * ... check DRQ flag ...
- * else
- * ... disconnected, check pending VIA interrupt ...
- *
- * There's a problem with using the dabf flag or mac_irq_pending() here: both
- * seem to return 1 even though no interrupt is currently pending, resulting
- * in esp_exec_cmd() holding off the next command, and possibly infinite loops
- * in esp_intr().
- * Short term fix: just use esp_status & ESP_STAT_INTR here, as long as we
- * use simple PIO. The DRQ status will be important when implementing pseudo
- * DMA mode (set up ESP transfer count, return, do a batch of bytes in PIO or
- * 'hardware handshake' mode upon DRQ).
- * If you plan on changing this (i.e. to save the esp_status register access in
- * favor of a VIA register access or a shadow register for the IFR), make sure
- * to try a debug version of this first to monitor what registers would be a good
- * indicator of the ESP interrupt.
- */
-
-static int esp_dafb_dma_irq_p(struct NCR_ESP * esp)
-{
- unsigned int ret;
- int sreg = esp_read(esp->eregs->esp_status);
-
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: esp_dafb_dma_irq_p dafb %d irq %d\n",
- readl(esp->dregs), mac_irq_pending(IRQ_MAC_SCSI));
-#endif
-
- sreg &= ESP_STAT_INTR;
-
- /*
- * maybe working; this is essentially what's used for iosb_dma_irq_p
- */
- if (sreg)
- return 1;
- else
- return 0;
-
- /*
- * didn't work ...
- */
-#if 0
- if (esp->current_SC)
- ret = readl(esp->dregs) & 0x200;
- else if (esp->disconnected_SC)
- ret = 1; /* sreg ?? */
- else
- ret = mac_irq_pending(IRQ_MAC_SCSI);
-
- return(ret);
-#endif
-
-}
-
-/*
- * See above: testing mac_irq_pending always returned 8 (SCSI IRQ) regardless
- * of the actual ESP status.
- */
-
-static int esp_iosb_dma_irq_p(struct NCR_ESP * esp)
-{
- int ret = mac_irq_pending(IRQ_MAC_SCSI) || mac_irq_pending(IRQ_MAC_SCSIDRQ);
- int sreg = esp_read(esp->eregs->esp_status);
-
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: dma_irq_p drq %d irq %d sreg %x curr %p disc %p\n",
- mac_irq_pending(IRQ_MAC_SCSIDRQ), mac_irq_pending(IRQ_MAC_SCSI),
- sreg, esp->current_SC, esp->disconnected_SC);
-#endif
-
- sreg &= ESP_STAT_INTR;
-
- if (sreg)
- return (sreg);
- else
- return 0;
-}
-
-/*
- * This seems to be OK for PIO at least ... usually 0 after PIO.
- */
-
-static int dma_bytes_sent(struct NCR_ESP * esp, int fifo_count)
-{
-
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: dma bytes sent = %x\n", fifo_count);
-#endif
-
- return fifo_count;
-}
-
-/*
- * dma_can_transfer is used to switch between DMA and PIO, if DMA (pseudo)
- * is ever implemented. Returning 0 here will use PIO.
- */
-
-static int dma_can_transfer(struct NCR_ESP * esp, Scsi_Cmnd * sp)
-{
- unsigned long sz = sp->SCp.this_residual;
-#if 0 /* no DMA yet; make conditional */
- if (sz > 0x10000000) {
- sz = 0x10000000;
- }
- printk("mac_esp: dma can transfer = 0lx%x\n", sz);
-#else
-
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: pio to transfer = %ld\n", sz);
-#endif
-
- sz = 0;
-#endif
- return sz;
-}
-
-/*
- * Not yet ...
- */
-
-static void dma_dump_state(struct NCR_ESP * esp)
-{
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: dma_dump_state: called\n");
-#endif
-#if 0
- ESPLOG(("esp%d: dma -- cond_reg<%02x>\n",
- esp->esp_id, ((struct mac_dma_registers *)
- (esp->dregs))->cond_reg));
-#endif
-}
-
-/*
- * DMA setup: should be used to set up the ESP transfer count for pseudo
- * DMA transfers; need a DRQ transfer function to do the actual transfer
- */
-
-static void dma_init_read(struct NCR_ESP * esp, char * vaddress, int length)
-{
- printk("mac_esp: dma_init_read\n");
-}
-
-
-static void dma_init_write(struct NCR_ESP * esp, char * vaddress, int length)
-{
- printk("mac_esp: dma_init_write\n");
-}
-
-
-static void dma_ints_off(struct NCR_ESP * esp)
-{
- disable_irq(esp->irq);
-}
-
-
-static void dma_ints_on(struct NCR_ESP * esp)
-{
- enable_irq(esp->irq);
-}
-
-/*
- * generic dma_irq_p(), unused
- */
-
-static int dma_irq_p(struct NCR_ESP * esp)
-{
- int i = esp_read(esp->eregs->esp_status);
-
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: dma_irq_p status %d\n", i);
-#endif
-
- return (i & ESP_STAT_INTR);
-}
-
-static int dma_irq_p_quick(struct NCR_ESP * esp)
-{
- /*
- * Copied from iosb_dma_irq_p()
- */
- int ret = mac_irq_pending(IRQ_MAC_SCSI) || mac_irq_pending(IRQ_MAC_SCSIDRQ);
- int sreg = esp_read(esp->eregs->esp_status);
-
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: dma_irq_p drq %d irq %d sreg %x curr %p disc %p\n",
- mac_irq_pending(IRQ_MAC_SCSIDRQ), mac_irq_pending(IRQ_MAC_SCSI),
- sreg, esp->current_SC, esp->disconnected_SC);
-#endif
-
- sreg &= ESP_STAT_INTR;
-
- if (sreg)
- return (sreg);
- else
- return 0;
-
-}
-
-static void dma_led_off(struct NCR_ESP * esp)
-{
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: dma_led_off: called\n");
-#endif
-}
-
-
-static void dma_led_on(struct NCR_ESP * esp)
-{
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: dma_led_on: called\n");
-#endif
-}
-
-
-static int dma_ports_p(struct NCR_ESP * esp)
-{
- return 0;
-}
-
-
-static void dma_setup(struct NCR_ESP * esp, __u32 addr, int count, int write)
-{
-
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: dma_setup\n");
-#endif
-
- if (write) {
- dma_init_read(esp, (char *) addr, count);
- } else {
- dma_init_write(esp, (char *) addr, count);
- }
-}
-
-
-static void dma_setup_quick(struct NCR_ESP * esp, __u32 addr, int count, int write)
-{
-#ifdef DEBUG_MAC_ESP
- printk("mac_esp: dma_setup_quick\n");
-#endif
-}
-
-static struct scsi_host_template driver_template = {
- .proc_name = "mac_esp",
- .name = "Mac 53C9x SCSI",
- .detect = mac_esp_detect,
- .slave_alloc = esp_slave_alloc,
- .slave_destroy = esp_slave_destroy,
- .release = mac_esp_release,
- .info = esp_info,
- .queuecommand = esp_queue,
- .eh_abort_handler = esp_abort,
- .eh_bus_reset_handler = esp_reset,
- .can_queue = 7,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = DISABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/mca_53c9x.c b/drivers/scsi/mca_53c9x.c
deleted file mode 100644
index d693d0f21395..000000000000
--- a/drivers/scsi/mca_53c9x.c
+++ /dev/null
@@ -1,520 +0,0 @@
-/* mca_53c9x.c: Driver for the SCSI adapter found on NCR 35xx
- * (and maybe some other) Microchannel machines
- *
- * Code taken mostly from Cyberstorm SCSI drivers
- * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
- *
- * Hacked to work with the NCR MCA stuff by Tymm Twillman (tymm@computer.org)
- *
- * The CyberStorm SCSI driver (and this driver) is based on David S. Miller's
- * ESP driver for the Sparc computers.
- *
- * Special thanks to Ken Stewart at Symbios (LSI) for helping with info on
- * the 86C01. I was on the brink of going ga-ga...
- *
- * Also thanks to Jesper Skov for helping me with info on how the Amiga
- * does things...
- */
-
-/*
- * This is currently only set up to use one 53c9x card at a time; it could be
- * changed fairly easily to detect/use more than one, but I'm not too sure how
- * many cards that use the 53c9x on MCA systems there are (if, in fact, there
- * are cards that use them, other than the one built into some NCR systems)...
- * If anyone requests this, I'll throw it in, otherwise it's not worth the
- * effort.
- */
-
-/*
- * Info on the 86C01 MCA interface chip at the bottom, if you care enough to
- * look.
- */
-
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/mca.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/mca-legacy.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
-
-#include <asm/dma.h>
-#include <asm/irq.h>
-#include <asm/mca_dma.h>
-#include <asm/pgtable.h>
-
-/*
- * From ibmmca.c (IBM scsi controller card driver) -- used for turning PS2 disk
- * activity LED on and off
- */
-
-#define PS2_SYS_CTR 0x92
-
-/* Ports the ncr's 53c94 can be put at; indexed by pos register value */
-
-#define MCA_53C9X_IO_PORTS { \
- 0x0000, 0x0240, 0x0340, 0x0400, \
- 0x0420, 0x3240, 0x8240, 0xA240, \
- }
-
-/*
- * Supposedly there were some cards put together with the 'c9x and 86c01. If
- * they have different ID's from the ones on the 3500 series machines,
- * you can add them here and hopefully things will work out.
- */
-
-#define MCA_53C9X_IDS { \
- 0x7F4C, \
- 0x0000, \
- }
-
-static int dma_bytes_sent(struct NCR_ESP *, int);
-static int dma_can_transfer(struct NCR_ESP *, Scsi_Cmnd *);
-static void dma_dump_state(struct NCR_ESP *);
-static void dma_init_read(struct NCR_ESP *, __u32, int);
-static void dma_init_write(struct NCR_ESP *, __u32, int);
-static void dma_ints_off(struct NCR_ESP *);
-static void dma_ints_on(struct NCR_ESP *);
-static int dma_irq_p(struct NCR_ESP *);
-static int dma_ports_p(struct NCR_ESP *);
-static void dma_setup(struct NCR_ESP *, __u32, int, int);
-static void dma_led_on(struct NCR_ESP *);
-static void dma_led_off(struct NCR_ESP *);
-
-/* This is where all commands are put before they are transferred to the
- * 53c9x via PIO.
- */
-
-static volatile unsigned char cmd_buffer[16];
-
-/*
- * We keep the structure that is used to access the registers on the 53c9x
- * here.
- */
-
-static struct ESP_regs eregs;
-
-/***************************************************************** Detection */
-static int mca_esp_detect(struct scsi_host_template *tpnt)
-{
- struct NCR_ESP *esp;
- static int io_port_by_pos[] = MCA_53C9X_IO_PORTS;
- int mca_53c9x_ids[] = MCA_53C9X_IDS;
- int *id_to_check = mca_53c9x_ids;
- int slot;
- int pos[3];
- unsigned int tmp_io_addr;
- unsigned char tmp_byte;
-
-
- if (!MCA_bus)
- return 0;
-
- while (*id_to_check) {
- if ((slot = mca_find_adapter(*id_to_check, 0)) !=
- MCA_NOTFOUND)
- {
- esp = esp_allocate(tpnt, NULL, 0);
-
- pos[0] = mca_read_stored_pos(slot, 2);
- pos[1] = mca_read_stored_pos(slot, 3);
- pos[2] = mca_read_stored_pos(slot, 4);
-
- esp->eregs = &eregs;
-
- /*
- * IO port base is given in the first (non-ID) pos
- * register, like so:
- *
- * Bits 3 2 1 IO base
- * ----------------------------
- * 0 0 0 <disabled>
- * 0 0 1 0x0240
- * 0 1 0 0x0340
- * 0 1 1 0x0400
- * 1 0 0 0x0420
- * 1 0 1 0x3240
- * 1 1 0 0x8240
- * 1 1 1 0xA240
- */
-
- tmp_io_addr =
- io_port_by_pos[(pos[0] & 0x0E) >> 1];
-
- esp->eregs->io_addr = tmp_io_addr + 0x10;
-
- if (esp->eregs->io_addr == 0x0000) {
- printk("Adapter is disabled.\n");
- break;
- }
-
- /*
- * IRQ is specified in bits 4 and 5:
- *
- * Bits 4 5 IRQ
- * -----------------------
- * 0 0 3
- * 0 1 5
- * 1 0 7
- * 1 1 9
- */
-
- esp->irq = ((pos[0] & 0x30) >> 3) + 3;
-
- /*
- * DMA channel is in the low 3 bits of the second
- * POS register
- */
-
- esp->dma = pos[1] & 7;
- esp->slot = slot;
-
- if (request_irq(esp->irq, esp_intr, 0,
- "NCR 53c9x SCSI", esp->ehost))
- {
- printk("Unable to request IRQ %d.\n", esp->irq);
- esp_deallocate(esp);
- scsi_unregister(esp->ehost);
- return 0;
- }
-
- if (request_dma(esp->dma, "NCR 53c9x SCSI")) {
- printk("Unable to request DMA channel %d.\n",
- esp->dma);
- free_irq(esp->irq, esp_intr);
- esp_deallocate(esp);
- scsi_unregister(esp->ehost);
- return 0;
- }
-
- request_region(tmp_io_addr, 32, "NCR 53c9x SCSI");
-
- /*
- * 86C01 handles DMA, IO mode, from address
- * (base + 0x0a)
- */
-
- mca_disable_dma(esp->dma);
- mca_set_dma_io(esp->dma, tmp_io_addr + 0x0a);
- mca_enable_dma(esp->dma);
-
- /* Tell the 86C01 to give us interrupts */
-
- tmp_byte = inb(tmp_io_addr + 0x02) | 0x40;
- outb(tmp_byte, tmp_io_addr + 0x02);
-
- /*
- * Scsi ID -- general purpose register, hi
- * 2 bits; add 4 to this number to get the
- * ID
- */
-
- esp->scsi_id = ((pos[2] & 0xC0) >> 6) + 4;
-
- /* Do command transfer with programmed I/O */
-
- esp->do_pio_cmds = 1;
-
- /* Required functions */
-
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = &dma_init_read;
- esp->dma_init_write = &dma_init_write;
- esp->dma_ints_off = &dma_ints_off;
- esp->dma_ints_on = &dma_ints_on;
- esp->dma_irq_p = &dma_irq_p;
- esp->dma_ports_p = &dma_ports_p;
- esp->dma_setup = &dma_setup;
-
- /* Optional functions */
-
- esp->dma_barrier = NULL;
- esp->dma_drain = NULL;
- esp->dma_invalidate = NULL;
- esp->dma_irq_entry = NULL;
- esp->dma_irq_exit = NULL;
- esp->dma_led_on = dma_led_on;
- esp->dma_led_off = dma_led_off;
- esp->dma_poll = NULL;
- esp->dma_reset = NULL;
-
- /* Set the command buffer */
-
- esp->esp_command = (volatile unsigned char*)
- cmd_buffer;
- esp->esp_command_dvma = isa_virt_to_bus(cmd_buffer);
-
- /* SCSI chip speed */
-
- esp->cfreq = 25000000;
-
- /* Differential SCSI? I think not. */
-
- esp->diff = 0;
-
- esp_initialize(esp);
-
- printk(" Adapter found in slot %2d: io port 0x%x "
- "irq %d dma channel %d\n", slot + 1, tmp_io_addr,
- esp->irq, esp->dma);
-
- mca_set_adapter_name(slot, "NCR 53C9X SCSI Adapter");
- mca_mark_as_used(slot);
-
- break;
- }
-
- id_to_check++;
- }
-
- return esps_in_use;
-}
-
-
-/******************************************************************* Release */
-
-static int mca_esp_release(struct Scsi_Host *host)
-{
- struct NCR_ESP *esp = (struct NCR_ESP *)host->hostdata;
- unsigned char tmp_byte;
-
- esp_deallocate(esp);
- /*
- * Tell the 86C01 to stop sending interrupts
- */
-
- tmp_byte = inb(esp->eregs->io_addr - 0x0E);
- tmp_byte &= ~0x40;
- outb(tmp_byte, esp->eregs->io_addr - 0x0E);
-
- free_irq(esp->irq, esp_intr);
- free_dma(esp->dma);
-
- mca_mark_as_unused(esp->slot);
-
- return 0;
-}
-
-/************************************************************* DMA Functions */
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
-{
- /* Ask the 53c9x. It knows. */
-
- return fifo_count;
-}
-
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- /*
- * The MCA dma channels can only do up to 128K bytes at a time.
- * (16 bit mode)
- */
-
- unsigned long sz = sp->SCp.this_residual;
- if(sz > 0x20000)
- sz = 0x20000;
- return sz;
-}
-
-static void dma_dump_state(struct NCR_ESP *esp)
-{
- /*
- * Doesn't quite match up to the other drivers, but we do what we
- * can.
- */
-
- ESPLOG(("esp%d: dma channel <%d>\n", esp->esp_id, esp->dma));
- ESPLOG(("bytes left to dma: %d\n", mca_get_dma_residue(esp->dma)));
-}
-
-static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
-{
- unsigned long flags;
-
-
- save_flags(flags);
- cli();
-
- mca_disable_dma(esp->dma);
- mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_16 |
- MCA_DMA_MODE_IO);
- mca_set_dma_addr(esp->dma, addr);
- mca_set_dma_count(esp->dma, length / 2); /* !!! */
- mca_enable_dma(esp->dma);
-
- restore_flags(flags);
-}
-
-static void dma_init_write(struct NCR_ESP *esp, __u32 addr, int length)
-{
- unsigned long flags;
-
-
- save_flags(flags);
- cli();
-
- mca_disable_dma(esp->dma);
- mca_set_dma_mode(esp->dma, MCA_DMA_MODE_XFER | MCA_DMA_MODE_WRITE |
- MCA_DMA_MODE_16 | MCA_DMA_MODE_IO);
- mca_set_dma_addr(esp->dma, addr);
- mca_set_dma_count(esp->dma, length / 2); /* !!! */
- mca_enable_dma(esp->dma);
-
- restore_flags(flags);
-}
-
-static void dma_ints_off(struct NCR_ESP *esp)
-{
- /*
- * Tell the 'C01 to shut up. All interrupts are routed through it.
- */
-
- outb(inb(esp->eregs->io_addr - 0x0E) & ~0x40,
- esp->eregs->io_addr - 0x0E);
-}
-
-static void dma_ints_on(struct NCR_ESP *esp)
-{
- /*
- * Ok. You can speak again.
- */
-
- outb(inb(esp->eregs->io_addr - 0x0E) | 0x40,
- esp->eregs->io_addr - 0x0E);
-}
-
-static int dma_irq_p(struct NCR_ESP *esp)
-{
- /*
- * DaveM says that this should return a "yes" if there is an interrupt
- * or a DMA error occurred. I copied the Amiga driver's semantics,
- * though, because it seems to work and we can't really tell if
- * a DMA error happened. This gives the "yes" if the scsi chip
- * is sending an interrupt and no DMA activity is taking place
- */
-
- return (!(inb(esp->eregs->io_addr - 0x04) & 1) &&
- !(inb(esp->eregs->io_addr - 0x04) & 2) );
-}
-
-static int dma_ports_p(struct NCR_ESP *esp)
-{
- /*
- * Check to see if interrupts are enabled on the 'C01 (in case abort
- * is entered multiple times, so we only do the abort once)
- */
-
- return (inb(esp->eregs->io_addr - 0x0E) & 0x40) ? 1:0;
-}
-
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
-{
- if(write){
- dma_init_write(esp, addr, count);
- } else {
- dma_init_read(esp, addr, count);
- }
-}
-
-/*
- * These will not play nicely with other disk controllers that try to use the
- * disk active LED... but what can you do? Don't answer that.
- *
- * Stolen shamelessly from ibmmca.c -- IBM Microchannel SCSI adapter driver
- *
- */
-
-static void dma_led_on(struct NCR_ESP *esp)
-{
- outb(inb(PS2_SYS_CTR) | 0xc0, PS2_SYS_CTR);
-}
-
-static void dma_led_off(struct NCR_ESP *esp)
-{
- outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR);
-}
-
-static struct scsi_host_template driver_template = {
- .proc_name = "mca_53c9x",
- .name = "NCR 53c9x SCSI",
- .detect = mca_esp_detect,
- .slave_alloc = esp_slave_alloc,
- .slave_destroy = esp_slave_destroy,
- .release = mca_esp_release,
- .queuecommand = esp_queue,
- .eh_abort_handler = esp_abort,
- .eh_bus_reset_handler = esp_reset,
- .can_queue = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .unchecked_isa_dma = 1,
- .use_clustering = DISABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
-
-/*
- * OK, here's the goods I promised. The NCR 86C01 is an MCA interface chip
- * that handles enabling/disabling IRQ, dma interfacing, IO port selection
- * and other fun stuff. It takes up 16 addresses, and the chip it is
- * connected to gets the following 16. Registers are as follows:
- *
- * Offsets 0-1 : Card ID
- *
- * Offset 2 : Mode enable register --
- * Bit 7 : Data Word width (1 = 16, 0 = 8)
- * Bit 6 : IRQ enable (1 = enabled)
- * Bits 5,4 : IRQ select
- * 0 0 : IRQ 3
- * 0 1 : IRQ 5
- * 1 0 : IRQ 7
- * 1 1 : IRQ 9
- * Bits 3-1 : Base Address
- * 0 0 0 : <disabled>
- * 0 0 1 : 0x0240
- * 0 1 0 : 0x0340
- * 0 1 1 : 0x0400
- * 1 0 0 : 0x0420
- * 1 0 1 : 0x3240
- * 1 1 0 : 0x8240
- * 1 1 1 : 0xA240
- * Bit 0 : Card enable (1 = enabled)
- *
- * Offset 3 : DMA control register --
- * Bit 7 : DMA enable (1 = enabled)
- * Bits 6,5 : Preempt Count Select (transfers to complete after
- * 'C01 has been preempted on MCA bus)
- * 0 0 : 0
- * 0 1 : 1
- * 1 0 : 3
- * 1 1 : 7
- * (all these wacky numbers; I'm sure there's a reason somewhere)
- * Bit 4 : Fairness enable (1 = fair bus priority)
- * Bits 3-0 : Arbitration level (0-15 consecutive)
- *
- * Offset 4 : General purpose register
- * Bits 7-3 : User definable (here, 7,6 are SCSI ID)
- * Bits 2-0 : reserved
- *
- * Offset 10 : DMA decode register (used for IO based DMA; also can do
- * PIO through this port)
- *
- * Offset 12 : Status
- * Bits 7-2 : reserved
- * Bit 1 : DMA pending (1 = pending)
- * Bit 0 : IRQ pending (0 = pending)
- *
- * Exciting, huh?
- *
- */
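A minimal sketch of driving the 86C01 interface registers described above; the
offsets and bit meanings follow this comment and the detect/release code
earlier in the file, while the helper names and the base parameter are
hypothetical:

#include <asm/io.h>

#define C01_MODE	0x02	/* mode enable register: bit 6 = IRQ enable */
#define C01_STATUS	0x0c	/* status: bit 1 = DMA pending, bit 0 = IRQ pending (0 means pending) */

static inline void c01_irq_on(unsigned long base)
{
	/* set bit 6 of the mode register, as mca_esp_detect() does */
	outb(inb(base + C01_MODE) | 0x40, base + C01_MODE);
}

static inline int c01_irq_pending(unsigned long base)
{
	/* the IRQ pending bit is active low per the register description */
	return !(inb(base + C01_STATUS) & 0x01);
}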
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 672c759ac24d..77a62a1b12c3 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -31,7 +31,6 @@
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
diff --git a/drivers/scsi/oktagon_esp.c b/drivers/scsi/oktagon_esp.c
deleted file mode 100644
index 8e5eadbd5c51..000000000000
--- a/drivers/scsi/oktagon_esp.c
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
- * Oktagon_esp.c -- Driver for bsc Oktagon
- *
- * Written by Carsten Pluntke 1998
- *
- * Based on cyber_esp.c
- */
-
-
-#if defined(CONFIG_AMIGA) || defined(CONFIG_APUS)
-#define USE_BOTTOM_HALF
-#endif
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
-#include <linux/reboot.h>
-#include <asm/system.h>
-#include <asm/ptrace.h>
-#include <asm/pgtable.h>
-
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
-
-#include <linux/zorro.h>
-#include <asm/irq.h>
-#include <asm/amigaints.h>
-#include <asm/amigahw.h>
-
-#ifdef USE_BOTTOM_HALF
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#endif
-
-/* The controller registers can be found in the Z2 config area at these
- * offsets:
- */
-#define OKTAGON_ESP_ADDR 0x03000
-#define OKTAGON_DMA_ADDR 0x01000
-
-
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static void dma_dump_state(struct NCR_ESP *esp);
-static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
-static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
-static void dma_ints_off(struct NCR_ESP *esp);
-static void dma_ints_on(struct NCR_ESP *esp);
-static int dma_irq_p(struct NCR_ESP *esp);
-static void dma_led_off(struct NCR_ESP *esp);
-static void dma_led_on(struct NCR_ESP *esp);
-static int dma_ports_p(struct NCR_ESP *esp);
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
-
-static void dma_irq_exit(struct NCR_ESP *esp);
-static void dma_invalidate(struct NCR_ESP *esp);
-
-static void dma_mmu_get_scsi_one(struct NCR_ESP *,Scsi_Cmnd *);
-static void dma_mmu_get_scsi_sgl(struct NCR_ESP *,Scsi_Cmnd *);
-static void dma_mmu_release_scsi_one(struct NCR_ESP *,Scsi_Cmnd *);
-static void dma_mmu_release_scsi_sgl(struct NCR_ESP *,Scsi_Cmnd *);
-static void dma_advance_sg(Scsi_Cmnd *);
-static int oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x);
-
-#ifdef USE_BOTTOM_HALF
-static void dma_commit(struct work_struct *unused);
-
-long oktag_to_io(long *paddr, long *addr, long len);
-long oktag_from_io(long *addr, long *paddr, long len);
-
-static DECLARE_WORK(tq_fake_dma, dma_commit);
-
-#define DMA_MAXTRANSFER 0x8000
-
-#else
-
-/*
- * No bottom half. Use transfer directly from IRQ. Find a narrow path
- * between too much IRQ overhead and clogging the IRQ for too long.
- */
-
-#define DMA_MAXTRANSFER 0x1000
-
-#endif
-
-static struct notifier_block oktagon_notifier = {
- oktagon_notify_reboot,
- NULL,
- 0
-};
-
-static long *paddress;
-static long *address;
-static long len;
-static long dma_on;
-static int direction;
-static struct NCR_ESP *current_esp;
-
-
-static volatile unsigned char cmd_buffer[16];
- /* This is where all commands are put
- * before they are transferred to the ESP chip
- * via PIO.
- */
-
-/***************************************************************** Detection */
-int oktagon_esp_detect(struct scsi_host_template *tpnt)
-{
- struct NCR_ESP *esp;
- struct zorro_dev *z = NULL;
- unsigned long address;
- struct ESP_regs *eregs;
-
- while ((z = zorro_find_device(ZORRO_PROD_BSC_OKTAGON_2008, z))) {
- unsigned long board = z->resource.start;
- if (request_mem_region(board+OKTAGON_ESP_ADDR,
- sizeof(struct ESP_regs), "NCR53C9x")) {
- /*
- * It is a SCSI controller.
- * Hardwire Host adapter to SCSI ID 7
- */
-
- address = (unsigned long)ZTWO_VADDR(board);
- eregs = (struct ESP_regs *)(address + OKTAGON_ESP_ADDR);
-
- /* This line was 5 lines lower */
- esp = esp_allocate(tpnt, (void *)board + OKTAGON_ESP_ADDR, 0);
-
- /* we have to shift the registers only one bit for oktagon */
- esp->shift = 1;
-
- esp_write(eregs->esp_cfg1, (ESP_CONFIG1_PENABLE | 7));
- udelay(5);
- if (esp_read(eregs->esp_cfg1) != (ESP_CONFIG1_PENABLE | 7))
- return 0; /* Bail out if address did not hold data */
-
- /* Do command transfer with programmed I/O */
- esp->do_pio_cmds = 1;
-
- /* Required functions */
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = &dma_init_read;
- esp->dma_init_write = &dma_init_write;
- esp->dma_ints_off = &dma_ints_off;
- esp->dma_ints_on = &dma_ints_on;
- esp->dma_irq_p = &dma_irq_p;
- esp->dma_ports_p = &dma_ports_p;
- esp->dma_setup = &dma_setup;
-
- /* Optional functions */
- esp->dma_barrier = 0;
- esp->dma_drain = 0;
- esp->dma_invalidate = &dma_invalidate;
- esp->dma_irq_entry = 0;
- esp->dma_irq_exit = &dma_irq_exit;
- esp->dma_led_on = &dma_led_on;
- esp->dma_led_off = &dma_led_off;
- esp->dma_poll = 0;
- esp->dma_reset = 0;
-
- esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
- esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
- esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one;
- esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl;
- esp->dma_advance_sg = &dma_advance_sg;
-
- /* SCSI chip speed */
- /* Looking at the quartz of the SCSI board... */
- esp->cfreq = 25000000;
-
- /* The DMA registers on the CyberStorm are mapped
- * relative to the device (i.e. in the same Zorro
- * I/O block).
- */
- esp->dregs = (void *)(address + OKTAGON_DMA_ADDR);
-
- paddress = (long *) esp->dregs;
-
- /* ESP register base */
- esp->eregs = eregs;
-
- /* Set the command buffer */
- esp->esp_command = (volatile unsigned char*) cmd_buffer;
-
- /* Yes, the virtual address. See below. */
- esp->esp_command_dvma = (__u32) cmd_buffer;
-
- esp->irq = IRQ_AMIGA_PORTS;
- request_irq(IRQ_AMIGA_PORTS, esp_intr, IRQF_SHARED,
- "BSC Oktagon SCSI", esp->ehost);
-
- /* Figure out our scsi ID on the bus */
- esp->scsi_id = 7;
-
- /* We don't have a differential SCSI-bus. */
- esp->diff = 0;
-
- esp_initialize(esp);
-
- printk("ESP_Oktagon Driver 1.1"
-#ifdef USE_BOTTOM_HALF
- " [BOTTOM_HALF]"
-#else
- " [IRQ]"
-#endif
- " registered.\n");
- printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps,esps_in_use);
- esps_running = esps_in_use;
- current_esp = esp;
- register_reboot_notifier(&oktagon_notifier);
- return esps_in_use;
- }
- }
- return 0;
-}
-
-
-/*
- * On certain configurations the SCSI equipment gets confused on reboot,
- * so we have to reset it then.
- */
-
-static int
-oktagon_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
-{
- struct NCR_ESP *esp;
-
- if((code == SYS_DOWN || code == SYS_HALT) && (esp = current_esp))
- {
- esp_bootup_reset(esp,esp->eregs);
- udelay(500); /* Settle time. Maybe unnecessary. */
- }
- return NOTIFY_DONE;
-}
-
-
-
-#ifdef USE_BOTTOM_HALF
-
-
-/*
- * The bsc Oktagon controller has no real DMA, so we have to do the 'DMA
- * transfer' in the interrupt (Yikes!) or use a bottom half so as not to
- * clutter IRQs for longer than is good.
- *
- * FIXME
- * BIG PROBLEM: 'len' is usually the buffer length, not the expected length
- * of the data. So DMA may finish prematurely, further reads lead to
- * 'machine check' on APUS systems (don't know about m68k systems, AmigaOS
- * deliberately ignores the bus faults) and a normal copy-loop can't
- * be exited prematurely just at the right moment by the dma_invalidate IRQ.
- * So do it the hard way: write our own copier in assembler and
- * catch the exception.
- * -- Carsten
- */
-
-
-static void dma_commit(struct work_struct *unused)
-{
- long wait,len2,pos;
- struct NCR_ESP *esp;
-
- ESPDATA(("Transfer: %ld bytes, Address 0x%08lX, Direction: %d\n",
- len,(long) address,direction));
- dma_ints_off(current_esp);
-
- pos = 0;
- wait = 1;
- if(direction) /* write? (memory to device) */
- {
- while(len > 0)
- {
- len2 = oktag_to_io(paddress, address+pos, len);
- if(!len2)
- {
- if(wait > 1000)
- {
- printk("Expedited DMA exit (writing) %ld\n",len);
- break;
- }
- mdelay(wait);
- wait *= 2;
- }
- pos += len2;
- len -= len2*sizeof(long);
- }
- } else {
- while(len > 0)
- {
- len2 = oktag_from_io(address+pos, paddress, len);
- if(!len2)
- {
- if(wait > 1000)
- {
- printk("Expedited DMA exit (reading) %ld\n",len);
- break;
- }
- mdelay(wait);
- wait *= 2;
- }
- pos += len2;
- len -= len2*sizeof(long);
- }
- }
-
- /* to make esp->shift work */
- esp=current_esp;
-
-#if 0
- len2 = (esp_read(current_esp->eregs->esp_tclow) & 0xff) |
- ((esp_read(current_esp->eregs->esp_tcmed) & 0xff) << 8);
-
- /*
- * Uh uh. If you see this, len and transfer count registers were out of
- * sync. That means really serious trouble.
- */
-
- if(len2)
- printk("Eeeek!! Transfer count still %ld!\n",len2);
-#endif
-
- /*
- * Normally we just need to exit and wait for the interrupt to come.
- * But at least one device (my Microtek ScanMaker 630) regularly mis-
- * calculates the bytes it should send which is really ugly because
- * it locks up the SCSI bus if not accounted for.
- */
-
- if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
- {
- long len = 100;
- long trash[10];
-
- /*
- * Interrupt bit was not set. Either the device is just plain lazy
- * so we give it a 10 ms chance or...
- */
- while(len-- && (!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR)))
- udelay(100);
-
-
- if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
- {
- /*
- * So we think that the transfer count is out of sync. Since we
- * have all we want we are happy and can ditch the trash.
- */
-
- len = DMA_MAXTRANSFER;
-
- while(len-- && (!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR)))
- oktag_from_io(trash,paddress,2);
-
- if(!(esp_read(current_esp->eregs->esp_status) & ESP_STAT_INTR))
- {
- /*
- * Things really have gone wrong. If we leave the system in that
- * state, the SCSI bus is locked forever. I hope that this will
- * turn the system in a more or less running state.
- */
- printk("Device is bolixed, trying bus reset...\n");
- esp_bootup_reset(current_esp,current_esp->eregs);
- }
- }
- }
-
- ESPDATA(("Transfer_finale: do_data_finale should come\n"));
-
- len = 0;
- dma_on = 0;
- dma_ints_on(current_esp);
-}
-
-#endif
-
-/************************************************************* DMA Functions */
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
-{
- /* Since the CyberStorm DMA is fully dedicated to the ESP chip,
- * the number of bytes sent (to the ESP chip) equals the number
- * of bytes in the FIFO - there is no buffering in the DMA controller.
- * XXXX Do I read this right? It is from host to ESP, right?
- */
- return fifo_count;
-}
-
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- unsigned long sz = sp->SCp.this_residual;
- if(sz > DMA_MAXTRANSFER)
- sz = DMA_MAXTRANSFER;
- return sz;
-}
-
-static void dma_dump_state(struct NCR_ESP *esp)
-{
-}
-
-/*
- * What the f$@& is this?
- *
- * Some SCSI devices (like my Microtek ScanMaker 630 scanner) want to transfer
- * more data than requested. How much? Dunno. So ditch the bogus data into
- * the sink, hoping the device will advance to the next phase sooner or later.
- *
- * -- Carsten
- */
-
-static long oktag_eva_buffer[16]; /* The data sink */
-
-static void oktag_check_dma(void)
-{
- struct NCR_ESP *esp;
-
- esp=current_esp;
- if(!len)
- {
- address = oktag_eva_buffer;
- len = 2;
- /* esp_do_data sets them to zero like len */
- esp_write(current_esp->eregs->esp_tclow,2);
- esp_write(current_esp->eregs->esp_tcmed,0);
- }
-}
-
-static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length)
-{
- /* Zorro is noncached, everything else done using processor. */
- /* cache_clear(addr, length); */
-
- if(dma_on)
- panic("dma_init_read while dma process is initialized/running!\n");
- direction = 0;
- address = (long *) vaddress;
- current_esp = esp;
- len = length;
- oktag_check_dma();
- dma_on = 1;
-}
-
-static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length)
-{
- /* cache_push(addr, length); */
-
- if(dma_on)
- panic("dma_init_write while dma process is initialized/running!\n");
- direction = 1;
- address = (long *) vaddress;
- current_esp = esp;
- len = length;
- oktag_check_dma();
- dma_on = 1;
-}
-
-static void dma_ints_off(struct NCR_ESP *esp)
-{
- disable_irq(esp->irq);
-}
-
-static void dma_ints_on(struct NCR_ESP *esp)
-{
- enable_irq(esp->irq);
-}
-
-static int dma_irq_p(struct NCR_ESP *esp)
-{
- /* It's important to check the DMA IRQ bit in the correct way! */
- return (esp_read(esp->eregs->esp_status) & ESP_STAT_INTR);
-}
-
-static void dma_led_off(struct NCR_ESP *esp)
-{
-}
-
-static void dma_led_on(struct NCR_ESP *esp)
-{
-}
-
-static int dma_ports_p(struct NCR_ESP *esp)
-{
- return ((amiga_custom.intenar) & IF_PORTS);
-}
-
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
-{
- /* On the Sparc, DMA_ST_WRITE means "move data from device to memory"
- * so when (write) is true, it actually means READ!
- */
- if(write){
- dma_init_read(esp, addr, count);
- } else {
- dma_init_write(esp, addr, count);
- }
-}
-
-/*
- * IRQ entry when DMA transfer is ready to be started
- */
-
-static void dma_irq_exit(struct NCR_ESP *esp)
-{
-#ifdef USE_BOTTOM_HALF
- if(dma_on)
- {
- schedule_work(&tq_fake_dma);
- }
-#else
- while(len && !dma_irq_p(esp))
- {
- if(direction)
- *paddress = *address++;
- else
- *address++ = *paddress;
- len -= (sizeof(long));
- }
- len = 0;
- dma_on = 0;
-#endif
-}
-
-/*
- * IRQ entry when DMA has just finished
- */
-
-static void dma_invalidate(struct NCR_ESP *esp)
-{
-}
-
-/*
- * Since the processor does the data transfer we have to use the custom
- * mmu interface to pass the virtual address, not the physical.
- */
-
-void dma_mmu_get_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- sp->SCp.ptr =
- sp->request_buffer;
-}
-
-void dma_mmu_get_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- sp->SCp.ptr = sg_virt(sp->SCp.buffer);
-}
-
-void dma_mmu_release_scsi_one(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
-}
-
-void dma_mmu_release_scsi_sgl(struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
-}
-
-void dma_advance_sg(Scsi_Cmnd *sp)
-{
- sp->SCp.ptr = sg_virt(sp->SCp.buffer);
-}
-
-
-#define HOSTS_C
-
-int oktagon_esp_release(struct Scsi_Host *instance)
-{
-#ifdef MODULE
- unsigned long address = (unsigned long)((struct NCR_ESP *)instance->hostdata)->edev;
- esp_release();
- release_mem_region(address, sizeof(struct ESP_regs));
- free_irq(IRQ_AMIGA_PORTS, esp_intr);
- unregister_reboot_notifier(&oktagon_notifier);
-#endif
- return 1;
-}
-
-
-static struct scsi_host_template driver_template = {
- .proc_name = "esp-oktagon",
- .proc_info = &esp_proc_info,
- .name = "BSC Oktagon SCSI",
- .detect = oktagon_esp_detect,
- .slave_alloc = esp_slave_alloc,
- .slave_destroy = esp_slave_destroy,
- .release = oktagon_esp_release,
- .queuecommand = esp_queue,
- .eh_abort_handler = esp_abort,
- .eh_bus_reset_handler = esp_reset,
- .can_queue = 7,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = ENABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/oktagon_io.S b/drivers/scsi/oktagon_io.S
deleted file mode 100644
index 8a7340b02707..000000000000
--- a/drivers/scsi/oktagon_io.S
+++ /dev/null
@@ -1,194 +0,0 @@
-/* -*- mode: asm -*-
- * Due to problems while transferring data I've put these routines as assembly
- * code.
- * Since I'm no PPC assembler guru, the code is just the assembler version of
-
-int oktag_to_io(long *paddr,long *addr,long len)
-{
- long *addr2 = addr;
- for(len=(len+sizeof(long)-1)/sizeof(long);len--;)
- *paddr = *addr2++;
- return addr2 - addr;
-}
-
-int oktag_from_io(long *addr,long *paddr,long len)
-{
- long *addr2 = addr;
- for(len=(len+sizeof(long)-1)/sizeof(long);len--;)
- *addr2++ = *paddr;
- return addr2 - addr;
-}
-
- * assembled using gcc -O2 -S, with two exception catch points where data
- * is moved to/from the IO register.
- */
-
-
-#ifdef CONFIG_APUS
-
- .file "oktagon_io.c"
-
-gcc2_compiled.:
-/*
- .section ".text"
-*/
- .align 2
- .globl oktag_to_io
- .type oktag_to_io,@function
-oktag_to_io:
- addi 5,5,3
- srwi 5,5,2
- cmpwi 1,5,0
- mr 9,3
- mr 3,4
- addi 5,5,-1
- bc 12,6,.L3
-.L5:
- cmpwi 1,5,0
- lwz 0,0(3)
- addi 3,3,4
- addi 5,5,-1
-exp1: stw 0,0(9)
- bc 4,6,.L5
-.L3:
-ret1: subf 3,4,3
- srawi 3,3,2
- blr
-.Lfe1:
- .size oktag_to_io,.Lfe1-oktag_to_io
- .align 2
- .globl oktag_from_io
- .type oktag_from_io,@function
-oktag_from_io:
- addi 5,5,3
- srwi 5,5,2
- cmpwi 1,5,0
- mr 9,3
- addi 5,5,-1
- bc 12,6,.L9
-.L11:
- cmpwi 1,5,0
-exp2: lwz 0,0(4)
- addi 5,5,-1
- stw 0,0(3)
- addi 3,3,4
- bc 4,6,.L11
-.L9:
-ret2: subf 3,9,3
- srawi 3,3,2
- blr
-.Lfe2:
- .size oktag_from_io,.Lfe2-oktag_from_io
- .ident "GCC: (GNU) egcs-2.90.29 980515 (egcs-1.0.3 release)"
-
-/*
- * Exception table.
- * Second longword shows where to jump when an exception at the addr the first
- * longword is pointing to is caught.
- */
-
-.section __ex_table,"a"
- .align 2
-oktagon_except:
- .long exp1,ret1
- .long exp2,ret2
-
-#else
-
-/*
-The code which follows is for 680x0 based assembler and is meant for
-Linux/m68k. It was created by cross compiling the code using the
-instructions given above. I then added the four labels used in the
-exception handler table at the bottom of this file.
-- Kevin <kcozens@interlog.com>
-*/
-
-#ifdef CONFIG_AMIGA
-
- .file "oktagon_io.c"
- .version "01.01"
-gcc2_compiled.:
-.text
- .align 2
-.globl oktag_to_io
- .type oktag_to_io,@function
-oktag_to_io:
- link.w %a6,#0
- move.l %d2,-(%sp)
- move.l 8(%a6),%a1
- move.l 12(%a6),%d1
- move.l %d1,%a0
- move.l 16(%a6),%d0
- addq.l #3,%d0
- lsr.l #2,%d0
- subq.l #1,%d0
- moveq.l #-1,%d2
- cmp.l %d0,%d2
- jbeq .L3
-.L5:
-exp1:
- move.l (%a0)+,(%a1)
- dbra %d0,.L5
- clr.w %d0
- subq.l #1,%d0
- jbcc .L5
-.L3:
-ret1:
- move.l %a0,%d0
- sub.l %d1,%d0
- asr.l #2,%d0
- move.l -4(%a6),%d2
- unlk %a6
- rts
-
-.Lfe1:
- .size oktag_to_io,.Lfe1-oktag_to_io
- .align 2
-.globl oktag_from_io
- .type oktag_from_io,@function
-oktag_from_io:
- link.w %a6,#0
- move.l %d2,-(%sp)
- move.l 8(%a6),%d1
- move.l 12(%a6),%a1
- move.l %d1,%a0
- move.l 16(%a6),%d0
- addq.l #3,%d0
- lsr.l #2,%d0
- subq.l #1,%d0
- moveq.l #-1,%d2
- cmp.l %d0,%d2
- jbeq .L9
-.L11:
-exp2:
- move.l (%a1),(%a0)+
- dbra %d0,.L11
- clr.w %d0
- subq.l #1,%d0
- jbcc .L11
-.L9:
-ret2:
- move.l %a0,%d0
- sub.l %d1,%d0
- asr.l #2,%d0
- move.l -4(%a6),%d2
- unlk %a6
- rts
-.Lfe2:
- .size oktag_from_io,.Lfe2-oktag_from_io
- .ident "GCC: (GNU) 2.7.2.1"
-
-/*
- * Exception table.
- * Second longword shows where to jump when an exception at the addr the first
- * longword is pointing to is caught.
- */
-
-.section __ex_table,"a"
- .align 2
-oktagon_except:
- .long exp1,ret1
- .long exp2,ret2
-
-#endif
-#endif
diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c
index 4b82b2021981..d8b99351b053 100644
--- a/drivers/scsi/pcmcia/fdomain_stub.c
+++ b/drivers/scsi/pcmcia/fdomain_stub.c
@@ -130,7 +130,7 @@ static int fdomain_config(struct pcmcia_device *link)
cisparse_t parse;
int i, last_ret, last_fn;
u_char tuple_data[64];
- char str[16];
+ char str[22];
struct Scsi_Host *host;
DEBUG(0, "fdomain_config(0x%p)\n", link);
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 17b4a7c4618c..0cd614a0fa73 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -35,7 +35,7 @@
#define BOUNCE_SIZE (64*1024)
-#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE / CD_FRAMESIZE)
+#define PS3ROM_MAX_SECTORS (BOUNCE_SIZE >> 9)
struct ps3rom_private {
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index adf97320574b..4894dc886b62 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -428,6 +428,19 @@ qla2x00_sysfs_read_sfp(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
return 0;
+ if (ha->sfp_data)
+ goto do_read;
+
+ ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+ &ha->sfp_data_dma);
+ if (!ha->sfp_data) {
+ qla_printk(KERN_WARNING, ha,
+ "Unable to allocate memory for SFP read-data.\n");
+ return 0;
+ }
+
+do_read:
+ memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
addr = 0xa0;
for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
iter++, offset += SFP_BLOCK_SIZE) {
@@ -835,7 +848,7 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost)
static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
uint32_t speed = 0;
switch (ha->link_data_rate) {
@@ -848,6 +861,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
case PORT_SPEED_4GB:
speed = 4;
break;
+ case PORT_SPEED_8GB:
+ speed = 8;
+ break;
}
fc_host_speed(shost) = speed;
}
@@ -855,7 +871,7 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
uint32_t port_type = FC_PORTTYPE_UNKNOWN;
switch (ha->current_topology) {
@@ -965,7 +981,7 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
int rval;
struct link_statistics *stats;
dma_addr_t stats_dma;
@@ -1049,7 +1065,7 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
- scsi_qla_host_t *ha = shost_priv(shost);
+ scsi_qla_host_t *ha = to_qla_parent(shost_priv(shost));
if (!ha->flags.online)
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
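
Two things change in qla_attr.c above: the fc_host callbacks resolve the host through to_qla_parent() so virtual ports report their physical port's state, and the SFP sysfs read now allocates ha->sfp_data lazily from the DMA pool on first use. A standalone sketch of the allocate-on-first-use shape only, with a plain malloc() standing in for dma_pool_alloc() and made-up names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char *sfp_data;		/* cached buffer, NULL until the first read */

static int sfp_read(unsigned char *out, size_t len)
{
	if (sfp_data)
		goto do_read;

	sfp_data = malloc(len);		/* dma_pool_alloc() in the driver */
	if (!sfp_data) {
		fprintf(stderr, "Unable to allocate SFP read buffer\n");
		return -1;
	}

do_read:
	memset(sfp_data, 0, len);	/* cleared before every read, as in the hunk */
	memcpy(out, sfp_data, len);	/* stand-in for the mailbox transfer */
	return 0;
}

int main(void)
{
	unsigned char buf[64];

	if (sfp_read(buf, sizeof(buf)) == 0)	/* first call allocates */
		puts("first read ok");
	if (sfp_read(buf, sizeof(buf)) == 0)	/* later calls reuse the buffer */
		puts("second read ok");
	free(sfp_data);
	return 0;
}
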
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b72c7f170854..3750319f4968 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2041,8 +2041,6 @@ typedef struct vport_params {
#define VP_RET_CODE_NO_MEM 5
#define VP_RET_CODE_NOT_FOUND 6
-#define to_qla_parent(x) (((x)->parent) ? (x)->parent : (x))
-
/*
* ISP operations
*/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ba35fc26ce6b..193f688ec3d7 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -66,6 +66,7 @@ extern int ql2xqfullrampup;
extern int num_hosts;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
+extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
/*
* Global Functions in qla_mid.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d0633ca894be..d5c7853e7eba 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -925,6 +925,16 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
{
int rval;
uint32_t srisc_address = 0;
+ struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ unsigned long flags;
+
+ if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
+ /* Disable SRAM, Instruction RAM and GP RAM parity. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ WRT_REG_WORD(&reg->hccr, (HCCR_ENABLE_PARITY + 0x0));
+ RD_REG_WORD(&reg->hccr);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
/* Load firmware sequences */
rval = ha->isp_ops->load_risc(ha, &srisc_address);
@@ -968,6 +978,19 @@ qla2x00_setup_chip(scsi_qla_host_t *ha)
}
}
+ if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
+ /* Enable proper parity. */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (IS_QLA2300(ha))
+ /* SRAM parity */
+ WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x1);
+ else
+ /* SRAM, Instruction RAM and GP RAM parity */
+ WRT_REG_WORD(&reg->hccr, HCCR_ENABLE_PARITY + 0x7);
+ RD_REG_WORD(&reg->hccr);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
if (rval) {
DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
ha->host_no));
@@ -3213,9 +3236,6 @@ int
qla2x00_abort_isp(scsi_qla_host_t *ha)
{
int rval;
- unsigned long flags = 0;
- uint16_t cnt;
- srb_t *sp;
uint8_t status = 0;
if (ha->flags.online) {
@@ -3236,19 +3256,8 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
LOOP_DOWN_TIME);
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
/* Requeue all commands in outstanding command list. */
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
- sp = ha->outstanding_cmds[cnt];
- if (sp) {
- ha->outstanding_cmds[cnt] = NULL;
- sp->flags = 0;
- sp->cmd->result = DID_RESET << 16;
- sp->cmd->host_scribble = (unsigned char *)NULL;
- qla2x00_sp_compl(ha, sp);
- }
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ qla2x00_abort_all_cmds(ha, DID_RESET << 16);
ha->isp_ops->get_flash_version(ha, ha->request_ring);
@@ -3273,6 +3282,7 @@ qla2x00_abort_isp(scsi_qla_host_t *ha)
clear_bit(ISP_ABORT_RETRY, &ha->dpc_flags);
if (ha->eft) {
+ memset(ha->eft, 0, EFT_SIZE);
rval = qla2x00_enable_eft_trace(ha,
ha->eft_dma, EFT_NUM_BUFFERS);
if (rval) {
@@ -3357,60 +3367,15 @@ static int
qla2x00_restart_isp(scsi_qla_host_t *ha)
{
uint8_t status = 0;
- struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- unsigned long flags = 0;
uint32_t wait_time;
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(ha)) {
ha->flags.online = 0;
- if (!(status = ha->isp_ops->chip_diag(ha))) {
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- status = qla2x00_setup_chip(ha);
- goto done;
- }
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha) &&
- !IS_QLA25XX(ha)) {
- /*
- * Disable SRAM, Instruction RAM and GP RAM
- * parity.
- */
- WRT_REG_WORD(&reg->hccr,
- (HCCR_ENABLE_PARITY + 0x0));
- RD_REG_WORD(&reg->hccr);
- }
-
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
+ if (!(status = ha->isp_ops->chip_diag(ha)))
status = qla2x00_setup_chip(ha);
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- if (!IS_QLA24XX(ha) && !IS_QLA54XX(ha) &&
- !IS_QLA25XX(ha)) {
- /* Enable proper parity */
- if (IS_QLA2300(ha))
- /* SRAM parity */
- WRT_REG_WORD(&reg->hccr,
- (HCCR_ENABLE_PARITY + 0x1));
- else
- /*
- * SRAM, Instruction RAM and GP RAM
- * parity.
- */
- WRT_REG_WORD(&reg->hccr,
- (HCCR_ENABLE_PARITY + 0x7));
- RD_REG_WORD(&reg->hccr);
- }
-
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- }
}
- done:
if (!status && !(status = qla2x00_init_rings(ha))) {
clear_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
if (!(status = qla2x00_fw_ready(ha))) {
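
The qla_init.c hunks fold the parity disable/enable sequence into qla2x00_setup_chip() and replace the open-coded requeue loop in qla2x00_abort_isp() with the new qla2x00_abort_all_cmds() helper. A standalone sketch of that helper's loop shape, leaving out the hardware lock and using a toy command struct instead of srb_t:

#include <stdio.h>

#define MAX_OUTSTANDING	8

struct cmd { int result; };

static struct cmd *outstanding[MAX_OUTSTANDING];

/* Drain the table: clear each slot, stamp the result, "complete" the command. */
static void abort_all_cmds(int res)
{
	for (int i = 1; i < MAX_OUTSTANDING; i++) {	/* slot 0 unused, as in the driver */
		struct cmd *c = outstanding[i];

		if (!c)
			continue;
		outstanding[i] = NULL;
		c->result = res;
		printf("completed slot %d with result 0x%x\n", i, c->result);
	}
}

int main(void)
{
	struct cmd a = { 0 }, b = { 0 };

	outstanding[1] = &a;
	outstanding[3] = &b;
	abort_all_cmds(0x08 << 16);	/* the driver passes DID_RESET << 16 here */
	return 0;
}
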
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 8e3b04464cff..5d1a3f7c408f 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -119,6 +119,13 @@ static __inline__ void qla2x00_check_fabric_devices(scsi_qla_host_t *ha)
qla2x00_get_firmware_state(ha, &fw_state);
}
+static __inline__ scsi_qla_host_t * to_qla_parent(scsi_qla_host_t *);
+static __inline__ scsi_qla_host_t *
+to_qla_parent(scsi_qla_host_t *ha)
+{
+ return ha->parent ? ha->parent : ha;
+}
+
/**
* qla2x00_issue_marker() - Issue a Marker IOCB if necessary.
* @ha: HA context
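
to_qla_parent() moves from a macro in qla_def.h to a static inline here, keeping the same parent-or-self semantics while gaining type checking. A standalone sketch of the idiom with a stand-in struct in place of scsi_qla_host_t:

#include <stdio.h>

struct host {
	const char	*name;
	struct host	*parent;	/* NULL for a physical port */
};

static inline struct host *to_parent(struct host *h)
{
	return h->parent ? h->parent : h;
}

int main(void)
{
	struct host phys  = { "physical", NULL };
	struct host vport = { "vport", &phys };

	printf("%s -> %s\n", vport.name, to_parent(&vport)->name);	/* vport -> physical */
	printf("%s -> %s\n", phys.name, to_parent(&phys)->name);	/* physical -> physical */
	return 0;
}
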
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 642a0c3f09c6..14e6f22944b7 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1815,6 +1815,8 @@ int
qla2x00_request_irqs(scsi_qla_host_t *ha)
{
int ret;
+ device_reg_t __iomem *reg = ha->iobase;
+ unsigned long flags;
/* If possible, enable MSI-X. */
if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
@@ -1846,7 +1848,7 @@ qla2x00_request_irqs(scsi_qla_host_t *ha)
DEBUG2(qla_printk(KERN_INFO, ha,
"MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
ha->fw_attributes));
- return ret;
+ goto clear_risc_ints;
}
qla_printk(KERN_WARNING, ha,
"MSI-X: Falling back-to INTa mode -- %d.\n", ret);
@@ -1864,15 +1866,30 @@ skip_msi:
ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
- if (!ret) {
- ha->flags.inta_enabled = 1;
- ha->host->irq = ha->pdev->irq;
- } else {
+ if (ret) {
qla_printk(KERN_WARNING, ha,
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
+ goto fail;
+ }
+ ha->flags.inta_enabled = 1;
+ ha->host->irq = ha->pdev->irq;
+clear_risc_ints:
+
+ ha->isp_ops->disable_intrs(ha);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (IS_FWI2_CAPABLE(ha)) {
+ WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
+ WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
+ } else {
+ WRT_REG_WORD(&reg->isp.semaphore, 0);
+ WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
+ WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
}
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ ha->isp_ops->enable_intrs(ha);
+fail:
return ret;
}
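
The qla2x00_request_irqs() rework makes the MSI-X and legacy-INTa paths share one tail that clears pending RISC/host interrupt state, and routes failures to a single exit label. A standalone sketch of that control flow, with a flags struct standing in for the hardware and the register writes reduced to a boolean:

#include <stdio.h>

struct hw { int msix_ok; int inta_ok; int ints_cleared; };

static int request_irqs(struct hw *hw)
{
	int ret = 0;

	if (hw->msix_ok) {
		puts("MSI-X enabled");
		goto clear_risc_ints;
	}

	if (!hw->inta_ok) {
		ret = -1;		/* request_irq() failed */
		puts("failed to reserve interrupt");
		goto fail;
	}
	puts("INTa enabled");

clear_risc_ints:
	hw->ints_cleared = 1;		/* register writes under the hardware lock in the driver */
fail:
	return ret;
}

int main(void)
{
	struct hw a = { 1, 0, 0 }, b = { 0, 0, 0 };
	int ra, rb;

	ra = request_irqs(&a);
	printf("msix path: ret=%d cleared=%d\n", ra, a.ints_cleared);
	rb = request_irqs(&b);
	printf("no irq:    ret=%d cleared=%d\n", rb, b.ints_cleared);
	return 0;
}
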
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 0c10c0b0fb73..99d29fff836d 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -980,7 +980,7 @@ qla2x00_init_firmware(scsi_qla_host_t *ha, uint16_t size)
DEBUG11(printk("qla2x00_init_firmware(%ld): entered.\n",
ha->host_no));
- if (ha->fw_attributes & BIT_2)
+ if (ha->flags.npiv_supported)
mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
else
mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8f69caf83272..3c1b43356adb 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -204,10 +204,8 @@ static int qla2x00_do_dpc(void *data);
static void qla2x00_rst_aen(scsi_qla_host_t *);
-static uint8_t qla2x00_mem_alloc(scsi_qla_host_t *);
+static int qla2x00_mem_alloc(scsi_qla_host_t *);
static void qla2x00_mem_free(scsi_qla_host_t *ha);
-static int qla2x00_allocate_sp_pool( scsi_qla_host_t *ha);
-static void qla2x00_free_sp_pool(scsi_qla_host_t *ha);
static void qla2x00_sp_free_dma(scsi_qla_host_t *, srb_t *);
/* -------------------------------------------------------------------------- */
@@ -1117,6 +1115,27 @@ qla2x00_device_reset(scsi_qla_host_t *ha, fc_port_t *reset_fcport)
return ha->isp_ops->abort_target(reset_fcport);
}
+void
+qla2x00_abort_all_cmds(scsi_qla_host_t *ha, int res)
+{
+ int cnt;
+ unsigned long flags;
+ srb_t *sp;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = ha->outstanding_cmds[cnt];
+ if (sp) {
+ ha->outstanding_cmds[cnt] = NULL;
+ sp->flags = 0;
+ sp->cmd->result = res;
+ sp->cmd->host_scribble = (unsigned char *)NULL;
+ qla2x00_sp_compl(ha, sp);
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
static int
qla2xxx_slave_alloc(struct scsi_device *sdev)
{
@@ -1557,10 +1576,8 @@ static int __devinit
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret = -ENODEV;
- device_reg_t __iomem *reg;
struct Scsi_Host *host;
scsi_qla_host_t *ha;
- unsigned long flags = 0;
char pci_info[30];
char fw_str[30];
struct scsi_host_template *sht;
@@ -1608,6 +1625,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->parent = NULL;
ha->bars = bars;
ha->mem_only = mem_only;
+ spin_lock_init(&ha->hardware_lock);
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
@@ -1621,8 +1639,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"Found an ISP%04X, irq %d, iobase 0x%p\n", pdev->device, pdev->irq,
ha->iobase);
- spin_lock_init(&ha->hardware_lock);
-
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
ha->mgmt_svr_loop_id = MANAGEMENT_SERVER + ha->vp_idx;
@@ -1751,34 +1767,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
DEBUG2(printk("DEBUG: detect hba %ld at address = %p\n",
ha->host_no, ha));
- ha->isp_ops->disable_intrs(ha);
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
- reg = ha->iobase;
- if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
- WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
- } else {
- WRT_REG_WORD(&reg->isp.semaphore, 0);
- WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
- WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
-
- /* Enable proper parity */
- if (!IS_QLA2100(ha) && !IS_QLA2200(ha)) {
- if (IS_QLA2300(ha))
- /* SRAM parity */
- WRT_REG_WORD(&reg->isp.hccr,
- (HCCR_ENABLE_PARITY + 0x1));
- else
- /* SRAM, Instruction RAM and GP RAM parity */
- WRT_REG_WORD(&reg->isp.hccr,
- (HCCR_ENABLE_PARITY + 0x7));
- }
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- ha->isp_ops->enable_intrs(ha);
-
pci_set_drvdata(pdev, ha);
ha->flags.init_done = 1;
@@ -1848,10 +1836,14 @@ qla2x00_remove_one(struct pci_dev *pdev)
static void
qla2x00_free_device(scsi_qla_host_t *ha)
{
+ qla2x00_abort_all_cmds(ha, DID_NO_CONNECT << 16);
+
/* Disable timer */
if (ha->timer_active)
qla2x00_stop_timer(ha);
+ ha->flags.online = 0;
+
/* Kill the kernel thread for this host */
if (ha->dpc_thread) {
struct task_struct *t = ha->dpc_thread;
@@ -1870,8 +1862,6 @@ qla2x00_free_device(scsi_qla_host_t *ha)
if (ha->eft)
qla2x00_disable_eft_trace(ha);
- ha->flags.online = 0;
-
/* Stop currently executing firmware. */
qla2x00_try_to_stop_firmware(ha);
@@ -2010,196 +2000,109 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *ha, int defer)
*
* Returns:
* 0 = success.
-* 1 = failure.
+* !0 = failure.
*/
-static uint8_t
+static int
qla2x00_mem_alloc(scsi_qla_host_t *ha)
{
char name[16];
- uint8_t status = 1;
- int retry= 10;
-
- do {
- /*
- * This will loop only once if everything goes well, else some
- * number of retries will be performed to get around a kernel
- * bug where available mem is not allocated until after a
- * little delay and a retry.
- */
- ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
- (ha->request_q_length + 1) * sizeof(request_t),
- &ha->request_dma, GFP_KERNEL);
- if (ha->request_ring == NULL) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - request_ring\n");
-
- qla2x00_mem_free(ha);
- msleep(100);
-
- continue;
- }
-
- ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
- (ha->response_q_length + 1) * sizeof(response_t),
- &ha->response_dma, GFP_KERNEL);
- if (ha->response_ring == NULL) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - response_ring\n");
-
- qla2x00_mem_free(ha);
- msleep(100);
-
- continue;
- }
-
- ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
- &ha->gid_list_dma, GFP_KERNEL);
- if (ha->gid_list == NULL) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - gid_list\n");
-
- qla2x00_mem_free(ha);
- msleep(100);
-
- continue;
- }
-
- /* get consistent memory allocated for init control block */
- ha->init_cb = dma_alloc_coherent(&ha->pdev->dev,
- ha->init_cb_size, &ha->init_cb_dma, GFP_KERNEL);
- if (ha->init_cb == NULL) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - init_cb\n");
-
- qla2x00_mem_free(ha);
- msleep(100);
-
- continue;
- }
- memset(ha->init_cb, 0, ha->init_cb_size);
-
- snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
- ha->host_no);
- ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
- DMA_POOL_SIZE, 8, 0);
- if (ha->s_dma_pool == NULL) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - s_dma_pool\n");
-
- qla2x00_mem_free(ha);
- msleep(100);
-
- continue;
- }
-
- if (qla2x00_allocate_sp_pool(ha)) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - "
- "qla2x00_allocate_sp_pool()\n");
-
- qla2x00_mem_free(ha);
- msleep(100);
-
- continue;
- }
-
- /* Allocate memory for SNS commands */
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- /* Get consistent memory allocated for SNS commands */
- ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma,
- GFP_KERNEL);
- if (ha->sns_cmd == NULL) {
- /* error */
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - sns_cmd\n");
-
- qla2x00_mem_free(ha);
- msleep(100);
-
- continue;
- }
- memset(ha->sns_cmd, 0, sizeof(struct sns_cmd_pkt));
- } else {
- /* Get consistent memory allocated for MS IOCB */
- ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
- &ha->ms_iocb_dma);
- if (ha->ms_iocb == NULL) {
- /* error */
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - ms_iocb\n");
-
- qla2x00_mem_free(ha);
- msleep(100);
-
- continue;
- }
- memset(ha->ms_iocb, 0, sizeof(ms_iocb_entry_t));
-
- /*
- * Get consistent memory allocated for CT SNS
- * commands
- */
- ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
- sizeof(struct ct_sns_pkt), &ha->ct_sns_dma,
- GFP_KERNEL);
- if (ha->ct_sns == NULL) {
- /* error */
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - ct_sns\n");
- qla2x00_mem_free(ha);
- msleep(100);
+ ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
+ (ha->request_q_length + 1) * sizeof(request_t), &ha->request_dma,
+ GFP_KERNEL);
+ if (!ha->request_ring)
+ goto fail;
+
+ ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
+ (ha->response_q_length + 1) * sizeof(response_t),
+ &ha->response_dma, GFP_KERNEL);
+ if (!ha->response_ring)
+ goto fail_free_request_ring;
+
+ ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE,
+ &ha->gid_list_dma, GFP_KERNEL);
+ if (!ha->gid_list)
+ goto fail_free_response_ring;
+
+ ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
+ &ha->init_cb_dma, GFP_KERNEL);
+ if (!ha->init_cb)
+ goto fail_free_gid_list;
+
+ snprintf(name, sizeof(name), "%s_%ld", QLA2XXX_DRIVER_NAME,
+ ha->host_no);
+ ha->s_dma_pool = dma_pool_create(name, &ha->pdev->dev,
+ DMA_POOL_SIZE, 8, 0);
+ if (!ha->s_dma_pool)
+ goto fail_free_init_cb;
- continue;
- }
- memset(ha->ct_sns, 0, sizeof(struct ct_sns_pkt));
-
- if (IS_FWI2_CAPABLE(ha)) {
- /*
- * Get consistent memory allocated for SFP
- * block.
- */
- ha->sfp_data = dma_pool_alloc(ha->s_dma_pool,
- GFP_KERNEL, &ha->sfp_data_dma);
- if (ha->sfp_data == NULL) {
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - "
- "sfp_data\n");
-
- qla2x00_mem_free(ha);
- msleep(100);
-
- continue;
- }
- memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
- }
- }
-
- /* Get memory for cached NVRAM */
- ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
- if (ha->nvram == NULL) {
- /* error */
- qla_printk(KERN_WARNING, ha,
- "Memory Allocation failed - nvram cache\n");
-
- qla2x00_mem_free(ha);
- msleep(100);
-
- continue;
- }
-
- /* Done all allocations without any error. */
- status = 0;
-
- } while (retry-- && status != 0);
+ ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
+ if (!ha->srb_mempool)
+ goto fail_free_s_dma_pool;
+
+ /* Get memory for cached NVRAM */
+ ha->nvram = kzalloc(MAX_NVRAM_SIZE, GFP_KERNEL);
+ if (!ha->nvram)
+ goto fail_free_srb_mempool;
+
+ /* Allocate memory for SNS commands */
+ if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
+ /* Get consistent memory allocated for SNS commands */
+ ha->sns_cmd = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct sns_cmd_pkt), &ha->sns_cmd_dma, GFP_KERNEL);
+ if (!ha->sns_cmd)
+ goto fail_free_nvram;
+ } else {
+ /* Get consistent memory allocated for MS IOCB */
+ ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+ &ha->ms_iocb_dma);
+ if (!ha->ms_iocb)
+ goto fail_free_nvram;
- if (status) {
- printk(KERN_WARNING
- "%s(): **** FAILED ****\n", __func__);
+ /* Get consistent memory allocated for CT SNS commands */
+ ha->ct_sns = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct ct_sns_pkt), &ha->ct_sns_dma, GFP_KERNEL);
+ if (!ha->ct_sns)
+ goto fail_free_ms_iocb;
}
- return(status);
+ return 0;
+
+fail_free_ms_iocb:
+ dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
+ ha->ms_iocb = NULL;
+ ha->ms_iocb_dma = 0;
+fail_free_nvram:
+ kfree(ha->nvram);
+ ha->nvram = NULL;
+fail_free_srb_mempool:
+ mempool_destroy(ha->srb_mempool);
+ ha->srb_mempool = NULL;
+fail_free_s_dma_pool:
+ dma_pool_destroy(ha->s_dma_pool);
+ ha->s_dma_pool = NULL;
+fail_free_init_cb:
+ dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
+ ha->init_cb_dma);
+ ha->init_cb = NULL;
+ ha->init_cb_dma = 0;
+fail_free_gid_list:
+ dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list,
+ ha->gid_list_dma);
+ ha->gid_list = NULL;
+ ha->gid_list_dma = 0;
+fail_free_response_ring:
+ dma_free_coherent(&ha->pdev->dev, (ha->response_q_length + 1) *
+ sizeof(response_t), ha->response_ring, ha->response_dma);
+ ha->response_ring = NULL;
+ ha->response_dma = 0;
+fail_free_request_ring:
+ dma_free_coherent(&ha->pdev->dev, (ha->request_q_length + 1) *
+ sizeof(request_t), ha->request_ring, ha->request_dma);
+ ha->request_ring = NULL;
+ ha->request_dma = 0;
+fail:
+ return -ENOMEM;
}
/*
@@ -2215,14 +2118,8 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
struct list_head *fcpl, *fcptemp;
fc_port_t *fcport;
- if (ha == NULL) {
- /* error */
- DEBUG2(printk("%s(): ERROR invalid ha pointer.\n", __func__));
- return;
- }
-
- /* free sp pool */
- qla2x00_free_sp_pool(ha);
+ if (ha->srb_mempool)
+ mempool_destroy(ha->srb_mempool);
if (ha->fce)
dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
@@ -2270,6 +2167,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
(ha->request_q_length + 1) * sizeof(request_t),
ha->request_ring, ha->request_dma);
+ ha->srb_mempool = NULL;
ha->eft = NULL;
ha->eft_dma = 0;
ha->sns_cmd = NULL;
@@ -2308,44 +2206,6 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
kfree(ha->nvram);
}
-/*
- * qla2x00_allocate_sp_pool
- * This routine is called during initialization to allocate
- * memory for local srb_t.
- *
- * Input:
- * ha = adapter block pointer.
- *
- * Context:
- * Kernel context.
- */
-static int
-qla2x00_allocate_sp_pool(scsi_qla_host_t *ha)
-{
- int rval;
-
- rval = QLA_SUCCESS;
- ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
- if (ha->srb_mempool == NULL) {
- qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n");
- rval = QLA_FUNCTION_FAILED;
- }
- return (rval);
-}
-
-/*
- * This routine frees all adapter allocated memory.
- *
- */
-static void
-qla2x00_free_sp_pool( scsi_qla_host_t *ha)
-{
- if (ha->srb_mempool) {
- mempool_destroy(ha->srb_mempool);
- ha->srb_mempool = NULL;
- }
-}
-
/**************************************************************************
* qla2x00_do_dpc
* This kernel thread is a task that is schedule by the interrupt handler
@@ -2367,6 +2227,9 @@ qla2x00_do_dpc(void *data)
fc_port_t *fcport;
uint8_t status;
uint16_t next_loopid;
+ struct scsi_qla_host *vha;
+ int i;
+
ha = (scsi_qla_host_t *)data;
@@ -2409,6 +2272,18 @@ qla2x00_do_dpc(void *data)
}
clear_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags);
}
+
+ for_each_mapped_vp_idx(ha, i) {
+ list_for_each_entry(vha, &ha->vp_list,
+ vp_list) {
+ if (i == vha->vp_idx) {
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ break;
+ }
+ }
+ }
+
DEBUG(printk("scsi(%ld): dpc: qla2x00_abort_isp end\n",
ha->host_no));
}
@@ -3029,3 +2904,4 @@ MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
MODULE_FIRMWARE(FW_FILE_ISP2322);
MODULE_FIRMWARE(FW_FILE_ISP24XX);
+MODULE_FIRMWARE(FW_FILE_ISP25XX);
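
The largest qla_os.c change converts qla2x00_mem_alloc() from a retry loop that tears everything down on any failure to the usual kernel goto-unwind style: each allocation that fails jumps to a label that frees only what was already allocated, in reverse order, and the function has a single -ENOMEM exit. A standalone sketch of that structure with three plain malloc() calls and made-up field names:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct ctx { void *ring; void *list; void *cb; };

static int ctx_alloc(struct ctx *c)
{
	c->ring = malloc(64);
	if (!c->ring)
		goto fail;

	c->list = malloc(64);
	if (!c->list)
		goto fail_free_ring;

	c->cb = malloc(64);
	if (!c->cb)
		goto fail_free_list;

	return 0;			/* everything allocated */

fail_free_list:
	free(c->list);
	c->list = NULL;
fail_free_ring:
	free(c->ring);
	c->ring = NULL;
fail:
	return -ENOMEM;			/* single failure exit, nothing leaked */
}

int main(void)
{
	struct ctx c = { 0 };

	printf("ctx_alloc: %d\n", ctx_alloc(&c));
	free(c.cb);
	free(c.list);
	free(c.ring);
	return 0;
}
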
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index b68fb73613ed..26822c8807ee 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -893,6 +893,8 @@ qla2x00_flip_colors(scsi_qla_host_t *ha, uint16_t *pflags)
}
}
+#define PIO_REG(h, r) ((h)->pio_address + offsetof(struct device_reg_2xxx, r))
+
void
qla2x00_beacon_blink(struct scsi_qla_host *ha)
{
@@ -902,15 +904,12 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
unsigned long flags;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- if (ha->pio_address)
- reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
-
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Save the Original GPIOE. */
if (ha->pio_address) {
- gpio_enable = RD_REG_WORD_PIO(&reg->gpioe);
- gpio_data = RD_REG_WORD_PIO(&reg->gpiod);
+ gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
+ gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
gpio_enable = RD_REG_WORD(&reg->gpioe);
gpio_data = RD_REG_WORD(&reg->gpiod);
@@ -920,7 +919,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
gpio_enable |= GPIO_LED_MASK;
if (ha->pio_address) {
- WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable);
+ WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
WRT_REG_WORD(&reg->gpioe, gpio_enable);
RD_REG_WORD(&reg->gpioe);
@@ -936,7 +935,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *ha)
/* Set the modified gpio_data values */
if (ha->pio_address) {
- WRT_REG_WORD_PIO(&reg->gpiod, gpio_data);
+ WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
WRT_REG_WORD(&reg->gpiod, gpio_data);
RD_REG_WORD(&reg->gpiod);
@@ -962,14 +961,11 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
return QLA_FUNCTION_FAILED;
}
- if (ha->pio_address)
- reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
-
/* Turn off LEDs. */
spin_lock_irqsave(&ha->hardware_lock, flags);
if (ha->pio_address) {
- gpio_enable = RD_REG_WORD_PIO(&reg->gpioe);
- gpio_data = RD_REG_WORD_PIO(&reg->gpiod);
+ gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe));
+ gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod));
} else {
gpio_enable = RD_REG_WORD(&reg->gpioe);
gpio_data = RD_REG_WORD(&reg->gpiod);
@@ -978,7 +974,7 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
/* Set the modified gpio_enable values. */
if (ha->pio_address) {
- WRT_REG_WORD_PIO(&reg->gpioe, gpio_enable);
+ WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable);
} else {
WRT_REG_WORD(&reg->gpioe, gpio_enable);
RD_REG_WORD(&reg->gpioe);
@@ -987,7 +983,7 @@ qla2x00_beacon_on(struct scsi_qla_host *ha)
/* Clear out previously set LED colour. */
gpio_data &= ~GPIO_LED_MASK;
if (ha->pio_address) {
- WRT_REG_WORD_PIO(&reg->gpiod, gpio_data);
+ WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data);
} else {
WRT_REG_WORD(&reg->gpiod, gpio_data);
RD_REG_WORD(&reg->gpiod);
@@ -1244,13 +1240,12 @@ qla2x00_read_flash_byte(scsi_qla_host_t *ha, uint32_t addr)
if (ha->pio_address) {
uint16_t data2;
- reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
- WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
+ WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
do {
- data = RD_REG_WORD_PIO(&reg->flash_data);
+ data = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
barrier();
cpu_relax();
- data2 = RD_REG_WORD_PIO(&reg->flash_data);
+ data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data));
} while (data != data2);
} else {
WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
@@ -1304,9 +1299,8 @@ qla2x00_write_flash_byte(scsi_qla_host_t *ha, uint32_t addr, uint8_t data)
/* Always perform IO mapped accesses to the FLASH registers. */
if (ha->pio_address) {
- reg = (struct device_reg_2xxx __iomem *)ha->pio_address;
- WRT_REG_WORD_PIO(&reg->flash_address, (uint16_t)addr);
- WRT_REG_WORD_PIO(&reg->flash_data, (uint16_t)data);
+ WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr);
+ WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data);
} else {
WRT_REG_WORD(&reg->flash_address, (uint16_t)addr);
RD_REG_WORD(&reg->ctrl_status); /* PCI Posting. */
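
The PIO_REG() macro introduced above replaces casting ha->pio_address to a register-struct pointer: it adds offsetof() of the register field to the numeric port base, so the I/O-port accessors get a port number rather than a fake pointer. A standalone sketch of the offsetof() arithmetic; the register layout below is made up and only illustrates the mechanism:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Made-up register layout; only the offsetof() arithmetic matters. */
struct regs {
	uint16_t flash_address;		/* offset 0 */
	uint16_t flash_data;		/* offset 2 */
	uint16_t gpiod;			/* offset 4 */
	uint16_t gpioe;			/* offset 6 */
};

#define PIO_REG(base, r)	((base) + offsetof(struct regs, r))

int main(void)
{
	unsigned long pio_base = 0xec00;	/* example I/O port base */

	printf("flash_data port: 0x%lx\n", (unsigned long)PIO_REG(pio_base, flash_data));
	printf("gpioe port:      0x%lx\n", (unsigned long)PIO_REG(pio_base, gpioe));
	return 0;
}
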
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 2c2f6b4697c7..c5742cc15abb 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.02.00-k7"
+#define QLA2XXX_VERSION "8.02.00-k8"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 2
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 49925f92555e..10b3b9a620f3 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1306,6 +1306,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
atomic_set(&ddb_entry->relogin_timer, 0);
clear_bit(DF_RELOGIN, &ddb_entry->flags);
clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
+ iscsi_unblock_session(ddb_entry->sess);
iscsi_session_event(ddb_entry->sess,
ISCSI_KEVENT_CREATE_SESSION);
/*
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 2e2b9fedffcc..c3c59d763037 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -63,8 +63,6 @@ static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
enum iscsi_param param, char *buf);
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
-static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag);
-static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session);
/*
@@ -91,6 +89,8 @@ static struct scsi_host_template qla4xxx_driver_template = {
.slave_alloc = qla4xxx_slave_alloc,
.slave_destroy = qla4xxx_slave_destroy,
+ .scan_finished = iscsi_scan_finished,
+
.this_id = -1,
.cmd_per_lun = 3,
.use_clustering = ENABLE_CLUSTERING,
@@ -116,8 +116,6 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
.get_conn_param = qla4xxx_conn_get_param,
.get_session_param = qla4xxx_sess_get_param,
.get_host_param = qla4xxx_host_get_param,
- .start_conn = qla4xxx_conn_start,
- .stop_conn = qla4xxx_conn_stop,
.session_recovery_timedout = qla4xxx_recovery_timedout,
};
@@ -128,48 +126,19 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session)
struct ddb_entry *ddb_entry = session->dd_data;
struct scsi_qla_host *ha = ddb_entry->ha;
- DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count of (%d) "
- "secs exhausted, marking device DEAD.\n", ha->host_no,
- __func__, ddb_entry->fw_ddb_index,
- ha->port_down_retry_count));
-
- atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
-
- DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc flags = "
- "0x%lx\n", ha->host_no, __func__, ha->dpc_flags));
- queue_work(ha->dpc_thread, &ha->dpc_work);
-}
-
-static int qla4xxx_conn_start(struct iscsi_cls_conn *conn)
-{
- struct iscsi_cls_session *session;
- struct ddb_entry *ddb_entry;
-
- session = iscsi_dev_to_session(conn->dev.parent);
- ddb_entry = session->dd_data;
-
- DEBUG2(printk("scsi%ld: %s: index [%d] starting conn\n",
- ddb_entry->ha->host_no, __func__,
- ddb_entry->fw_ddb_index));
- iscsi_unblock_session(session);
- return 0;
-}
-
-static void qla4xxx_conn_stop(struct iscsi_cls_conn *conn, int flag)
-{
- struct iscsi_cls_session *session;
- struct ddb_entry *ddb_entry;
+ if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
+ atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
- session = iscsi_dev_to_session(conn->dev.parent);
- ddb_entry = session->dd_data;
+ DEBUG2(printk("scsi%ld: %s: index [%d] port down retry count "
+ "of (%d) secs exhausted, marking device DEAD.\n",
+ ha->host_no, __func__, ddb_entry->fw_ddb_index,
+ ha->port_down_retry_count));
- DEBUG2(printk("scsi%ld: %s: index [%d] stopping conn\n",
- ddb_entry->ha->host_no, __func__,
- ddb_entry->fw_ddb_index));
- if (flag == STOP_CONN_RECOVER)
- iscsi_block_session(session);
- else
- printk(KERN_ERR "iscsi: invalid stop flag %d\n", flag);
+ DEBUG2(printk("scsi%ld: %s: scheduling dpc routine - dpc "
+ "flags = 0x%lx\n",
+ ha->host_no, __func__, ha->dpc_flags));
+ queue_work(ha->dpc_thread, &ha->dpc_work);
+ }
}
static int qla4xxx_host_get_param(struct Scsi_Host *shost,
@@ -308,6 +277,9 @@ int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
return -ENOMEM;
}
+
+ /* finally ready to go */
+ iscsi_unblock_session(ddb_entry->sess);
return 0;
}
@@ -364,6 +336,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
DEBUG3(printk("scsi%d:%d:%d: index [%d] marked MISSING\n",
ha->host_no, ddb_entry->bus, ddb_entry->target,
ddb_entry->fw_ddb_index));
+ iscsi_block_session(ddb_entry->sess);
iscsi_conn_error(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
}
@@ -430,9 +403,21 @@ static int qla4xxx_queuecommand(struct scsi_cmnd *cmd,
{
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
+ struct iscsi_cls_session *sess = ddb_entry->sess;
struct srb *srb;
int rval;
+ if (!sess) {
+ cmd->result = DID_IMM_RETRY << 16;
+ goto qc_fail_command;
+ }
+
+ rval = iscsi_session_chkready(sess);
+ if (rval) {
+ cmd->result = rval;
+ goto qc_fail_command;
+ }
+
if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) {
cmd->result = DID_NO_CONNECT << 16;
@@ -1323,7 +1308,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
ha->patch_number, ha->build_number);
-
+ scsi_scan_host(host);
return 0;
remove_host:
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b35d19472caa..fecba05b4e77 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -969,9 +969,10 @@ void starget_for_each_device(struct scsi_target *starget, void *data,
EXPORT_SYMBOL(starget_for_each_device);
/**
- * __starget_for_each_device - helper to walk all devices of a target
- * (UNLOCKED)
+ * __starget_for_each_device - helper to walk all devices of a target (UNLOCKED)
* @starget: target whose devices we want to iterate over.
+ * @data: parameter for callback @fn()
+ * @fn: callback function that is invoked for each device
*
* This traverses over each device of @starget. It does _not_
* take a reference on the scsi_device, so the whole loop must be
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b12fb310e399..135c1d054701 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -301,7 +301,6 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
page = sg_page(sg);
off = sg->offset;
len = sg->length;
- data_len += len;
while (len > 0 && data_len > 0) {
/*
@@ -1569,6 +1568,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
request_fn_proc *request_fn)
{
struct request_queue *q;
+ struct device *dev = shost->shost_gendev.parent;
q = blk_init_queue(request_fn, NULL);
if (!q)
@@ -1583,6 +1583,9 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
blk_queue_max_sectors(q, shost->max_sectors);
blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
blk_queue_segment_boundary(q, shost->dma_boundary);
+ dma_set_seg_boundary(dev, shost->dma_boundary);
+
+ blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
if (!shost->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0d7b4e79415c..fac7534f3ec4 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -30,10 +30,10 @@
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/iscsi_if.h>
-#define ISCSI_SESSION_ATTRS 18
-#define ISCSI_CONN_ATTRS 11
+#define ISCSI_SESSION_ATTRS 19
+#define ISCSI_CONN_ATTRS 13
#define ISCSI_HOST_ATTRS 4
-#define ISCSI_TRANSPORT_VERSION "2.0-867"
+#define ISCSI_TRANSPORT_VERSION "2.0-868"
struct iscsi_internal {
int daemon_pid;
@@ -127,12 +127,13 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
memset(ihost, 0, sizeof(*ihost));
INIT_LIST_HEAD(&ihost->sessions);
mutex_init(&ihost->mutex);
+ atomic_set(&ihost->nr_scans, 0);
- snprintf(ihost->unbind_workq_name, KOBJ_NAME_LEN, "iscsi_unbind_%d",
+ snprintf(ihost->scan_workq_name, KOBJ_NAME_LEN, "iscsi_scan_%d",
shost->host_no);
- ihost->unbind_workq = create_singlethread_workqueue(
- ihost->unbind_workq_name);
- if (!ihost->unbind_workq)
+ ihost->scan_workq = create_singlethread_workqueue(
+ ihost->scan_workq_name);
+ if (!ihost->scan_workq)
return -ENOMEM;
return 0;
}
@@ -143,7 +144,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct device *dev,
struct Scsi_Host *shost = dev_to_shost(dev);
struct iscsi_host *ihost = shost->shost_data;
- destroy_workqueue(ihost->unbind_workq);
+ destroy_workqueue(ihost->scan_workq);
return 0;
}
@@ -221,6 +222,54 @@ static struct iscsi_cls_conn *iscsi_conn_lookup(uint32_t sid, uint32_t cid)
* The following functions can be used by LLDs that allocate
* their own scsi_hosts or by software iscsi LLDs
*/
+static struct {
+ int value;
+ char *name;
+} iscsi_session_state_names[] = {
+ { ISCSI_SESSION_LOGGED_IN, "LOGGED_IN" },
+ { ISCSI_SESSION_FAILED, "FAILED" },
+ { ISCSI_SESSION_FREE, "FREE" },
+};
+
+const char *iscsi_session_state_name(int state)
+{
+ int i;
+ char *name = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(iscsi_session_state_names); i++) {
+ if (iscsi_session_state_names[i].value == state) {
+ name = iscsi_session_state_names[i].name;
+ break;
+ }
+ }
+ return name;
+}
+
+int iscsi_session_chkready(struct iscsi_cls_session *session)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&session->lock, flags);
+ switch (session->state) {
+ case ISCSI_SESSION_LOGGED_IN:
+ err = 0;
+ break;
+ case ISCSI_SESSION_FAILED:
+ err = DID_IMM_RETRY << 16;
+ break;
+ case ISCSI_SESSION_FREE:
+ err = DID_NO_CONNECT << 16;
+ break;
+ default:
+ err = DID_NO_CONNECT << 16;
+ break;
+ }
+ spin_unlock_irqrestore(&session->lock, flags);
+ return err;
+}
+EXPORT_SYMBOL_GPL(iscsi_session_chkready);
+
static void iscsi_session_release(struct device *dev)
{
struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
@@ -236,6 +285,25 @@ static int iscsi_is_session_dev(const struct device *dev)
return dev->release == iscsi_session_release;
}
+/**
+ * iscsi_scan_finished - helper to report when running scans are done
+ * @shost: scsi host
+ * @time: scan run time
+ *
+ * This function can be used by drivers like qla4xxx to report to the scsi
+ * layer when the scans it kicked off at module load time are done.
+ */
+int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct iscsi_host *ihost = shost->shost_data;
+ /*
+ * qla4xxx will have kicked off some session unblocks before calling
+ * scsi_scan_host, so just wait for them to complete.
+ */
+ return !atomic_read(&ihost->nr_scans);
+}
+EXPORT_SYMBOL_GPL(iscsi_scan_finished);
+
static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
uint id, uint lun)
{
@@ -254,14 +322,50 @@ static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
return 0;
}
+static void iscsi_scan_session(struct work_struct *work)
+{
+ struct iscsi_cls_session *session =
+ container_of(work, struct iscsi_cls_session, scan_work);
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&session->lock, flags);
+ if (session->state != ISCSI_SESSION_LOGGED_IN) {
+ spin_unlock_irqrestore(&session->lock, flags);
+ goto done;
+ }
+ spin_unlock_irqrestore(&session->lock, flags);
+
+ scsi_scan_target(&session->dev, 0, session->target_id,
+ SCAN_WILD_CARD, 1);
+done:
+ atomic_dec(&ihost->nr_scans);
+}
+
static void session_recovery_timedout(struct work_struct *work)
{
struct iscsi_cls_session *session =
container_of(work, struct iscsi_cls_session,
recovery_work.work);
+ unsigned long flags;
+
+ iscsi_cls_session_printk(KERN_INFO, session,
+ "session recovery timed out after %d secs\n",
+ session->recovery_tmo);
- dev_printk(KERN_INFO, &session->dev, "iscsi: session recovery timed "
- "out after %d secs\n", session->recovery_tmo);
+ spin_lock_irqsave(&session->lock, flags);
+ switch (session->state) {
+ case ISCSI_SESSION_FAILED:
+ session->state = ISCSI_SESSION_FREE;
+ break;
+ case ISCSI_SESSION_LOGGED_IN:
+ case ISCSI_SESSION_FREE:
+ /* we raced with the unblock's flush */
+ spin_unlock_irqrestore(&session->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&session->lock, flags);
if (session->transport->session_recovery_timedout)
session->transport->session_recovery_timedout(session);
@@ -269,16 +373,44 @@ static void session_recovery_timedout(struct work_struct *work)
scsi_target_unblock(&session->dev);
}
-void iscsi_unblock_session(struct iscsi_cls_session *session)
+void __iscsi_unblock_session(struct iscsi_cls_session *session)
{
if (!cancel_delayed_work(&session->recovery_work))
flush_workqueue(iscsi_eh_timer_workq);
scsi_target_unblock(&session->dev);
}
+
+void iscsi_unblock_session(struct iscsi_cls_session *session)
+{
+ struct Scsi_Host *shost = iscsi_session_to_shost(session);
+ struct iscsi_host *ihost = shost->shost_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&session->lock, flags);
+ session->state = ISCSI_SESSION_LOGGED_IN;
+ spin_unlock_irqrestore(&session->lock, flags);
+
+ __iscsi_unblock_session(session);
+ /*
+ * Only do kernel scanning if the driver is properly hooked into
+ * the async scanning code (drivers like iscsi_tcp do login and
+ * scanning from userspace).
+ */
+ if (shost->hostt->scan_finished) {
+ if (queue_work(ihost->scan_workq, &session->scan_work))
+ atomic_inc(&ihost->nr_scans);
+ }
+}
EXPORT_SYMBOL_GPL(iscsi_unblock_session);
void iscsi_block_session(struct iscsi_cls_session *session)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&session->lock, flags);
+ session->state = ISCSI_SESSION_FAILED;
+ spin_unlock_irqrestore(&session->lock, flags);
+
scsi_target_block(&session->dev);
queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
session->recovery_tmo * HZ);
@@ -311,7 +443,7 @@ static int iscsi_unbind_session(struct iscsi_cls_session *session)
struct Scsi_Host *shost = iscsi_session_to_shost(session);
struct iscsi_host *ihost = shost->shost_data;
- return queue_work(ihost->unbind_workq, &session->unbind_work);
+ return queue_work(ihost->scan_workq, &session->unbind_work);
}
struct iscsi_cls_session *
@@ -327,10 +459,13 @@ iscsi_alloc_session(struct Scsi_Host *shost,
session->transport = transport;
session->recovery_tmo = 120;
+ session->state = ISCSI_SESSION_FREE;
INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
INIT_LIST_HEAD(&session->host_list);
INIT_LIST_HEAD(&session->sess_list);
INIT_WORK(&session->unbind_work, __iscsi_unbind_session);
+ INIT_WORK(&session->scan_work, iscsi_scan_session);
+ spin_lock_init(&session->lock);
/* this is released in the dev's release function */
scsi_host_get(shost);
@@ -358,8 +493,8 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
session->sid);
err = device_add(&session->dev);
if (err) {
- dev_printk(KERN_ERR, &session->dev, "iscsi: could not "
- "register session's dev\n");
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "could not register session's dev\n");
goto release_host;
}
transport_register_device(&session->dev);
@@ -444,22 +579,28 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
* If we are blocked let commands flow again. The lld or iscsi
* layer should set up the queuecommand to fail commands.
*/
- iscsi_unblock_session(session);
- iscsi_unbind_session(session);
+ spin_lock_irqsave(&session->lock, flags);
+ session->state = ISCSI_SESSION_FREE;
+ spin_unlock_irqrestore(&session->lock, flags);
+ __iscsi_unblock_session(session);
+ __iscsi_unbind_session(&session->unbind_work);
+
+ /* flush running scans */
+ flush_workqueue(ihost->scan_workq);
/*
* If the session dropped while removing devices then we need to make
* sure it is not blocked
*/
if (!cancel_delayed_work(&session->recovery_work))
flush_workqueue(iscsi_eh_timer_workq);
- flush_workqueue(ihost->unbind_workq);
/* hw iscsi may not have removed all connections from session */
err = device_for_each_child(&session->dev, NULL,
iscsi_iter_destroy_conn_fn);
if (err)
- dev_printk(KERN_ERR, &session->dev, "iscsi: Could not delete "
- "all connections for session. Error %d.\n", err);
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "Could not delete all connections "
+ "for session. Error %d.\n", err);
transport_unregister_device(&session->dev);
device_del(&session->dev);
@@ -531,8 +672,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, uint32_t cid)
conn->dev.release = iscsi_conn_release;
err = device_register(&conn->dev);
if (err) {
- dev_printk(KERN_ERR, &conn->dev, "iscsi: could not register "
- "connection's dev\n");
+ iscsi_cls_session_printk(KERN_ERR, session, "could not "
+ "register connection's dev\n");
goto release_parent_ref;
}
transport_register_device(&conn->dev);
@@ -639,8 +780,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
iscsi_conn_error(conn, ISCSI_ERR_CONN_FAILED);
- dev_printk(KERN_ERR, &conn->dev, "iscsi: can not deliver "
- "control PDU: OOM\n");
+ iscsi_cls_conn_printk(KERN_ERR, conn, "can not deliver "
+ "control PDU: OOM\n");
return -ENOMEM;
}
@@ -661,20 +802,27 @@ EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
{
+ struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
struct nlmsghdr *nlh;
struct sk_buff *skb;
struct iscsi_uevent *ev;
struct iscsi_internal *priv;
int len = NLMSG_SPACE(sizeof(*ev));
+ unsigned long flags;
priv = iscsi_if_transport_lookup(conn->transport);
if (!priv)
return;
+ spin_lock_irqsave(&session->lock, flags);
+ if (session->state == ISCSI_SESSION_LOGGED_IN)
+ session->state = ISCSI_SESSION_FAILED;
+ spin_unlock_irqrestore(&session->lock, flags);
+
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
- dev_printk(KERN_ERR, &conn->dev, "iscsi: gracefully ignored "
- "conn error (%d)\n", error);
+ iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
+ "conn error (%d)\n", error);
return;
}
@@ -688,8 +836,8 @@ void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error)
iscsi_broadcast_skb(skb, GFP_ATOMIC);
- dev_printk(KERN_INFO, &conn->dev, "iscsi: detected conn error (%d)\n",
- error);
+ iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
+ error);
}
EXPORT_SYMBOL_GPL(iscsi_conn_error);
@@ -744,8 +892,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
skbstat = alloc_skb(len, GFP_ATOMIC);
if (!skbstat) {
- dev_printk(KERN_ERR, &conn->dev, "iscsi: can not "
- "deliver stats: OOM\n");
+ iscsi_cls_conn_printk(KERN_ERR, conn, "can not "
+ "deliver stats: OOM\n");
return -ENOMEM;
}
@@ -801,8 +949,9 @@ int iscsi_session_event(struct iscsi_cls_session *session,
skb = alloc_skb(len, GFP_KERNEL);
if (!skb) {
- dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace "
- "of session event %u\n", event);
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "Cannot notify userspace of session "
+ "event %u\n", event);
return -ENOMEM;
}
@@ -825,8 +974,8 @@ int iscsi_session_event(struct iscsi_cls_session *session,
ev->r.unbind_session.sid = session->sid;
break;
default:
- dev_printk(KERN_ERR, &session->dev, "Invalid event %u.\n",
- event);
+ iscsi_cls_session_printk(KERN_ERR, session, "Invalid event "
+ "%u.\n", event);
kfree_skb(skb);
return -EINVAL;
}
@@ -837,8 +986,10 @@ int iscsi_session_event(struct iscsi_cls_session *session,
*/
rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
if (rc < 0)
- dev_printk(KERN_ERR, &session->dev, "Cannot notify userspace "
- "of session event %u. Check iscsi daemon\n", event);
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "Cannot notify userspace of session "
+ "event %u. Check iscsi daemon\n",
+ event);
return rc;
}
EXPORT_SYMBOL_GPL(iscsi_session_event);
@@ -871,16 +1022,15 @@ iscsi_if_create_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session = iscsi_session_lookup(ev->u.c_conn.sid);
if (!session) {
- printk(KERN_ERR "iscsi: invalid session %d\n",
+ printk(KERN_ERR "iscsi: invalid session %d.\n",
ev->u.c_conn.sid);
return -EINVAL;
}
conn = transport->create_conn(session, ev->u.c_conn.cid);
if (!conn) {
- printk(KERN_ERR "iscsi: couldn't create a new "
- "connection for session %d\n",
- session->sid);
+ iscsi_cls_session_printk(KERN_ERR, session,
+ "couldn't create a new connection.");
return -ENOMEM;
}
@@ -1246,6 +1396,15 @@ iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
+static ssize_t
+show_priv_session_state(struct class_device *cdev, char *buf)
+{
+ struct iscsi_cls_session *session = iscsi_cdev_to_session(cdev);
+ return sprintf(buf, "%s\n", iscsi_session_state_name(session->state));
+}
+static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
+ NULL);
+
#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \
show_priv_session_##field(struct class_device *cdev, char *buf) \
@@ -1472,6 +1631,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
SETUP_PRIV_SESSION_RD_ATTR(recovery_tmo);
+ SETUP_PRIV_SESSION_RD_ATTR(state);
BUG_ON(count > ISCSI_SESSION_ATTRS);
priv->session_attrs[count] = NULL;
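
The transport-class changes above give each iSCSI session an explicit state (LOGGED_IN, FAILED, FREE), flip it in the block/unblock and conn_error paths, and let LLD queuecommand routines gate I/O through iscsi_session_chkready(), which maps the state to either 0 or a SCSI result. A standalone sketch of that mapping; the host-byte values mirror the kernel's DID_IMM_RETRY and DID_NO_CONNECT codes:

#include <stdio.h>

enum session_state { LOGGED_IN, FAILED, FREE };

/* Host-byte result codes; these mirror the kernel's DID_IMM_RETRY (0x0c)
 * and DID_NO_CONNECT (0x01). */
#define RESULT_IMM_RETRY	(0x0c << 16)
#define RESULT_NO_CONNECT	(0x01 << 16)

static int session_chkready(enum session_state s)
{
	switch (s) {
	case LOGGED_IN:
		return 0;			/* send the command */
	case FAILED:
		return RESULT_IMM_RETRY;	/* retry once recovery finishes or times out */
	case FREE:
	default:
		return RESULT_NO_CONNECT;	/* fail the command outright */
	}
}

int main(void)
{
	printf("LOGGED_IN -> 0x%x\n", session_chkready(LOGGED_IN));
	printf("FAILED    -> 0x%x\n", session_chkready(FAILED));
	printf("FREE      -> 0x%x\n", session_chkready(FREE));
	return 0;
}
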
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 51a5557f42dd..37df8bbe7f46 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -929,6 +929,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned int xfer_size = scsi_bufflen(SCpnt);
unsigned int good_bytes = result ? 0 : xfer_size;
u64 start_lba = SCpnt->request->sector;
+ u64 end_lba = SCpnt->request->sector + (xfer_size / 512);
u64 bad_lba;
struct scsi_sense_hdr sshdr;
int sense_valid = 0;
@@ -967,26 +968,23 @@ static int sd_done(struct scsi_cmnd *SCpnt)
goto out;
if (xfer_size <= SCpnt->device->sector_size)
goto out;
- switch (SCpnt->device->sector_size) {
- case 256:
+ if (SCpnt->device->sector_size < 512) {
+ /* only legitimate sector_size here is 256 */
start_lba <<= 1;
- break;
- case 512:
- break;
- case 1024:
- start_lba >>= 1;
- break;
- case 2048:
- start_lba >>= 2;
- break;
- case 4096:
- start_lba >>= 3;
- break;
- default:
- /* Print something here with limiting frequency. */
- goto out;
- break;
+ end_lba <<= 1;
+ } else {
+ /* be careful ... don't want any overflows */
+ u64 factor = SCpnt->device->sector_size / 512;
+ do_div(start_lba, factor);
+ do_div(end_lba, factor);
}
+
+ if (bad_lba < start_lba || bad_lba >= end_lba)
+ /* the bad lba was reported incorrectly, we have
+ * no idea where the error is
+ */
+ goto out;
+
/* This computation should always be done in terms of
* the resolution of the device's medium.
*/
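
The sd_done() hunk replaces the per-sector-size switch with general scaling of the request's start and end (kept in 512-byte units) to the device's logical sector size, and only trusts the reported bad LBA if it falls inside that window. A standalone sketch of the bounds check, using ordinary 64-bit division in place of do_div():

#include <stdio.h>
#include <stdint.h>

static int bad_lba_in_range(uint64_t start_512, unsigned int xfer_bytes,
			    unsigned int sector_size, uint64_t bad_lba)
{
	uint64_t start = start_512;			/* request start in 512-byte units */
	uint64_t end = start_512 + xfer_bytes / 512;

	if (sector_size < 512) {
		/* only legitimate sector_size here is 256 */
		start <<= 1;
		end <<= 1;
	} else {
		uint64_t factor = sector_size / 512;	/* do_div() in the kernel */

		start /= factor;
		end /= factor;
	}
	return bad_lba >= start && bad_lba < end;
}

int main(void)
{
	/* A 4 KiB read starting at 512-byte block 1000 on a 2048-byte-sector
	 * device covers device LBAs 250..251. */
	printf("%d\n", bad_lba_in_range(1000, 4096, 2048, 250));	/* 1 */
	printf("%d\n", bad_lba_in_range(1000, 4096, 2048, 252));	/* 0 */
	return 0;
}
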
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
new file mode 100644
index 000000000000..2a6e4f472eaa
--- /dev/null
+++ b/drivers/scsi/ses.c
@@ -0,0 +1,689 @@
+/*
+ * SCSI Enclosure Services
+ *
+ * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
+ *
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or
+** modify it under the terms of the GNU General Public License
+** version 2 as published by the Free Software Foundation.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/enclosure.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_host.h>
+
+struct ses_device {
+ char *page1;
+ char *page2;
+ char *page10;
+ short page1_len;
+ short page2_len;
+ short page10_len;
+};
+
+struct ses_component {
+ u64 addr;
+ unsigned char *desc;
+};
+
+static int ses_probe(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ int err = -ENODEV;
+
+ if (sdev->type != TYPE_ENCLOSURE)
+ goto out;
+
+ err = 0;
+ sdev_printk(KERN_NOTICE, sdev, "Attached Enclosure device\n");
+
+ out:
+ return err;
+}
+
+#define SES_TIMEOUT 30
+#define SES_RETRIES 3
+
+static int ses_recv_diag(struct scsi_device *sdev, int page_code,
+ void *buf, int bufflen)
+{
+ char cmd[] = {
+ RECEIVE_DIAGNOSTIC,
+ 1, /* Set PCV bit */
+ page_code,
+ bufflen >> 8,
+ bufflen & 0xff,
+ 0
+ };
+
+ return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+ NULL, SES_TIMEOUT, SES_RETRIES);
+}
+
+static int ses_send_diag(struct scsi_device *sdev, int page_code,
+ void *buf, int bufflen)
+{
+ u32 result;
+
+ char cmd[] = {
+ SEND_DIAGNOSTIC,
+ 0x10, /* Set PF bit */
+ 0,
+ bufflen >> 8,
+ bufflen & 0xff,
+ 0
+ };
+
+ result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
+ NULL, SES_TIMEOUT, SES_RETRIES);
+ if (result)
+ sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
+ result);
+ return result;
+}
+
+static int ses_set_page2_descriptor(struct enclosure_device *edev,
+ struct enclosure_component *ecomp,
+ char *desc)
+{
+ int i, j, count = 0, descriptor = ecomp->number;
+ struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
+ struct ses_device *ses_dev = edev->scratch;
+ char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
+ char *desc_ptr = ses_dev->page2 + 8;
+
+ /* Clear everything */
+ memset(desc_ptr, 0, ses_dev->page2_len - 8);
+ for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
+ for (j = 0; j < type_ptr[1]; j++) {
+ desc_ptr += 4;
+ if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
+ type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+ continue;
+ if (count++ == descriptor) {
+ memcpy(desc_ptr, desc, 4);
+ /* set select */
+ desc_ptr[0] |= 0x80;
+ /* clear reserved, just in case */
+ desc_ptr[0] &= 0xf0;
+ }
+ }
+ }
+
+ return ses_send_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
+}
+
+static char *ses_get_page2_descriptor(struct enclosure_device *edev,
+ struct enclosure_component *ecomp)
+{
+ int i, j, count = 0, descriptor = ecomp->number;
+ struct scsi_device *sdev = to_scsi_device(edev->cdev.dev);
+ struct ses_device *ses_dev = edev->scratch;
+ char *type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
+ char *desc_ptr = ses_dev->page2 + 8;
+
+ ses_recv_diag(sdev, 2, ses_dev->page2, ses_dev->page2_len);
+
+ for (i = 0; i < ses_dev->page1[10]; i++, type_ptr += 4) {
+ for (j = 0; j < type_ptr[1]; j++) {
+ desc_ptr += 4;
+ if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
+ type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+ continue;
+ if (count++ == descriptor)
+ return desc_ptr;
+ }
+ }
+ return NULL;
+}
+
+static void ses_get_fault(struct enclosure_device *edev,
+ struct enclosure_component *ecomp)
+{
+ char *desc;
+
+ desc = ses_get_page2_descriptor(edev, ecomp);
+ ecomp->fault = (desc[3] & 0x60) >> 4;
+}
+
+static int ses_set_fault(struct enclosure_device *edev,
+ struct enclosure_component *ecomp,
+ enum enclosure_component_setting val)
+{
+ char desc[4] = {0 };
+
+ switch (val) {
+ case ENCLOSURE_SETTING_DISABLED:
+ /* zero is disabled */
+ break;
+ case ENCLOSURE_SETTING_ENABLED:
+ desc[2] = 0x02;
+ break;
+ default:
+ /* SES doesn't do the SGPIO blink settings */
+ return -EINVAL;
+ }
+
+ return ses_set_page2_descriptor(edev, ecomp, desc);
+}
+
+static void ses_get_status(struct enclosure_device *edev,
+ struct enclosure_component *ecomp)
+{
+ char *desc;
+
+ desc = ses_get_page2_descriptor(edev, ecomp);
+ ecomp->status = (desc[0] & 0x0f);
+}
+
+static void ses_get_locate(struct enclosure_device *edev,
+ struct enclosure_component *ecomp)
+{
+ char *desc;
+
+ desc = ses_get_page2_descriptor(edev, ecomp);
+ ecomp->locate = (desc[2] & 0x02) ? 1 : 0;
+}
+
+static int ses_set_locate(struct enclosure_device *edev,
+ struct enclosure_component *ecomp,
+ enum enclosure_component_setting val)
+{
+ char desc[4] = {0 };
+
+ switch (val) {
+ case ENCLOSURE_SETTING_DISABLED:
+ /* zero is disabled */
+ break;
+ case ENCLOSURE_SETTING_ENABLED:
+ desc[2] = 0x02;
+ break;
+ default:
+ /* SES doesn't do the SGPIO blink settings */
+ return -EINVAL;
+ }
+ return ses_set_page2_descriptor(edev, ecomp, desc);
+}
+
+static int ses_set_active(struct enclosure_device *edev,
+ struct enclosure_component *ecomp,
+ enum enclosure_component_setting val)
+{
+ char desc[4] = {0 };
+
+ switch (val) {
+ case ENCLOSURE_SETTING_DISABLED:
+ /* zero is disabled */
+ ecomp->active = 0;
+ break;
+ case ENCLOSURE_SETTING_ENABLED:
+ desc[2] = 0x80;
+ ecomp->active = 1;
+ break;
+ default:
+ /* SES doesn't do the SGPIO blink settings */
+ return -EINVAL;
+ }
+ return ses_set_page2_descriptor(edev, ecomp, desc);
+}
+
+static struct enclosure_component_callbacks ses_enclosure_callbacks = {
+ .get_fault = ses_get_fault,
+ .set_fault = ses_set_fault,
+ .get_status = ses_get_status,
+ .get_locate = ses_get_locate,
+ .set_locate = ses_set_locate,
+ .set_active = ses_set_active,
+};
+
+struct ses_host_edev {
+ struct Scsi_Host *shost;
+ struct enclosure_device *edev;
+};
+
+int ses_match_host(struct enclosure_device *edev, void *data)
+{
+ struct ses_host_edev *sed = data;
+ struct scsi_device *sdev;
+
+ if (!scsi_is_sdev_device(edev->cdev.dev))
+ return 0;
+
+ sdev = to_scsi_device(edev->cdev.dev);
+
+ if (sdev->host != sed->shost)
+ return 0;
+
+ sed->edev = edev;
+ return 1;
+}
+
+static void ses_process_descriptor(struct enclosure_component *ecomp,
+ unsigned char *desc)
+{
+ int eip = desc[0] & 0x10;
+ int invalid = desc[0] & 0x80;
+ enum scsi_protocol proto = desc[0] & 0x0f;
+ u64 addr = 0;
+ struct ses_component *scomp = ecomp->scratch;
+ unsigned char *d;
+
+ scomp->desc = desc;
+
+ if (invalid)
+ return;
+
+ switch (proto) {
+ case SCSI_PROTOCOL_SAS:
+ if (eip)
+ d = desc + 8;
+ else
+ d = desc + 4;
+ /* only take the phy0 addr */
+ addr = (u64)d[12] << 56 |
+ (u64)d[13] << 48 |
+ (u64)d[14] << 40 |
+ (u64)d[15] << 32 |
+ (u64)d[16] << 24 |
+ (u64)d[17] << 16 |
+ (u64)d[18] << 8 |
+ (u64)d[19];
+ break;
+ default:
+ /* FIXME: Need to add more protocols than just SAS */
+ break;
+ }
+ scomp->addr = addr;
+}
+
+struct efd {
+ u64 addr;
+ struct device *dev;
+};
+
+static int ses_enclosure_find_by_addr(struct enclosure_device *edev,
+ void *data)
+{
+ struct efd *efd = data;
+ int i;
+ struct ses_component *scomp;
+
+ if (!edev->component[0].scratch)
+ return 0;
+
+ for (i = 0; i < edev->components; i++) {
+ scomp = edev->component[i].scratch;
+ if (scomp->addr != efd->addr)
+ continue;
+
+ enclosure_add_device(edev, i, efd->dev);
+ return 1;
+ }
+ return 0;
+}
+
+#define VPD_INQUIRY_SIZE 512
+
+static void ses_match_to_enclosure(struct enclosure_device *edev,
+ struct scsi_device *sdev)
+{
+ unsigned char *buf = kmalloc(VPD_INQUIRY_SIZE, GFP_KERNEL);
+ unsigned char *desc;
+ int len;
+ struct efd efd = {
+ .addr = 0,
+ };
+ unsigned char cmd[] = {
+ INQUIRY,
+ 1,
+ 0x83,
+ VPD_INQUIRY_SIZE >> 8,
+ VPD_INQUIRY_SIZE & 0xff,
+ 0
+ };
+
+ if (!buf)
+ return;
+
+ if (scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf,
+ VPD_INQUIRY_SIZE, NULL, SES_TIMEOUT, SES_RETRIES))
+ goto free;
+
+ len = (buf[2] << 8) + buf[3];
+ desc = buf + 4;
+ while (desc < buf + 4 + len) {
+ enum scsi_protocol proto = desc[0] >> 4;
+ u8 code_set = desc[0] & 0x0f;
+ u8 piv = desc[1] & 0x80;
+ u8 assoc = (desc[1] & 0x30) >> 4;
+ u8 type = desc[1] & 0x0f;
+ u8 len = desc[3];
+
+ if (piv && code_set == 1 && assoc == 1
+ && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8)
+ efd.addr = (u64)desc[4] << 56 |
+ (u64)desc[5] << 48 |
+ (u64)desc[6] << 40 |
+ (u64)desc[7] << 32 |
+ (u64)desc[8] << 24 |
+ (u64)desc[9] << 16 |
+ (u64)desc[10] << 8 |
+ (u64)desc[11];
+
+ desc += len + 4;
+ }
+ if (!efd.addr)
+ goto free;
+
+ efd.dev = &sdev->sdev_gendev;
+
+ enclosure_for_each_device(ses_enclosure_find_by_addr, &efd);
+ free:
+ kfree(buf);
+}
+
+#define INIT_ALLOC_SIZE 32
+
+static int ses_intf_add(struct class_device *cdev,
+ struct class_interface *intf)
+{
+ struct scsi_device *sdev = to_scsi_device(cdev->dev);
+ struct scsi_device *tmp_sdev;
+ unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr,
+ *addl_desc_ptr;
+ struct ses_device *ses_dev;
+ u32 result;
+ int i, j, types, len, components = 0;
+ int err = -ENOMEM;
+ struct enclosure_device *edev;
+ struct ses_component *scomp;
+
+ if (!scsi_device_enclosure(sdev)) {
+ /* not an enclosure, but might be in one */
+ edev = enclosure_find(&sdev->host->shost_gendev);
+ if (edev) {
+ ses_match_to_enclosure(edev, sdev);
+ class_device_put(&edev->cdev);
+ }
+ return -ENODEV;
+ }
+
+ /* TYPE_ENCLOSURE prints a message in probe */
+ if (sdev->type != TYPE_ENCLOSURE)
+ sdev_printk(KERN_NOTICE, sdev, "Embedded Enclosure Device\n");
+
+ ses_dev = kzalloc(sizeof(*ses_dev), GFP_KERNEL);
+ hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
+ if (!hdr_buf || !ses_dev)
+ goto err_init_free;
+
+ result = ses_recv_diag(sdev, 1, hdr_buf, INIT_ALLOC_SIZE);
+ if (result)
+ goto recv_failed;
+
+ if (hdr_buf[1] != 0) {
+ /* FIXME: need subenclosure support; I've just never
+ * seen a device with subenclosures and it makes the
+ * traversal routines more complex */
+ sdev_printk(KERN_ERR, sdev,
+ "FIXME driver has no support for subenclosures (%d)\n",
+ hdr_buf[1]);
+ goto err_free;
+ }
+
+ len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto err_free;
+
+ ses_dev->page1 = buf;
+ ses_dev->page1_len = len;
+
+ result = ses_recv_diag(sdev, 1, buf, len);
+ if (result)
+ goto recv_failed;
+
+ types = buf[10];
+ len = buf[11];
+
+ type_ptr = buf + 12 + len;
+
+ for (i = 0; i < types; i++, type_ptr += 4) {
+ if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+ type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+ components += type_ptr[1];
+ }
+
+ result = ses_recv_diag(sdev, 2, hdr_buf, INIT_ALLOC_SIZE);
+ if (result)
+ goto recv_failed;
+
+ len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto err_free;
+
+ /* make sure getting page 2 actually works */
+ result = ses_recv_diag(sdev, 2, buf, len);
+ if (result)
+ goto recv_failed;
+ ses_dev->page2 = buf;
+ ses_dev->page2_len = len;
+
+ /* The additional information page --- allows us
+ * to match up the devices */
+ result = ses_recv_diag(sdev, 10, hdr_buf, INIT_ALLOC_SIZE);
+ if (result)
+ goto no_page10;
+
+ len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ goto err_free;
+
+ result = ses_recv_diag(sdev, 10, buf, len);
+ if (result)
+ goto recv_failed;
+ ses_dev->page10 = buf;
+ ses_dev->page10_len = len;
+
+ no_page10:
+ scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
+ if (!scomp)
+ goto err_free;
+
+ edev = enclosure_register(cdev->dev, sdev->sdev_gendev.bus_id,
+ components, &ses_enclosure_callbacks);
+ if (IS_ERR(edev)) {
+ err = PTR_ERR(edev);
+ goto err_free;
+ }
+
+ edev->scratch = ses_dev;
+ for (i = 0; i < components; i++)
+ edev->component[i].scratch = scomp++;
+
+ /* Page 7 for the descriptors is optional */
+ buf = NULL;
+ result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
+ if (result)
+ goto simple_populate;
+
+ len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
+ /* add 1 for trailing '\0' we'll use */
+ buf = kzalloc(len + 1, GFP_KERNEL);
+ if (!buf)
+ goto simple_populate;
+ result = ses_recv_diag(sdev, 7, buf, len);
+ if (result) {
+ simple_populate:
+ kfree(buf);
+ buf = NULL;
+ desc_ptr = NULL;
+ addl_desc_ptr = NULL;
+ } else {
+ desc_ptr = buf + 8;
+ len = (desc_ptr[2] << 8) + desc_ptr[3];
+ /* skip past overall descriptor */
+ desc_ptr += len + 4;
+ addl_desc_ptr = NULL;
+ if (ses_dev->page10)
+ addl_desc_ptr = ses_dev->page10 + 8;
+ }
+ type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
+ components = 0;
+ for (i = 0; i < types; i++, type_ptr += 4) {
+ for (j = 0; j < type_ptr[1]; j++) {
+ char *name = NULL;
+ struct enclosure_component *ecomp;
+
+ if (desc_ptr) {
+ len = (desc_ptr[2] << 8) + desc_ptr[3];
+ desc_ptr += 4;
+ /* Add trailing zero - pushes into
+ * reserved space */
+ desc_ptr[len] = '\0';
+ name = desc_ptr;
+ }
+ if (type_ptr[0] != ENCLOSURE_COMPONENT_DEVICE &&
+ type_ptr[0] != ENCLOSURE_COMPONENT_ARRAY_DEVICE)
+ continue;
+ ecomp = enclosure_component_register(edev,
+ components++,
+ type_ptr[0],
+ name);
+ if (desc_ptr) {
+ desc_ptr += len;
+ if (!IS_ERR(ecomp) && addl_desc_ptr)
+ ses_process_descriptor(ecomp,
+ addl_desc_ptr);
+
+ if (addl_desc_ptr)
+ addl_desc_ptr += addl_desc_ptr[1] + 2;
+ }
+ }
+ }
+ kfree(buf);
+ kfree(hdr_buf);
+
+ /* see if there are any devices matching before
+ * we found the enclosure */
+ shost_for_each_device(tmp_sdev, sdev->host) {
+ if (tmp_sdev->lun != 0 || scsi_device_enclosure(tmp_sdev))
+ continue;
+ ses_match_to_enclosure(edev, tmp_sdev);
+ }
+
+ return 0;
+
+ recv_failed:
+ sdev_printk(KERN_ERR, sdev, "Failed to get diagnostic page 0x%x\n",
+ result);
+ err = -ENODEV;
+ err_free:
+ kfree(buf);
+ kfree(ses_dev->page10);
+ kfree(ses_dev->page2);
+ kfree(ses_dev->page1);
+ err_init_free:
+ kfree(ses_dev);
+ kfree(hdr_buf);
+ sdev_printk(KERN_ERR, sdev, "Failed to bind enclosure %d\n", err);
+ return err;
+}
+
+static int ses_remove(struct device *dev)
+{
+ return 0;
+}
+
+static void ses_intf_remove(struct class_device *cdev,
+ struct class_interface *intf)
+{
+ struct scsi_device *sdev = to_scsi_device(cdev->dev);
+ struct enclosure_device *edev;
+ struct ses_device *ses_dev;
+
+ if (!scsi_device_enclosure(sdev))
+ return;
+
+ edev = enclosure_find(cdev->dev);
+ if (!edev)
+ return;
+
+ ses_dev = edev->scratch;
+ edev->scratch = NULL;
+
+ kfree(ses_dev->page1);
+ kfree(ses_dev->page2);
+ kfree(ses_dev);
+
+ kfree(edev->component[0].scratch);
+
+ class_device_put(&edev->cdev);
+ enclosure_unregister(edev);
+}
+
+static struct class_interface ses_interface = {
+ .add = ses_intf_add,
+ .remove = ses_intf_remove,
+};
+
+static struct scsi_driver ses_template = {
+ .owner = THIS_MODULE,
+ .gendrv = {
+ .name = "ses",
+ .probe = ses_probe,
+ .remove = ses_remove,
+ },
+};
+
+static int __init ses_init(void)
+{
+ int err;
+
+ err = scsi_register_interface(&ses_interface);
+ if (err)
+ return err;
+
+ err = scsi_register_driver(&ses_template.gendrv);
+ if (err)
+ goto out_unreg;
+
+ return 0;
+
+ out_unreg:
+ scsi_unregister_interface(&ses_interface);
+ return err;
+}
+
+static void __exit ses_exit(void)
+{
+ scsi_unregister_driver(&ses_template.gendrv);
+ scsi_unregister_interface(&ses_interface);
+}
+
+module_init(ses_init);
+module_exit(ses_exit);
+
+MODULE_ALIAS_SCSI_DEVICE(TYPE_ENCLOSURE);
+
+MODULE_AUTHOR("James Bottomley");
+MODULE_DESCRIPTION("SCSI Enclosure Services (ses) driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index aba28f335b88..e5156aa6dd20 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1160,23 +1160,22 @@ sg_fasync(int fd, struct file *filp, int mode)
return (retval < 0) ? retval : 0;
}
-static struct page *
-sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
+static int
+sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
Sg_fd *sfp;
- struct page *page = NOPAGE_SIGBUS;
unsigned long offset, len, sa;
Sg_scatter_hold *rsv_schp;
struct scatterlist *sg;
int k;
if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
- return page;
+ return VM_FAULT_SIGBUS;
rsv_schp = &sfp->reserve;
- offset = addr - vma->vm_start;
+ offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= rsv_schp->bufflen)
- return page;
- SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
+ return VM_FAULT_SIGBUS;
+ SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
offset, rsv_schp->k_use_sg));
sg = rsv_schp->buffer;
sa = vma->vm_start;
@@ -1185,21 +1184,21 @@ sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
len = vma->vm_end - sa;
len = (len < sg->length) ? len : sg->length;
if (offset < len) {
+ struct page *page;
page = virt_to_page(page_address(sg_page(sg)) + offset);
get_page(page); /* increment page count */
- break;
+ vmf->page = page;
+ return 0; /* success */
}
sa += len;
offset -= len;
}
- if (type)
- *type = VM_FAULT_MINOR;
- return page;
+ return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct sg_mmap_vm_ops = {
- .nopage = sg_vma_nopage,
+ .fault = sg_vma_fault,
};
static int
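The sg change above is the standard ->nopage to ->fault conversion: the
handler now receives a struct vm_fault, computes the offset from
vmf->pgoff, stores the resolved page in vmf->page and returns 0, or
returns VM_FAULT_SIGBUS.  A minimal sketch of that pattern with a
hypothetical my_dev structure (not part of this patch), assuming the
buffer is ordinary lowmem so virt_to_page() is valid:

#include <linux/mm.h>

struct my_dev {				/* hypothetical driver state */
	void		*buf;		/* kmalloc'd, page-aligned buffer */
	unsigned long	buflen;
};

static int my_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct my_dev *dev = vma->vm_private_data;
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;
	struct page *page;

	if (!dev || offset >= dev->buflen)
		return VM_FAULT_SIGBUS;

	page = virt_to_page(dev->buf + offset);
	get_page(page);			/* the mm code drops this reference */
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct my_vm_ops = {
	.fault	= my_vma_fault,
};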
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 50ba49250203..208565bdbe8e 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -163,6 +163,29 @@ static void scsi_cd_put(struct scsi_cd *cd)
mutex_unlock(&sr_ref_mutex);
}
+/* identical to scsi_test_unit_ready except that it doesn't
+ * eat the NOT_READY returns for removable media */
+int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr)
+{
+ int retries = MAX_RETRIES;
+ int the_result;
+ u8 cmd[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
+
+ /* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION
+ * conditions are gone, or a timeout happens
+ */
+ do {
+ the_result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL,
+ 0, sshdr, SR_TIMEOUT,
+ retries--);
+
+ } while (retries > 0 &&
+ (!scsi_status_is_good(the_result) ||
+ (scsi_sense_valid(sshdr) &&
+ sshdr->sense_key == UNIT_ATTENTION)));
+ return the_result;
+}
+
/*
* This function checks to see if the media has been changed in the
* CDROM drive. It is possible that we have already sensed a change,
@@ -185,8 +208,7 @@ static int sr_media_change(struct cdrom_device_info *cdi, int slot)
}
sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
- retval = scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES,
- sshdr);
+ retval = sr_test_unit_ready(cd->device, sshdr);
if (retval || (scsi_sense_valid(sshdr) &&
/* 0x3a is medium not present */
sshdr->asc == 0x3a)) {
@@ -733,10 +755,8 @@ static void get_capabilities(struct scsi_cd *cd)
{
unsigned char *buffer;
struct scsi_mode_data data;
- unsigned char cmd[MAX_COMMAND_SIZE];
struct scsi_sense_hdr sshdr;
- unsigned int the_result;
- int retries, rc, n;
+ int rc, n;
static const char *loadmech[] =
{
@@ -758,23 +778,8 @@ static void get_capabilities(struct scsi_cd *cd)
return;
}
- /* issue TEST_UNIT_READY until the initial startup UNIT_ATTENTION
- * conditions are gone, or a timeout happens
- */
- retries = 0;
- do {
- memset((void *)cmd, 0, MAX_COMMAND_SIZE);
- cmd[0] = TEST_UNIT_READY;
-
- the_result = scsi_execute_req (cd->device, cmd, DMA_NONE, NULL,
- 0, &sshdr, SR_TIMEOUT,
- MAX_RETRIES);
-
- retries++;
- } while (retries < 5 &&
- (!scsi_status_is_good(the_result) ||
- (scsi_sense_valid(&sshdr) &&
- sshdr.sense_key == UNIT_ATTENTION)));
+ /* eat unit attentions */
+ sr_test_unit_ready(cd->device, &sshdr);
/* ask for mode page 0x2a */
rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 81fbc0b78a52..1e144dfdbd4b 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -61,6 +61,7 @@ int sr_select_speed(struct cdrom_device_info *cdi, int speed);
int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
int sr_is_xa(Scsi_CD *);
+int sr_test_unit_ready(struct scsi_device *sdev, struct scsi_sense_hdr *sshdr);
/* sr_vendor.c */
void sr_vendor_init(Scsi_CD *);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index d5cebff1d646..ae87d08df588 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -306,8 +306,7 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
/* we have no changer support */
return -EINVAL;
}
- if (0 == scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES,
- &sshdr))
+ if (0 == sr_test_unit_ready(cd->device, &sshdr))
return CDS_DISC_OK;
if (!cdrom_get_media_event(cdi, &med)) {
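A sketch of how a caller is expected to use the new sr_test_unit_ready()
helper, mirroring sr_media_change() and sr_drive_status() above: retry
until the startup UNIT ATTENTION conditions clear, then treat ASC 0x3a
("medium not present") as an empty drive rather than a hard failure.
The function name is hypothetical and assumes the sr.h/scsi_eh.h
context:

static int my_medium_present(struct scsi_cd *cd)
{
	struct scsi_sense_hdr sshdr;
	int ret;

	ret = sr_test_unit_ready(cd->device, &sshdr);
	if (ret == 0)
		return 1;	/* drive ready: medium present */
	if (scsi_sense_valid(&sshdr) && sshdr.asc == 0x3a)
		return 0;	/* medium not present */
	return 0;		/* not ready for some other reason */
}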
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 1bc41907a038..06152c7fa689 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -1,392 +1,316 @@
-/* sun3x_esp.c: EnhancedScsiProcessor Sun3x SCSI driver code.
+/* sun3x_esp.c: ESP front-end for Sun3x systems.
*
- * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
- *
- * Based on David S. Miller's esp driver
+ * Copyright (C) 2007,2008 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*/
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/blkdev.h>
-#include <linux/proc_fs.h>
-#include <linux/stat.h>
#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "NCR53C9x.h"
-
#include <asm/sun3x.h>
+#include <asm/io.h>
+#include <asm/dma.h>
#include <asm/dvma.h>
-#include <asm/irq.h>
-
-static void dma_barrier(struct NCR_ESP *esp);
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count);
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static void dma_drain(struct NCR_ESP *esp);
-static void dma_invalidate(struct NCR_ESP *esp);
-static void dma_dump_state(struct NCR_ESP *esp);
-static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length);
-static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length);
-static void dma_ints_off(struct NCR_ESP *esp);
-static void dma_ints_on(struct NCR_ESP *esp);
-static int dma_irq_p(struct NCR_ESP *esp);
-static void dma_poll(struct NCR_ESP *esp, unsigned char *vaddr);
-static int dma_ports_p(struct NCR_ESP *esp);
-static void dma_reset(struct NCR_ESP *esp);
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write);
-static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp);
-static void dma_advance_sg (Scsi_Cmnd *sp);
-
-/* Detecting ESP chips on the machine. This is the simple and easy
- * version.
- */
-int sun3x_esp_detect(struct scsi_host_template *tpnt)
-{
- struct NCR_ESP *esp;
- struct ConfigDev *esp_dev;
-
- esp_dev = 0;
- esp = esp_allocate(tpnt, esp_dev, 0);
-
- /* Do command transfer with DMA */
- esp->do_pio_cmds = 0;
-
- /* Required functions */
- esp->dma_bytes_sent = &dma_bytes_sent;
- esp->dma_can_transfer = &dma_can_transfer;
- esp->dma_dump_state = &dma_dump_state;
- esp->dma_init_read = &dma_init_read;
- esp->dma_init_write = &dma_init_write;
- esp->dma_ints_off = &dma_ints_off;
- esp->dma_ints_on = &dma_ints_on;
- esp->dma_irq_p = &dma_irq_p;
- esp->dma_ports_p = &dma_ports_p;
- esp->dma_setup = &dma_setup;
-
- /* Optional functions */
- esp->dma_barrier = &dma_barrier;
- esp->dma_invalidate = &dma_invalidate;
- esp->dma_drain = &dma_drain;
- esp->dma_irq_entry = 0;
- esp->dma_irq_exit = 0;
- esp->dma_led_on = 0;
- esp->dma_led_off = 0;
- esp->dma_poll = &dma_poll;
- esp->dma_reset = &dma_reset;
-
- /* virtual DMA functions */
- esp->dma_mmu_get_scsi_one = &dma_mmu_get_scsi_one;
- esp->dma_mmu_get_scsi_sgl = &dma_mmu_get_scsi_sgl;
- esp->dma_mmu_release_scsi_one = &dma_mmu_release_scsi_one;
- esp->dma_mmu_release_scsi_sgl = &dma_mmu_release_scsi_sgl;
- esp->dma_advance_sg = &dma_advance_sg;
-
- /* SCSI chip speed */
- esp->cfreq = 20000000;
- esp->eregs = (struct ESP_regs *)(SUN3X_ESP_BASE);
- esp->dregs = (void *)SUN3X_ESP_DMA;
- esp->esp_command = (volatile unsigned char *)dvma_malloc(DVMA_PAGE_SIZE);
- esp->esp_command_dvma = dvma_vtob((unsigned long)esp->esp_command);
-
- esp->irq = 2;
- if (request_irq(esp->irq, esp_intr, IRQF_DISABLED,
- "SUN3X SCSI", esp->ehost)) {
- esp_deallocate(esp);
- return 0;
- }
+/* DMA controller reg offsets */
+#define DMA_CSR 0x00UL /* rw DMA control/status register 0x00 */
+#define DMA_ADDR 0x04UL /* rw DMA transfer address register 0x04 */
+#define DMA_COUNT 0x08UL /* rw DMA transfer count register 0x08 */
+#define DMA_TEST 0x0cUL /* rw DMA test/debug register 0x0c */
- esp->scsi_id = 7;
- esp->diff = 0;
+#include <scsi/scsi_host.h>
- esp_initialize(esp);
+#include "esp_scsi.h"
- /* for reasons beyond my knowledge (and which should likely be fixed)
- sync mode doesn't work on a 3/80 at 5mhz. but it does at 4. */
- esp->sync_defp = 0x3f;
+#define DRV_MODULE_NAME "sun3x_esp"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_VERSION "1.000"
+#define DRV_MODULE_RELDATE "Nov 1, 2007"
- printk("ESP: Total of %d ESP hosts found, %d actually in use.\n", nesps,
- esps_in_use);
- esps_running = esps_in_use;
- return esps_in_use;
+/*
+ * m68k always assumes readl/writel operate on little endian
+ * mmio space; this is wrong at least for Sun3x, so we
+ * need to workaround this until a proper way is found
+ */
+#if 0
+#define dma_read32(REG) \
+ readl(esp->dma_regs + (REG))
+#define dma_write32(VAL, REG) \
+ writel((VAL), esp->dma_regs + (REG))
+#else
+#define dma_read32(REG) \
+ *(volatile u32 *)(esp->dma_regs + (REG))
+#define dma_write32(VAL, REG) \
+ do { *(volatile u32 *)(esp->dma_regs + (REG)) = (VAL); } while (0)
+#endif
+
+static void sun3x_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+ writeb(val, esp->regs + (reg * 4UL));
}
-static void dma_do_drain(struct NCR_ESP *esp)
+static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
{
- struct sparc_dma_registers *dregs =
- (struct sparc_dma_registers *) esp->dregs;
-
- int count = 500000;
-
- while((dregs->cond_reg & DMA_PEND_READ) && (--count > 0))
- udelay(1);
-
- if(!count) {
- printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
- }
-
- dregs->cond_reg |= DMA_FIFO_STDRAIN;
-
- count = 500000;
-
- while((dregs->cond_reg & DMA_FIFO_ISDRAIN) && (--count > 0))
- udelay(1);
-
- if(!count) {
- printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
- }
-
+ return readb(esp->regs + (reg * 4UL));
}
-
-static void dma_barrier(struct NCR_ESP *esp)
+
+static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
+ size_t sz, int dir)
{
- struct sparc_dma_registers *dregs =
- (struct sparc_dma_registers *) esp->dregs;
- int count = 500000;
-
- while((dregs->cond_reg & DMA_PEND_READ) && (--count > 0))
- udelay(1);
-
- if(!count) {
- printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
- }
-
- dregs->cond_reg &= ~(DMA_ENABLE);
+ return dma_map_single(esp->dev, buf, sz, dir);
}
-/* This uses various DMA csr fields and the fifo flags count value to
- * determine how many bytes were successfully sent/received by the ESP.
- */
-static int dma_bytes_sent(struct NCR_ESP *esp, int fifo_count)
+static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
{
- struct sparc_dma_registers *dregs =
- (struct sparc_dma_registers *) esp->dregs;
-
- int rval = dregs->st_addr - esp->esp_command_dvma;
-
- return rval - fifo_count;
+ return dma_map_sg(esp->dev, sg, num_sg, dir);
}
-static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
+static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+ size_t sz, int dir)
{
- return sp->SCp.this_residual;
+ dma_unmap_single(esp->dev, addr, sz, dir);
}
-static void dma_drain(struct NCR_ESP *esp)
+static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+ int num_sg, int dir)
{
- struct sparc_dma_registers *dregs =
- (struct sparc_dma_registers *) esp->dregs;
- int count = 500000;
-
- if(dregs->cond_reg & DMA_FIFO_ISDRAIN) {
- dregs->cond_reg |= DMA_FIFO_STDRAIN;
- while((dregs->cond_reg & DMA_FIFO_ISDRAIN) && (--count > 0))
- udelay(1);
- if(!count) {
- printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
- }
-
- }
+ dma_unmap_sg(esp->dev, sg, num_sg, dir);
}
-static void dma_invalidate(struct NCR_ESP *esp)
+static int sun3x_esp_irq_pending(struct esp *esp)
{
- struct sparc_dma_registers *dregs =
- (struct sparc_dma_registers *) esp->dregs;
-
- __u32 tmp;
- int count = 500000;
-
- while(((tmp = dregs->cond_reg) & DMA_PEND_READ) && (--count > 0))
- udelay(1);
+ if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
+ return 1;
+ return 0;
+}
- if(!count) {
- printk("%s:%d timeout CSR %08lx\n", __FILE__, __LINE__, dregs->cond_reg);
- }
+static void sun3x_esp_reset_dma(struct esp *esp)
+{
+ u32 val;
- dregs->cond_reg = tmp | DMA_FIFO_INV;
- dregs->cond_reg &= ~DMA_FIFO_INV;
+ val = dma_read32(DMA_CSR);
+ dma_write32(val | DMA_RST_SCSI, DMA_CSR);
+ dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
+ /* Enable interrupts. */
+ val = dma_read32(DMA_CSR);
+ dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}
-static void dma_dump_state(struct NCR_ESP *esp)
+static void sun3x_esp_dma_drain(struct esp *esp)
{
- struct sparc_dma_registers *dregs =
- (struct sparc_dma_registers *) esp->dregs;
+ u32 csr;
+ int lim;
- ESPLOG(("esp%d: dma -- cond_reg<%08lx> addr<%08lx>\n",
- esp->esp_id, dregs->cond_reg, dregs->st_addr));
-}
+ csr = dma_read32(DMA_CSR);
+ if (!(csr & DMA_FIFO_ISDRAIN))
+ return;
-static void dma_init_read(struct NCR_ESP *esp, __u32 vaddress, int length)
-{
- struct sparc_dma_registers *dregs =
- (struct sparc_dma_registers *) esp->dregs;
+ dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
- dregs->st_addr = vaddress;
- dregs->cond_reg |= (DMA_ST_WRITE | DMA_ENABLE);
+ lim = 1000;
+ while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
+ if (--lim == 0) {
+ printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
+ esp->host->unique_id);
+ break;
+ }
+ udelay(1);
+ }
}
-static void dma_init_write(struct NCR_ESP *esp, __u32 vaddress, int length)
+static void sun3x_esp_dma_invalidate(struct esp *esp)
{
- struct sparc_dma_registers *dregs =
- (struct sparc_dma_registers *) esp->dregs;
-
- /* Set up the DMA counters */
+ u32 val;
+ int lim;
+
+ lim = 1000;
+ while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
+ if (--lim == 0) {
+ printk(KERN_ALERT PFX "esp%d: DMA will not "
+ "invalidate!\n", esp->host->unique_id);
+ break;
+ }
+ udelay(1);
+ }
- dregs->st_addr = vaddress;
- dregs->cond_reg = ((dregs->cond_reg & ~(DMA_ST_WRITE)) | DMA_ENABLE);
+ val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
+ val |= DMA_FIFO_INV;
+ dma_write32(val, DMA_CSR);
+ val &= ~DMA_FIFO_INV;
+ dma_write32(val, DMA_CSR);
}
-static void dma_ints_off(struct NCR_ESP *esp)
+static void sun3x_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
{
- DMA_INTSOFF((struct sparc_dma_registers *) esp->dregs);
+ u32 csr;
+
+ BUG_ON(!(cmd & ESP_CMD_DMA));
+
+ sun3x_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
+ sun3x_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
+ csr = dma_read32(DMA_CSR);
+ csr |= DMA_ENABLE;
+ if (write)
+ csr |= DMA_ST_WRITE;
+ else
+ csr &= ~DMA_ST_WRITE;
+ dma_write32(csr, DMA_CSR);
+ dma_write32(addr, DMA_ADDR);
+
+ scsi_esp_cmd(esp, cmd);
}
-static void dma_ints_on(struct NCR_ESP *esp)
+static int sun3x_esp_dma_error(struct esp *esp)
{
- DMA_INTSON((struct sparc_dma_registers *) esp->dregs);
-}
+ u32 csr = dma_read32(DMA_CSR);
-static int dma_irq_p(struct NCR_ESP *esp)
-{
- return DMA_IRQ_P((struct sparc_dma_registers *) esp->dregs);
+ if (csr & DMA_HNDL_ERROR)
+ return 1;
+
+ return 0;
}
-static void dma_poll(struct NCR_ESP *esp, unsigned char *vaddr)
+static const struct esp_driver_ops sun3x_esp_ops = {
+ .esp_write8 = sun3x_esp_write8,
+ .esp_read8 = sun3x_esp_read8,
+ .map_single = sun3x_esp_map_single,
+ .map_sg = sun3x_esp_map_sg,
+ .unmap_single = sun3x_esp_unmap_single,
+ .unmap_sg = sun3x_esp_unmap_sg,
+ .irq_pending = sun3x_esp_irq_pending,
+ .reset_dma = sun3x_esp_reset_dma,
+ .dma_drain = sun3x_esp_dma_drain,
+ .dma_invalidate = sun3x_esp_dma_invalidate,
+ .send_dma_cmd = sun3x_esp_send_dma_cmd,
+ .dma_error = sun3x_esp_dma_error,
+};
+
+static int __devinit esp_sun3x_probe(struct platform_device *dev)
{
- int count = 50;
- dma_do_drain(esp);
+ struct scsi_host_template *tpnt = &scsi_esp_template;
+ struct Scsi_Host *host;
+ struct esp *esp;
+ struct resource *res;
+ int err = -ENOMEM;
- /* Wait till the first bits settle. */
- while((*(volatile unsigned char *)vaddr == 0xff) && (--count > 0))
- udelay(1);
+ host = scsi_host_alloc(tpnt, sizeof(struct esp));
+ if (!host)
+ goto fail;
- if(!count) {
-// printk("%s:%d timeout expire (data %02x)\n", __FILE__, __LINE__,
-// esp_read(esp->eregs->esp_fdata));
- //mach_halt();
- vaddr[0] = esp_read(esp->eregs->esp_fdata);
- vaddr[1] = esp_read(esp->eregs->esp_fdata);
- }
+ host->max_id = 8;
+ esp = shost_priv(host);
-}
+ esp->host = host;
+ esp->dev = dev;
+ esp->ops = &sun3x_esp_ops;
-static int dma_ports_p(struct NCR_ESP *esp)
-{
- return (((struct sparc_dma_registers *) esp->dregs)->cond_reg
- & DMA_INT_ENAB);
-}
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res || !res->start)
+ goto fail_unlink;
-/* Resetting various pieces of the ESP scsi driver chipset/buses. */
-static void dma_reset(struct NCR_ESP *esp)
-{
- struct sparc_dma_registers *dregs =
- (struct sparc_dma_registers *)esp->dregs;
+ esp->regs = ioremap_nocache(res->start, 0x20);
+ if (!esp->regs)
+ goto fail_unlink;
- /* Punt the DVMA into a known state. */
- dregs->cond_reg |= DMA_RST_SCSI;
- dregs->cond_reg &= ~(DMA_RST_SCSI);
- DMA_INTSON(dregs);
-}
+ res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ if (!res || !res->start)
+ goto fail_unmap_regs;
-static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
-{
- struct sparc_dma_registers *dregs =
- (struct sparc_dma_registers *) esp->dregs;
- unsigned long nreg = dregs->cond_reg;
+ esp->dma_regs = ioremap_nocache(res->start, 0x10);
+ if (!esp->dma_regs)
+ goto fail_unmap_regs;
-// printk("dma_setup %c addr %08x cnt %08x\n",
-// write ? 'W' : 'R', addr, count);
+ esp->command_block = dma_alloc_coherent(esp->dev, 16,
+ &esp->command_block_dma,
+ GFP_KERNEL);
+ if (!esp->command_block)
+ goto fail_unmap_regs_dma;
- dma_do_drain(esp);
+ host->irq = platform_get_irq(dev, 0);
+ err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
+ "SUN3X ESP", esp);
+ if (err < 0)
+ goto fail_unmap_command_block;
- if(write)
- nreg |= DMA_ST_WRITE;
- else {
- nreg &= ~(DMA_ST_WRITE);
- }
-
- nreg |= DMA_ENABLE;
- dregs->cond_reg = nreg;
- dregs->st_addr = addr;
-}
+ esp->scsi_id = 7;
+ esp->host->this_id = esp->scsi_id;
+ esp->scsi_id_mask = (1 << esp->scsi_id);
+ esp->cfreq = 20000000;
-static void dma_mmu_get_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- sp->SCp.have_data_in = dvma_map((unsigned long)sp->SCp.buffer,
- sp->SCp.this_residual);
- sp->SCp.ptr = (char *)((unsigned long)sp->SCp.have_data_in);
+ dev_set_drvdata(&dev->dev, esp);
+
+ err = scsi_esp_register(esp, &dev->dev);
+ if (err)
+ goto fail_free_irq;
+
+ return 0;
+
+fail_free_irq:
+ free_irq(host->irq, esp);
+fail_unmap_command_block:
+ dma_free_coherent(esp->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
+fail_unmap_regs_dma:
+ iounmap(esp->dma_regs);
+fail_unmap_regs:
+ iounmap(esp->regs);
+fail_unlink:
+ scsi_host_put(host);
+fail:
+ return err;
}
-static void dma_mmu_get_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
+static int __devexit esp_sun3x_remove(struct platform_device *dev)
{
- int sz = sp->SCp.buffers_residual;
- struct scatterlist *sg = sp->SCp.buffer;
-
- while (sz >= 0) {
- sg[sz].dma_address = dvma_map((unsigned long)sg_virt(&sg[sz]),
- sg[sz].length);
- sz--;
- }
- sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dma_address);
-}
+ struct esp *esp = dev_get_drvdata(&dev->dev);
+ unsigned int irq = esp->host->irq;
+ u32 val;
-static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- dvma_unmap((char *)sp->SCp.have_data_in);
-}
+ scsi_esp_unregister(esp);
-static void dma_mmu_release_scsi_sgl (struct NCR_ESP *esp, Scsi_Cmnd *sp)
-{
- int sz = sp->use_sg - 1;
- struct scatterlist *sg = (struct scatterlist *)sp->request_buffer;
-
- while(sz >= 0) {
- dvma_unmap((char *)sg[sz].dma_address);
- sz--;
- }
-}
+ /* Disable interrupts. */
+ val = dma_read32(DMA_CSR);
+ dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
-static void dma_advance_sg (Scsi_Cmnd *sp)
-{
- sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dma_address);
-}
+ free_irq(irq, esp);
+ dma_free_coherent(esp->dev, 16,
+ esp->command_block,
+ esp->command_block_dma);
-static int sun3x_esp_release(struct Scsi_Host *instance)
-{
- /* this code does not support being compiled as a module */
- return 1;
+ scsi_host_put(esp->host);
+ return 0;
}
-static struct scsi_host_template driver_template = {
- .proc_name = "sun3x_esp",
- .proc_info = &esp_proc_info,
- .name = "Sun ESP 100/100a/200",
- .detect = sun3x_esp_detect,
- .release = sun3x_esp_release,
- .slave_alloc = esp_slave_alloc,
- .slave_destroy = esp_slave_destroy,
- .info = esp_info,
- .queuecommand = esp_queue,
- .eh_abort_handler = esp_abort,
- .eh_bus_reset_handler = esp_reset,
- .can_queue = 7,
- .this_id = 7,
- .sg_tablesize = SG_ALL,
- .cmd_per_lun = 1,
- .use_clustering = DISABLE_CLUSTERING,
+static struct platform_driver esp_sun3x_driver = {
+ .probe = esp_sun3x_probe,
+ .remove = __devexit_p(esp_sun3x_remove),
+ .driver = {
+ .name = "sun3x_esp",
+ },
};
+static int __init sun3x_esp_init(void)
+{
+ return platform_driver_register(&esp_sun3x_driver);
+}
-#include "scsi_module.c"
+static void __exit sun3x_esp_exit(void)
+{
+ platform_driver_unregister(&esp_sun3x_driver);
+}
+MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
+MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(sun3x_esp_init);
+module_exit(sun3x_esp_exit);
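The rewritten sun3x front-end above follows the usual platform driver
shape: resources and the IRQ come from the platform device, and the
driver binds through a struct platform_driver.  A stripped-down
skeleton of that shape with hypothetical names, not a drop-in
replacement for the code above:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int __devinit my_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int irq = platform_get_irq(pdev, 0);

	if (!res || irq < 0)
		return -ENODEV;
	/* ioremap res->start, request_irq(irq, ...), register with the core */
	return 0;
}

static int __devexit my_remove(struct platform_device *pdev)
{
	/* unregister from the core, free_irq(), iounmap() */
	return 0;
}

static struct platform_driver my_driver = {
	.probe	= my_probe,
	.remove	= __devexit_p(my_remove),
	.driver	= {
		.name	= "my_device",	/* must match the platform device name */
	},
};

static int __init my_init(void)
{
	return platform_driver_register(&my_driver);
}

static void __exit my_exit(void)
{
	platform_driver_unregister(&my_driver);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");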
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index 254bdaeb35ff..35142b5341b5 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -3842,7 +3842,7 @@ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp)
if (cp->startp == cp->phys.head.lastp ||
sym_evaluate_dp(np, cp, scr_to_cpu(cp->phys.head.lastp),
&dp_ofs) < 0) {
- return cp->data_len;
+ return cp->data_len - cp->odd_byte_adjustment;
}
/*
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 662c00451be4..58d7eee4fe81 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1216,7 +1216,7 @@ static void scsi_to_dev_dir(unsigned int i, unsigned int j) {
cpp->xdir = DTD_IN;
return;
}
- else if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
+ else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
cpp->xdir = DTD_OUT;
return;
}
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index 0d99120ab5a2..2b8a410e0959 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -84,9 +84,6 @@ extern wait_queue_head_t keypress_wait;
struct tty_driver *serial_driver;
-/* serial subtype definitions */
-#define SERIAL_TYPE_NORMAL 1
-
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index f94109cbb46e..b8a4bd94f51d 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2047,7 +2047,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
* Oxford Semi 952 rev B workaround
*/
if (up->bugs & UART_BUG_QUOT && (quot & 0xff) == 0)
- quot ++;
+ quot++;
if (up->capabilities & UART_CAP_FIFO && up->port.fifosize > 1) {
if (baud < 2400)
@@ -2662,16 +2662,17 @@ static int __devinit serial8250_probe(struct platform_device *dev)
memset(&port, 0, sizeof(struct uart_port));
for (i = 0; p && p->flags != 0; p++, i++) {
- port.iobase = p->iobase;
- port.membase = p->membase;
- port.irq = p->irq;
- port.uartclk = p->uartclk;
- port.regshift = p->regshift;
- port.iotype = p->iotype;
- port.flags = p->flags;
- port.mapbase = p->mapbase;
- port.hub6 = p->hub6;
- port.dev = &dev->dev;
+ port.iobase = p->iobase;
+ port.membase = p->membase;
+ port.irq = p->irq;
+ port.uartclk = p->uartclk;
+ port.regshift = p->regshift;
+ port.iotype = p->iotype;
+ port.flags = p->flags;
+ port.mapbase = p->mapbase;
+ port.hub6 = p->hub6;
+ port.private_data = p->private_data;
+ port.dev = &dev->dev;
if (share_irqs)
port.flags |= UPF_SHARE_IRQ;
ret = serial8250_register_port(&port);
@@ -2812,15 +2813,16 @@ int serial8250_register_port(struct uart_port *port)
if (uart) {
uart_remove_one_port(&serial8250_reg, &uart->port);
- uart->port.iobase = port->iobase;
- uart->port.membase = port->membase;
- uart->port.irq = port->irq;
- uart->port.uartclk = port->uartclk;
- uart->port.fifosize = port->fifosize;
- uart->port.regshift = port->regshift;
- uart->port.iotype = port->iotype;
- uart->port.flags = port->flags | UPF_BOOT_AUTOCONF;
- uart->port.mapbase = port->mapbase;
+ uart->port.iobase = port->iobase;
+ uart->port.membase = port->membase;
+ uart->port.irq = port->irq;
+ uart->port.uartclk = port->uartclk;
+ uart->port.fifosize = port->fifosize;
+ uart->port.regshift = port->regshift;
+ uart->port.iotype = port->iotype;
+ uart->port.flags = port->flags | UPF_BOOT_AUTOCONF;
+ uart->port.mapbase = port->mapbase;
+ uart->port.private_data = port->private_data;
if (port->dev)
uart->port.dev = port->dev;
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index ceb03c9e749f..0a4ac2b6eb5a 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -106,6 +106,32 @@ setup_port(struct serial_private *priv, struct uart_port *port,
}
/*
+ * ADDI-DATA GmbH communication cards <info@addi-data.com>
+ */
+static int addidata_apci7800_setup(struct serial_private *priv,
+ struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ unsigned int bar = 0, offset = board->first_offset;
+ bar = FL_GET_BASE(board->flags);
+
+ if (idx < 2) {
+ offset += idx * board->uart_offset;
+ } else if ((idx >= 2) && (idx < 4)) {
+ bar += 1;
+ offset += ((idx - 2) * board->uart_offset);
+ } else if ((idx >= 4) && (idx < 6)) {
+ bar += 2;
+ offset += ((idx - 4) * board->uart_offset);
+ } else if (idx >= 6) {
+ bar += 3;
+ offset += ((idx - 6) * board->uart_offset);
+ }
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+}
+
+/*
* AFAVLAB uses a different mixture of BARs and offsets
* Not that ugly ;) -- HW
*/
@@ -752,6 +778,16 @@ pci_default_setup(struct serial_private *priv, struct pciserial_board *board,
*/
static struct pci_serial_quirk pci_serial_quirks[] = {
/*
+ * ADDI-DATA GmbH communication cards <info@addi-data.com>
+ */
+ {
+ .vendor = PCI_VENDOR_ID_ADDIDATA_OLD,
+ .device = PCI_DEVICE_ID_ADDIDATA_APCI7800,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = addidata_apci7800_setup,
+ },
+ /*
* AFAVLAB cards - these may be called via parport_serial
* It is not clear whether this applies to all products.
*/
@@ -1179,6 +1215,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.base_baud = 115200,
.uart_offset = 8,
},
+ [pbn_b0_8_115200] = {
+ .flags = FL_BASE0,
+ .num_ports = 8,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
[pbn_b0_1_921600] = {
.flags = FL_BASE0,
@@ -2697,6 +2739,97 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pasemi_1682M },
/*
+ * ADDI-DATA GmbH communication cards <info@addi-data.com>
+ */
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7500,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_4_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7420,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_2_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7300,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA_OLD,
+ PCI_DEVICE_ID_ADDIDATA_APCI7800,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b1_8_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7500_2,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_4_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7420_2,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_2_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7300_2,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7500_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_4_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7420_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_2_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7300_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_ADDIDATA,
+ PCI_DEVICE_ID_ADDIDATA_APCI7800_3,
+ PCI_ANY_ID,
+ PCI_ANY_ID,
+ 0,
+ 0,
+ pbn_b0_8_115200 },
+
+ /*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
*/
diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c
index 1de098e75497..6f09cbd7fc48 100644
--- a/drivers/serial/8250_pnp.c
+++ b/drivers/serial/8250_pnp.c
@@ -414,8 +414,9 @@ static int __devinit check_resources(struct pnp_option *option)
*/
static int __devinit serial_pnp_guess_board(struct pnp_dev *dev, int *flags)
{
- if (!(check_name(pnp_dev_name(dev)) || (dev->card && check_name(dev->card->name))))
- return -ENODEV;
+ if (!(check_name(pnp_dev_name(dev)) ||
+ (dev->card && check_name(dev->card->name))))
+ return -ENODEV;
if (check_resources(dev->independent))
return 0;
@@ -452,8 +453,9 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
return -ENODEV;
#ifdef SERIAL_DEBUG_PNP
- printk("Setup PNP port: port %x, mem 0x%lx, irq %d, type %d\n",
- port.iobase, port.mapbase, port.irq, port.iotype);
+ printk(KERN_DEBUG
+ "Setup PNP port: port %x, mem 0x%lx, irq %d, type %d\n",
+ port.iobase, port.mapbase, port.irq, port.iotype);
#endif
port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF;
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 4fa7927997ad..84a054d7e986 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -465,20 +465,24 @@ config SERIAL_DZ
bool "DECstation DZ serial driver"
depends on MACH_DECSTATION && 32BIT
select SERIAL_CORE
- help
- DZ11-family serial controllers for VAXstations, including the
- DC7085, M7814, and M7819.
+ default y
+ ---help---
+ DZ11-family serial controllers for DECstations and VAXstations,
+ including the DC7085, M7814, and M7819.
config SERIAL_DZ_CONSOLE
bool "Support console on DECstation DZ serial driver"
depends on SERIAL_DZ=y
select SERIAL_CORE_CONSOLE
- help
+ default y
+ ---help---
If you say Y here, it will be possible to use a serial port as the
system console (the system console is the device which receives all
kernel messages and warnings and which allows logins in single user
- mode). Note that the firmware uses ttyS0 as the serial console on
- the Maxine and ttyS2 on the others.
+ mode).
+
+ Note that the firmware uses ttyS3 as the serial console on
+ DECstations that use this driver.
If unsure, say Y.
@@ -877,15 +881,15 @@ config SERIAL_SUNHV
systems. Say Y if you want to be able to use this device.
config SERIAL_IP22_ZILOG
- tristate "IP22 Zilog8530 serial support"
- depends on SGI_IP22
+ tristate "SGI Zilog8530 serial support"
+ depends on SGI_HAS_ZILOG
select SERIAL_CORE
help
- This driver supports the Zilog8530 serial ports found on SGI IP22
+ This driver supports the Zilog8530 serial ports found on SGI
systems. Say Y or M if you want to be able to use these serial ports.
config SERIAL_IP22_ZILOG_CONSOLE
- bool "Console on IP22 Zilog8530 serial port"
+ bool "Console on SGI Zilog8530 serial port"
depends on SERIAL_IP22_ZILOG=y
select SERIAL_CORE_CONSOLE
@@ -1138,17 +1142,17 @@ config SERIAL_SGI_L1_CONSOLE
say Y. Otherwise, say N.
config SERIAL_MPC52xx
- tristate "Freescale MPC52xx family PSC serial support"
- depends on PPC_MPC52xx
+ tristate "Freescale MPC52xx/MPC512x family PSC serial support"
+ depends on PPC_MPC52xx || PPC_MPC512x
select SERIAL_CORE
help
- This drivers support the MPC52xx PSC serial ports. If you would
- like to use them, you must answer Y or M to this option. Not that
+ This driver supports MPC52xx and MPC512x PSC serial ports. If you would
+ like to use them, you must answer Y or M to this option. Note that
for use as console, it must be included in kernel and not as a
module.
config SERIAL_MPC52xx_CONSOLE
- bool "Console on a Freescale MPC52xx family PSC serial port"
+ bool "Console on a Freescale MPC52xx/MPC512x family PSC serial port"
depends on SERIAL_MPC52xx=y
select SERIAL_CORE_CONSOLE
help
@@ -1156,7 +1160,7 @@ config SERIAL_MPC52xx_CONSOLE
of the Freescale MPC52xx family as a console.
config SERIAL_MPC52xx_CONSOLE_BAUD
- int "Freescale MPC52xx family PSC serial port baud"
+ int "Freescale MPC52xx/MPC512x family PSC serial port baud"
depends on SERIAL_MPC52xx_CONSOLE=y
default "9600"
help
@@ -1318,4 +1322,19 @@ config SERIAL_QE
This driver supports the QE serial ports on Freescale embedded
PowerPC that contain a QUICC Engine.
+config SERIAL_SC26XX
+ tristate "SC2681/SC2692 serial port support"
+ depends on SNI_RM
+ select SERIAL_CORE
+ help
+ This is a driver for the onboard serial ports of
+ older RM400 machines.
+
+config SERIAL_SC26XX_CONSOLE
+ bool "Console on SC2681/SC2692 serial port"
+ depends on SERIAL_SC26XX
+ select SERIAL_CORE_CONSOLE
+ help
+ Support for Console on SC2681/SC2692 serial ports.
+
endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 2dd41b4cc8db..640cfe44a56d 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_SERIAL_M32R_SIO) += m32r_sio.o
obj-$(CONFIG_SERIAL_MPSC) += mpsc.o
obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
+obj-$(CONFIG_SERIAL_SC26XX) += sc26xx.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index b5e4478de0e3..236af9d33851 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -380,7 +380,7 @@ static void cpm_uart_int_rx(struct uart_port *port)
static irqreturn_t cpm_uart_int(int irq, void *data)
{
u8 events;
- struct uart_port *port = (struct uart_port *)data;
+ struct uart_port *port = data;
struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
smc_t __iomem *smcp = pinfo->smcp;
scc_t __iomem *sccp = pinfo->sccp;
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index a4e23cf47906..383c4e660cd5 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -68,11 +68,6 @@ static char *serial_version = "$Revision: 1.25 $";
struct tty_driver *serial_driver;
-/* serial subtype definitions */
-#ifndef SERIAL_TYPE_NORMAL
-#define SERIAL_TYPE_NORMAL 1
-#endif
-
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
diff --git a/drivers/serial/dz.c b/drivers/serial/dz.c
index d31721f2744d..116211fcd36f 100644
--- a/drivers/serial/dz.c
+++ b/drivers/serial/dz.c
@@ -6,7 +6,7 @@
*
* Email: olivier.lebaillif@ifrsys.com
*
- * Copyright (C) 2004, 2006 Maciej W. Rozycki
+ * Copyright (C) 2004, 2006, 2007 Maciej W. Rozycki
*
* [31-AUG-98] triemer
* Changed IRQ to use Harald's dec internals interrupts.h
@@ -32,38 +32,63 @@
#define SUPPORT_SYSRQ
#endif
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/console.h>
#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
+#include <asm/atomic.h>
#include <asm/bootinfo.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
#include <asm/dec/interrupts.h>
#include <asm/dec/kn01.h>
#include <asm/dec/kn02.h>
#include <asm/dec/machtype.h>
#include <asm/dec/prom.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <asm/dec/system.h>
#include "dz.h"
-static char *dz_name = "DECstation DZ serial driver version ";
-static char *dz_version = "1.03";
+
+MODULE_DESCRIPTION("DECstation DZ serial driver");
+MODULE_LICENSE("GPL");
+
+
+static char dz_name[] __initdata = "DECstation DZ serial driver version ";
+static char dz_version[] __initdata = "1.04";
struct dz_port {
+ struct dz_mux *mux;
struct uart_port port;
unsigned int cflag;
};
-static struct dz_port dz_ports[DZ_NB_PORT];
+struct dz_mux {
+ struct dz_port dport[DZ_NB_PORT];
+ atomic_t map_guard;
+ atomic_t irq_guard;
+ int initialised;
+};
+
+static struct dz_mux dz_mux;
+
+static inline struct dz_port *to_dport(struct uart_port *uport)
+{
+ return container_of(uport, struct dz_port, port);
+}
/*
* ------------------------------------------------------------
@@ -74,21 +99,18 @@ static struct dz_port dz_ports[DZ_NB_PORT];
* ------------------------------------------------------------
*/
-static inline unsigned short dz_in(struct dz_port *dport, unsigned offset)
+static u16 dz_in(struct dz_port *dport, unsigned offset)
{
- volatile unsigned short *addr =
- (volatile unsigned short *) (dport->port.membase + offset);
+ void __iomem *addr = dport->port.membase + offset;
- return *addr;
+ return readw(addr);
}
-static inline void dz_out(struct dz_port *dport, unsigned offset,
- unsigned short value)
+static void dz_out(struct dz_port *dport, unsigned offset, u16 value)
{
- volatile unsigned short *addr =
- (volatile unsigned short *) (dport->port.membase + offset);
+ void __iomem *addr = dport->port.membase + offset;
- *addr = value;
+ writew(value, addr);
}
/*
@@ -103,42 +125,33 @@ static inline void dz_out(struct dz_port *dport, unsigned offset,
static void dz_stop_tx(struct uart_port *uport)
{
- struct dz_port *dport = (struct dz_port *)uport;
- unsigned short tmp, mask = 1 << dport->port.line;
- unsigned long flags;
+ struct dz_port *dport = to_dport(uport);
+ u16 tmp, mask = 1 << dport->port.line;
- spin_lock_irqsave(&dport->port.lock, flags);
tmp = dz_in(dport, DZ_TCR); /* read the TX flag */
tmp &= ~mask; /* clear the TX flag */
dz_out(dport, DZ_TCR, tmp);
- spin_unlock_irqrestore(&dport->port.lock, flags);
}
static void dz_start_tx(struct uart_port *uport)
{
- struct dz_port *dport = (struct dz_port *)uport;
- unsigned short tmp, mask = 1 << dport->port.line;
- unsigned long flags;
+ struct dz_port *dport = to_dport(uport);
+ u16 tmp, mask = 1 << dport->port.line;
- spin_lock_irqsave(&dport->port.lock, flags);
tmp = dz_in(dport, DZ_TCR); /* read the TX flag */
tmp |= mask; /* set the TX flag */
dz_out(dport, DZ_TCR, tmp);
- spin_unlock_irqrestore(&dport->port.lock, flags);
}
static void dz_stop_rx(struct uart_port *uport)
{
- struct dz_port *dport = (struct dz_port *)uport;
- unsigned long flags;
+ struct dz_port *dport = to_dport(uport);
- spin_lock_irqsave(&dport->port.lock, flags);
- dport->cflag &= ~DZ_CREAD;
- dz_out(dport, DZ_LPR, dport->cflag | dport->port.line);
- spin_unlock_irqrestore(&dport->port.lock, flags);
+ dport->cflag &= ~DZ_RXENAB;
+ dz_out(dport, DZ_LPR, dport->cflag);
}
-static void dz_enable_ms(struct uart_port *port)
+static void dz_enable_ms(struct uart_port *uport)
{
/* nothing to do */
}
@@ -170,73 +183,73 @@ static void dz_enable_ms(struct uart_port *port)
* This routine deals with inputs from any lines.
* ------------------------------------------------------------
*/
-static inline void dz_receive_chars(struct dz_port *dport_in)
+static inline void dz_receive_chars(struct dz_mux *mux)
{
- struct dz_port *dport;
+ struct uart_port *uport;
+ struct dz_port *dport = &mux->dport[0];
struct tty_struct *tty = NULL;
struct uart_icount *icount;
int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 };
- unsigned short status;
unsigned char ch, flag;
+ u16 status;
int i;
- while ((status = dz_in(dport_in, DZ_RBUF)) & DZ_DVAL) {
- dport = &dz_ports[LINE(status)];
- tty = dport->port.info->tty; /* point to the proper dev */
+ while ((status = dz_in(dport, DZ_RBUF)) & DZ_DVAL) {
+ dport = &mux->dport[LINE(status)];
+ uport = &dport->port;
+ tty = uport->info->tty; /* point to the proper dev */
ch = UCHAR(status); /* grab the char */
+ flag = TTY_NORMAL;
- icount = &dport->port.icount;
+ icount = &uport->icount;
icount->rx++;
- flag = TTY_NORMAL;
- if (status & DZ_FERR) { /* frame error */
+ if (unlikely(status & (DZ_OERR | DZ_FERR | DZ_PERR))) {
+
/*
- * There is no separate BREAK status bit, so
- * treat framing errors as BREAKs for Magic SysRq
- * and SAK; normally, otherwise.
+ * There is no separate BREAK status bit, so treat
+ * null characters with framing errors as BREAKs;
+ * normally, otherwise. For this move the Framing
+ * Error bit to a simulated BREAK bit.
*/
- if (uart_handle_break(&dport->port))
- continue;
- if (dport->port.flags & UPF_SAK)
+ if (!ch) {
+ status |= (status & DZ_FERR) >>
+ (ffs(DZ_FERR) - ffs(DZ_BREAK));
+ status &= ~DZ_FERR;
+ }
+
+ /* Handle SysRq/SAK & keep track of the statistics. */
+ if (status & DZ_BREAK) {
+ icount->brk++;
+ if (uart_handle_break(uport))
+ continue;
+ } else if (status & DZ_FERR)
+ icount->frame++;
+ else if (status & DZ_PERR)
+ icount->parity++;
+ if (status & DZ_OERR)
+ icount->overrun++;
+
+ status &= uport->read_status_mask;
+ if (status & DZ_BREAK)
flag = TTY_BREAK;
- else
+ else if (status & DZ_FERR)
flag = TTY_FRAME;
- } else if (status & DZ_OERR) /* overrun error */
- flag = TTY_OVERRUN;
- else if (status & DZ_PERR) /* parity error */
- flag = TTY_PARITY;
-
- /* keep track of the statistics */
- switch (flag) {
- case TTY_FRAME:
- icount->frame++;
- break;
- case TTY_PARITY:
- icount->parity++;
- break;
- case TTY_OVERRUN:
- icount->overrun++;
- break;
- case TTY_BREAK:
- icount->brk++;
- break;
- default:
- break;
+ else if (status & DZ_PERR)
+ flag = TTY_PARITY;
+
}
- if (uart_handle_sysrq_char(&dport->port, ch))
+ if (uart_handle_sysrq_char(uport, ch))
continue;
- if ((status & dport->port.ignore_status_mask) == 0) {
- uart_insert_char(&dport->port,
- status, DZ_OERR, ch, flag);
- lines_rx[LINE(status)] = 1;
- }
+ uart_insert_char(uport, status, DZ_OERR, ch, flag);
+ lines_rx[LINE(status)] = 1;
}
for (i = 0; i < DZ_NB_PORT; i++)
if (lines_rx[i])
- tty_flip_buffer_push(dz_ports[i].port.info->tty);
+ tty_flip_buffer_push(mux->dport[i].port.info->tty);
}
/*
@@ -246,15 +259,15 @@ static inline void dz_receive_chars(struct dz_port *dport_in)
* This routine deals with outputs to any lines.
* ------------------------------------------------------------
*/
-static inline void dz_transmit_chars(struct dz_port *dport_in)
+static inline void dz_transmit_chars(struct dz_mux *mux)
{
- struct dz_port *dport;
+ struct dz_port *dport = &mux->dport[0];
struct circ_buf *xmit;
- unsigned short status;
unsigned char tmp;
+ u16 status;
- status = dz_in(dport_in, DZ_CSR);
- dport = &dz_ports[LINE(status)];
+ status = dz_in(dport, DZ_CSR);
+ dport = &mux->dport[LINE(status)];
xmit = &dport->port.info->xmit;
if (dport->port.x_char) { /* XON/XOFF chars */
@@ -265,7 +278,9 @@ static inline void dz_transmit_chars(struct dz_port *dport_in)
}
/* If nothing to do or stopped or hardware stopped. */
if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) {
+ spin_lock(&dport->port.lock);
dz_stop_tx(&dport->port);
+ spin_unlock(&dport->port.lock);
return;
}
@@ -282,8 +297,11 @@ static inline void dz_transmit_chars(struct dz_port *dport_in)
uart_write_wakeup(&dport->port);
/* Are we are done. */
- if (uart_circ_empty(xmit))
+ if (uart_circ_empty(xmit)) {
+ spin_lock(&dport->port.lock);
dz_stop_tx(&dport->port);
+ spin_unlock(&dport->port.lock);
+ }
}
/*
@@ -301,7 +319,7 @@ static inline void check_modem_status(struct dz_port *dport)
* 1. No status change interrupt; use a timer.
* 2. Handle the 3100/5000 as appropriate. --macro
*/
- unsigned short status;
+ u16 status;
/* If not the modem line just return. */
if (dport->port.line != DZ_MODEM)
@@ -322,19 +340,20 @@ static inline void check_modem_status(struct dz_port *dport)
* It deals with the multiple ports.
* ------------------------------------------------------------
*/
-static irqreturn_t dz_interrupt(int irq, void *dev)
+static irqreturn_t dz_interrupt(int irq, void *dev_id)
{
- struct dz_port *dport = (struct dz_port *)dev;
- unsigned short status;
+ struct dz_mux *mux = dev_id;
+ struct dz_port *dport = &mux->dport[0];
+ u16 status;
/* get the reason why we just got an irq */
status = dz_in(dport, DZ_CSR);
if ((status & (DZ_RDONE | DZ_RIE)) == (DZ_RDONE | DZ_RIE))
- dz_receive_chars(dport);
+ dz_receive_chars(mux);
if ((status & (DZ_TRDY | DZ_TIE)) == (DZ_TRDY | DZ_TIE))
- dz_transmit_chars(dport);
+ dz_transmit_chars(mux);
return IRQ_HANDLED;
}
@@ -350,7 +369,7 @@ static unsigned int dz_get_mctrl(struct uart_port *uport)
/*
* FIXME: Handle the 3100/5000 as appropriate. --macro
*/
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
unsigned int mctrl = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
if (dport->port.line == DZ_MODEM) {
@@ -366,8 +385,8 @@ static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl)
/*
* FIXME: Handle the 3100/5000 as appropriate. --macro
*/
- struct dz_port *dport = (struct dz_port *)uport;
- unsigned short tmp;
+ struct dz_port *dport = to_dport(uport);
+ u16 tmp;
if (dport->port.line == DZ_MODEM) {
tmp = dz_in(dport, DZ_TCR);
@@ -388,15 +407,30 @@ static void dz_set_mctrl(struct uart_port *uport, unsigned int mctrl)
*/
static int dz_startup(struct uart_port *uport)
{
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
+ struct dz_mux *mux = dport->mux;
unsigned long flags;
- unsigned short tmp;
+ int irq_guard;
+ int ret;
+ u16 tmp;
+
+ irq_guard = atomic_add_return(1, &mux->irq_guard);
+ if (irq_guard != 1)
+ return 0;
+
+ ret = request_irq(dport->port.irq, dz_interrupt,
+ IRQF_SHARED, "dz", mux);
+ if (ret) {
+ atomic_add(-1, &mux->irq_guard);
+ printk(KERN_ERR "dz: Cannot get IRQ %d!\n", dport->port.irq);
+ return ret;
+ }
spin_lock_irqsave(&dport->port.lock, flags);
- /* enable the interrupt and the scanning */
+ /* Enable interrupts. */
tmp = dz_in(dport, DZ_CSR);
- tmp |= DZ_RIE | DZ_TIE | DZ_MSE;
+ tmp |= DZ_RIE | DZ_TIE;
dz_out(dport, DZ_CSR, tmp);
spin_unlock_irqrestore(&dport->port.lock, flags);
@@ -414,7 +448,25 @@ static int dz_startup(struct uart_port *uport)
*/
static void dz_shutdown(struct uart_port *uport)
{
- dz_stop_tx(uport);
+ struct dz_port *dport = to_dport(uport);
+ struct dz_mux *mux = dport->mux;
+ unsigned long flags;
+ int irq_guard;
+ u16 tmp;
+
+ spin_lock_irqsave(&dport->port.lock, flags);
+ dz_stop_tx(&dport->port);
+ spin_unlock_irqrestore(&dport->port.lock, flags);
+
+ irq_guard = atomic_add_return(-1, &mux->irq_guard);
+ if (!irq_guard) {
+ /* Disable interrupts. */
+ tmp = dz_in(dport, DZ_CSR);
+ tmp &= ~(DZ_RIE | DZ_TIE);
+ dz_out(dport, DZ_CSR, tmp);
+
+ free_irq(dport->port.irq, mux);
+ }
}
/*
@@ -431,7 +483,7 @@ static void dz_shutdown(struct uart_port *uport)
*/
static unsigned int dz_tx_empty(struct uart_port *uport)
{
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
unsigned short tmp, mask = 1 << dport->port.line;
tmp = dz_in(dport, DZ_TCR);
@@ -446,7 +498,7 @@ static void dz_break_ctl(struct uart_port *uport, int break_state)
* FIXME: Can't access BREAK bits in TDR easily;
* reuse the code for polled TX. --macro
*/
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
unsigned long flags;
unsigned short tmp, mask = 1 << dport->port.line;
@@ -460,12 +512,69 @@ static void dz_break_ctl(struct uart_port *uport, int break_state)
spin_unlock_irqrestore(&uport->lock, flags);
}
+static int dz_encode_baud_rate(unsigned int baud)
+{
+ switch (baud) {
+ case 50:
+ return DZ_B50;
+ case 75:
+ return DZ_B75;
+ case 110:
+ return DZ_B110;
+ case 134:
+ return DZ_B134;
+ case 150:
+ return DZ_B150;
+ case 300:
+ return DZ_B300;
+ case 600:
+ return DZ_B600;
+ case 1200:
+ return DZ_B1200;
+ case 1800:
+ return DZ_B1800;
+ case 2000:
+ return DZ_B2000;
+ case 2400:
+ return DZ_B2400;
+ case 3600:
+ return DZ_B3600;
+ case 4800:
+ return DZ_B4800;
+ case 7200:
+ return DZ_B7200;
+ case 9600:
+ return DZ_B9600;
+ default:
+ return -1;
+ }
+}
+
+
+static void dz_reset(struct dz_port *dport)
+{
+ struct dz_mux *mux = dport->mux;
+
+ if (mux->initialised)
+ return;
+
+ dz_out(dport, DZ_CSR, DZ_CLR);
+ while (dz_in(dport, DZ_CSR) & DZ_CLR);
+ iob();
+
+ /* Enable scanning. */
+ dz_out(dport, DZ_CSR, DZ_MSE);
+
+ mux->initialised = 1;
+}
+
static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
struct ktermios *old_termios)
{
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
unsigned long flags;
unsigned int cflag, baud;
+ int bflag;
cflag = dport->port.line;
@@ -492,105 +601,127 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
cflag |= DZ_PARODD;
baud = uart_get_baud_rate(uport, termios, old_termios, 50, 9600);
- switch (baud) {
- case 50:
- cflag |= DZ_B50;
- break;
- case 75:
- cflag |= DZ_B75;
- break;
- case 110:
- cflag |= DZ_B110;
- break;
- case 134:
- cflag |= DZ_B134;
- break;
- case 150:
- cflag |= DZ_B150;
- break;
- case 300:
- cflag |= DZ_B300;
- break;
- case 600:
- cflag |= DZ_B600;
- break;
- case 1200:
- cflag |= DZ_B1200;
- break;
- case 1800:
- cflag |= DZ_B1800;
- break;
- case 2000:
- cflag |= DZ_B2000;
- break;
- case 2400:
- cflag |= DZ_B2400;
- break;
- case 3600:
- cflag |= DZ_B3600;
- break;
- case 4800:
- cflag |= DZ_B4800;
- break;
- case 7200:
- cflag |= DZ_B7200;
- break;
- case 9600:
- default:
- cflag |= DZ_B9600;
+ bflag = dz_encode_baud_rate(baud);
+ if (bflag < 0) { /* Try to keep unchanged. */
+ baud = uart_get_baud_rate(uport, old_termios, NULL, 50, 9600);
+ bflag = dz_encode_baud_rate(baud);
+ if (bflag < 0) { /* Resort to 9600. */
+ baud = 9600;
+ bflag = DZ_B9600;
+ }
+ tty_termios_encode_baud_rate(termios, baud, baud);
}
+ cflag |= bflag;
if (termios->c_cflag & CREAD)
cflag |= DZ_RXENAB;
spin_lock_irqsave(&dport->port.lock, flags);
- dz_out(dport, DZ_LPR, cflag | dport->port.line);
+ uart_update_timeout(uport, termios->c_cflag, baud);
+
+ dz_out(dport, DZ_LPR, cflag);
dport->cflag = cflag;
/* setup accept flag */
dport->port.read_status_mask = DZ_OERR;
if (termios->c_iflag & INPCK)
dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ dport->port.read_status_mask |= DZ_BREAK;
/* characters to ignore */
uport->ignore_status_mask = 0;
+ if ((termios->c_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK))
+ dport->port.ignore_status_mask |= DZ_OERR;
if (termios->c_iflag & IGNPAR)
dport->port.ignore_status_mask |= DZ_FERR | DZ_PERR;
+ if (termios->c_iflag & IGNBRK)
+ dport->port.ignore_status_mask |= DZ_BREAK;
spin_unlock_irqrestore(&dport->port.lock, flags);
}
-static const char *dz_type(struct uart_port *port)
+static const char *dz_type(struct uart_port *uport)
{
return "DZ";
}
-static void dz_release_port(struct uart_port *port)
+static void dz_release_port(struct uart_port *uport)
{
- /* nothing to do */
+ struct dz_mux *mux = to_dport(uport)->mux;
+ int map_guard;
+
+ iounmap(uport->membase);
+ uport->membase = NULL;
+
+ map_guard = atomic_add_return(-1, &mux->map_guard);
+ if (!map_guard)
+ release_mem_region(uport->mapbase, dec_kn_slot_size);
}
-static int dz_request_port(struct uart_port *port)
+static int dz_map_port(struct uart_port *uport)
{
+ if (!uport->membase)
+ uport->membase = ioremap_nocache(uport->mapbase,
+ dec_kn_slot_size);
+ if (!uport->membase) {
+ printk(KERN_ERR "dz: Cannot map MMIO\n");
+ return -ENOMEM;
+ }
return 0;
}
-static void dz_config_port(struct uart_port *port, int flags)
+static int dz_request_port(struct uart_port *uport)
{
- if (flags & UART_CONFIG_TYPE)
- port->type = PORT_DZ;
+ struct dz_mux *mux = to_dport(uport)->mux;
+ int map_guard;
+ int ret;
+
+ map_guard = atomic_add_return(1, &mux->map_guard);
+ if (map_guard == 1) {
+ if (!request_mem_region(uport->mapbase, dec_kn_slot_size,
+ "dz")) {
+ atomic_add(-1, &mux->map_guard);
+ printk(KERN_ERR
+ "dz: Unable to reserve MMIO resource\n");
+ return -EBUSY;
+ }
+ }
+ ret = dz_map_port(uport);
+ if (ret) {
+ map_guard = atomic_add_return(-1, &mux->map_guard);
+ if (!map_guard)
+ release_mem_region(uport->mapbase, dec_kn_slot_size);
+ return ret;
+ }
+ return 0;
+}
+
+static void dz_config_port(struct uart_port *uport, int flags)
+{
+ struct dz_port *dport = to_dport(uport);
+
+ if (flags & UART_CONFIG_TYPE) {
+ if (dz_request_port(uport))
+ return;
+
+ uport->type = PORT_DZ;
+
+ dz_reset(dport);
+ }
}
/*
- * verify the new serial_struct (for TIOCSSERIAL).
+ * Verify the new serial_struct (for TIOCSSERIAL).
*/
-static int dz_verify_port(struct uart_port *port, struct serial_struct *ser)
+static int dz_verify_port(struct uart_port *uport, struct serial_struct *ser)
{
int ret = 0;
+
if (ser->type != PORT_UNKNOWN && ser->type != PORT_DZ)
ret = -EINVAL;
- if (ser->irq != port->irq)
+ if (ser->irq != uport->irq)
ret = -EINVAL;
return ret;
}
@@ -617,40 +748,32 @@ static struct uart_ops dz_ops = {
static void __init dz_init_ports(void)
{
static int first = 1;
- struct dz_port *dport;
unsigned long base;
- int i;
+ int line;
if (!first)
return;
first = 0;
- if (mips_machtype == MACH_DS23100 ||
- mips_machtype == MACH_DS5100)
- base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_DZ11);
+ if (mips_machtype == MACH_DS23100 || mips_machtype == MACH_DS5100)
+ base = dec_kn_slot_base + KN01_DZ11;
else
- base = CKSEG1ADDR(KN02_SLOT_BASE + KN02_DZ11);
-
- for (i = 0, dport = dz_ports; i < DZ_NB_PORT; i++, dport++) {
- spin_lock_init(&dport->port.lock);
- dport->port.membase = (char *) base;
- dport->port.iotype = UPIO_MEM;
- dport->port.irq = dec_interrupt[DEC_IRQ_DZ11];
- dport->port.line = i;
- dport->port.fifosize = 1;
- dport->port.ops = &dz_ops;
- dport->port.flags = UPF_BOOT_AUTOCONF;
- }
-}
+ base = dec_kn_slot_base + KN02_DZ11;
-static void dz_reset(struct dz_port *dport)
-{
- dz_out(dport, DZ_CSR, DZ_CLR);
- while (dz_in(dport, DZ_CSR) & DZ_CLR);
- iob();
+ for (line = 0; line < DZ_NB_PORT; line++) {
+ struct dz_port *dport = &dz_mux.dport[line];
+ struct uart_port *uport = &dport->port;
- /* enable scanning */
- dz_out(dport, DZ_CSR, DZ_MSE);
+ dport->mux = &dz_mux;
+
+ uport->irq = dec_interrupt[DEC_IRQ_DZ11];
+ uport->fifosize = 1;
+ uport->iotype = UPIO_MEM;
+ uport->flags = UPF_BOOT_AUTOCONF;
+ uport->ops = &dz_ops;
+ uport->line = line;
+ uport->mapbase = base;
+ }
}
#ifdef CONFIG_SERIAL_DZ_CONSOLE
@@ -670,7 +793,7 @@ static void dz_reset(struct dz_port *dport)
*/
static void dz_console_putchar(struct uart_port *uport, int ch)
{
- struct dz_port *dport = (struct dz_port *)uport;
+ struct dz_port *dport = to_dport(uport);
unsigned long flags;
unsigned short csr, tcr, trdy, mask;
int loops = 10000;
@@ -685,7 +808,7 @@ static void dz_console_putchar(struct uart_port *uport, int ch)
iob();
spin_unlock_irqrestore(&dport->port.lock, flags);
- while (loops--) {
+ do {
trdy = dz_in(dport, DZ_CSR);
if (!(trdy & DZ_TRDY))
continue;
@@ -696,7 +819,7 @@ static void dz_console_putchar(struct uart_port *uport, int ch)
dz_out(dport, DZ_TCR, mask);
iob();
udelay(2);
- }
+ } while (loops--);
if (loops) /* Cannot send otherwise. */
dz_out(dport, DZ_TDR, ch);
@@ -717,7 +840,7 @@ static void dz_console_print(struct console *co,
const char *str,
unsigned int count)
{
- struct dz_port *dport = &dz_ports[co->index];
+ struct dz_port *dport = &dz_mux.dport[co->index];
#ifdef DEBUG_DZ
prom_printf((char *) str);
#endif
@@ -726,22 +849,28 @@ static void dz_console_print(struct console *co,
static int __init dz_console_setup(struct console *co, char *options)
{
- struct dz_port *dport = &dz_ports[co->index];
+ struct dz_port *dport = &dz_mux.dport[co->index];
+ struct uart_port *uport = &dport->port;
int baud = 9600;
int bits = 8;
int parity = 'n';
int flow = 'n';
+ int ret;
- if (options)
- uart_parse_options(options, &baud, &parity, &bits, &flow);
+ ret = dz_map_port(uport);
+ if (ret)
+ return ret;
dz_reset(dport);
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
return uart_set_options(&dport->port, co, baud, parity, bits, flow);
}
static struct uart_driver dz_reg;
-static struct console dz_sercons = {
+static struct console dz_console = {
.name = "ttyS",
.write = dz_console_print,
.device = uart_console_device,
@@ -755,7 +884,7 @@ static int __init dz_serial_console_init(void)
{
if (!IOASIC) {
dz_init_ports();
- register_console(&dz_sercons);
+ register_console(&dz_console);
return 0;
} else
return -ENXIO;
@@ -763,7 +892,7 @@ static int __init dz_serial_console_init(void)
console_initcall(dz_serial_console_init);
-#define SERIAL_DZ_CONSOLE &dz_sercons
+#define SERIAL_DZ_CONSOLE &dz_console
#else
#define SERIAL_DZ_CONSOLE NULL
#endif /* CONFIG_SERIAL_DZ_CONSOLE */
@@ -789,26 +918,14 @@ static int __init dz_init(void)
dz_init_ports();
-#ifndef CONFIG_SERIAL_DZ_CONSOLE
- /* reset the chip */
- dz_reset(&dz_ports[0]);
-#endif
-
- if (request_irq(dz_ports[0].port.irq, dz_interrupt,
- IRQF_DISABLED, "DZ", &dz_ports[0]))
- panic("Unable to register DZ interrupt");
-
ret = uart_register_driver(&dz_reg);
- if (ret != 0)
+ if (ret)
return ret;
for (i = 0; i < DZ_NB_PORT; i++)
- uart_add_one_port(&dz_reg, &dz_ports[i].port);
+ uart_add_one_port(&dz_reg, &dz_mux.dport[i].port);
- return ret;
+ return 0;
}
module_init(dz_init);
-
-MODULE_DESCRIPTION("DECstation DZ serial driver");
-MODULE_LICENSE("GPL");
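The dz.c rework above hangs all four lines off one dz_mux and guards the shared IRQ and MMIO region with atomic use counts (irq_guard, map_guard): the first port to open acquires the resource, the last one to close releases it. Below is a minimal userspace sketch of that guard pattern, assuming hypothetical acquire_resource()/release_resource() helpers in place of request_irq()/free_irq(); it illustrates the counting scheme only, not the driver code itself.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int guard;

static int acquire_resource(void)  { puts("acquired");  return 0; }  /* stand-in */
static void release_resource(void) { puts("released"); }             /* stand-in */

/* First user acquires; later users only bump the count. */
static int guard_get(void)
{
	if (atomic_fetch_add(&guard, 1) + 1 != 1)
		return 0;
	if (acquire_resource()) {
		atomic_fetch_sub(&guard, 1);	/* undo on failure */
		return -1;
	}
	return 0;
}

/* Last user releases. */
static void guard_put(void)
{
	if (atomic_fetch_sub(&guard, 1) - 1 == 0)
		release_resource();
}

int main(void)
{
	guard_get();	/* acquires */
	guard_get();	/* counts only */
	guard_put();	/* counts only */
	guard_put();	/* releases */
	return 0;
}

As in dz_startup(), a failed acquisition must also undo the count it just added, otherwise later openers would wrongly assume the resource is already held.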
diff --git a/drivers/serial/dz.h b/drivers/serial/dz.h
index 9674d4e49872..faf169ed27b3 100644
--- a/drivers/serial/dz.h
+++ b/drivers/serial/dz.h
@@ -33,6 +33,8 @@
#define DZ_FERR 0x2000 /* Frame error indicator */
#define DZ_PERR 0x1000 /* Parity error indicator */
+#define DZ_BREAK 0x0800 /* BREAK event software flag */
+
#define LINE(x) ((x & DZ_LINE_MASK) >> 8) /* Get the line number
from the input buffer */
#define UCHAR(x) ((unsigned char)(x & DZ_RBUF_MASK))
@@ -107,8 +109,8 @@
#define DZ_B7200 0x0D00
#define DZ_B9600 0x0E00
-#define DZ_CREAD 0x1000 /* Enable receiver */
-#define DZ_RXENAB 0x1000 /* enable receive char */
+#define DZ_RXENAB 0x1000 /* Receiver Enable */
+
/*
* Addresses for the DZ registers
*/
@@ -124,9 +126,4 @@
#define DZ_XMIT_SIZE 4096 /* buffer size */
#define DZ_WAKEUP_CHARS DZ_XMIT_SIZE/4
-#ifdef MODULE
-int init_module (void)
-void cleanup_module (void)
-#endif
-
#endif /* DZ_SERIAL_H */
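The DZ_BREAK bit added above is consumed by the new read_status_mask/ignore_status_mask handling in dz_set_termios(): the first mask selects which error conditions are reported at all, the second which of those are silently dropped. A tiny standalone sketch of that two-mask filter, with made-up bit values standing in for the DZ_* flags and a hard-coded sample status word:

#include <stdio.h>

/* Stand-ins for the DZ_* status bits; values are illustrative only. */
#define ST_OERR		0x4000
#define ST_FERR		0x2000
#define ST_PERR		0x1000
#define ST_BREAK	0x0800

int main(void)
{
	/* What set_termios would derive from INPCK|BRKINT and IGNBRK: */
	unsigned int read_mask   = ST_OERR | ST_FERR | ST_PERR | ST_BREAK;
	unsigned int ignore_mask = ST_BREAK;

	unsigned int status = ST_BREAK | ST_PERR;	/* sample receive event */

	status &= read_mask;		/* report only enabled conditions */
	if (status & ignore_mask)
		puts("event dropped (ignored condition)");
	else
		puts("event passed to the tty layer");
	return 0;
}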
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index dc1967176fe2..56af1f566a4c 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -308,7 +308,7 @@ static void imx_start_tx(struct uart_port *port)
static irqreturn_t imx_rtsint(int irq, void *dev_id)
{
- struct imx_port *sport = (struct imx_port *)dev_id;
+ struct imx_port *sport = dev_id;
unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS;
unsigned long flags;
@@ -324,7 +324,7 @@ static irqreturn_t imx_rtsint(int irq, void *dev_id)
static irqreturn_t imx_txint(int irq, void *dev_id)
{
- struct imx_port *sport = (struct imx_port *)dev_id;
+ struct imx_port *sport = dev_id;
struct circ_buf *xmit = &sport->port.info->xmit;
unsigned long flags;
diff --git a/drivers/serial/mcf.c b/drivers/serial/mcf.c
index 051fcc2f5ba8..e76fc72c9b36 100644
--- a/drivers/serial/mcf.c
+++ b/drivers/serial/mcf.c
@@ -434,7 +434,7 @@ static struct uart_ops mcf_uart_ops = {
static struct mcf_uart mcf_ports[3];
-#define MCF_MAXPORTS (sizeof(mcf_ports) / sizeof(struct mcf_uart))
+#define MCF_MAXPORTS ARRAY_SIZE(mcf_ports)
/****************************************************************************/
#if defined(CONFIG_SERIAL_MCF_CONSOLE)
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
index 3c4d29e59b2c..a638f23c6c61 100644
--- a/drivers/serial/mpc52xx_uart.c
+++ b/drivers/serial/mpc52xx_uart.c
@@ -16,6 +16,9 @@
* Some of the code has been inspired/copied from the 2.4 code written
* by Dale Farnsworth <dfarnsworth@mvista.com>.
*
+ * Copyright (C) 2008 Freescale Semiconductor Inc.
+ * John Rigby <jrigby@gmail.com>
+ * Added support for MPC5121
* Copyright (C) 2006 Secret Lab Technologies Ltd.
* Grant Likely <grant.likely@secretlab.ca>
* Copyright (C) 2004-2006 Sylvain Munaut <tnt@246tNt.com>
@@ -67,7 +70,6 @@
#include <linux/serial.h>
#include <linux/sysrq.h>
#include <linux/console.h>
-
#include <linux/delay.h>
#include <linux/io.h>
@@ -79,6 +81,7 @@
#endif
#include <asm/mpc52xx.h>
+#include <asm/mpc512x.h>
#include <asm/mpc52xx_psc.h>
#if defined(CONFIG_SERIAL_MPC52xx_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
@@ -111,8 +114,8 @@ static struct device_node *mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM];
static void mpc52xx_uart_of_enumerate(void);
#endif
+
#define PSC(port) ((struct mpc52xx_psc __iomem *)((port)->membase))
-#define FIFO(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1))
/* Forward declaration of the interruption handling routine */
@@ -128,15 +131,301 @@ static irqreturn_t mpc52xx_uart_int(int irq, void *dev_id);
#define uart_console(port) (0)
#endif
+/* ======================================================================== */
+/* PSC fifo operations for isolating differences between 52xx and 512x */
+/* ======================================================================== */
+
+struct psc_ops {
+ void (*fifo_init)(struct uart_port *port);
+ int (*raw_rx_rdy)(struct uart_port *port);
+ int (*raw_tx_rdy)(struct uart_port *port);
+ int (*rx_rdy)(struct uart_port *port);
+ int (*tx_rdy)(struct uart_port *port);
+ int (*tx_empty)(struct uart_port *port);
+ void (*stop_rx)(struct uart_port *port);
+ void (*start_tx)(struct uart_port *port);
+ void (*stop_tx)(struct uart_port *port);
+ void (*rx_clr_irq)(struct uart_port *port);
+ void (*tx_clr_irq)(struct uart_port *port);
+ void (*write_char)(struct uart_port *port, unsigned char c);
+ unsigned char (*read_char)(struct uart_port *port);
+ void (*cw_disable_ints)(struct uart_port *port);
+ void (*cw_restore_ints)(struct uart_port *port);
+ unsigned long (*getuartclk)(void *p);
+};
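The psc_ops structure above is a plain function-pointer vtable: the common UART paths call through psc_ops->..., and the probe code later points psc_ops at either the 52xx or the 512x implementation (via the .data field of the match table further down). For readers unfamiliar with the idiom, here is a self-contained sketch of that dispatch pattern with hypothetical names; it mirrors the shape, not the kernel API.

#include <stdio.h>

/* Hypothetical ops table -- shape only, not the kernel's psc_ops. */
struct hw_ops {
	void (*fifo_init)(void);
	int  (*tx_empty)(void);
};

static void a_fifo_init(void) { puts("variant A: fifo init"); }
static int  a_tx_empty(void)  { return 1; }
static const struct hw_ops variant_a_ops = { a_fifo_init, a_tx_empty };

static void b_fifo_init(void) { puts("variant B: fifo init"); }
static int  b_tx_empty(void)  { return 0; }
static const struct hw_ops variant_b_ops = { b_fifo_init, b_tx_empty };

/* Generic code sees only the table picked at probe time. */
static const struct hw_ops *ops;

static void generic_startup(void)
{
	ops->fifo_init();
	printf("tx empty: %d\n", ops->tx_empty());
}

int main(void)
{
	ops = &variant_a_ops;		/* e.g. chosen from a match table */
	generic_startup();
	(void)variant_b_ops;		/* the other backend, unused here */
	return 0;
}

The benefit, as in this patch, is that the generic code stays identical while the hardware-specific details live behind one table chosen once at initialisation.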
+
+#ifdef CONFIG_PPC_MPC52xx
+#define FIFO_52xx(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1))
+static void mpc52xx_psc_fifo_init(struct uart_port *port)
+{
+ struct mpc52xx_psc __iomem *psc = PSC(port);
+ struct mpc52xx_psc_fifo __iomem *fifo = FIFO_52xx(port);
+
+ /* /32 prescaler */
+ out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00);
+
+ out_8(&fifo->rfcntl, 0x00);
+ out_be16(&fifo->rfalarm, 0x1ff);
+ out_8(&fifo->tfcntl, 0x07);
+ out_be16(&fifo->tfalarm, 0x80);
+
+ port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
+ out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static int mpc52xx_psc_raw_rx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_status)
+ & MPC52xx_PSC_SR_RXRDY;
+}
+
+static int mpc52xx_psc_raw_tx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_status)
+ & MPC52xx_PSC_SR_TXRDY;
+}
+
+
+static int mpc52xx_psc_rx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_isr)
+ & port->read_status_mask
+ & MPC52xx_PSC_IMR_RXRDY;
+}
+
+static int mpc52xx_psc_tx_rdy(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_isr)
+ & port->read_status_mask
+ & MPC52xx_PSC_IMR_TXRDY;
+}
+
+static int mpc52xx_psc_tx_empty(struct uart_port *port)
+{
+ return in_be16(&PSC(port)->mpc52xx_psc_status)
+ & MPC52xx_PSC_SR_TXEMP;
+}
+
+static void mpc52xx_psc_start_tx(struct uart_port *port)
+{
+ port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void mpc52xx_psc_stop_tx(struct uart_port *port)
+{
+ port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY;
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void mpc52xx_psc_stop_rx(struct uart_port *port)
+{
+ port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY;
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+static void mpc52xx_psc_rx_clr_irq(struct uart_port *port)
+{
+}
+
+static void mpc52xx_psc_tx_clr_irq(struct uart_port *port)
+{
+}
+
+static void mpc52xx_psc_write_char(struct uart_port *port, unsigned char c)
+{
+ out_8(&PSC(port)->mpc52xx_psc_buffer_8, c);
+}
+
+static unsigned char mpc52xx_psc_read_char(struct uart_port *port)
+{
+ return in_8(&PSC(port)->mpc52xx_psc_buffer_8);
+}
+
+static void mpc52xx_psc_cw_disable_ints(struct uart_port *port)
+{
+ out_be16(&PSC(port)->mpc52xx_psc_imr, 0);
+}
+
+static void mpc52xx_psc_cw_restore_ints(struct uart_port *port)
+{
+ out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+}
+
+/* Search for bus-frequency property in this node or a parent */
+static unsigned long mpc52xx_getuartclk(void *p)
+{
#if defined(CONFIG_PPC_MERGE)
-static struct of_device_id mpc52xx_uart_of_match[] = {
- { .type = "serial", .compatible = "fsl,mpc5200-psc-uart", },
- { .type = "serial", .compatible = "mpc5200-psc-uart", }, /* lite5200 */
- { .type = "serial", .compatible = "mpc5200-serial", }, /* efika */
- {},
+ /*
+ * 5200 UARTs have a / 32 prescaler
+ * but the generic serial code assumes 16
+ * so return ipb freq / 2
+ */
+ return mpc52xx_find_ipb_freq(p) / 2;
+#else
+ pr_debug("unexpected call to mpc52xx_getuartclk with arch/ppc\n");
+ return 0;
+#endif
+}
+
+static struct psc_ops mpc52xx_psc_ops = {
+ .fifo_init = mpc52xx_psc_fifo_init,
+ .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
+ .rx_rdy = mpc52xx_psc_rx_rdy,
+ .tx_rdy = mpc52xx_psc_tx_rdy,
+ .tx_empty = mpc52xx_psc_tx_empty,
+ .stop_rx = mpc52xx_psc_stop_rx,
+ .start_tx = mpc52xx_psc_start_tx,
+ .stop_tx = mpc52xx_psc_stop_tx,
+ .rx_clr_irq = mpc52xx_psc_rx_clr_irq,
+ .tx_clr_irq = mpc52xx_psc_tx_clr_irq,
+ .write_char = mpc52xx_psc_write_char,
+ .read_char = mpc52xx_psc_read_char,
+ .cw_disable_ints = mpc52xx_psc_cw_disable_ints,
+ .cw_restore_ints = mpc52xx_psc_cw_restore_ints,
+ .getuartclk = mpc52xx_getuartclk,
+};
+
+#endif /* CONFIG_PPC_MPC52xx */
+
+#ifdef CONFIG_PPC_MPC512x
+#define FIFO_512x(port) ((struct mpc512x_psc_fifo __iomem *)(PSC(port)+1))
+static void mpc512x_psc_fifo_init(struct uart_port *port)
+{
+ out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_RESET_SLICE);
+ out_be32(&FIFO_512x(port)->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
+ out_be32(&FIFO_512x(port)->txalarm, 1);
+ out_be32(&FIFO_512x(port)->tximr, 0);
+
+ out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_RESET_SLICE);
+ out_be32(&FIFO_512x(port)->rxcmd, MPC512x_PSC_FIFO_ENABLE_SLICE);
+ out_be32(&FIFO_512x(port)->rxalarm, 1);
+ out_be32(&FIFO_512x(port)->rximr, 0);
+
+ out_be32(&FIFO_512x(port)->tximr, MPC512x_PSC_FIFO_ALARM);
+ out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM);
+}
+
+static int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
+{
+ return !(in_be32(&FIFO_512x(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
+}
+
+static int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
+{
+ return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL);
+}
+
+static int mpc512x_psc_rx_rdy(struct uart_port *port)
+{
+ return in_be32(&FIFO_512x(port)->rxsr)
+ & in_be32(&FIFO_512x(port)->rximr)
+ & MPC512x_PSC_FIFO_ALARM;
+}
+
+static int mpc512x_psc_tx_rdy(struct uart_port *port)
+{
+ return in_be32(&FIFO_512x(port)->txsr)
+ & in_be32(&FIFO_512x(port)->tximr)
+ & MPC512x_PSC_FIFO_ALARM;
+}
+
+static int mpc512x_psc_tx_empty(struct uart_port *port)
+{
+ return in_be32(&FIFO_512x(port)->txsr)
+ & MPC512x_PSC_FIFO_EMPTY;
+}
+
+static void mpc512x_psc_stop_rx(struct uart_port *port)
+{
+ unsigned long rx_fifo_imr;
+
+ rx_fifo_imr = in_be32(&FIFO_512x(port)->rximr);
+ rx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
+ out_be32(&FIFO_512x(port)->rximr, rx_fifo_imr);
+}
+
+static void mpc512x_psc_start_tx(struct uart_port *port)
+{
+ unsigned long tx_fifo_imr;
+
+ tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr);
+ tx_fifo_imr |= MPC512x_PSC_FIFO_ALARM;
+ out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr);
+}
+
+static void mpc512x_psc_stop_tx(struct uart_port *port)
+{
+ unsigned long tx_fifo_imr;
+
+ tx_fifo_imr = in_be32(&FIFO_512x(port)->tximr);
+ tx_fifo_imr &= ~MPC512x_PSC_FIFO_ALARM;
+ out_be32(&FIFO_512x(port)->tximr, tx_fifo_imr);
+}
+
+static void mpc512x_psc_rx_clr_irq(struct uart_port *port)
+{
+ out_be32(&FIFO_512x(port)->rxisr, in_be32(&FIFO_512x(port)->rxisr));
+}
+
+static void mpc512x_psc_tx_clr_irq(struct uart_port *port)
+{
+ out_be32(&FIFO_512x(port)->txisr, in_be32(&FIFO_512x(port)->txisr));
+}
+
+static void mpc512x_psc_write_char(struct uart_port *port, unsigned char c)
+{
+ out_8(&FIFO_512x(port)->txdata_8, c);
+}
+
+static unsigned char mpc512x_psc_read_char(struct uart_port *port)
+{
+ return in_8(&FIFO_512x(port)->rxdata_8);
+}
+
+static void mpc512x_psc_cw_disable_ints(struct uart_port *port)
+{
+ port->read_status_mask =
+ in_be32(&FIFO_512x(port)->tximr) << 16 |
+ in_be32(&FIFO_512x(port)->rximr);
+ out_be32(&FIFO_512x(port)->tximr, 0);
+ out_be32(&FIFO_512x(port)->rximr, 0);
+}
+
+static void mpc512x_psc_cw_restore_ints(struct uart_port *port)
+{
+ out_be32(&FIFO_512x(port)->tximr,
+ (port->read_status_mask >> 16) & 0x7f);
+ out_be32(&FIFO_512x(port)->rximr, port->read_status_mask & 0x7f);
+}
+
+static unsigned long mpc512x_getuartclk(void *p)
+{
+ return mpc512x_find_ips_freq(p);
+}
+
+static struct psc_ops mpc512x_psc_ops = {
+ .fifo_init = mpc512x_psc_fifo_init,
+ .raw_rx_rdy = mpc512x_psc_raw_rx_rdy,
+ .raw_tx_rdy = mpc512x_psc_raw_tx_rdy,
+ .rx_rdy = mpc512x_psc_rx_rdy,
+ .tx_rdy = mpc512x_psc_tx_rdy,
+ .tx_empty = mpc512x_psc_tx_empty,
+ .stop_rx = mpc512x_psc_stop_rx,
+ .start_tx = mpc512x_psc_start_tx,
+ .stop_tx = mpc512x_psc_stop_tx,
+ .rx_clr_irq = mpc512x_psc_rx_clr_irq,
+ .tx_clr_irq = mpc512x_psc_tx_clr_irq,
+ .write_char = mpc512x_psc_write_char,
+ .read_char = mpc512x_psc_read_char,
+ .cw_disable_ints = mpc512x_psc_cw_disable_ints,
+ .cw_restore_ints = mpc512x_psc_cw_restore_ints,
+ .getuartclk = mpc512x_getuartclk,
};
#endif
+static struct psc_ops *psc_ops;
/* ======================================================================== */
/* UART operations */
@@ -145,8 +434,7 @@ static struct of_device_id mpc52xx_uart_of_match[] = {
static unsigned int
mpc52xx_uart_tx_empty(struct uart_port *port)
{
- int status = in_be16(&PSC(port)->mpc52xx_psc_status);
- return (status & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0;
+ return psc_ops->tx_empty(port) ? TIOCSER_TEMT : 0;
}
static void
@@ -166,16 +454,14 @@ static void
mpc52xx_uart_stop_tx(struct uart_port *port)
{
/* port->lock taken by caller */
- port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->stop_tx(port);
}
static void
mpc52xx_uart_start_tx(struct uart_port *port)
{
/* port->lock taken by caller */
- port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->start_tx(port);
}
static void
@@ -188,8 +474,7 @@ mpc52xx_uart_send_xchar(struct uart_port *port, char ch)
if (ch) {
/* Make sure tx interrupts are on */
/* Truly necessary ??? They should be anyway */
- port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->start_tx(port);
}
spin_unlock_irqrestore(&port->lock, flags);
@@ -199,8 +484,7 @@ static void
mpc52xx_uart_stop_rx(struct uart_port *port)
{
/* port->lock taken by caller */
- port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY;
- out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->stop_rx(port);
}
static void
@@ -227,12 +511,12 @@ static int
mpc52xx_uart_startup(struct uart_port *port)
{
struct mpc52xx_psc __iomem *psc = PSC(port);
- struct mpc52xx_psc_fifo __iomem *fifo = FIFO(port);
int ret;
/* Request IRQ */
ret = request_irq(port->irq, mpc52xx_uart_int,
- IRQF_DISABLED | IRQF_SAMPLE_RANDOM, "mpc52xx_psc_uart", port);
+ IRQF_DISABLED | IRQF_SAMPLE_RANDOM | IRQF_SHARED,
+ "mpc52xx_psc_uart", port);
if (ret)
return ret;
@@ -242,15 +526,7 @@ mpc52xx_uart_startup(struct uart_port *port)
out_be32(&psc->sicr, 0); /* UART mode DCD ignored */
- out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); /* /16 prescaler on */
-
- out_8(&fifo->rfcntl, 0x00);
- out_be16(&fifo->rfalarm, 0x1ff);
- out_8(&fifo->tfcntl, 0x07);
- out_be16(&fifo->tfalarm, 0x80);
-
- port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY;
- out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->fifo_init(port);
out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
@@ -333,8 +609,7 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new,
* boot for the console, all stuff is not yet ready to receive at that
* time and that just makes the kernel oops */
/* while (j-- && mpc52xx_uart_int_rx_chars(port)); */
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
- --j)
+ while (!mpc52xx_uart_tx_empty(port) && --j)
udelay(1);
if (!j)
@@ -462,11 +737,9 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
unsigned short status;
/* While we can read, do so ! */
- while ((status = in_be16(&PSC(port)->mpc52xx_psc_status)) &
- MPC52xx_PSC_SR_RXRDY) {
-
+ while (psc_ops->raw_rx_rdy(port)) {
/* Get the char */
- ch = in_8(&PSC(port)->mpc52xx_psc_buffer_8);
+ ch = psc_ops->read_char(port);
/* Handle sysreq char */
#ifdef SUPPORT_SYSRQ
@@ -481,6 +754,8 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
flag = TTY_NORMAL;
port->icount.rx++;
+ status = in_be16(&PSC(port)->mpc52xx_psc_status);
+
if (status & (MPC52xx_PSC_SR_PE |
MPC52xx_PSC_SR_FE |
MPC52xx_PSC_SR_RB)) {
@@ -510,7 +785,7 @@ mpc52xx_uart_int_rx_chars(struct uart_port *port)
tty_flip_buffer_push(tty);
- return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY;
+ return psc_ops->raw_rx_rdy(port);
}
static inline int
@@ -520,7 +795,7 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
/* Process out of band chars */
if (port->x_char) {
- out_8(&PSC(port)->mpc52xx_psc_buffer_8, port->x_char);
+ psc_ops->write_char(port, port->x_char);
port->icount.tx++;
port->x_char = 0;
return 1;
@@ -533,8 +808,8 @@ mpc52xx_uart_int_tx_chars(struct uart_port *port)
}
/* Send chars */
- while (in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXRDY) {
- out_8(&PSC(port)->mpc52xx_psc_buffer_8, xmit->buf[xmit->tail]);
+ while (psc_ops->raw_tx_rdy(port)) {
+ psc_ops->write_char(port, xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
if (uart_circ_empty(xmit))
@@ -560,7 +835,6 @@ mpc52xx_uart_int(int irq, void *dev_id)
struct uart_port *port = dev_id;
unsigned long pass = ISR_PASS_LIMIT;
unsigned int keepgoing;
- unsigned short status;
spin_lock(&port->lock);
@@ -569,18 +843,12 @@ mpc52xx_uart_int(int irq, void *dev_id)
/* If we don't find anything to do, we stop */
keepgoing = 0;
- /* Read status */
- status = in_be16(&PSC(port)->mpc52xx_psc_isr);
- status &= port->read_status_mask;
-
- /* Do we need to receive chars ? */
- /* For this RX interrupts must be on and some chars waiting */
- if (status & MPC52xx_PSC_IMR_RXRDY)
+ psc_ops->rx_clr_irq(port);
+ if (psc_ops->rx_rdy(port))
keepgoing |= mpc52xx_uart_int_rx_chars(port);
- /* Do we need to send chars ? */
- /* For this, TX must be ready and TX interrupt enabled */
- if (status & MPC52xx_PSC_IMR_TXRDY)
+ psc_ops->tx_clr_irq(port);
+ if (psc_ops->tx_rdy(port))
keepgoing |= mpc52xx_uart_int_tx_chars(port);
/* Limit number of iteration */
@@ -647,36 +915,33 @@ static void
mpc52xx_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_port *port = &mpc52xx_uart_ports[co->index];
- struct mpc52xx_psc __iomem *psc = PSC(port);
unsigned int i, j;
/* Disable interrupts */
- out_be16(&psc->mpc52xx_psc_imr, 0);
+ psc_ops->cw_disable_ints(port);
/* Wait the TX buffer to be empty */
j = 5000000; /* Maximum wait */
- while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) &&
- --j)
+ while (!mpc52xx_uart_tx_empty(port) && --j)
udelay(1);
/* Write all the chars */
for (i = 0; i < count; i++, s++) {
/* Line return handling */
if (*s == '\n')
- out_8(&psc->mpc52xx_psc_buffer_8, '\r');
+ psc_ops->write_char(port, '\r');
/* Send the char */
- out_8(&psc->mpc52xx_psc_buffer_8, *s);
+ psc_ops->write_char(port, *s);
/* Wait the TX buffer to be empty */
j = 20000; /* Maximum wait */
- while (!(in_be16(&psc->mpc52xx_psc_status) &
- MPC52xx_PSC_SR_TXEMP) && --j)
+ while (!mpc52xx_uart_tx_empty(port) && --j)
udelay(1);
}
/* Restore interrupt state */
- out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask);
+ psc_ops->cw_restore_ints(port);
}
#if !defined(CONFIG_PPC_MERGE)
@@ -721,7 +986,7 @@ mpc52xx_console_setup(struct console *co, char *options)
{
struct uart_port *port = &mpc52xx_uart_ports[co->index];
struct device_node *np = mpc52xx_uart_nodes[co->index];
- unsigned int ipb_freq;
+ unsigned int uartclk;
struct resource res;
int ret;
@@ -753,17 +1018,16 @@ mpc52xx_console_setup(struct console *co, char *options)
return ret;
}
- /* Search for bus-frequency property in this node or a parent */
- ipb_freq = mpc52xx_find_ipb_freq(np);
- if (ipb_freq == 0) {
- pr_debug("Could not find IPB bus frequency!\n");
+ uartclk = psc_ops->getuartclk(np);
+ if (uartclk == 0) {
+ pr_debug("Could not find uart clock frequency!\n");
return -EINVAL;
}
/* Basic port init. Needed since we use some uart_??? func before
* real init for early access */
spin_lock_init(&port->lock);
- port->uartclk = ipb_freq / 2;
+ port->uartclk = uartclk;
port->ops = &mpc52xx_uart_ops;
port->mapbase = res.start;
port->membase = ioremap(res.start, sizeof(struct mpc52xx_psc));
@@ -945,11 +1209,25 @@ static struct platform_driver mpc52xx_uart_platform_driver = {
/* OF Platform Driver */
/* ======================================================================== */
+static struct of_device_id mpc52xx_uart_of_match[] = {
+#ifdef CONFIG_PPC_MPC52xx
+ { .compatible = "fsl,mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
+ /* binding used by old lite5200 device trees: */
+ { .compatible = "mpc5200-psc-uart", .data = &mpc52xx_psc_ops, },
+ /* binding used by efika: */
+ { .compatible = "mpc5200-serial", .data = &mpc52xx_psc_ops, },
+#endif
+#ifdef CONFIG_PPC_MPC512x
+ { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, },
+#endif
+ {},
+};
+
static int __devinit
mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
{
int idx = -1;
- unsigned int ipb_freq;
+ unsigned int uartclk;
struct uart_port *port = NULL;
struct resource res;
int ret;
@@ -965,10 +1243,9 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
pr_debug("Found %s assigned to ttyPSC%x\n",
mpc52xx_uart_nodes[idx]->full_name, idx);
- /* Search for bus-frequency property in this node or a parent */
- ipb_freq = mpc52xx_find_ipb_freq(op->node);
- if (ipb_freq == 0) {
- dev_dbg(&op->dev, "Could not find IPB bus frequency!\n");
+ uartclk = psc_ops->getuartclk(op->node);
+ if (uartclk == 0) {
+ dev_dbg(&op->dev, "Could not find uart clock frequency!\n");
return -EINVAL;
}
@@ -976,7 +1253,7 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match)
port = &mpc52xx_uart_ports[idx];
spin_lock_init(&port->lock);
- port->uartclk = ipb_freq / 2;
+ port->uartclk = uartclk;
port->fifosize = 512;
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF |
@@ -1080,15 +1357,19 @@ mpc52xx_uart_of_enumerate(void)
static int enum_done;
struct device_node *np;
const unsigned int *devno;
+ const struct of_device_id *match;
int i;
if (enum_done)
return;
for_each_node_by_type(np, "serial") {
- if (!of_match_node(mpc52xx_uart_of_match, np))
+ match = of_match_node(mpc52xx_uart_of_match, np);
+ if (!match)
continue;
+ psc_ops = match->data;
+
/* Is a particular device number requested? */
devno = of_get_property(np, "port-number", NULL);
mpc52xx_uart_of_assign(np, devno ? *devno : -1);
@@ -1149,6 +1430,7 @@ mpc52xx_uart_init(void)
return ret;
}
#else
+ psc_ops = &mpc52xx_psc_ops;
ret = platform_driver_register(&mpc52xx_uart_platform_driver);
if (ret) {
printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
diff --git a/drivers/serial/mpsc.c b/drivers/serial/mpsc.c
index 4d643c926657..cb3a91967742 100644
--- a/drivers/serial/mpsc.c
+++ b/drivers/serial/mpsc.c
@@ -612,6 +612,7 @@ static void mpsc_hw_init(struct mpsc_port_info *pi)
/* No preamble, 16x divider, low-latency, */
writel(0x04400400, pi->mpsc_base + MPSC_MMCRH);
+ mpsc_set_baudrate(pi, pi->default_baud);
if (pi->mirror_regs) {
pi->MPSC_CHR_1_m = 0;
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index e773c8e14962..45de19366030 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -1527,7 +1527,7 @@ static inline void s3c2440_serial_exit(void)
#define s3c2440_uart_inf_at NULL
#endif /* CONFIG_CPU_S3C2440 */
-#if defined(CONFIG_CPU_S3C2412) || defined(CONFIG_CPU_S3C2413)
+#if defined(CONFIG_CPU_S3C2412)
static int s3c2412_serial_setsource(struct uart_port *port,
struct s3c24xx_uart_clksrc *clk)
diff --git a/drivers/serial/sc26xx.c b/drivers/serial/sc26xx.c
new file mode 100644
index 000000000000..a350b6d2a181
--- /dev/null
+++ b/drivers/serial/sc26xx.c
@@ -0,0 +1,755 @@
+/*
+ * sc26xx.c: Serial driver for Philips SC2681/SC2692 devices.
+ *
+ * Copyright (C) 2006,2007 Thomas Bogendörfer (tsbogend@alpha.franken.de)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/major.h>
+#include <linux/circ_buf.h>
+#include <linux/serial.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+
+#if defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+#define SC26XX_MAJOR 204
+#define SC26XX_MINOR_START 205
+#define SC26XX_NR 2
+
+struct uart_sc26xx_port {
+ struct uart_port port[2];
+ u8 dsr_mask[2];
+ u8 cts_mask[2];
+ u8 dcd_mask[2];
+ u8 ri_mask[2];
+ u8 dtr_mask[2];
+ u8 rts_mask[2];
+ u8 imr;
+};
+
+/* register common to both ports */
+#define RD_ISR 0x14
+#define RD_IPR 0x34
+
+#define WR_ACR 0x10
+#define WR_IMR 0x14
+#define WR_OPCR 0x34
+#define WR_OPR_SET 0x38
+#define WR_OPR_CLR 0x3C
+
+/* access common register */
+#define READ_SC(p, r) readb((p)->membase + RD_##r)
+#define WRITE_SC(p, r, v) writeb((v), (p)->membase + WR_##r)
+
+/* register per port */
+#define RD_PORT_MRx 0x00
+#define RD_PORT_SR 0x04
+#define RD_PORT_RHR 0x0c
+
+#define WR_PORT_MRx 0x00
+#define WR_PORT_CSR 0x04
+#define WR_PORT_CR 0x08
+#define WR_PORT_THR 0x0c
+
+/* SR bits */
+#define SR_BREAK (1 << 7)
+#define SR_FRAME (1 << 6)
+#define SR_PARITY (1 << 5)
+#define SR_OVERRUN (1 << 4)
+#define SR_TXRDY (1 << 2)
+#define SR_RXRDY (1 << 0)
+
+#define CR_RES_MR (1 << 4)
+#define CR_RES_RX (2 << 4)
+#define CR_RES_TX (3 << 4)
+#define CR_STRT_BRK (6 << 4)
+#define CR_STOP_BRK (7 << 4)
+#define CR_DIS_TX (1 << 3)
+#define CR_ENA_TX (1 << 2)
+#define CR_DIS_RX (1 << 1)
+#define CR_ENA_RX (1 << 0)
+
+/* ISR bits */
+#define ISR_RXRDYB (1 << 5)
+#define ISR_TXRDYB (1 << 4)
+#define ISR_RXRDYA (1 << 1)
+#define ISR_TXRDYA (1 << 0)
+
+/* IMR bits */
+#define IMR_RXRDY (1 << 1)
+#define IMR_TXRDY (1 << 0)
+
+/* access port register */
+static inline u8 read_sc_port(struct uart_port *p, u8 reg)
+{
+ return readb(p->membase + p->line * 0x20 + reg);
+}
+
+static inline void write_sc_port(struct uart_port *p, u8 reg, u8 val)
+{
+ writeb(val, p->membase + p->line * 0x20 + reg);
+}
+
+#define READ_SC_PORT(p, r) read_sc_port(p, RD_PORT_##r)
+#define WRITE_SC_PORT(p, r, v) write_sc_port(p, WR_PORT_##r, v)
+
+static void sc26xx_enable_irq(struct uart_port *port, int mask)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+
+ up->imr |= mask << (line * 4);
+ WRITE_SC(port, IMR, up->imr);
+}
+
+static void sc26xx_disable_irq(struct uart_port *port, int mask)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+
+ up->imr &= ~(mask << (line * 4));
+ WRITE_SC(port, IMR, up->imr);
+}
+
+static struct tty_struct *receive_chars(struct uart_port *port)
+{
+ struct tty_struct *tty = NULL;
+ int limit = 10000;
+ unsigned char ch;
+ char flag;
+ u8 status;
+
+ if (port->info != NULL) /* Unopened serial console */
+ tty = port->info->tty;
+
+ while (limit-- > 0) {
+ status = READ_SC_PORT(port, SR);
+ if (!(status & SR_RXRDY))
+ break;
+ ch = READ_SC_PORT(port, RHR);
+
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if (unlikely(status & (SR_BREAK | SR_FRAME |
+ SR_PARITY | SR_OVERRUN))) {
+ if (status & SR_BREAK) {
+ status &= ~(SR_PARITY | SR_FRAME);
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (status & SR_PARITY)
+ port->icount.parity++;
+ else if (status & SR_FRAME)
+ port->icount.frame++;
+ if (status & SR_OVERRUN)
+ port->icount.overrun++;
+
+ status &= port->read_status_mask;
+ if (status & SR_BREAK)
+ flag = TTY_BREAK;
+ else if (status & SR_PARITY)
+ flag = TTY_PARITY;
+ else if (status & SR_FRAME)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, ch))
+ continue;
+
+ if (status & port->ignore_status_mask)
+ continue;
+
+ tty_insert_flip_char(tty, ch, flag);
+ }
+ return tty;
+}
+
+static void transmit_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit;
+
+ if (!port->info)
+ return;
+
+ xmit = &port->info->xmit;
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ sc26xx_disable_irq(port, IMR_TXRDY);
+ return;
+ }
+ while (!uart_circ_empty(xmit)) {
+ if (!(READ_SC_PORT(port, SR) & SR_TXRDY))
+ break;
+
+ WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static irqreturn_t sc26xx_interrupt(int irq, void *dev_id)
+{
+ struct uart_sc26xx_port *up = dev_id;
+ struct tty_struct *tty;
+ unsigned long flags;
+ u8 isr;
+
+ spin_lock_irqsave(&up->port[0].lock, flags);
+
+ tty = NULL;
+ isr = READ_SC(&up->port[0], ISR);
+ if (isr & ISR_TXRDYA)
+ transmit_chars(&up->port[0]);
+ if (isr & ISR_RXRDYA)
+ tty = receive_chars(&up->port[0]);
+
+ spin_unlock(&up->port[0].lock);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ spin_lock(&up->port[1].lock);
+
+ tty = NULL;
+ if (isr & ISR_TXRDYB)
+ transmit_chars(&up->port[1]);
+ if (isr & ISR_RXRDYB)
+ tty = receive_chars(&up->port[1]);
+
+ spin_unlock_irqrestore(&up->port[1].lock, flags);
+
+ if (tty)
+ tty_flip_buffer_push(tty);
+
+ return IRQ_HANDLED;
+}
+
+/* port->lock is not held. */
+static unsigned int sc26xx_tx_empty(struct uart_port *port)
+{
+ return (READ_SC_PORT(port, SR) & SR_TXRDY) ? TIOCSER_TEMT : 0;
+}
+
+/* port->lock held by caller. */
+static void sc26xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+
+ if (up->dtr_mask[line]) {
+ if (mctrl & TIOCM_DTR)
+ WRITE_SC(port, OPR_SET, up->dtr_mask[line]);
+ else
+ WRITE_SC(port, OPR_CLR, up->dtr_mask[line]);
+ }
+ if (up->rts_mask[line]) {
+ if (mctrl & TIOCM_RTS)
+ WRITE_SC(port, OPR_SET, up->rts_mask[line]);
+ else
+ WRITE_SC(port, OPR_CLR, up->rts_mask[line]);
+ }
+}
+
+/* port->lock is held by caller and interrupts are disabled. */
+static unsigned int sc26xx_get_mctrl(struct uart_port *port)
+{
+ struct uart_sc26xx_port *up;
+ int line = port->line;
+ unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
+ u8 ipr;
+
+ port -= line;
+ up = container_of(port, struct uart_sc26xx_port, port[0]);
+ ipr = READ_SC(port, IPR) ^ 0xff;
+
+ if (up->dsr_mask[line]) {
+ mctrl &= ~TIOCM_DSR;
+ mctrl |= ipr & up->dsr_mask[line] ? TIOCM_DSR : 0;
+ }
+ if (up->cts_mask[line]) {
+ mctrl &= ~TIOCM_CTS;
+ mctrl |= ipr & up->cts_mask[line] ? TIOCM_CTS : 0;
+ }
+ if (up->dcd_mask[line]) {
+ mctrl &= ~TIOCM_CAR;
+ mctrl |= ipr & up->dcd_mask[line] ? TIOCM_CAR : 0;
+ }
+ if (up->ri_mask[line]) {
+ mctrl &= ~TIOCM_RNG;
+ mctrl |= ipr & up->ri_mask[line] ? TIOCM_RNG : 0;
+ }
+ return mctrl;
+}
+
+/* port->lock held by caller. */
+static void sc26xx_stop_tx(struct uart_port *port)
+{
+ return;
+}
+
+/* port->lock held by caller. */
+static void sc26xx_start_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->info->xmit;
+
+ while (!uart_circ_empty(xmit)) {
+ if (!(READ_SC_PORT(port, SR) & SR_TXRDY)) {
+ sc26xx_enable_irq(port, IMR_TXRDY);
+ break;
+ }
+ WRITE_SC_PORT(port, THR, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ }
+}
+
+/* port->lock held by caller. */
+static void sc26xx_stop_rx(struct uart_port *port)
+{
+}
+
+/* port->lock held by caller. */
+static void sc26xx_enable_ms(struct uart_port *port)
+{
+}
+
+/* port->lock is not held. */
+static void sc26xx_break_ctl(struct uart_port *port, int break_state)
+{
+ if (break_state == -1)
+ WRITE_SC_PORT(port, CR, CR_STRT_BRK);
+ else
+ WRITE_SC_PORT(port, CR, CR_STOP_BRK);
+}
+
+/* port->lock is not held. */
+static int sc26xx_startup(struct uart_port *port)
+{
+ sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
+ WRITE_SC(port, OPCR, 0);
+
+ /* reset tx and rx */
+ WRITE_SC_PORT(port, CR, CR_RES_RX);
+ WRITE_SC_PORT(port, CR, CR_RES_TX);
+
+ /* start rx/tx */
+ WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);
+
+ /* enable irqs */
+ sc26xx_enable_irq(port, IMR_RXRDY);
+ return 0;
+}
+
+/* port->lock is not held. */
+static void sc26xx_shutdown(struct uart_port *port)
+{
+ /* disable interrupts */
+ sc26xx_disable_irq(port, IMR_TXRDY | IMR_RXRDY);
+
+ /* stop tx/rx */
+ WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);
+}
+
+/* port->lock is not held. */
+static void sc26xx_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned int baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ unsigned int quot = uart_get_divisor(port, baud);
+ unsigned int iflag, cflag;
+ unsigned long flags;
+ u8 mr1, mr2, csr;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
+ udelay(2);
+
+ WRITE_SC_PORT(port, CR, CR_DIS_TX | CR_DIS_RX);
+
+ iflag = termios->c_iflag;
+ cflag = termios->c_cflag;
+
+ port->read_status_mask = SR_OVERRUN;
+ if (iflag & INPCK)
+ port->read_status_mask |= SR_PARITY | SR_FRAME;
+ if (iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= SR_BREAK;
+
+ port->ignore_status_mask = 0;
+ if (iflag & IGNBRK)
+ port->ignore_status_mask |= SR_BREAK;
+ if ((cflag & CREAD) == 0)
+ port->ignore_status_mask |= SR_BREAK | SR_FRAME |
+ SR_PARITY | SR_OVERRUN;
+
+ switch (cflag & CSIZE) {
+ case CS5:
+ mr1 = 0x00;
+ break;
+ case CS6:
+ mr1 = 0x01;
+ break;
+ case CS7:
+ mr1 = 0x02;
+ break;
+ default:
+ case CS8:
+ mr1 = 0x03;
+ break;
+ }
+ mr2 = 0x07;
+ if (cflag & CSTOPB)
+ mr2 = 0x0f;
+ if (cflag & PARENB) {
+ if (cflag & PARODD)
+ mr1 |= (1 << 2);
+ } else
+ mr1 |= (2 << 3);
+
+ switch (baud) {
+ case 50:
+ csr = 0x00;
+ break;
+ case 110:
+ csr = 0x11;
+ break;
+ case 134:
+ csr = 0x22;
+ break;
+ case 200:
+ csr = 0x33;
+ break;
+ case 300:
+ csr = 0x44;
+ break;
+ case 600:
+ csr = 0x55;
+ break;
+ case 1200:
+ csr = 0x66;
+ break;
+ case 2400:
+ csr = 0x88;
+ break;
+ case 4800:
+ csr = 0x99;
+ break;
+ default:
+ case 9600:
+ csr = 0xbb;
+ break;
+ case 19200:
+ csr = 0xcc;
+ break;
+ }
+
+ WRITE_SC_PORT(port, CR, CR_RES_MR);
+ WRITE_SC_PORT(port, MRx, mr1);
+ WRITE_SC_PORT(port, MRx, mr2);
+
+ WRITE_SC(port, ACR, 0x80);
+ WRITE_SC_PORT(port, CSR, csr);
+
+ /* reset tx and rx */
+ WRITE_SC_PORT(port, CR, CR_RES_RX);
+ WRITE_SC_PORT(port, CR, CR_RES_TX);
+
+ WRITE_SC_PORT(port, CR, CR_ENA_TX | CR_ENA_RX);
+ while ((READ_SC_PORT(port, SR) & ((1 << 3) | (1 << 2))) != 0xc)
+ udelay(2);
+
+ /* XXX */
+ uart_update_timeout(port, cflag,
+ (port->uartclk / (16 * quot)));
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *sc26xx_type(struct uart_port *port)
+{
+ return "SC26XX";
+}
+
+static void sc26xx_release_port(struct uart_port *port)
+{
+}
+
+static int sc26xx_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void sc26xx_config_port(struct uart_port *port, int flags)
+{
+}
+
+static int sc26xx_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ return -EINVAL;
+}
+
+static struct uart_ops sc26xx_ops = {
+ .tx_empty = sc26xx_tx_empty,
+ .set_mctrl = sc26xx_set_mctrl,
+ .get_mctrl = sc26xx_get_mctrl,
+ .stop_tx = sc26xx_stop_tx,
+ .start_tx = sc26xx_start_tx,
+ .stop_rx = sc26xx_stop_rx,
+ .enable_ms = sc26xx_enable_ms,
+ .break_ctl = sc26xx_break_ctl,
+ .startup = sc26xx_startup,
+ .shutdown = sc26xx_shutdown,
+ .set_termios = sc26xx_set_termios,
+ .type = sc26xx_type,
+ .release_port = sc26xx_release_port,
+ .request_port = sc26xx_request_port,
+ .config_port = sc26xx_config_port,
+ .verify_port = sc26xx_verify_port,
+};
+
+static struct uart_port *sc26xx_port;
+
+#ifdef CONFIG_SERIAL_SC26XX_CONSOLE
+static void sc26xx_console_putchar(struct uart_port *port, char c)
+{
+ unsigned long flags;
+ int limit = 1000000;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ while (limit-- > 0) {
+ if (READ_SC_PORT(port, SR) & SR_TXRDY) {
+ WRITE_SC_PORT(port, THR, c);
+ break;
+ }
+ udelay(2);
+ }
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void sc26xx_console_write(struct console *con, const char *s, unsigned n)
+{
+ struct uart_port *port = sc26xx_port;
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (*s == '\n')
+ sc26xx_console_putchar(port, '\r');
+ sc26xx_console_putchar(port, *s++);
+ }
+}
+
+static int __init sc26xx_console_setup(struct console *con, char *options)
+{
+ struct uart_port *port = sc26xx_port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (port->type != PORT_SC26XX)
+ return -1;
+
+ printk(KERN_INFO "Console: ttySC%d (SC26XX)\n", con->index);
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, con, baud, parity, bits, flow);
+}
+
+static struct uart_driver sc26xx_reg;
+static struct console sc26xx_console = {
+ .name = "ttySC",
+ .write = sc26xx_console_write,
+ .device = uart_console_device,
+ .setup = sc26xx_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sc26xx_reg,
+};
+#define SC26XX_CONSOLE &sc26xx_console
+#else
+#define SC26XX_CONSOLE NULL
+#endif
+
+static struct uart_driver sc26xx_reg = {
+ .owner = THIS_MODULE,
+ .driver_name = "SC26xx",
+ .dev_name = "ttySC",
+ .major = SC26XX_MAJOR,
+ .minor = SC26XX_MINOR_START,
+ .nr = SC26XX_NR,
+ .cons = SC26XX_CONSOLE,
+};
+
+static u8 sc26xx_flags2mask(unsigned int flags, unsigned int bitpos)
+{
+ unsigned int bit = (flags >> bitpos) & 15;
+
+ return bit ? (1 << (bit - 1)) : 0;
+}
+
+static void __devinit sc26xx_init_masks(struct uart_sc26xx_port *up,
+ int line, unsigned int data)
+{
+ up->dtr_mask[line] = sc26xx_flags2mask(data, 0);
+ up->rts_mask[line] = sc26xx_flags2mask(data, 4);
+ up->dsr_mask[line] = sc26xx_flags2mask(data, 8);
+ up->cts_mask[line] = sc26xx_flags2mask(data, 12);
+ up->dcd_mask[line] = sc26xx_flags2mask(data, 16);
+ up->ri_mask[line] = sc26xx_flags2mask(data, 20);
+}
+
+static int __devinit sc26xx_probe(struct platform_device *dev)
+{
+ struct resource *res;
+ struct uart_sc26xx_port *up;
+ unsigned int *sc26xx_data = dev->dev.platform_data;
+ int err;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ up = kzalloc(sizeof *up, GFP_KERNEL);
+ if (unlikely(!up))
+ return -ENOMEM;
+
+ up->port[0].line = 0;
+ up->port[0].ops = &sc26xx_ops;
+ up->port[0].type = PORT_SC26XX;
+ up->port[0].uartclk = (29491200 / 16); /* arbitrary */
+
+ up->port[0].mapbase = res->start;
+ up->port[0].membase = ioremap_nocache(up->port[0].mapbase, 0x40);
+ up->port[0].iotype = UPIO_MEM;
+ up->port[0].irq = platform_get_irq(dev, 0);
+
+ up->port[0].dev = &dev->dev;
+
+ sc26xx_init_masks(up, 0, sc26xx_data[0]);
+
+ sc26xx_port = &up->port[0];
+
+ up->port[1].line = 1;
+ up->port[1].ops = &sc26xx_ops;
+ up->port[1].type = PORT_SC26XX;
+ up->port[1].uartclk = (29491200 / 16); /* arbitrary */
+
+ up->port[1].mapbase = up->port[0].mapbase;
+ up->port[1].membase = up->port[0].membase;
+ up->port[1].iotype = UPIO_MEM;
+ up->port[1].irq = up->port[0].irq;
+
+ up->port[1].dev = &dev->dev;
+
+ sc26xx_init_masks(up, 1, sc26xx_data[1]);
+
+ err = uart_register_driver(&sc26xx_reg);
+ if (err)
+ goto out_free_port;
+
+ sc26xx_reg.tty_driver->name_base = sc26xx_reg.minor;
+
+ err = uart_add_one_port(&sc26xx_reg, &up->port[0]);
+ if (err)
+ goto out_unregister_driver;
+
+ err = uart_add_one_port(&sc26xx_reg, &up->port[1]);
+ if (err)
+ goto out_remove_port0;
+
+ err = request_irq(up->port[0].irq, sc26xx_interrupt, 0, "sc26xx", up);
+ if (err)
+ goto out_remove_ports;
+
+ dev_set_drvdata(&dev->dev, up);
+ return 0;
+
+out_remove_ports:
+ uart_remove_one_port(&sc26xx_reg, &up->port[1]);
+out_remove_port0:
+ uart_remove_one_port(&sc26xx_reg, &up->port[0]);
+
+out_unregister_driver:
+ uart_unregister_driver(&sc26xx_reg);
+
+out_free_port:
+ kfree(up);
+ sc26xx_port = NULL;
+ return err;
+}
+
+
+static int __exit sc26xx_driver_remove(struct platform_device *dev)
+{
+ struct uart_sc26xx_port *up = dev_get_drvdata(&dev->dev);
+
+ free_irq(up->port[0].irq, up);
+
+ uart_remove_one_port(&sc26xx_reg, &up->port[0]);
+ uart_remove_one_port(&sc26xx_reg, &up->port[1]);
+
+ uart_unregister_driver(&sc26xx_reg);
+
+ kfree(up);
+ sc26xx_port = NULL;
+
+ dev_set_drvdata(&dev->dev, NULL);
+ return 0;
+}
+
+static struct platform_driver sc26xx_driver = {
+ .probe = sc26xx_probe,
+ .remove = __devexit_p(sc26xx_driver_remove),
+ .driver = {
+ .name = "SC26xx",
+ },
+};
+
+static int __init sc26xx_init(void)
+{
+ return platform_driver_register(&sc26xx_driver);
+}
+
+static void __exit sc26xx_exit(void)
+{
+ platform_driver_unregister(&sc26xx_driver);
+}
+
+module_init(sc26xx_init);
+module_exit(sc26xx_exit);
+
+
+MODULE_AUTHOR("Thomas Bogendörfer");
+MODULE_DESCRIPTION("SC681/SC2692 serial driver");
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL");
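sc26xx.c keeps both channels of a chip in a single uart_sc26xx_port and, in sc26xx_enable_irq()/sc26xx_get_mctrl(), recovers that structure from a bare port pointer with `port -= line` followed by container_of(). A standalone sketch of that idiom with hypothetical struct names (the container_of() definition below is the usual offsetof-based one):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct port { int line; };

struct chip {
	int imr;			/* state shared by both channels */
	struct port port[2];
};

/* The driver's trick in miniature: step back to port[0], then to the chip. */
static struct chip *port_to_chip(struct port *p)
{
	p -= p->line;
	return container_of(p, struct chip, port);
}

int main(void)
{
	struct chip c = { .imr = 0, .port = { { .line = 0 }, { .line = 1 } } };
	struct chip *owner = port_to_chip(&c.port[1]);

	printf("recovered the right chip: %s\n", owner == &c ? "yes" : "no");
	return 0;
}

Subtracting the line index first is what lets either element of the embedded array lead back to the same enclosing structure.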
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c
index 3bb5d241dd40..276da148c57e 100644
--- a/drivers/serial/serial_core.c
+++ b/drivers/serial/serial_core.c
@@ -371,7 +371,8 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
*/
termios->c_cflag &= ~CBAUD;
if (old) {
- termios->c_cflag |= old->c_cflag & CBAUD;
+ baud = tty_termios_baud_rate(old);
+ tty_termios_encode_baud_rate(termios, baud, baud);
old = NULL;
continue;
}
@@ -380,7 +381,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
* As a last resort, if the quotient is zero,
* default to 9600 bps
*/
- termios->c_cflag |= B9600;
+ tty_termios_encode_baud_rate(termios, 9600, 9600);
}
return 0;
@@ -1977,6 +1978,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)
if (state->info && state->info->flags & UIF_INITIALIZED) {
const struct uart_ops *ops = port->ops;
+ int tries;
state->info->flags = (state->info->flags & ~UIF_INITIALIZED)
| UIF_SUSPENDED;
@@ -1990,9 +1992,14 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port)
/*
* Wait for the transmitter to empty.
*/
- while (!ops->tx_empty(port)) {
+ for (tries = 3; !ops->tx_empty(port) && tries; tries--) {
msleep(10);
}
+ if (!tries)
+ printk(KERN_ERR "%s%s%s%d: Unable to drain transmitter\n",
+ port->dev ? port->dev->bus_id : "",
+ port->dev ? ": " : "",
+ drv->dev_name, port->line);
ops->shutdown(port);
}
@@ -2029,8 +2036,6 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port)
}
port->suspended = 0;
- uart_change_pm(state, 0);
-
/*
* Re-enable the console device after suspending.
*/
@@ -2049,6 +2054,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port)
if (state->info && state->info->tty && termios.c_cflag == 0)
termios = *state->info->tty->termios;
+ uart_change_pm(state, 0);
port->ops->set_termios(port, &termios, NULL);
console_start(port->cons);
}
@@ -2057,6 +2063,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port)
const struct uart_ops *ops = port->ops;
int ret;
+ uart_change_pm(state, 0);
ops->set_mctrl(port, 0);
ret = ops->startup(port);
if (ret == 0) {
@@ -2150,10 +2157,11 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
/*
* Ensure that the modem control lines are de-activated.
+ * Keep the DTR setting that is set in uart_set_options().
* We probably don't need a spinlock around this, but
*/
spin_lock_irqsave(&port->lock, flags);
- port->ops->set_mctrl(port, 0);
+ port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR);
spin_unlock_irqrestore(&port->lock, flags);
/*
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index d8b660061c13..164d2a42eb59 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -389,7 +389,7 @@ static void serial_detach(struct pcmcia_device *link)
/*====================================================================*/
static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
- kio_addr_t iobase, int irq)
+ unsigned int iobase, int irq)
{
struct uart_port port;
int line;
@@ -456,7 +456,7 @@ next_tuple(struct pcmcia_device *handle, tuple_t * tuple, cisparse_t * parse)
static int simple_config(struct pcmcia_device *link)
{
- static const kio_addr_t base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
+ static const unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
static const int size_table[2] = { 8, 16 };
struct serial_info *info = link->priv;
struct serial_cfg_mem *cfg_mem;
@@ -480,7 +480,7 @@ static int simple_config(struct pcmcia_device *link)
/* If the card is already configured, look up the port and irq */
i = pcmcia_get_configuration_info(link, &config);
if ((i == CS_SUCCESS) && (config.Attributes & CONF_VALID_CLIENT)) {
- kio_addr_t port = 0;
+ unsigned int port = 0;
if ((config.BasePort2 != 0) && (config.NumPorts2 == 8)) {
port = config.BasePort2;
info->slave = 1;
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index 80943409edb0..4e06ab6bcb6e 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -17,10 +17,21 @@
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/init.h>
#include <asm/io.h>
#if defined(CONFIG_OF)
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+
+/* Match table for of_platform binding */
+static struct of_device_id ulite_of_match[] __devinitdata = {
+ { .compatible = "xlnx,opb-uartlite-1.00.b", },
+ { .compatible = "xlnx,xps-uartlite-1.00.a", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ulite_of_match);
+
#endif
#define ULITE_NAME "ttyUL"
@@ -142,7 +153,7 @@ static int ulite_transmit(struct uart_port *port, int stat)
static irqreturn_t ulite_isr(int irq, void *dev_id)
{
- struct uart_port *port = (struct uart_port *)dev_id;
+ struct uart_port *port = dev_id;
int busy;
do {
@@ -275,6 +286,9 @@ static void ulite_release_port(struct uart_port *port)
static int ulite_request_port(struct uart_port *port)
{
+ pr_debug("ulite console: port=%p; port->mapbase=%x\n",
+ port, port->mapbase);
+
if (!request_mem_region(port->mapbase, ULITE_REGION, "uartlite")) {
dev_err(port->dev, "Memory region busy\n");
return -EBUSY;
@@ -375,32 +389,6 @@ static void ulite_console_write(struct console *co, const char *s,
spin_unlock_irqrestore(&port->lock, flags);
}
-#if defined(CONFIG_OF)
-static inline void __init ulite_console_of_find_device(int id)
-{
- struct device_node *np;
- struct resource res;
- const unsigned int *of_id;
- int rc;
-
- for_each_compatible_node(np, NULL, "xilinx,uartlite") {
- of_id = of_get_property(np, "port-number", NULL);
- if ((!of_id) || (*of_id != id))
- continue;
-
- rc = of_address_to_resource(np, 0, &res);
- if (rc)
- continue;
-
- ulite_ports[id].mapbase = res.start;
- of_node_put(np);
- return;
- }
-}
-#else /* CONFIG_OF */
-static inline void __init ulite_console_of_find_device(int id) { /* do nothing */ }
-#endif /* CONFIG_OF */
-
static int __init ulite_console_setup(struct console *co, char *options)
{
struct uart_port *port;
@@ -414,11 +402,7 @@ static int __init ulite_console_setup(struct console *co, char *options)
port = &ulite_ports[co->index];
- /* Check if it is an OF device */
- if (!port->mapbase)
- ulite_console_of_find_device(co->index);
-
- /* Do we have a device now? */
+ /* Has the device been initialized yet? */
if (!port->mapbase) {
pr_debug("console on ttyUL%i not present\n", co->index);
return -ENODEV;
@@ -617,13 +601,6 @@ static int __devexit ulite_of_remove(struct of_device *op)
return ulite_release(&op->dev);
}
-/* Match table for of_platform binding */
-static struct of_device_id __devinit ulite_of_match[] = {
- { .type = "serial", .compatible = "xilinx,uartlite", },
- {},
-};
-MODULE_DEVICE_TABLE(of, ulite_of_match);
-
static struct of_platform_driver ulite_of_driver = {
.owner = THIS_MODULE,
.name = "uartlite",
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index aaaea81e412a..d8107890db15 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -144,10 +144,10 @@ config SPI_OMAP_UWIRE
This hooks up to the MicroWire controller on OMAP1 chips.
config SPI_OMAP24XX
- tristate "McSPI driver for OMAP24xx"
- depends on SPI_MASTER && ARCH_OMAP24XX
+ tristate "McSPI driver for OMAP24xx/OMAP34xx"
+ depends on SPI_MASTER && (ARCH_OMAP24XX || ARCH_OMAP34XX)
help
- SPI master controller for OMAP24xx Multichannel SPI
+ SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
(McSPI) modules.
config SPI_PXA2XX
@@ -176,6 +176,13 @@ config SPI_S3C24XX_GPIO
the inbuilt hardware cannot provide the transfer mode, or
where the board is using non hardware connected pins.
+config SPI_SH_SCI
+ tristate "SuperH SCI SPI controller"
+ depends on SPI_MASTER && SUPERH
+ select SPI_BITBANG
+ help
+ SPI driver for SuperH SCI blocks.
+
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
depends on SPI_MASTER && GENERIC_GPIO && CPU_TX49XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 41fbac45c323..7fca043ce723 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o
obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
+obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
# ... add above this line ...
# SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index ff10808183a3..293b7cab3e57 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -51,7 +51,9 @@ struct atmel_spi {
u8 stopping;
struct list_head queue;
struct spi_transfer *current_transfer;
- unsigned long remaining_bytes;
+ unsigned long current_remaining_bytes;
+ struct spi_transfer *next_transfer;
+ unsigned long next_remaining_bytes;
void *buffer;
dma_addr_t buffer_dma;
@@ -121,6 +123,48 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
gpio_set_value(gpio, !active);
}
+static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ return msg->transfers.prev == &xfer->transfer_list;
+}
+
+static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
+{
+ return xfer->delay_usecs == 0 && !xfer->cs_change;
+}
+
+static void atmel_spi_next_xfer_data(struct spi_master *master,
+ struct spi_transfer *xfer,
+ dma_addr_t *tx_dma,
+ dma_addr_t *rx_dma,
+ u32 *plen)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 len = *plen;
+
+ /* use scratch buffer only when rx or tx data is unspecified */
+ if (xfer->rx_buf)
+ *rx_dma = xfer->rx_dma + xfer->len - len;
+ else {
+ *rx_dma = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ }
+ if (xfer->tx_buf)
+ *tx_dma = xfer->tx_dma + xfer->len - len;
+ else {
+ *tx_dma = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ memset(as->buffer, 0, len);
+ dma_sync_single_for_device(&as->pdev->dev,
+ as->buffer_dma, len, DMA_TO_DEVICE);
+ }
+
+ *plen = len;
+}
+
/*
* Submit next transfer for DMA.
* lock is held, spi irq is blocked
@@ -130,53 +174,78 @@ static void atmel_spi_next_xfer(struct spi_master *master,
{
struct atmel_spi *as = spi_master_get_devdata(master);
struct spi_transfer *xfer;
- u32 len;
+ u32 len, remaining, total;
dma_addr_t tx_dma, rx_dma;
- xfer = as->current_transfer;
- if (!xfer || as->remaining_bytes == 0) {
- if (xfer)
- xfer = list_entry(xfer->transfer_list.next,
- struct spi_transfer, transfer_list);
- else
- xfer = list_entry(msg->transfers.next,
- struct spi_transfer, transfer_list);
- as->remaining_bytes = xfer->len;
- as->current_transfer = xfer;
- }
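+ /*
+ * Pick the transfer to program into the PDC: the first transfer of
+ * the message, the one after the current transfer, or none if a
+ * "next" transfer has already been queued in the PDC next registers.
+ */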
+ if (!as->current_transfer)
+ xfer = list_entry(msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ else if (!as->next_transfer)
+ xfer = list_entry(as->current_transfer->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ else
+ xfer = NULL;
- len = as->remaining_bytes;
+ if (xfer) {
+ len = xfer->len;
+ atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ remaining = xfer->len - len;
- tx_dma = xfer->tx_dma + xfer->len - len;
- rx_dma = xfer->rx_dma + xfer->len - len;
+ spi_writel(as, RPR, rx_dma);
+ spi_writel(as, TPR, tx_dma);
- /* use scratch buffer only when rx or tx data is unspecified */
- if (!xfer->rx_buf) {
- rx_dma = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- }
- if (!xfer->tx_buf) {
- tx_dma = as->buffer_dma;
- if (len > BUFFER_SIZE)
- len = BUFFER_SIZE;
- memset(as->buffer, 0, len);
- dma_sync_single_for_device(&as->pdev->dev,
- as->buffer_dma, len, DMA_TO_DEVICE);
+ if (msg->spi->bits_per_word > 8)
+ len >>= 1;
+ spi_writel(as, RCR, len);
+ spi_writel(as, TCR, len);
+
+ dev_dbg(&msg->spi->dev,
+ " start xfer %p: len %u tx %p/%08x rx %p/%08x\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
+ xfer->rx_buf, xfer->rx_dma);
+ } else {
+ xfer = as->next_transfer;
+ remaining = as->next_remaining_bytes;
}
- spi_writel(as, RPR, rx_dma);
- spi_writel(as, TPR, tx_dma);
+ as->current_transfer = xfer;
+ as->current_remaining_bytes = remaining;
- as->remaining_bytes -= len;
- if (msg->spi->bits_per_word > 8)
- len >>= 1;
+ if (remaining > 0)
+ len = remaining;
+ else if (!atmel_spi_xfer_is_last(msg, xfer)
+ && atmel_spi_xfer_can_be_chained(xfer)) {
+ xfer = list_entry(xfer->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ len = xfer->len;
+ } else
+ xfer = NULL;
- /* REVISIT: when xfer->delay_usecs == 0, the PDC "next transfer"
- * mechanism might help avoid the IRQ latency between transfers
- * (and improve the nCS0 errata handling on at91rm9200 chips)
- *
- * We're also waiting for ENDRX before we start the next
+ as->next_transfer = xfer;
+
+ if (xfer) {
+ total = len;
+ atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ as->next_remaining_bytes = total - len;
+
+ spi_writel(as, RNPR, rx_dma);
+ spi_writel(as, TNPR, tx_dma);
+
+ if (msg->spi->bits_per_word > 8)
+ len >>= 1;
+ spi_writel(as, RNCR, len);
+ spi_writel(as, TNCR, len);
+
+ dev_dbg(&msg->spi->dev,
+ " next xfer %p: len %u tx %p/%08x rx %p/%08x\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
+ xfer->rx_buf, xfer->rx_dma);
+ } else {
+ spi_writel(as, RNCR, 0);
+ spi_writel(as, TNCR, 0);
+ }
+
+ /* REVISIT: We're waiting for ENDRX before we start the next
* transfer because we need to handle some difficult timing
* issues otherwise. If we wait for ENDTX in one transfer and
* then starts waiting for ENDRX in the next, it's difficult
@@ -186,17 +255,7 @@ static void atmel_spi_next_xfer(struct spi_master *master,
*
* It should be doable, though. Just not now...
*/
- spi_writel(as, TNCR, 0);
- spi_writel(as, RNCR, 0);
spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
-
- dev_dbg(&msg->spi->dev,
- " start xfer %p: len %u tx %p/%08x rx %p/%08x imr %03x\n",
- xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
- xfer->rx_buf, xfer->rx_dma, spi_readl(as, IMR));
-
- spi_writel(as, RCR, len);
- spi_writel(as, TCR, len);
spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
}
@@ -294,6 +353,7 @@ atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
spin_lock(&as->lock);
as->current_transfer = NULL;
+ as->next_transfer = NULL;
/* continue if needed */
if (list_empty(&as->queue) || as->stopping)
@@ -377,7 +437,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
spi_writel(as, IDR, pending);
- if (as->remaining_bytes == 0) {
+ if (as->current_remaining_bytes == 0) {
msg->actual_length += xfer->len;
if (!msg->is_dma_mapped)
@@ -387,7 +447,7 @@ atmel_spi_interrupt(int irq, void *dev_id)
if (xfer->delay_usecs)
udelay(xfer->delay_usecs);
- if (msg->transfers.prev == &xfer->transfer_list) {
+ if (atmel_spi_xfer_is_last(msg, xfer)) {
/* report completed message */
atmel_spi_msg_done(master, as, msg, 0,
xfer->cs_change);
@@ -490,9 +550,14 @@ static int atmel_spi_setup(struct spi_device *spi)
if (!(spi->mode & SPI_CPHA))
csr |= SPI_BIT(NCPHA);
- /* TODO: DLYBS and DLYBCT */
- csr |= SPI_BF(DLYBS, 10);
- csr |= SPI_BF(DLYBCT, 10);
+ /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
+ *
+ * DLYBCT would add delays between words, slowing down transfers.
+ * It could potentially be useful to cope with DMA bottlenecks, but
+ * in those cases it's probably best to just use a lower bitrate.
+ */
+ csr |= SPI_BF(DLYBS, 0);
+ csr |= SPI_BF(DLYBCT, 0);
/* chipselect must have been muxed as GPIO (e.g. in board setup) */
npcs_pin = (unsigned int)spi->controller_data;
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index ea61724ae225..a6ba11afb03f 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -915,6 +915,28 @@ static u8 __initdata spi2_txdma_id[] = {
OMAP24XX_DMA_SPI2_TX1,
};
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+static u8 __initdata spi3_rxdma_id[] = {
+ OMAP24XX_DMA_SPI3_RX0,
+ OMAP24XX_DMA_SPI3_RX1,
+};
+
+static u8 __initdata spi3_txdma_id[] = {
+ OMAP24XX_DMA_SPI3_TX0,
+ OMAP24XX_DMA_SPI3_TX1,
+};
+#endif
+
+#ifdef CONFIG_ARCH_OMAP3
+static u8 __initdata spi4_rxdma_id[] = {
+ OMAP34XX_DMA_SPI4_RX0,
+};
+
+static u8 __initdata spi4_txdma_id[] = {
+ OMAP34XX_DMA_SPI4_TX0,
+};
+#endif
+
static int __init omap2_mcspi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -935,7 +957,20 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
txdma_id = spi2_txdma_id;
num_chipselect = 2;
break;
- /* REVISIT omap2430 has a third McSPI ... */
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
+ case 3:
+ rxdma_id = spi3_rxdma_id;
+ txdma_id = spi3_txdma_id;
+ num_chipselect = 2;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP3
+ case 4:
+ rxdma_id = spi4_rxdma_id;
+ txdma_id = spi4_txdma_id;
+ num_chipselect = 1;
+ break;
+#endif
default:
return -EINVAL;
}
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index eb817b8eb024..365e0e355aea 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1526,17 +1526,6 @@ static void pxa2xx_spi_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int suspend_devices(struct device *dev, void *pm_message)
-{
- pm_message_t *state = pm_message;
-
- if (dev->power.power_state.event != state->event) {
- dev_warn(dev, "pm state does not match request\n");
- return -1;
- }
-
- return 0;
-}
static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
{
@@ -1544,12 +1533,6 @@ static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
struct ssp_device *ssp = drv_data->ssp;
int status = 0;
- /* Check all childern for current power state */
- if (device_for_each_child(&pdev->dev, &state, suspend_devices) != 0) {
- dev_warn(&pdev->dev, "suspend aborted\n");
- return -1;
- }
-
status = stop_queue(drv_data);
if (status != 0)
return status;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 682a6a48fec3..1ad12afc6ba0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -18,7 +18,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/autoconf.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -77,39 +76,33 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
#ifdef CONFIG_PM
-/*
- * NOTE: the suspend() method for an spi_master controller driver
- * should verify that all its child devices are marked as suspended;
- * suspend requests delivered through sysfs power/state files don't
- * enforce such constraints.
- */
static int spi_suspend(struct device *dev, pm_message_t message)
{
- int value;
+ int value = 0;
struct spi_driver *drv = to_spi_driver(dev->driver);
- if (!drv || !drv->suspend)
- return 0;
-
/* suspend will stop irqs and dma; no more i/o */
- value = drv->suspend(to_spi_device(dev), message);
- if (value == 0)
- dev->power.power_state = message;
+ if (drv) {
+ if (drv->suspend)
+ value = drv->suspend(to_spi_device(dev), message);
+ else
+ dev_dbg(dev, "... can't suspend\n");
+ }
return value;
}
static int spi_resume(struct device *dev)
{
- int value;
+ int value = 0;
struct spi_driver *drv = to_spi_driver(dev->driver);
- if (!drv || !drv->resume)
- return 0;
-
/* resume may restart the i/o queue */
- value = drv->resume(to_spi_device(dev));
- if (value == 0)
- dev->power.power_state = PMSG_ON;
+ if (drv) {
+ if (drv->resume)
+ value = drv->resume(to_spi_device(dev));
+ else
+ dev_dbg(dev, "... can't resume\n");
+ }
return value;
}
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 7ef39a6e8c06..d853fceb6bf0 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -1,37 +1,11 @@
/*
- * File: drivers/spi/bfin5xx_spi.c
- * Maintainer:
- * Bryan Wu <bryan.wu@analog.com>
- * Original Author:
- * Luke Yang (Analog Devices Inc.)
- *
- * Created: March. 10th 2006
- * Description: SPI controller driver for Blackfin BF5xx
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * Modified:
- * March 10, 2006 bfin5xx_spi.c Created. (Luke Yang)
- * August 7, 2006 added full duplex mode (Axel Weiss & Luke Yang)
- * July 17, 2007 add support for BF54x SPI0 controller (Bryan Wu)
- * July 30, 2007 add platfrom_resource interface to support multi-port
- * SPI controller (Bryan Wu)
+ * Blackfin On-Chip SPI Driver
*
* Copyright 2004-2007 Analog Devices Inc.
*
- * This program is free software ; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation ; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY ; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * You should have received a copy of the GNU General Public License
- * along with this program ; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Licensed under the GPL-2 or later.
*/
#include <linux/init.h>
@@ -223,10 +197,9 @@ static void cs_deactive(struct driver_data *drv_data, struct chip_data *chip)
#define MAX_SPI_SSEL 7
/* stop controller and re-config current chip*/
-static int restore_state(struct driver_data *drv_data)
+static void restore_state(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- int ret = 0;
/* Clear status and disable clock */
write_STAT(drv_data, BIT_STAT_CLR);
@@ -239,13 +212,6 @@ static int restore_state(struct driver_data *drv_data)
bfin_spi_enable(drv_data);
cs_active(drv_data, chip);
-
- if (ret)
- dev_dbg(&drv_data->pdev->dev,
- ": request chip select number %d failed\n",
- chip->chip_select_num);
-
- return ret;
}
/* used to kick off transfer in rx mode */
@@ -286,32 +252,30 @@ static void u8_writer(struct driver_data *drv_data)
dev_dbg(&drv_data->pdev->dev,
"cr8-s is 0x%x\n", read_STAT(drv_data));
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->tx < drv_data->tx_end) {
write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
while (read_STAT(drv_data) & BIT_STAT_TXS)
cpu_relax();
++drv_data->tx;
}
+
+ /* poll for SPI completion before return */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
}
static void u8_cs_chg_writer(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->tx < drv_data->tx_end) {
cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
while (read_STAT(drv_data) & BIT_STAT_TXS)
cpu_relax();
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
cs_deactive(drv_data, chip);
@@ -350,43 +314,28 @@ static void u8_cs_chg_reader(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
- /* clear TDBR buffer before read(else it will be shifted out) */
- write_TDBR(drv_data, 0xFFFF);
+ while (drv_data->rx < drv_data->rx_end) {
+ cs_active(drv_data, chip);
+ read_RDBR(drv_data); /* kick off */
- cs_active(drv_data, chip);
- dummy_read(drv_data);
+ while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ cpu_relax();
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
- while (drv_data->rx < drv_data->rx_end - 1) {
+ *(u8 *) (drv_data->rx) = read_SHAW(drv_data);
cs_deactive(drv_data, chip);
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
- cpu_relax();
- cs_active(drv_data, chip);
- *(u8 *) (drv_data->rx) = read_RDBR(drv_data);
++drv_data->rx;
}
- cs_deactive(drv_data, chip);
-
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
- cpu_relax();
- *(u8 *) (drv_data->rx) = read_SHAW(drv_data);
- ++drv_data->rx;
}
static void u8_duplex(struct driver_data *drv_data)
{
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
/* in duplex mode, clk is triggered by writing of TDBR */
while (drv_data->rx < drv_data->rx_end) {
write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
- while (read_STAT(drv_data) & BIT_STAT_TXS)
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
cpu_relax();
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
@@ -400,15 +349,12 @@ static void u8_cs_chg_duplex(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->rx < drv_data->rx_end) {
cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u8 *) (drv_data->tx)));
- while (read_STAT(drv_data) & BIT_STAT_TXS)
+
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
cpu_relax();
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
@@ -426,32 +372,30 @@ static void u16_writer(struct driver_data *drv_data)
dev_dbg(&drv_data->pdev->dev,
"cr16 is 0x%x\n", read_STAT(drv_data));
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->tx < drv_data->tx_end) {
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
while ((read_STAT(drv_data) & BIT_STAT_TXS))
cpu_relax();
drv_data->tx += 2;
}
+
+ /* poll for SPI completion before return */
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
}
static void u16_cs_chg_writer(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->tx < drv_data->tx_end) {
cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
while ((read_STAT(drv_data) & BIT_STAT_TXS))
cpu_relax();
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ cpu_relax();
cs_deactive(drv_data, chip);
@@ -519,14 +463,10 @@ static void u16_cs_chg_reader(struct driver_data *drv_data)
static void u16_duplex(struct driver_data *drv_data)
{
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
/* in duplex mode, clk is triggered by writing of TDBR */
while (drv_data->tx < drv_data->tx_end) {
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
- while (read_STAT(drv_data) & BIT_STAT_TXS)
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
cpu_relax();
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
@@ -540,15 +480,11 @@ static void u16_cs_chg_duplex(struct driver_data *drv_data)
{
struct chip_data *chip = drv_data->cur_chip;
- /* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
- cpu_relax();
-
while (drv_data->tx < drv_data->tx_end) {
cs_active(drv_data, chip);
write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
- while (read_STAT(drv_data) & BIT_STAT_TXS)
+ while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
cpu_relax();
while (!(read_STAT(drv_data) & BIT_STAT_RXS))
cpu_relax();
@@ -616,7 +552,7 @@ static void giveback(struct driver_data *drv_data)
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
- struct driver_data *drv_data = (struct driver_data *)dev_id;
+ struct driver_data *drv_data = dev_id;
struct chip_data *chip = drv_data->cur_chip;
struct spi_message *msg = drv_data->cur_msg;
@@ -978,10 +914,7 @@ static void pump_messages(struct work_struct *work)
/* Setup the SSP using the per chip configuration */
drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
- if (restore_state(drv_data)) {
- spin_unlock_irqrestore(&drv_data->lock, flags);
- return;
- };
+ restore_state(drv_data);
list_del_init(&drv_data->cur_msg->queue);
@@ -1187,7 +1120,7 @@ static int setup(struct spi_device *spi)
if ((chip->chip_select_num > 0)
&& (chip->chip_select_num <= spi->master->num_chipselect))
peripheral_request(ssel[spi->master->bus_num]
- [chip->chip_select_num-1], DRV_NAME);
+ [chip->chip_select_num-1], spi->modalias);
cs_deactive(drv_data, chip);
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 639963eb1ac1..1b0647124933 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -1686,17 +1686,6 @@ static void spi_imx_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int suspend_devices(struct device *dev, void *pm_message)
-{
- pm_message_t *state = pm_message;
-
- if (dev->power.power_state.event != state->event) {
- dev_warn(dev, "pm state does not match request\n");
- return -1;
- }
-
- return 0;
-}
static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state)
{
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 89d6685a5ca4..6e834b8b9d27 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -237,10 +237,8 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
{
struct s3c24xx_spi *hw;
struct spi_master *master;
- struct spi_board_info *bi;
struct resource *res;
int err = 0;
- int i;
master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
if (master == NULL) {
@@ -348,16 +346,6 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
goto err_register;
}
- /* register all the devices associated */
-
- bi = &hw->pdata->board_info[0];
- for (i = 0; i < hw->pdata->board_size; i++, bi++) {
- dev_info(hw->dev, "registering %s\n", bi->modalias);
-
- bi->controller_data = hw;
- spi_new_device(master, bi);
- }
-
return 0;
err_register:
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
index 109d82c1abc0..82ae7d7eca38 100644
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ b/drivers/spi/spi_s3c24xx_gpio.c
@@ -100,7 +100,6 @@ static int s3c2410_spigpio_probe(struct platform_device *dev)
struct spi_master *master;
struct s3c2410_spigpio *sp;
int ret;
- int i;
master = spi_alloc_master(&dev->dev, sizeof(struct s3c2410_spigpio));
if (master == NULL) {
@@ -143,17 +142,6 @@ static int s3c2410_spigpio_probe(struct platform_device *dev)
if (ret)
goto err_no_bitbang;
- /* register the chips to go with the board */
-
- for (i = 0; i < sp->info->board_size; i++) {
- dev_info(&dev->dev, "registering %p: %s\n",
- &sp->info->board_info[i],
- sp->info->board_info[i].modalias);
-
- sp->info->board_info[i].controller_data = sp;
- spi_new_device(master, sp->info->board_info + i);
- }
-
return 0;
err_no_bitbang:
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c
new file mode 100644
index 000000000000..3dbe71b16d60
--- /dev/null
+++ b/drivers/spi/spi_sh_sci.c
@@ -0,0 +1,205 @@
+/*
+ * SH SCI SPI interface
+ *
+ * Copyright (c) 2008 Magnus Damm
+ *
+ * Based on S3C24XX GPIO based SPI driver, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <asm/spi.h>
+#include <asm/io.h>
+
+struct sh_sci_spi {
+ struct spi_bitbang bitbang;
+
+ void __iomem *membase;
+ unsigned char val;
+ struct sh_spi_info *info;
+ struct platform_device *dev;
+};
+
+#define SCSPTR(sp) (sp->membase + 0x1c)
+#define PIN_SCK (1 << 2)
+#define PIN_TXD (1 << 0)
+#define PIN_RXD PIN_TXD
+#define PIN_INIT ((1 << 1) | (1 << 3) | PIN_SCK | PIN_TXD)
+
+static inline void setbits(struct sh_sci_spi *sp, int bits, int on)
+{
+ /*
+ * We are the only user of SCSPTR so no locking is required.
+ * Reading bits 2 and 0 of SCSPTR gives the pin state as input.
+ * Writing the same bits sets the output value.
+ * This makes regular read-modify-write difficult so we
+ * use sp->val to keep track of the latest register value.
+ */
+
+ if (on)
+ sp->val |= bits;
+ else
+ sp->val &= ~bits;
+
+ iowrite8(sp->val, SCSPTR(sp));
+}
+
+static inline void setsck(struct spi_device *dev, int on)
+{
+ setbits(spi_master_get_devdata(dev->master), PIN_SCK, on);
+}
+
+static inline void setmosi(struct spi_device *dev, int on)
+{
+ setbits(spi_master_get_devdata(dev->master), PIN_TXD, on);
+}
+
+static inline u32 getmiso(struct spi_device *dev)
+{
+ struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
+
+ return (ioread8(SCSPTR(sp)) & PIN_RXD) ? 1 : 0;
+}
+
+#define spidelay(x) ndelay(x)
+
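+/*
+ * Defining EXPAND_BITBANG_TXRX before including spi_bitbang.h again makes
+ * the header expand the inline bitbang_txrx_be_cpha0/1 helpers used below.
+ */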
+#define EXPAND_BITBANG_TXRX
+#include <linux/spi/spi_bitbang.h>
+
+static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, word, bits);
+}
+
+static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
+{
+ struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
+
+ if (sp->info && sp->info->chip_select)
+ (sp->info->chip_select)(sp->info, dev->chip_select, value);
+}
+
+static int sh_sci_spi_probe(struct platform_device *dev)
+{
+ struct resource *r;
+ struct spi_master *master;
+ struct sh_sci_spi *sp;
+ int ret;
+
+ master = spi_alloc_master(&dev->dev, sizeof(struct sh_sci_spi));
+ if (master == NULL) {
+ dev_err(&dev->dev, "failed to allocate spi master\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ sp = spi_master_get_devdata(master);
+
+ platform_set_drvdata(dev, sp);
+ sp->info = dev->dev.platform_data;
+
+ /* setup spi bitbang adaptor */
+ sp->bitbang.master = spi_master_get(master);
+ sp->bitbang.master->bus_num = sp->info->bus_num;
+ sp->bitbang.master->num_chipselect = sp->info->num_chipselect;
+ sp->bitbang.chipselect = sh_sci_spi_chipselect;
+
+ sp->bitbang.txrx_word[SPI_MODE_0] = sh_sci_spi_txrx_mode0;
+ sp->bitbang.txrx_word[SPI_MODE_1] = sh_sci_spi_txrx_mode1;
+ sp->bitbang.txrx_word[SPI_MODE_2] = sh_sci_spi_txrx_mode2;
+ sp->bitbang.txrx_word[SPI_MODE_3] = sh_sci_spi_txrx_mode3;
+
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENOENT;
+ goto err1;
+ }
+ sp->membase = ioremap(r->start, r->end - r->start + 1);
+ if (!sp->membase) {
+ ret = -ENXIO;
+ goto err1;
+ }
+ sp->val = ioread8(SCSPTR(sp));
+ setbits(sp, PIN_INIT, 1);
+
+ ret = spi_bitbang_start(&sp->bitbang);
+ if (!ret)
+ return 0;
+
+ setbits(sp, PIN_INIT, 0);
+ iounmap(sp->membase);
+ err1:
+ spi_master_put(sp->bitbang.master);
+ err0:
+ return ret;
+}
+
+static int sh_sci_spi_remove(struct platform_device *dev)
+{
+ struct sh_sci_spi *sp = platform_get_drvdata(dev);
+
+ iounmap(sp->membase);
+ setbits(sp, PIN_INIT, 0);
+ spi_bitbang_stop(&sp->bitbang);
+ spi_master_put(sp->bitbang.master);
+ return 0;
+}
+
+static struct platform_driver sh_sci_spi_drv = {
+ .probe = sh_sci_spi_probe,
+ .remove = sh_sci_spi_remove,
+ .driver = {
+ .name = "spi_sh_sci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sh_sci_spi_init(void)
+{
+ return platform_driver_register(&sh_sci_spi_drv);
+}
+module_init(sh_sci_spi_init);
+
+static void __exit sh_sci_spi_exit(void)
+{
+ platform_driver_unregister(&sh_sci_spi_drv);
+}
+module_exit(sh_sci_spi_exit);
+
+MODULE_DESCRIPTION("SH SCI SPI Driver");
+MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
new file mode 100644
index 000000000000..9b3f61200000
--- /dev/null
+++ b/drivers/thermal/Kconfig
@@ -0,0 +1,15 @@
+#
+# Generic thermal sysfs drivers configuration
+#
+
+menuconfig THERMAL
+ bool "Generic Thermal sysfs driver"
+ default y
+ help
+ The generic thermal sysfs driver offers a common mechanism for
+ thermal management. It is usually made up of one or more thermal
+ zones and cooling devices.
+ Each thermal zone contains its own temperature, trip points, and
+ cooling devices.
+ All platforms with ACPI thermal support can use this driver.
+ If you want this support, you should say Y here.
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
new file mode 100644
index 000000000000..8ef1232de376
--- /dev/null
+++ b/drivers/thermal/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for thermal sysfs drivers.
+#
+
+obj-$(CONFIG_THERMAL) += thermal.o
diff --git a/drivers/thermal/thermal.c b/drivers/thermal/thermal.c
new file mode 100644
index 000000000000..3273e348fd14
--- /dev/null
+++ b/drivers/thermal/thermal.c
@@ -0,0 +1,714 @@
+/*
+ * thermal.c - Generic Thermal Management Sysfs support.
+ *
+ * Copyright (C) 2008 Intel Corp
+ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
+ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include <linux/idr.h>
+#include <linux/thermal.h>
+#include <linux/spinlock.h>
+
+MODULE_AUTHOR("Zhang Rui")
+MODULE_DESCRIPTION("Generic thermal management sysfs support");
+MODULE_LICENSE("GPL");
+
+#define PREFIX "Thermal: "
+
+struct thermal_cooling_device_instance {
+ int id;
+ char name[THERMAL_NAME_LENGTH];
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *cdev;
+ int trip;
+ char attr_name[THERMAL_NAME_LENGTH];
+ struct device_attribute attr;
+ struct list_head node;
+};
+
+static DEFINE_IDR(thermal_tz_idr);
+static DEFINE_IDR(thermal_cdev_idr);
+static DEFINE_MUTEX(thermal_idr_lock);
+
+static LIST_HEAD(thermal_tz_list);
+static LIST_HEAD(thermal_cdev_list);
+static DEFINE_MUTEX(thermal_list_lock);
+
+static int get_idr(struct idr *idr, struct mutex *lock, int *id)
+{
+ int err;
+
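+ /*
+ * idr_get_new() may still return -EAGAIN if the memory preallocated
+ * by idr_pre_get() was consumed by a concurrent allocation, hence
+ * the retry around both calls.
+ */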
+ again:
+ if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
+ return -ENOMEM;
+
+ if (lock)
+ mutex_lock(lock);
+ err = idr_get_new(idr, NULL, id);
+ if (lock)
+ mutex_unlock(lock);
+ if (unlikely(err == -EAGAIN))
+ goto again;
+ else if (unlikely(err))
+ return err;
+
+ *id = *id & MAX_ID_MASK;
+ return 0;
+}
+
+static void release_idr(struct idr *idr, struct mutex *lock, int id)
+{
+ if (lock)
+ mutex_lock(lock);
+ idr_remove(idr, id);
+ if (lock)
+ mutex_unlock(lock);
+}
+
+/* sys I/F for thermal zone */
+
+#define to_thermal_zone(_dev) \
+ container_of(_dev, struct thermal_zone_device, device)
+
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ return sprintf(buf, "%s\n", tz->type);
+}
+
+static ssize_t
+temp_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ if (!tz->ops->get_temp)
+ return -EPERM;
+
+ return tz->ops->get_temp(tz, buf);
+}
+
+static ssize_t
+mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+
+ if (!tz->ops->get_mode)
+ return -EPERM;
+
+ return tz->ops->get_mode(tz, buf);
+}
+
+static ssize_t
+mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int result;
+
+ if (!tz->ops->set_mode)
+ return -EPERM;
+
+ result = tz->ops->set_mode(tz, buf);
+ if (result)
+ return result;
+
+ return count;
+}
+
+static ssize_t
+trip_point_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int trip;
+
+ if (!tz->ops->get_trip_type)
+ return -EPERM;
+
+ if (!sscanf(attr->attr.name, "trip_point_%d_type", &trip))
+ return -EINVAL;
+
+ return tz->ops->get_trip_type(tz, trip, buf);
+}
+
+static ssize_t
+trip_point_temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct thermal_zone_device *tz = to_thermal_zone(dev);
+ int trip;
+
+ if (!tz->ops->get_trip_temp)
+ return -EPERM;
+
+ if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
+ return -EINVAL;
+
+ return tz->ops->get_trip_temp(tz, trip, buf);
+}
+
+static DEVICE_ATTR(type, 0444, type_show, NULL);
+static DEVICE_ATTR(temp, 0444, temp_show, NULL);
+static DEVICE_ATTR(mode, 0644, mode_show, mode_store);
+
+static struct device_attribute trip_point_attrs[] = {
+ __ATTR(trip_point_0_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_0_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_1_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_1_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_2_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_2_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_3_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_3_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_4_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_4_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_5_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_5_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_6_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_6_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_7_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_7_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_8_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_8_temp, 0444, trip_point_temp_show, NULL),
+ __ATTR(trip_point_9_type, 0444, trip_point_type_show, NULL),
+ __ATTR(trip_point_9_temp, 0444, trip_point_temp_show, NULL),
+};
+
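+/*
+ * Trip point N exposes a _type/_temp attribute pair, stored at slots
+ * 2*N and 2*N+1 of trip_point_attrs[] above.
+ */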
+#define TRIP_POINT_ATTR_ADD(_dev, _index, result) \
+do { \
+ result = device_create_file(_dev, \
+ &trip_point_attrs[_index * 2]); \
+ if (result) \
+ break; \
+ result = device_create_file(_dev, \
+ &trip_point_attrs[_index * 2 + 1]); \
+} while (0)
+
+#define TRIP_POINT_ATTR_REMOVE(_dev, _index) \
+do { \
+ device_remove_file(_dev, &trip_point_attrs[_index * 2]); \
+ device_remove_file(_dev, &trip_point_attrs[_index * 2 + 1]); \
+} while (0)
+
+/* sys I/F for cooling device */
+#define to_cooling_device(_dev) \
+ container_of(_dev, struct thermal_cooling_device, device)
+
+static ssize_t
+thermal_cooling_device_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+ return sprintf(buf, "%s\n", cdev->type);
+}
+
+static ssize_t
+thermal_cooling_device_max_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+ return cdev->ops->get_max_state(cdev, buf);
+}
+
+static ssize_t
+thermal_cooling_device_cur_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+
+ return cdev->ops->get_cur_state(cdev, buf);
+}
+
+static ssize_t
+thermal_cooling_device_cur_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct thermal_cooling_device *cdev = to_cooling_device(dev);
+ int state;
+ int result;
+
+ if (!sscanf(buf, "%d\n", &state))
+ return -EINVAL;
+
+ if (state < 0)
+ return -EINVAL;
+
+ result = cdev->ops->set_cur_state(cdev, state);
+ if (result)
+ return result;
+ return count;
+}
+
+static struct device_attribute dev_attr_cdev_type =
+ __ATTR(type, 0444, thermal_cooling_device_type_show, NULL);
+static DEVICE_ATTR(max_state, 0444,
+ thermal_cooling_device_max_state_show, NULL);
+static DEVICE_ATTR(cur_state, 0644,
+ thermal_cooling_device_cur_state_show,
+ thermal_cooling_device_cur_state_store);
+
+static ssize_t
+thermal_cooling_device_trip_point_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct thermal_cooling_device_instance *instance;
+
+ instance =
+ container_of(attr, struct thermal_cooling_device_instance, attr);
+
+ if (instance->trip == THERMAL_TRIPS_NONE)
+ return sprintf(buf, "-1\n");
+ else
+ return sprintf(buf, "%d\n", instance->trip);
+}
+
+/* Device management */
+
+/**
+ * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone
+ * This function is usually called in the thermal zone device .bind callback.
+ * @tz: thermal zone device
+ * @trip: indicates which trip point the cooling device is
+ * associated with in this thermal zone.
+ * @cdev: thermal cooling device
+ */
+int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ int trip,
+ struct thermal_cooling_device *cdev)
+{
+ struct thermal_cooling_device_instance *dev;
+ struct thermal_cooling_device_instance *pos;
+ int result;
+
+ if (!tz || !cdev)
+ return -EINVAL;
+
+ if (trip >= tz->trips ||
+ (trip < 0 && trip != THERMAL_TRIPS_NONE))
+ return -EINVAL;
+
+ dev =
+ kzalloc(sizeof(struct thermal_cooling_device_instance), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->tz = tz;
+ dev->cdev = cdev;
+ dev->trip = trip;
+ result = get_idr(&tz->idr, &tz->lock, &dev->id);
+ if (result)
+ goto free_mem;
+
+ sprintf(dev->name, "cdev%d", dev->id);
+ result =
+ sysfs_create_link(&tz->device.kobj, &cdev->device.kobj, dev->name);
+ if (result)
+ goto release_idr;
+
+ sprintf(dev->attr_name, "cdev%d_trip_point", dev->id);
+ dev->attr.attr.name = dev->attr_name;
+ dev->attr.attr.mode = 0444;
+ dev->attr.show = thermal_cooling_device_trip_point_show;
+ result = device_create_file(&tz->device, &dev->attr);
+ if (result)
+ goto remove_symbol_link;
+
+ mutex_lock(&tz->lock);
+ list_for_each_entry(pos, &tz->cooling_devices, node)
+ if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) {
+ result = -EEXIST;
+ break;
+ }
+ if (!result)
+ list_add_tail(&dev->node, &tz->cooling_devices);
+ mutex_unlock(&tz->lock);
+
+ if (!result)
+ return 0;
+
+ device_remove_file(&tz->device, &dev->attr);
+ remove_symbol_link:
+ sysfs_remove_link(&tz->device.kobj, dev->name);
+ release_idr:
+ release_idr(&tz->idr, &tz->lock, dev->id);
+ free_mem:
+ kfree(dev);
+ return result;
+}
+EXPORT_SYMBOL(thermal_zone_bind_cooling_device);
+
+/**
+ * thermal_zone_unbind_cooling_device - unbind a cooling device from a thermal zone
+ * This function is usually called in the thermal zone device .unbind callback.
+ * @tz: thermal zone device
+ * @trip: indicates which trip point the cooling device is
+ * associated with in this thermal zone.
+ * @cdev: thermal cooling device
+ */
+int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
+ int trip,
+ struct thermal_cooling_device *cdev)
+{
+ struct thermal_cooling_device_instance *pos, *next;
+
+ mutex_lock(&tz->lock);
+ list_for_each_entry_safe(pos, next, &tz->cooling_devices, node) {
+ if (pos->tz == tz && pos->trip == trip
+ && pos->cdev == cdev) {
+ list_del(&pos->node);
+ mutex_unlock(&tz->lock);
+ goto unbind;
+ }
+ }
+ mutex_unlock(&tz->lock);
+
+ return -ENODEV;
+
+ unbind:
+ device_remove_file(&tz->device, &pos->attr);
+ sysfs_remove_link(&tz->device.kobj, pos->name);
+ release_idr(&tz->idr, &tz->lock, pos->id);
+ kfree(pos);
+ return 0;
+}
+EXPORT_SYMBOL(thermal_zone_unbind_cooling_device);
+
+static void thermal_release(struct device *dev)
+{
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *cdev;
+
+ if (!strncmp(dev->bus_id, "thermal_zone", sizeof "thermal_zone" - 1)) {
+ tz = to_thermal_zone(dev);
+ kfree(tz);
+ } else {
+ cdev = to_cooling_device(dev);
+ kfree(cdev);
+ }
+}
+
+static struct class thermal_class = {
+ .name = "thermal",
+ .dev_release = thermal_release,
+};
+
+/**
+ * thermal_cooling_device_register - register a new thermal cooling device
+ * @type: the thermal cooling device type.
+ * @devdata: device private data.
+ * @ops: standard thermal cooling devices callbacks.
+ */
+struct thermal_cooling_device *thermal_cooling_device_register(char *type,
+ void *devdata, struct thermal_cooling_device_ops *ops)
+{
+ struct thermal_cooling_device *cdev;
+ struct thermal_zone_device *pos;
+ int result;
+
+ if (strlen(type) >= THERMAL_NAME_LENGTH)
+ return NULL;
+
+ if (!ops || !ops->get_max_state || !ops->get_cur_state ||
+ !ops->set_cur_state)
+ return NULL;
+
+ cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL);
+ if (!cdev)
+ return NULL;
+
+ result = get_idr(&thermal_cdev_idr, &thermal_idr_lock, &cdev->id);
+ if (result) {
+ kfree(cdev);
+ return NULL;
+ }
+
+ strcpy(cdev->type, type);
+ cdev->ops = ops;
+ cdev->device.class = &thermal_class;
+ cdev->devdata = devdata;
+ sprintf(cdev->device.bus_id, "cooling_device%d", cdev->id);
+ result = device_register(&cdev->device);
+ if (result) {
+ release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
+ kfree(cdev);
+ return NULL;
+ }
+
+ /* sys I/F */
+ if (type) {
+ result = device_create_file(&cdev->device,
+ &dev_attr_cdev_type);
+ if (result)
+ goto unregister;
+ }
+
+ result = device_create_file(&cdev->device, &dev_attr_max_state);
+ if (result)
+ goto unregister;
+
+ result = device_create_file(&cdev->device, &dev_attr_cur_state);
+ if (result)
+ goto unregister;
+
+ mutex_lock(&thermal_list_lock);
+ list_add(&cdev->node, &thermal_cdev_list);
+ list_for_each_entry(pos, &thermal_tz_list, node) {
+ if (!pos->ops->bind)
+ continue;
+ result = pos->ops->bind(pos, cdev);
+ if (result)
+ break;
+
+ }
+ mutex_unlock(&thermal_list_lock);
+
+ if (!result)
+ return cdev;
+
+ unregister:
+ release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
+ device_unregister(&cdev->device);
+ return NULL;
+}
+EXPORT_SYMBOL(thermal_cooling_device_register);
+
+/**
+ * thermal_cooling_device_unregister - removes the registered thermal cooling device
+ *
+ * @cdev: the thermal cooling device to remove.
+ *
+ * thermal_cooling_device_unregister() must be called when the device is no
+ * longer needed.
+ */
+void thermal_cooling_device_unregister(struct
+ thermal_cooling_device
+ *cdev)
+{
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *pos = NULL;
+
+ if (!cdev)
+ return;
+
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(pos, &thermal_cdev_list, node)
+ if (pos == cdev)
+ break;
+ if (pos != cdev) {
+ /* thermal cooling device not found */
+ mutex_unlock(&thermal_list_lock);
+ return;
+ }
+ list_del(&cdev->node);
+ list_for_each_entry(tz, &thermal_tz_list, node) {
+ if (!tz->ops->unbind)
+ continue;
+ tz->ops->unbind(tz, cdev);
+ }
+ mutex_unlock(&thermal_list_lock);
+ if (cdev->type[0])
+ device_remove_file(&cdev->device,
+ &dev_attr_cdev_type);
+ device_remove_file(&cdev->device, &dev_attr_max_state);
+ device_remove_file(&cdev->device, &dev_attr_cur_state);
+
+ release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id);
+ device_unregister(&cdev->device);
+ return;
+}
+EXPORT_SYMBOL(thermal_cooling_device_unregister);
+
+/**
+ * thermal_zone_device_register - register a new thermal zone device
+ * @type: the thermal zone device type
+ * @trips: the number of trip points the thermal zone supports
+ * @devdata: private device data
+ * @ops: standard thermal zone device callbacks
+ *
+ * thermal_zone_device_unregister() must be called when the device is no
+ * longer needed.
+ */
+struct thermal_zone_device *thermal_zone_device_register(char *type,
+ int trips, void *devdata,
+ struct thermal_zone_device_ops *ops)
+{
+ struct thermal_zone_device *tz;
+ struct thermal_cooling_device *pos;
+ int result;
+ int count;
+
+ if (strlen(type) >= THERMAL_NAME_LENGTH)
+ return NULL;
+
+ if (trips > THERMAL_MAX_TRIPS || trips < 0)
+ return NULL;
+
+ if (!ops || !ops->get_temp)
+ return NULL;
+
+ tz = kzalloc(sizeof(struct thermal_zone_device), GFP_KERNEL);
+ if (!tz)
+ return NULL;
+
+ INIT_LIST_HEAD(&tz->cooling_devices);
+ idr_init(&tz->idr);
+ mutex_init(&tz->lock);
+ result = get_idr(&thermal_tz_idr, &thermal_idr_lock, &tz->id);
+ if (result) {
+ kfree(tz);
+ return NULL;
+ }
+
+ strcpy(tz->type, type);
+ tz->ops = ops;
+ tz->device.class = &thermal_class;
+ tz->devdata = devdata;
+ tz->trips = trips;
+ sprintf(tz->device.bus_id, "thermal_zone%d", tz->id);
+ result = device_register(&tz->device);
+ if (result) {
+ release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
+ kfree(tz);
+ return NULL;
+ }
+
+ /* sys I/F */
+ if (type) {
+ result = device_create_file(&tz->device, &dev_attr_type);
+ if (result)
+ goto unregister;
+ }
+
+ result = device_create_file(&tz->device, &dev_attr_temp);
+ if (result)
+ goto unregister;
+
+ if (ops->get_mode) {
+ result = device_create_file(&tz->device, &dev_attr_mode);
+ if (result)
+ goto unregister;
+ }
+
+ for (count = 0; count < trips; count++) {
+ TRIP_POINT_ATTR_ADD(&tz->device, count, result);
+ if (result)
+ goto unregister;
+ }
+
+ mutex_lock(&thermal_list_lock);
+ list_add_tail(&tz->node, &thermal_tz_list);
+ if (ops->bind)
+ list_for_each_entry(pos, &thermal_cdev_list, node) {
+ result = ops->bind(tz, pos);
+ if (result)
+ break;
+ }
+ mutex_unlock(&thermal_list_lock);
+
+ if (!result)
+ return tz;
+
+ unregister:
+ release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
+ device_unregister(&tz->device);
+ return NULL;
+}
+EXPORT_SYMBOL(thermal_zone_device_register);
+
+/**
+ * thermal_zone_device_unregister - removes the registered thermal zone device
+ *
+ * @tz: the thermal zone device to remove
+ */
+void thermal_zone_device_unregister(struct thermal_zone_device *tz)
+{
+ struct thermal_cooling_device *cdev;
+ struct thermal_zone_device *pos = NULL;
+ int count;
+
+ if (!tz)
+ return;
+
+ mutex_lock(&thermal_list_lock);
+ list_for_each_entry(pos, &thermal_tz_list, node)
+ if (pos == tz)
+ break;
+ if (pos != tz) {
+ /* thermal zone device not found */
+ mutex_unlock(&thermal_list_lock);
+ return;
+ }
+ list_del(&tz->node);
+ if (tz->ops->unbind)
+ list_for_each_entry(cdev, &thermal_cdev_list, node)
+ tz->ops->unbind(tz, cdev);
+ mutex_unlock(&thermal_list_lock);
+
+ if (tz->type[0])
+ device_remove_file(&tz->device, &dev_attr_type);
+ device_remove_file(&tz->device, &dev_attr_temp);
+ if (tz->ops->get_mode)
+ device_remove_file(&tz->device, &dev_attr_mode);
+
+ for (count = 0; count < tz->trips; count++)
+ TRIP_POINT_ATTR_REMOVE(&tz->device, count);
+
+ release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
+ idr_destroy(&tz->idr);
+ mutex_destroy(&tz->lock);
+ device_unregister(&tz->device);
+ return;
+}
+EXPORT_SYMBOL(thermal_zone_device_unregister);
+
+static int __init thermal_init(void)
+{
+ int result = 0;
+
+ result = class_register(&thermal_class);
+ if (result) {
+ idr_destroy(&thermal_tz_idr);
+ idr_destroy(&thermal_cdev_idr);
+ mutex_destroy(&thermal_idr_lock);
+ mutex_destroy(&thermal_list_lock);
+ }
+ return result;
+}
+
+static void __exit thermal_exit(void)
+{
+ class_unregister(&thermal_class);
+ idr_destroy(&thermal_tz_idr);
+ idr_destroy(&thermal_cdev_idr);
+ mutex_destroy(&thermal_idr_lock);
+ mutex_destroy(&thermal_list_lock);
+}
+
+subsys_initcall(thermal_init);
+module_exit(thermal_exit);
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index cc246faa3590..2a77e9d42c68 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -417,30 +417,28 @@ static void uio_vma_close(struct vm_area_struct *vma)
idev->vma_count--;
}
-static struct page *uio_vma_nopage(struct vm_area_struct *vma,
- unsigned long address, int *type)
+static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct uio_device *idev = vma->vm_private_data;
- struct page* page = NOPAGE_SIGBUS;
+ struct page *page;
int mi = uio_find_mem_index(vma);
if (mi < 0)
- return page;
+ return VM_FAULT_SIGBUS;
if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
page = virt_to_page(idev->info->mem[mi].addr);
else
page = vmalloc_to_page((void*)idev->info->mem[mi].addr);
get_page(page);
- if (type)
- *type = VM_FAULT_MINOR;
- return page;
+ vmf->page = page;
+ return 0;
}
static struct vm_operations_struct uio_vm_ops = {
.open = uio_vma_open,
.close = uio_vma_close,
- .nopage = uio_vma_nopage,
+ .fault = uio_vma_fault,
};
static int uio_mmap_physical(struct vm_area_struct *vma)
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index f8e711147501..fc65c02306dd 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -16,6 +16,7 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/backlight.h>
#include <asm/arch/board.h>
#include <asm/arch/cpu.h>
@@ -69,6 +70,107 @@ static void atmel_lcdfb_update_dma2d(struct atmel_lcdfb_info *sinfo,
}
#endif
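+/* PWM control value shared by the contrast and backlight code paths:
+ * divide-by-8 prescaler, positive polarity, PWM generator enabled. */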
+static const u32 contrast_ctr = ATMEL_LCDC_PS_DIV8
+ | ATMEL_LCDC_POL_POSITIVE
+ | ATMEL_LCDC_ENA_PWMENABLE;
+
+#ifdef CONFIG_BACKLIGHT_ATMEL_LCDC
+
+/* some bl->props field just changed */
+static int atmel_bl_update_status(struct backlight_device *bl)
+{
+ struct atmel_lcdfb_info *sinfo = bl_get_data(bl);
+ int power = sinfo->bl_power;
+ int brightness = bl->props.brightness;
+
+ /* REVISIT there may be a meaningful difference between
+ * fb_blank and power ... there seem to be some cases
+ * this doesn't handle correctly.
+ */
+ if (bl->props.fb_blank != sinfo->bl_power)
+ power = bl->props.fb_blank;
+ else if (bl->props.power != sinfo->bl_power)
+ power = bl->props.power;
+
+ if (brightness < 0 && power == FB_BLANK_UNBLANK)
+ brightness = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+ else if (power != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, brightness);
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR,
+ brightness ? contrast_ctr : 0);
+
+ bl->props.fb_blank = bl->props.power = sinfo->bl_power = power;
+
+ return 0;
+}
+
+static int atmel_bl_get_brightness(struct backlight_device *bl)
+{
+ struct atmel_lcdfb_info *sinfo = bl_get_data(bl);
+
+ return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+}
+
+static struct backlight_ops atmel_lcdc_bl_ops = {
+ .update_status = atmel_bl_update_status,
+ .get_brightness = atmel_bl_get_brightness,
+};
+
+static void init_backlight(struct atmel_lcdfb_info *sinfo)
+{
+ struct backlight_device *bl;
+
+ sinfo->bl_power = FB_BLANK_UNBLANK;
+
+ if (sinfo->backlight)
+ return;
+
+ bl = backlight_device_register("backlight", &sinfo->pdev->dev,
+ sinfo, &atmel_lcdc_bl_ops);
+ if (IS_ERR(bl)) {
+ dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n",
+ PTR_ERR(bl));
+ return;
+ }
+ sinfo->backlight = bl;
+
+ bl->props.power = FB_BLANK_UNBLANK;
+ bl->props.fb_blank = FB_BLANK_UNBLANK;
+ bl->props.max_brightness = 0xff;
+ bl->props.brightness = atmel_bl_get_brightness(bl);
+}
+
+static void exit_backlight(struct atmel_lcdfb_info *sinfo)
+{
+ if (sinfo->backlight)
+ backlight_device_unregister(sinfo->backlight);
+}
+
+#else
+
+static void init_backlight(struct atmel_lcdfb_info *sinfo)
+{
+ dev_warn(&sinfo->pdev->dev, "backlight control is not available\n");
+}
+
+static void exit_backlight(struct atmel_lcdfb_info *sinfo)
+{
+}
+
+#endif
+
+static void init_contrast(struct atmel_lcdfb_info *sinfo)
+{
+ /* have some default contrast/backlight settings */
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, ATMEL_LCDC_CVAL_DEFAULT);
+
+ if (sinfo->lcdcon_is_backlight)
+ init_backlight(sinfo);
+}
+
static struct fb_fix_screeninfo atmel_lcdfb_fix __initdata = {
.type = FB_TYPE_PACKED_PIXELS,
@@ -203,6 +305,26 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
var->transp.offset = var->transp.length = 0;
var->xoffset = var->yoffset = 0;
+ /* Saturate vertical and horizontal timings at maximum values */
+ var->vsync_len = min_t(u32, var->vsync_len,
+ (ATMEL_LCDC_VPW >> ATMEL_LCDC_VPW_OFFSET) + 1);
+ var->upper_margin = min_t(u32, var->upper_margin,
+ ATMEL_LCDC_VBP >> ATMEL_LCDC_VBP_OFFSET);
+ var->lower_margin = min_t(u32, var->lower_margin,
+ ATMEL_LCDC_VFP);
+ var->right_margin = min_t(u32, var->right_margin,
+ (ATMEL_LCDC_HFP >> ATMEL_LCDC_HFP_OFFSET) + 1);
+ var->hsync_len = min_t(u32, var->hsync_len,
+ (ATMEL_LCDC_HPW >> ATMEL_LCDC_HPW_OFFSET) + 1);
+ var->left_margin = min_t(u32, var->left_margin,
+ ATMEL_LCDC_HBP + 1);
+
+ /* Some parameters can't be zero */
+ var->vsync_len = max_t(u32, var->vsync_len, 1);
+ var->right_margin = max_t(u32, var->right_margin, 1);
+ var->hsync_len = max_t(u32, var->hsync_len, 1);
+ var->left_margin = max_t(u32, var->left_margin, 1);
+
switch (var->bits_per_pixel) {
case 1:
case 2:
@@ -370,10 +492,6 @@ static int atmel_lcdfb_set_par(struct fb_info *info)
/* Disable all interrupts */
lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
- /* Set contrast */
- value = ATMEL_LCDC_PS_DIV8 | ATMEL_LCDC_POL_POSITIVE | ATMEL_LCDC_ENA_PWMENABLE;
- lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, value);
- lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, ATMEL_LCDC_CVAL_DEFAULT);
/* ...wait for DMA engine to become idle... */
while (lcdc_readl(sinfo, ATMEL_LCDC_DMACON) & ATMEL_LCDC_DMABUSY)
msleep(10);
@@ -577,6 +695,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
sinfo->default_monspecs = pdata_sinfo->default_monspecs;
sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control;
sinfo->guard_time = pdata_sinfo->guard_time;
+ sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
} else {
dev_err(dev, "cannot get default configuration\n");
goto free_info;
@@ -670,6 +789,9 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
goto release_mem;
}
+ /* Initialize PWM for contrast or backlight ("off") */
+ init_contrast(sinfo);
+
/* interrupt */
ret = request_irq(sinfo->irq_base, atmel_lcdfb_interrupt, 0, pdev->name, info);
if (ret) {
@@ -721,6 +843,7 @@ free_cmap:
unregister_irqs:
free_irq(sinfo->irq_base, info);
unmap_mmio:
+ exit_backlight(sinfo);
iounmap(sinfo->mmio);
release_mem:
release_mem_region(info->fix.mmio_start, info->fix.mmio_len);
@@ -755,6 +878,7 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
if (!sinfo)
return 0;
+ exit_backlight(sinfo);
if (sinfo->atmel_lcdfb_power_control)
sinfo->atmel_lcdfb_power_control(0);
unregister_framebuffer(info);
@@ -781,6 +905,9 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
static struct platform_driver atmel_lcdfb_driver = {
.remove = __exit_p(atmel_lcdfb_remove),
+
+// FIXME need suspend, resume
+
.driver = {
.name = "atmel_lcdfb",
.owner = THIS_MODULE,
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 9609a6c676be..dcd8073c2369 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -50,6 +50,19 @@ config BACKLIGHT_CLASS_DEVICE
To have support for your specific LCD panel you will have to
select the proper drivers which depend on this option.
+config BACKLIGHT_ATMEL_LCDC
+ bool "Atmel LCDC Contrast-as-Backlight control"
+ depends on BACKLIGHT_CLASS_DEVICE && FB_ATMEL
+ default y if MACH_SAM9261EK || MACH_SAM9263EK
+ help
+ This provides a backlight control internal to the Atmel LCDC
+ driver. If the LCD "contrast control" on your board is wired
+ so it controls the backlight brightness, select this option to
+ expose it as a PWM-based backlight control.
+
+ If in doubt, it's safe to enable this option; it doesn't kick
+ in unless the board's description says it's wired that way.
+
config BACKLIGHT_CORGI
tristate "Generic (aka Sharp Corgi) Backlight Driver"
depends on BACKLIGHT_CLASS_DEVICE
@@ -67,6 +80,15 @@ config BACKLIGHT_LOCOMO
If you have a Sharp Zaurus SL-5500 (Collie) or SL-5600 (Poodle) say y to
enable the LCD/backlight driver.
+config BACKLIGHT_OMAP1
+ tristate "OMAP1 PWL-based LCD Backlight"
+ depends on BACKLIGHT_CLASS_DEVICE && ARCH_OMAP1
+ default y
+ help
+ This driver controls the LCD backlight level and power using
+ the PWL module of OMAP1 processors. Say Y if your board
+ uses this hardware.
+
config BACKLIGHT_HP680
tristate "HP Jornada 680 Backlight Driver"
depends on BACKLIGHT_CLASS_DEVICE && SH_HP6XX
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 965a78b18118..33f6c7cecc73 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
obj-$(CONFIG_BACKLIGHT_CORGI) += corgi_bl.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
+obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o
obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 4840fe217e4d..39394757679c 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -94,8 +94,10 @@ static ssize_t backlight_store_power(struct device *dev,
mutex_lock(&bd->ops_lock);
if (bd->ops) {
pr_debug("backlight: set power to %d\n", power);
- bd->props.power = power;
- backlight_update_status(bd);
+ if (bd->props.power != power) {
+ bd->props.power = power;
+ backlight_update_status(bd);
+ }
rc = count;
}
mutex_unlock(&bd->ops_lock);
@@ -132,8 +134,10 @@ static ssize_t backlight_store_brightness(struct device *dev,
else {
pr_debug("backlight: set brightness to %d\n",
brightness);
- bd->props.brightness = brightness;
- backlight_update_status(bd);
+ if (bd->props.brightness != brightness) {
+ bd->props.brightness = brightness;
+ backlight_update_status(bd);
+ }
rc = count;
}
}
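Annotation (not part of the patch): with the two hunks above, writing a value equal to the current one to a backlight sysfs attribute still succeeds but no longer calls back into the driver's update_status(). A hypothetical userspace check; the device name "omap-bl" is taken from the new driver added below, adjust for your board:

	#include <stdio.h>

	int main(void)
	{
		/* standard backlight-class attribute path */
		FILE *f = fopen("/sys/class/backlight/omap-bl/brightness", "w");

		if (!f)
			return 1;

		fprintf(f, "128\n");	/* first write: driver's update_status() runs */
		fflush(f);
		fprintf(f, "128\n");	/* same value: stored, update_status() skipped */
		fclose(f);		/* flushes the second write */
		return 0;
	}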
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
new file mode 100644
index 000000000000..891875d53a49
--- /dev/null
+++ b/drivers/video/backlight/omap1_bl.c
@@ -0,0 +1,210 @@
+/*
+ * Backlight driver for OMAP based boards.
+ *
+ * Copyright (c) 2006 Andrzej Zaborowski <balrog@zabor.org>
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This package is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this package; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/backlight.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/arch/board.h>
+#include <asm/arch/mux.h>
+
+#define OMAPBL_MAX_INTENSITY 0xff
+
+struct omap_backlight {
+ int powermode;
+ int current_intensity;
+
+ struct device *dev;
+ struct omap_backlight_config *pdata;
+};
+
+static inline void omapbl_send_intensity(int intensity)
+{
+ omap_writeb(intensity, OMAP_PWL_ENABLE);
+}
+
+static inline void omapbl_send_enable(int enable)
+{
+ omap_writeb(enable, OMAP_PWL_CLK_ENABLE);
+}
+
+static void omapbl_blank(struct omap_backlight *bl, int mode)
+{
+ if (bl->pdata->set_power)
+ bl->pdata->set_power(bl->dev, mode);
+
+ switch (mode) {
+ case FB_BLANK_NORMAL:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_POWERDOWN:
+ omapbl_send_intensity(0);
+ omapbl_send_enable(0);
+ break;
+
+ case FB_BLANK_UNBLANK:
+ omapbl_send_intensity(bl->current_intensity);
+ omapbl_send_enable(1);
+ break;
+ }
+}
+
+#ifdef CONFIG_PM
+static int omapbl_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct backlight_device *dev = platform_get_drvdata(pdev);
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+
+ omapbl_blank(bl, FB_BLANK_POWERDOWN);
+ return 0;
+}
+
+static int omapbl_resume(struct platform_device *pdev)
+{
+ struct backlight_device *dev = platform_get_drvdata(pdev);
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+
+ omapbl_blank(bl, bl->powermode);
+ return 0;
+}
+#else
+#define omapbl_suspend NULL
+#define omapbl_resume NULL
+#endif
+
+static int omapbl_set_power(struct backlight_device *dev, int state)
+{
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+
+ omapbl_blank(bl, state);
+ bl->powermode = state;
+
+ return 0;
+}
+
+static int omapbl_update_status(struct backlight_device *dev)
+{
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+
+ if (bl->current_intensity != dev->props.brightness) {
+ if (bl->powermode == FB_BLANK_UNBLANK)
+ omapbl_send_intensity(dev->props.brightness);
+ bl->current_intensity = dev->props.brightness;
+ }
+
+ if (dev->props.fb_blank != bl->powermode)
+ omapbl_set_power(dev, dev->props.fb_blank);
+
+ return 0;
+}
+
+static int omapbl_get_intensity(struct backlight_device *dev)
+{
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+ return bl->current_intensity;
+}
+
+static struct backlight_ops omapbl_ops = {
+ .get_brightness = omapbl_get_intensity,
+ .update_status = omapbl_update_status,
+};
+
+static int omapbl_probe(struct platform_device *pdev)
+{
+ struct backlight_device *dev;
+ struct omap_backlight *bl;
+ struct omap_backlight_config *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return -ENXIO;
+
+ omapbl_ops.check_fb = pdata->check_fb;
+
+ bl = kzalloc(sizeof(struct omap_backlight), GFP_KERNEL);
+ if (unlikely(!bl))
+ return -ENOMEM;
+
+ dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops);
+ if (IS_ERR(dev)) {
+ kfree(bl);
+ return PTR_ERR(dev);
+ }
+
+ bl->powermode = FB_BLANK_POWERDOWN;
+ bl->current_intensity = 0;
+
+ bl->pdata = pdata;
+ bl->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, dev);
+
+ omap_cfg_reg(PWL); /* Conflicts with UART3 */
+
+ dev->props.fb_blank = FB_BLANK_UNBLANK;
+ dev->props.max_brightness = OMAPBL_MAX_INTENSITY;
+ dev->props.brightness = pdata->default_intensity;
+ omapbl_update_status(dev);
+
+ printk(KERN_INFO "OMAP LCD backlight initialised\n");
+
+ return 0;
+}
+
+static int omapbl_remove(struct platform_device *pdev)
+{
+ struct backlight_device *dev = platform_get_drvdata(pdev);
+ struct omap_backlight *bl = dev_get_drvdata(&dev->dev);
+
+ backlight_device_unregister(dev);
+ kfree(bl);
+
+ return 0;
+}
+
+static struct platform_driver omapbl_driver = {
+ .probe = omapbl_probe,
+ .remove = omapbl_remove,
+ .suspend = omapbl_suspend,
+ .resume = omapbl_resume,
+ .driver = {
+ .name = "omap-bl",
+ },
+};
+
+static int __init omapbl_init(void)
+{
+ return platform_driver_register(&omapbl_driver);
+}
+
+static void __exit omapbl_exit(void)
+{
+ platform_driver_unregister(&omapbl_driver);
+}
+
+module_init(omapbl_init);
+module_exit(omapbl_exit);
+
+MODULE_AUTHOR("Andrzej Zaborowski <balrog@zabor.org>");
+MODULE_DESCRIPTION("OMAP LCD Backlight driver");
+MODULE_LICENSE("GPL");
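Annotation (not part of the patch): the driver binds purely by platform-device name, so a board file would register something like the sketch below. struct omap_backlight_config lives in the OMAP board headers rather than in this diff; only the fields actually dereferenced above (default_intensity, set_power, check_fb) are assumed. set_power is NULL-checked by the driver and check_fb is simply forwarded to the backlight core, so both hooks may be left unset:

	/* hypothetical board code */
	static struct omap_backlight_config board_bl_config = {
		.default_intensity	= 0x7f,	/* out of OMAPBL_MAX_INTENSITY (0xff) */
	};

	static struct platform_device board_bl_device = {
		.name	= "omap-bl",		/* must match omapbl_driver.driver.name */
		.id	= -1,
		.dev	= {
			.platform_data	= &board_bl_config,
		},
	};

	/* registered from the board's init code with platform_device_register() */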
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index c8e7427a0bc8..0ce791e6f79c 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -498,8 +498,7 @@ static struct lcd_device *lcd_dev;
static irqreturn_t bfin_bf54x_irq_error(int irq, void *dev_id)
{
-
- /*struct bfin_bf54xfb_info *info = (struct bfin_bf54xfb_info *)dev_id;*/
+ /*struct bfin_bf54xfb_info *info = dev_id;*/
u16 status = bfin_read_EPPI0_STATUS();
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
index 308850df16fe..69864b1b3f9e 100644
--- a/drivers/video/console/bitblit.c
+++ b/drivers/video/console/bitblit.c
@@ -63,7 +63,7 @@ static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
struct fb_fillrect region;
- region.color = attr_bgcol_ec(bgshift, vc);
+ region.color = attr_bgcol_ec(bgshift, vc, info);
region.dx = sx * vc->vc_font.width;
region.dy = sy * vc->vc_font.height;
region.width = width * vc->vc_font.width;
@@ -213,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
unsigned int bs = info->var.yres - bh;
struct fb_fillrect region;
- region.color = attr_bgcol_ec(bgshift, vc);
+ region.color = attr_bgcol_ec(bgshift, vc, info);
region.rop = ROP_COPY;
if (rw && !bottom_only) {
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 0f32f4a00b2d..022282494d3f 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -84,7 +84,7 @@
#ifdef CONFIG_MAC
#include <asm/macints.h>
#endif
-#if defined(__mc68000__) || defined(CONFIG_APUS)
+#if defined(__mc68000__)
#include <asm/machdep.h>
#include <asm/setup.h>
#endif
@@ -147,7 +147,7 @@ static char fontname[40];
static int info_idx = -1;
/* console rotation */
-static int rotate;
+static int initial_rotation;
static int fbcon_has_sysfs;
static const struct consw fb_con;
@@ -334,10 +334,7 @@ static inline int get_color(struct vc_data *vc, struct fb_info *info,
switch (depth) {
case 1:
{
- int col = ~(0xfff << (max(info->var.green.length,
- max(info->var.red.length,
- info->var.blue.length)))) & 0xff;
-
+ int col = mono_col(info);
/* 0 or 1 */
int fg = (info->fix.visual != FB_VISUAL_MONO01) ? col : 0;
int bg = (info->fix.visual != FB_VISUAL_MONO01) ? 0 : col;
@@ -537,9 +534,9 @@ static int __init fb_console_setup(char *this_opt)
if (!strncmp(options, "rotate:", 7)) {
options += 7;
if (*options)
- rotate = simple_strtoul(options, &options, 0);
- if (rotate > 3)
- rotate = 0;
+ initial_rotation = simple_strtoul(options, &options, 0);
+ if (initial_rotation > 3)
+ initial_rotation = 0;
}
}
return 1;
@@ -989,7 +986,7 @@ static const char *fbcon_startup(void)
ops->graphics = 1;
ops->cur_rotate = -1;
info->fbcon_par = ops;
- p->con_rotate = rotate;
+ p->con_rotate = initial_rotation;
set_blitting_type(vc, info);
if (info->fix.type != FB_TYPE_TEXT) {
@@ -1176,7 +1173,7 @@ static void fbcon_init(struct vc_data *vc, int init)
con_copy_unimap(vc, svc);
ops = info->fbcon_par;
- p->con_rotate = rotate;
+ p->con_rotate = initial_rotation;
set_blitting_type(vc, info);
cols = vc->vc_cols;
@@ -2795,7 +2792,7 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
{
struct fb_info *info = registered_fb[con2fb_map[fg_console]];
struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[fg_console];
+ struct display *disp = &fb_display[fg_console];
int offset, limit, scrollback_old;
if (softback_top) {
@@ -2833,7 +2830,7 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
logo_shown = FBCON_LOGO_CANSHOW;
}
fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
- fbcon_redraw_softback(vc, p, lines);
+ fbcon_redraw_softback(vc, disp, lines);
fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
return 0;
}
@@ -2855,9 +2852,9 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
fbcon_cursor(vc, CM_ERASE);
- offset = p->yscroll - scrollback_current;
- limit = p->vrows;
- switch (p->scrollmode) {
+ offset = disp->yscroll - scrollback_current;
+ limit = disp->vrows;
+ switch (disp->scrollmode) {
case SCROLL_WRAP_MOVE:
info->var.vmode |= FB_VMODE_YWRAP;
break;
diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h
index 8e6ef4bc7a5c..3706307e70ed 100644
--- a/drivers/video/console/fbcon.h
+++ b/drivers/video/console/fbcon.h
@@ -93,10 +93,6 @@ struct fbcon_ops {
(((s) >> (fgshift)) & 0x0f)
#define attr_bgcol(bgshift,s) \
(((s) >> (bgshift)) & 0x0f)
-#define attr_bgcol_ec(bgshift,vc) \
- ((vc) ? (((vc)->vc_video_erase_char >> (bgshift)) & 0x0f) : 0)
-#define attr_fgcol_ec(fgshift,vc) \
- ((vc) ? (((vc)->vc_video_erase_char >> (fgshift)) & 0x0f) : 0)
/* Monochrome */
#define attr_bold(s) \
@@ -108,6 +104,49 @@ struct fbcon_ops {
#define attr_blink(s) \
((s) & 0x8000)
+#define mono_col(info) \
+ (~(0xfff << (max((info)->var.green.length, \
+ max((info)->var.red.length, \
+ (info)->var.blue.length)))) & 0xff)
+
+static inline int attr_col_ec(int shift, struct vc_data *vc,
+ struct fb_info *info, int is_fg)
+{
+ int is_mono01;
+ int col;
+ int fg;
+ int bg;
+
+ if (!vc)
+ return 0;
+
+ if (vc->vc_can_do_color)
+ return is_fg ? attr_fgcol(shift,vc->vc_video_erase_char)
+ : attr_bgcol(shift,vc->vc_video_erase_char);
+
+ if (!info)
+ return 0;
+
+ col = mono_col(info);
+ is_mono01 = info->fix.visual == FB_VISUAL_MONO01;
+
+ if (attr_reverse(vc->vc_video_erase_char)) {
+ fg = is_mono01 ? col : 0;
+ bg = is_mono01 ? 0 : col;
+ }
+ else {
+ fg = is_mono01 ? 0 : col;
+ bg = is_mono01 ? col : 0;
+ }
+
+ return is_fg ? fg : bg;
+}
+
+#define attr_bgcol_ec(bgshift,vc,info) \
+ attr_col_ec(bgshift,vc,info,0)
+#define attr_fgcol_ec(fgshift,vc,info) \
+ attr_col_ec(fgshift,vc,info,1)
+
/* Font */
#define REFCOUNT(fd) (((int *)(fd))[-1])
#define FNTSIZE(fd) (((int *)(fd))[-2])
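Annotation (not part of the patch), a worked example of the new helpers:

	/*
	 * With a 1-bit mono visual (red/green/blue length == 1),
	 * mono_col(info) is ~(0xfff << 1) & 0xff == 0x01; with a 4-bit
	 * greyscale visual it is 0x0f -- i.e. the all-ones pixel value
	 * for that depth, capped at 0xff.  attr_col_ec() then hands back
	 * either that value or 0 for the erase character's foreground or
	 * background, swapping the two on FB_VISUAL_MONO01 and when the
	 * erase attribute has reverse video set, so the attr_*col_ec()
	 * macros keep working on consoles that cannot do colour.
	 */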
diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
index 825e6d6972a7..bdf913ecf001 100644
--- a/drivers/video/console/fbcon_ccw.c
+++ b/drivers/video/console/fbcon_ccw.c
@@ -84,7 +84,7 @@ static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
u32 vyres = GETVYRES(ops->p->scrollmode, info);
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = sy * vc->vc_font.height;
region.dy = vyres - ((sx + width) * vc->vc_font.width);
region.height = width * vc->vc_font.width;
@@ -198,7 +198,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.rop = ROP_COPY;
if (rw && !bottom_only) {
diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c
index c637e6318803..a6819b9d1770 100644
--- a/drivers/video/console/fbcon_cw.c
+++ b/drivers/video/console/fbcon_cw.c
@@ -70,7 +70,7 @@ static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
u32 vxres = GETVXRES(ops->p->scrollmode, info);
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = vxres - ((sy + height) * vc->vc_font.height);
region.dy = sx * vc->vc_font.width;
region.height = width * vc->vc_font.width;
@@ -182,7 +182,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.rop = ROP_COPY;
if (rw && !bottom_only) {
diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c
index 1473506df5d0..d9b5d6eb68a7 100644
--- a/drivers/video/console/fbcon_ud.c
+++ b/drivers/video/console/fbcon_ud.c
@@ -71,7 +71,7 @@ static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
u32 vyres = GETVYRES(ops->p->scrollmode, info);
u32 vxres = GETVXRES(ops->p->scrollmode, info);
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.dy = vyres - ((sy + height) * vc->vc_font.height);
region.dx = vxres - ((sx + width) * vc->vc_font.width);
region.width = width * vc->vc_font.width;
@@ -228,7 +228,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- region.color = attr_bgcol_ec(bgshift,vc);
+ region.color = attr_bgcol_ec(bgshift,vc,info);
region.rop = ROP_COPY;
if (rw && !bottom_only) {
diff --git a/drivers/video/console/fonts.c b/drivers/video/console/fonts.c
index 96979c377518..d0c03fd70871 100644
--- a/drivers/video/console/fonts.c
+++ b/drivers/video/console/fonts.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
-#if defined(__mc68000__) || defined(CONFIG_APUS)
+#if defined(__mc68000__)
#include <asm/setup.h>
#endif
#include <linux/font.h>
@@ -120,7 +120,7 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
for(i=0; i<num_fonts; i++) {
f = fonts[i];
c = f->pref;
-#if defined(__mc68000__) || defined(CONFIG_APUS)
+#if defined(__mc68000__)
#ifdef CONFIG_FONT_PEARL_8x8
if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
c = 100;
diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c
index d981fe4d86c6..0056a41e5c35 100644
--- a/drivers/video/console/tileblit.c
+++ b/drivers/video/console/tileblit.c
@@ -40,8 +40,8 @@ static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
rect.index = vc->vc_video_erase_char &
((vc->vc_hi_font_mask) ? 0x1ff : 0xff);
- rect.fg = attr_fgcol_ec(fgshift, vc);
- rect.bg = attr_bgcol_ec(bgshift, vc);
+ rect.fg = attr_fgcol_ec(fgshift, vc, info);
+ rect.bg = attr_bgcol_ec(bgshift, vc, info);
rect.sx = sx;
rect.sy = sy;
rect.width = width;
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index f65bcd314d54..6df29a62d720 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1153,8 +1153,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
/* if 512 char mode is already enabled don't re-enable it. */
if ((set) && (ch512 != vga_512_chars)) {
- int i;
-
/* attribute controller */
for (i = 0; i < MAX_NR_CONSOLES; i++) {
struct vc_data *c = vc_cons[i].d;
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index a0c5d9d90d74..0f8cfb988c90 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -25,8 +25,8 @@
#include <linux/pagemap.h>
/* this is to find and return the vmalloc-ed fb pages */
-static struct page* fb_deferred_io_nopage(struct vm_area_struct *vma,
- unsigned long vaddr, int *type)
+static int fb_deferred_io_fault(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
{
unsigned long offset;
struct page *page;
@@ -34,18 +34,17 @@ static struct page* fb_deferred_io_nopage(struct vm_area_struct *vma,
/* info->screen_base is in System RAM */
void *screen_base = (void __force *) info->screen_base;
- offset = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
+ offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= info->fix.smem_len)
- return NOPAGE_SIGBUS;
+ return VM_FAULT_SIGBUS;
page = vmalloc_to_page(screen_base + offset);
if (!page)
- return NOPAGE_OOM;
+ return VM_FAULT_SIGBUS;
get_page(page);
- if (type)
- *type = VM_FAULT_MINOR;
- return page;
+ vmf->page = page;
+ return 0;
}
int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
@@ -84,7 +83,7 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
}
static struct vm_operations_struct fb_deferred_io_vm_ops = {
- .nopage = fb_deferred_io_nopage,
+ .fault = fb_deferred_io_fault,
.page_mkwrite = fb_deferred_io_mkwrite,
};
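Annotation (not part of the patch), on why the new offset computation is equivalent to the old one:

	/*
	 * Old: offset = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT)
	 * New: offset = vmf->pgoff << PAGE_SHIFT
	 *
	 * The fault path fills vmf->pgoff with
	 *	((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff
	 * for a page-aligned vaddr, so both name the same file-relative
	 * byte offset.  Errors are now signalled with VM_FAULT_SIGBUS
	 * instead of the NOPAGE_* return tokens, and the resolved page is
	 * handed back through vmf->page with a reference taken.
	 */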
diff --git a/drivers/video/fb_draw.h b/drivers/video/fb_draw.h
index cdafbe14ef1f..a2a0618d86a5 100644
--- a/drivers/video/fb_draw.h
+++ b/drivers/video/fb_draw.h
@@ -91,6 +91,7 @@ static inline unsigned long fb_rev_pixels_in_long(unsigned long val,
val = comp(val >> 2, val << 2, REV_PIXELS_MASK2);
if (bswapmask & 3)
val = comp(val >> 4, val << 4, REV_PIXELS_MASK4);
+ return val;
}
static inline u32 fb_shifted_pixels_mask_u32(u32 index, u32 bswapmask)
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index 4ba9c0894416..052e18058498 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -4,7 +4,7 @@
* Copyright (C) 2002 James Simmons <jsimmons@users.sf.net>
*
* Credits:
- *
+ *
* The EDID Parser is a conglomeration from the following sources:
*
* 1. SciTech SNAP Graphics Architecture
@@ -12,13 +12,13 @@
*
* 2. XFree86 4.3.0, interpret_edid.c
* Copyright 1998 by Egbert Eich <Egbert.Eich@Physik.TU-Darmstadt.DE>
- *
- * 3. John Fremlin <vii@users.sourceforge.net> and
+ *
+ * 3. John Fremlin <vii@users.sourceforge.net> and
* Ani Joshi <ajoshi@unixbox.com>
- *
+ *
* Generalized Timing Formula is derived from:
*
- * GTF Spreadsheet by Andy Morrish (1/5/97)
+ * GTF Spreadsheet by Andy Morrish (1/5/97)
* available at http://www.vesa.org
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -36,7 +36,7 @@
#endif
#include "edid.h"
-/*
+/*
* EDID parser
*/
@@ -160,8 +160,8 @@ static int check_edid(unsigned char *edid)
for (i = 0; i < ARRAY_SIZE(brokendb); i++) {
if (!strncmp(manufacturer, brokendb[i].manufacturer, 4) &&
brokendb[i].model == model) {
- fix = brokendb[i].fix;
- break;
+ fix = brokendb[i].fix;
+ break;
}
}
@@ -323,7 +323,7 @@ static void get_dpms_capabilities(unsigned char flags,
(flags & DPMS_SUSPEND) ? "yes" : "no",
(flags & DPMS_STANDBY) ? "yes" : "no");
}
-
+
static void get_chroma(unsigned char *block, struct fb_monspecs *specs)
{
int tmp;
@@ -365,7 +365,7 @@ static void get_chroma(unsigned char *block, struct fb_monspecs *specs)
tmp += 512;
specs->chroma.bluey = tmp/1024;
DPRINTK("BlueY: 0.%03d\n", specs->chroma.bluey);
-
+
tmp = ((block[6] & (3 << 2)) >> 2) | (block[0xd] << 2);
tmp *= 1000;
tmp += 512;
@@ -383,7 +383,7 @@ static void calc_mode_timings(int xres, int yres, int refresh,
struct fb_videomode *mode)
{
struct fb_var_screeninfo *var;
-
+
var = kzalloc(sizeof(struct fb_var_screeninfo), GFP_KERNEL);
if (var) {
@@ -451,11 +451,11 @@ static int get_est_timing(unsigned char *block, struct fb_videomode *mode)
c = block[1];
if (c&0x80) {
- mode[num++] = vesa_modes[9];
+ mode[num++] = vesa_modes[9];
DPRINTK(" 800x600@72Hz\n");
}
if (c&0x40) {
- mode[num++] = vesa_modes[10];
+ mode[num++] = vesa_modes[10];
DPRINTK(" 800x600@75Hz\n");
}
if (c&0x20) {
@@ -495,7 +495,7 @@ static int get_est_timing(unsigned char *block, struct fb_videomode *mode)
static int get_std_timing(unsigned char *block, struct fb_videomode *mode)
{
int xres, yres = 0, refresh, ratio, i;
-
+
xres = (block[0] + 31) * 8;
if (xres <= 256)
return 0;
@@ -519,7 +519,7 @@ static int get_std_timing(unsigned char *block, struct fb_videomode *mode)
DPRINTK(" %dx%d@%dHz\n", xres, yres, refresh);
for (i = 0; i < VESA_MODEDB_SIZE; i++) {
- if (vesa_modes[i].xres == xres &&
+ if (vesa_modes[i].xres == xres &&
vesa_modes[i].yres == yres &&
vesa_modes[i].refresh == refresh) {
*mode = vesa_modes[i];
@@ -536,13 +536,13 @@ static int get_dst_timing(unsigned char *block,
{
int j, num = 0;
- for (j = 0; j < 6; j++, block+= STD_TIMING_DESCRIPTION_SIZE)
+ for (j = 0; j < 6; j++, block += STD_TIMING_DESCRIPTION_SIZE)
num += get_std_timing(block, &mode[num]);
return num;
}
-static void get_detailed_timing(unsigned char *block,
+static void get_detailed_timing(unsigned char *block,
struct fb_videomode *mode)
{
mode->xres = H_ACTIVE;
@@ -553,7 +553,7 @@ static void get_detailed_timing(unsigned char *block,
mode->right_margin = H_SYNC_OFFSET;
mode->left_margin = (H_ACTIVE + H_BLANKING) -
(H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH);
- mode->upper_margin = V_BLANKING - V_SYNC_OFFSET -
+ mode->upper_margin = V_BLANKING - V_SYNC_OFFSET -
V_SYNC_WIDTH;
mode->lower_margin = V_SYNC_OFFSET;
mode->hsync_len = H_SYNC_WIDTH;
@@ -597,7 +597,7 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize)
if (mode == NULL)
return NULL;
- if (edid == NULL || !edid_checksum(edid) ||
+ if (edid == NULL || !edid_checksum(edid) ||
!edid_check_header(edid)) {
kfree(mode);
return NULL;
@@ -632,7 +632,7 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize)
if (block[0] == 0x00 && block[1] == 0x00 && block[3] == 0xfa)
num += get_dst_timing(block + 5, &mode[num]);
}
-
+
/* Yikes, EDID data is totally useless */
if (!num) {
kfree(mode);
@@ -686,7 +686,7 @@ static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
/* estimate monitor limits based on modes supported */
if (retval) {
struct fb_videomode *modes, *mode;
- int num_modes, i, hz, hscan, pixclock;
+ int num_modes, hz, hscan, pixclock;
int vtotal, htotal;
modes = fb_create_modedb(edid, &num_modes);
@@ -713,7 +713,7 @@ static int fb_get_monitor_limits(unsigned char *edid, struct fb_monspecs *specs)
hscan = (pixclock + htotal / 2) / htotal;
hscan = (hscan + 500) / 1000 * 1000;
hz = (hscan + vtotal / 2) / vtotal;
-
+
if (specs->dclkmax == 0 || specs->dclkmax < pixclock)
specs->dclkmax = pixclock;
@@ -966,8 +966,8 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
DPRINTK("========================================\n");
}
-/*
- * VESA Generalized Timing Formula (GTF)
+/*
+ * VESA Generalized Timing Formula (GTF)
*/
#define FLYBACK 550
@@ -996,7 +996,7 @@ struct __fb_timings {
* @hfreq: horizontal freq
*
* DESCRIPTION:
- * vblank = right_margin + vsync_len + left_margin
+ * vblank = right_margin + vsync_len + left_margin
*
* given: right_margin = 1 (V_FRONTPORCH)
* vsync_len = 3
@@ -1010,12 +1010,12 @@ static u32 fb_get_vblank(u32 hfreq)
{
u32 vblank;
- vblank = (hfreq * FLYBACK)/1000;
+ vblank = (hfreq * FLYBACK)/1000;
vblank = (vblank + 500)/1000;
return (vblank + V_FRONTPORCH);
}
-/**
+/**
* fb_get_hblank_by_freq - get horizontal blank time given hfreq
* @hfreq: horizontal freq
* @xres: horizontal resolution in pixels
@@ -1031,7 +1031,7 @@ static u32 fb_get_vblank(u32 hfreq)
*
* where: C = ((offset - scale factor) * blank_scale)
* -------------------------------------- + scale factor
- * 256
+ * 256
* M = blank_scale * gradient
*
*/
@@ -1039,7 +1039,7 @@ static u32 fb_get_hblank_by_hfreq(u32 hfreq, u32 xres)
{
u32 c_val, m_val, duty_cycle, hblank;
- c_val = (((H_OFFSET - H_SCALEFACTOR) * H_BLANKSCALE)/256 +
+ c_val = (((H_OFFSET - H_SCALEFACTOR) * H_BLANKSCALE)/256 +
H_SCALEFACTOR) * 1000;
m_val = (H_BLANKSCALE * H_GRADIENT)/256;
m_val = (m_val * 1000000)/hfreq;
@@ -1048,7 +1048,7 @@ static u32 fb_get_hblank_by_hfreq(u32 hfreq, u32 xres)
return (hblank);
}
-/**
+/**
* fb_get_hblank_by_dclk - get horizontal blank time given pixelclock
* @dclk: pixelclock in Hz
* @xres: horizontal resolution in pixels
@@ -1061,7 +1061,7 @@ static u32 fb_get_hblank_by_hfreq(u32 hfreq, u32 xres)
*
* duty cycle = percent of htotal assigned to inactive display
* duty cycle = C - (M * h_period)
- *
+ *
* where: h_period = SQRT(100 - C + (0.4 * xres * M)/dclk) + C - 100
* -----------------------------------------------
* 2 * M
@@ -1077,11 +1077,11 @@ static u32 fb_get_hblank_by_dclk(u32 dclk, u32 xres)
h_period = 100 - C_VAL;
h_period *= h_period;
h_period += (M_VAL * xres * 2 * 1000)/(5 * dclk);
- h_period *=10000;
+ h_period *= 10000;
h_period = int_sqrt(h_period);
h_period -= (100 - C_VAL) * 100;
- h_period *= 1000;
+ h_period *= 1000;
h_period /= 2 * M_VAL;
duty_cycle = C_VAL * 1000 - (M_VAL * h_period)/100;
@@ -1089,7 +1089,7 @@ static u32 fb_get_hblank_by_dclk(u32 dclk, u32 xres)
hblank &= ~15;
return (hblank);
}
-
+
/**
* fb_get_hfreq - estimate hsync
* @vfreq: vertical refresh rate
@@ -1100,13 +1100,13 @@ static u32 fb_get_hblank_by_dclk(u32 dclk, u32 xres)
* (yres + front_port) * vfreq * 1000000
* hfreq = -------------------------------------
* (1000000 - (vfreq * FLYBACK)
- *
+ *
*/
static u32 fb_get_hfreq(u32 vfreq, u32 yres)
{
u32 divisor, hfreq;
-
+
divisor = (1000000 - (vfreq * FLYBACK))/1000;
hfreq = (yres + V_FRONTPORCH) * vfreq * 1000;
return (hfreq/divisor);
@@ -1117,7 +1117,7 @@ static void fb_timings_vfreq(struct __fb_timings *timings)
timings->hfreq = fb_get_hfreq(timings->vfreq, timings->vactive);
timings->vblank = fb_get_vblank(timings->hfreq);
timings->vtotal = timings->vactive + timings->vblank;
- timings->hblank = fb_get_hblank_by_hfreq(timings->hfreq,
+ timings->hblank = fb_get_hblank_by_hfreq(timings->hfreq,
timings->hactive);
timings->htotal = timings->hactive + timings->hblank;
timings->dclk = timings->htotal * timings->hfreq;
@@ -1128,7 +1128,7 @@ static void fb_timings_hfreq(struct __fb_timings *timings)
timings->vblank = fb_get_vblank(timings->hfreq);
timings->vtotal = timings->vactive + timings->vblank;
timings->vfreq = timings->hfreq/timings->vtotal;
- timings->hblank = fb_get_hblank_by_hfreq(timings->hfreq,
+ timings->hblank = fb_get_hblank_by_hfreq(timings->hfreq,
timings->hactive);
timings->htotal = timings->hactive + timings->hblank;
timings->dclk = timings->htotal * timings->hfreq;
@@ -1136,7 +1136,7 @@ static void fb_timings_hfreq(struct __fb_timings *timings)
static void fb_timings_dclk(struct __fb_timings *timings)
{
- timings->hblank = fb_get_hblank_by_dclk(timings->dclk,
+ timings->hblank = fb_get_hblank_by_dclk(timings->dclk,
timings->hactive);
timings->htotal = timings->hactive + timings->hblank;
timings->hfreq = timings->dclk/timings->htotal;
@@ -1156,29 +1156,29 @@ static void fb_timings_dclk(struct __fb_timings *timings)
* @info: pointer to fb_info
*
* DESCRIPTION:
- * Calculates video mode based on monitor specs using VESA GTF.
- * The GTF is best for VESA GTF compliant monitors but is
+ * Calculates video mode based on monitor specs using VESA GTF.
+ * The GTF is best for VESA GTF compliant monitors but is
* specifically formulated to work for older monitors as well.
*
- * If @flag==0, the function will attempt to maximize the
+ * If @flag==0, the function will attempt to maximize the
* refresh rate. Otherwise, it will calculate timings based on
- * the flag and accompanying value.
+ * the flag and accompanying value.
*
- * If FB_IGNOREMON bit is set in @flags, monitor specs will be
+ * If FB_IGNOREMON bit is set in @flags, monitor specs will be
* ignored and @var will be filled with the calculated timings.
*
* All calculations are based on the VESA GTF Spreadsheet
* available at VESA's public ftp (http://www.vesa.org).
- *
+ *
* NOTES:
* The timings generated by the GTF will be different from VESA
* DMT. It might be a good idea to keep a table of standard
* VESA modes as well. The GTF may also not work for some displays,
* such as, and especially, analog TV.
- *
+ *
* REQUIRES:
* A valid info->monspecs, otherwise 'safe numbers' will be used.
- */
+ */
int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_info *info)
{
struct __fb_timings *timings;
@@ -1191,7 +1191,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
if (!timings)
return -ENOMEM;
- /*
+ /*
* If monspecs are invalid, use values that are enough
* for 640x480@60
*/
@@ -1214,7 +1214,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
timings->hactive = var->xres;
timings->vactive = var->yres;
- if (var->vmode & FB_VMODE_INTERLACED) {
+ if (var->vmode & FB_VMODE_INTERLACED) {
timings->vactive /= 2;
interlace = 2;
}
@@ -1250,9 +1250,9 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
break;
default:
err = -EINVAL;
-
- }
-
+
+ }
+
if (err || (!(flags & FB_IGNOREMON) &&
(timings->vfreq < vfmin || timings->vfreq > vfmax ||
timings->hfreq < hfmin || timings->hfreq > hfmax ||
@@ -1269,7 +1269,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
var->upper_margin = (timings->vblank * interlace)/dscan -
(var->vsync_len + var->lower_margin);
}
-
+
kfree(timings);
return err;
}
@@ -1291,7 +1291,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var,
return -EINVAL;
}
#endif /* CONFIG_FB_MODE_HELPERS */
-
+
/*
* fb_validate_mode - validates var against monitor capabilities
* @var: pointer to fb_var_screeninfo
@@ -1309,7 +1309,7 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
u32 hfreq, vfreq, htotal, vtotal, pixclock;
u32 hfmin, hfmax, vfmin, vfmax, dclkmin, dclkmax;
- /*
+ /*
* If monspecs are invalid, use values that are enough
* for 640x480@60
*/
@@ -1333,10 +1333,10 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
if (!var->pixclock)
return -EINVAL;
pixclock = PICOS2KHZ(var->pixclock) * 1000;
-
- htotal = var->xres + var->right_margin + var->hsync_len +
+
+ htotal = var->xres + var->right_margin + var->hsync_len +
var->left_margin;
- vtotal = var->yres + var->lower_margin + var->vsync_len +
+ vtotal = var->yres + var->lower_margin + var->vsync_len +
var->upper_margin;
if (var->vmode & FB_VMODE_INTERLACED)
@@ -1349,7 +1349,7 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
vfreq = hfreq/vtotal;
- return (vfreq < vfmin || vfreq > vfmax ||
+ return (vfreq < vfmin || vfreq > vfmax ||
hfreq < hfmin || hfreq > hfmax ||
pixclock < dclkmin || pixclock > dclkmax) ?
-EINVAL : 0;
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index 583185fd7c94..eb6b88171538 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -34,7 +34,7 @@ static int fbsize;
* we try to make it something sane - 640x480-60 is sane
*/
-const struct fb_videomode geode_modedb[] __initdata = {
+static const struct fb_videomode geode_modedb[] __initdata = {
/* 640x480-60 */
{ NULL, 60, 640, 480, 39682, 48, 8, 25, 2, 88, 2,
FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
diff --git a/drivers/video/hpfb.c b/drivers/video/hpfb.c
index b18486ad8e17..2eb4fb159084 100644
--- a/drivers/video/hpfb.c
+++ b/drivers/video/hpfb.c
@@ -207,7 +207,8 @@ static struct fb_ops hpfb_ops = {
#define HPFB_FBOMSB 0x5d /* Frame buffer offset */
#define HPFB_FBOLSB 0x5f
-static int __init hpfb_init_one(unsigned long phys_base, unsigned long virt_base)
+static int __devinit hpfb_init_one(unsigned long phys_base,
+ unsigned long virt_base)
{
unsigned long fboff, fb_width, fb_height, fb_start;
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 1a7d7789d877..1d13dd099af8 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1476,7 +1476,7 @@ static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor)
struct i810fb_par *par = info->par;
u8 __iomem *mmio = par->mmio_start_virtual;
- if (!par->dev_flags & LOCKUP)
+ if (!(par->dev_flags & LOCKUP))
return -ENXIO;
if (cursor->image.width > 64 || cursor->image.height > 64)
diff --git a/drivers/video/igafb.c b/drivers/video/igafb.c
index b87ea21d3d78..3a81060137a2 100644
--- a/drivers/video/igafb.c
+++ b/drivers/video/igafb.c
@@ -400,6 +400,7 @@ int __init igafb_init(void)
info = kzalloc(size, GFP_ATOMIC);
if (!info) {
printk("igafb_init: can't alloc fb_info\n");
+ pci_dev_put(pdev);
return -ENOMEM;
}
@@ -409,12 +410,14 @@ int __init igafb_init(void)
if ((addr = pdev->resource[0].start) == 0) {
printk("igafb_init: no memory start\n");
kfree(info);
+ pci_dev_put(pdev);
return -ENXIO;
}
if ((info->screen_base = ioremap(addr, 1024*1024*2)) == 0) {
printk("igafb_init: can't remap %lx[2M]\n", addr);
kfree(info);
+ pci_dev_put(pdev);
return -ENXIO;
}
@@ -449,6 +452,7 @@ int __init igafb_init(void)
printk("igafb_init: can't remap %lx[4K]\n", igafb_fix.mmio_start);
iounmap((void *)info->screen_base);
kfree(info);
+ pci_dev_put(pdev);
return -ENXIO;
}
@@ -466,6 +470,7 @@ int __init igafb_init(void)
iounmap((void *)par->io_base);
iounmap(info->screen_base);
kfree(info);
+ pci_dev_put(pdev);
return -ENOMEM;
}
diff --git a/drivers/video/intelfb/intelfbhw.c b/drivers/video/intelfb/intelfbhw.c
index 5f6fb7d2c408..fa1fff553565 100644
--- a/drivers/video/intelfb/intelfbhw.c
+++ b/drivers/video/intelfb/intelfbhw.c
@@ -1971,7 +1971,7 @@ void intelfbhw_cursor_reset(struct intelfb_info *dinfo)
static irqreturn_t intelfbhw_irq(int irq, void *dev_id)
{
u16 tmp;
- struct intelfb_info *dinfo = (struct intelfb_info *)dev_id;
+ struct intelfb_info *dinfo = dev_id;
spin_lock(&dinfo->int_lock);
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index 4b6a99b5be0d..5246b0402d76 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -2066,40 +2066,49 @@ static struct fb_info *__devinit neo_alloc_fb_info(struct pci_dev *dev, const st
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2070:
- sprintf(info->fix.id, "MagicGraph 128");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 128");
break;
case FB_ACCEL_NEOMAGIC_NM2090:
- sprintf(info->fix.id, "MagicGraph 128V");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 128V");
break;
case FB_ACCEL_NEOMAGIC_NM2093:
- sprintf(info->fix.id, "MagicGraph 128ZV");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 128ZV");
break;
case FB_ACCEL_NEOMAGIC_NM2097:
- sprintf(info->fix.id, "MagicGraph 128ZV+");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 128ZV+");
break;
case FB_ACCEL_NEOMAGIC_NM2160:
- sprintf(info->fix.id, "MagicGraph 128XD");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 128XD");
break;
case FB_ACCEL_NEOMAGIC_NM2200:
- sprintf(info->fix.id, "MagicGraph 256AV");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 256AV");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2230:
- sprintf(info->fix.id, "MagicGraph 256AV+");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 256AV+");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2360:
- sprintf(info->fix.id, "MagicGraph 256ZX");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 256ZX");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2380:
- sprintf(info->fix.id, "MagicGraph 256XL+");
+ snprintf(info->fix.id, sizeof(info->fix.id),
+ "MagicGraph 256XL+");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 30e14eb1f51e..74517b1b26a6 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -849,9 +849,27 @@ static int nvidiafb_check_var(struct fb_var_screeninfo *var,
if (!mode_valid && info->monspecs.modedb_len)
return -EINVAL;
+ /*
+ * If we're on a flat panel, check if the mode is outside of the
+ * panel dimensions. If so, cap it and try for the next best mode
+ * before bailing out.
+ */
if (par->fpWidth && par->fpHeight && (par->fpWidth < var->xres ||
- par->fpHeight < var->yres))
- return -EINVAL;
+ par->fpHeight < var->yres)) {
+ const struct fb_videomode *mode;
+
+ var->xres = par->fpWidth;
+ var->yres = par->fpHeight;
+
+ mode = fb_find_best_mode(var, &info->modelist);
+ if (!mode) {
+ printk(KERN_ERR PFX "mode out of range of flat "
+ "panel dimensions\n");
+ return -EINVAL;
+ }
+
+ fb_videomode_to_var(var, mode);
+ }
if (var->yres_virtual < var->yres)
var->yres_virtual = var->yres;
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index 5591dfb22b18..30181b593829 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -1159,6 +1159,11 @@ static void pm2fb_imageblit(struct fb_info *info, const struct fb_image *image)
u32 fgx, bgx;
const u32 *src = (const u32 *)image->data;
u32 xres = (info->var.xres + 31) & ~31;
+ int raster_mode = 1; /* invert bits */
+
+#ifdef __LITTLE_ENDIAN
+ raster_mode |= 3 << 7; /* reverse byte order */
+#endif
if (info->state != FBINFO_STATE_RUNNING)
return;
@@ -1208,9 +1213,8 @@ static void pm2fb_imageblit(struct fb_info *info, const struct fb_image *image)
pm2_WR(par, PM2R_RENDER,
PM2F_RENDER_RECTANGLE |
PM2F_INCREASE_X | PM2F_INCREASE_Y);
- /* BitMapPackEachScanline & invert bits and byte order*/
- /* force background */
- pm2_WR(par, PM2R_RASTERIZER_MODE, (1 << 9) | 1 | (3 << 7));
+ /* BitMapPackEachScanline */
+ pm2_WR(par, PM2R_RASTERIZER_MODE, raster_mode | (1 << 9));
pm2_WR(par, PM2R_CONSTANT_COLOR, fgx);
pm2_WR(par, PM2R_RENDER,
PM2F_RENDER_RECTANGLE |
@@ -1224,8 +1228,7 @@ static void pm2fb_imageblit(struct fb_info *info, const struct fb_image *image)
PM2F_RENDER_RECTANGLE |
PM2F_RENDER_FASTFILL |
PM2F_INCREASE_X | PM2F_INCREASE_Y);
- /* invert bits and byte order*/
- pm2_WR(par, PM2R_RASTERIZER_MODE, 1 | (3 << 7));
+ pm2_WR(par, PM2R_RASTERIZER_MODE, raster_mode);
pm2_WR(par, PM2R_FB_BLOCK_COLOR, fgx);
pm2_WR(par, PM2R_RENDER,
PM2F_RENDER_RECTANGLE |
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index 070659992c18..5dba8cdd0517 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -1227,7 +1227,7 @@ static struct fb_ops pm3fb_ops = {
/* mmio register are already mapped when this function is called */
/* the pm3fb_fix.smem_start is also set */
-static unsigned long pm3fb_size_memory(struct pm3_par *par)
+static unsigned long __devinit pm3fb_size_memory(struct pm3_par *par)
{
unsigned long memsize = 0;
unsigned long tempBypass, i, temp1, temp2;
diff --git a/drivers/video/pmag-aa-fb.c b/drivers/video/pmag-aa-fb.c
index a864438b6008..6515ec11c16b 100644
--- a/drivers/video/pmag-aa-fb.c
+++ b/drivers/video/pmag-aa-fb.c
@@ -150,7 +150,7 @@ static int aafbcon_set_font(struct display *disp, int width, int height)
{
struct aafb_info *info = (struct aafb_info *)disp->fb_info;
struct aafb_cursor *c = &info->cursor;
- u8 fgc = ~attr_bgcol_ec(disp, disp->conp);
+ u8 fgc = ~attr_bgcol_ec(disp, disp->conp, &info->info);
if (width > 64 || height > 64 || width < 0 || height < 0)
return -EINVAL;
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 044a423a72cb..dc3af1c78c56 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -57,8 +57,6 @@
#define GPU_ALIGN_UP(x) _ALIGN_UP((x), 64)
#define GPU_MAX_LINE_LENGTH (65536 - 64)
-#define PS3FB_FULL_MODE_BIT 0x80
-
#define GPU_INTR_STATUS_VSYNC_0 0 /* vsync on head A */
#define GPU_INTR_STATUS_VSYNC_1 1 /* vsync on head B */
#define GPU_INTR_STATUS_FLIP_0 3 /* flip head A */
@@ -118,8 +116,6 @@ struct ps3fb_priv {
unsigned int irq_no;
u64 context_handle, memory_handle;
- void *xdr_ea;
- size_t xdr_size;
struct gpu_driver_info *dinfo;
u64 vblank_count; /* frame count */
@@ -136,42 +132,19 @@ static struct ps3fb_priv ps3fb;
struct ps3fb_par {
u32 pseudo_palette[16];
int mode_id, new_mode_id;
- int res_index;
unsigned int num_frames; /* num of frame buffers */
unsigned int width;
unsigned int height;
- unsigned long full_offset; /* start of fullscreen DDR fb */
- unsigned long fb_offset; /* start of actual DDR fb */
- unsigned long pan_offset;
+ unsigned int ddr_line_length;
+ unsigned int ddr_frame_size;
+ unsigned int xdr_frame_size;
+ unsigned int full_offset; /* start of fullscreen DDR fb */
+ unsigned int fb_offset; /* start of actual DDR fb */
+ unsigned int pan_offset;
};
-struct ps3fb_res_table {
- u32 xres;
- u32 yres;
- u32 xoff;
- u32 yoff;
- u32 type;
-};
-#define PS3FB_RES_FULL 1
-static const struct ps3fb_res_table ps3fb_res[] = {
- /* res_x,y margin_x,y full */
- { 720, 480, 72, 48 , 0},
- { 720, 576, 72, 58 , 0},
- { 1280, 720, 78, 38 , 0},
- { 1920, 1080, 116, 58 , 0},
- /* full mode */
- { 720, 480, 0, 0 , PS3FB_RES_FULL},
- { 720, 576, 0, 0 , PS3FB_RES_FULL},
- { 1280, 720, 0, 0 , PS3FB_RES_FULL},
- { 1920, 1080, 0, 0 , PS3FB_RES_FULL},
- /* vesa: normally full mode */
- { 1280, 768, 0, 0 , 0},
- { 1280, 1024, 0, 0 , 0},
- { 1920, 1200, 0, 0 , 0},
- { 0, 0, 0, 0 , 0} };
-
-/* default resolution */
-#define GPU_RES_INDEX 0 /* 720 x 480 */
+
+#define FIRST_NATIVE_MODE_INDEX 10
static const struct fb_videomode ps3fb_modedb[] = {
/* 60 Hz broadcast modes (modes "1" to "5") */
@@ -211,7 +184,7 @@ static const struct fb_videomode ps3fb_modedb[] = {
"720p", 50, 1124, 644, 13468, 298, 478, 57, 44, 80, 5,
FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED
}, {
- /* 1080 */
+ /* 1080i */
"1080i", 50, 1688, 964, 13468, 264, 600, 94, 62, 88, 5,
FB_SYNC_BROADCAST, FB_VMODE_INTERLACED
}, {
@@ -220,24 +193,7 @@ static const struct fb_videomode ps3fb_modedb[] = {
FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED
},
- /* VESA modes (modes "11" to "13") */
- {
- /* WXGA */
- "wxga", 60, 1280, 768, 12924, 160, 24, 29, 3, 136, 6,
- 0, FB_VMODE_NONINTERLACED,
- FB_MODE_IS_VESA
- }, {
- /* SXGA */
- "sxga", 60, 1280, 1024, 9259, 248, 48, 38, 1, 112, 3,
- FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED,
- FB_MODE_IS_VESA
- }, {
- /* WUXGA */
- "wuxga", 60, 1920, 1200, 6494, 80, 48, 26, 3, 32, 6,
- FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED,
- FB_MODE_IS_VESA
- },
-
+ [FIRST_NATIVE_MODE_INDEX] =
/* 60 Hz broadcast modes (full resolution versions of modes "1" to "5") */
{
/* 480if */
@@ -276,12 +232,30 @@ static const struct fb_videomode ps3fb_modedb[] = {
FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED
}, {
/* 1080if */
- "1080f", 50, 1920, 1080, 13468, 148, 484, 36, 4, 88, 5,
+ "1080if", 50, 1920, 1080, 13468, 148, 484, 36, 4, 88, 5,
FB_SYNC_BROADCAST, FB_VMODE_INTERLACED
}, {
/* 1080pf */
"1080pf", 50, 1920, 1080, 6734, 148, 484, 36, 4, 88, 5,
FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED
+ },
+
+ /* VESA modes (modes "11" to "13") */
+ {
+ /* WXGA */
+ "wxga", 60, 1280, 768, 12924, 160, 24, 29, 3, 136, 6,
+ 0, FB_VMODE_NONINTERLACED,
+ FB_MODE_IS_VESA
+ }, {
+ /* SXGA */
+ "sxga", 60, 1280, 1024, 9259, 248, 48, 38, 1, 112, 3,
+ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED,
+ FB_MODE_IS_VESA
+ }, {
+ /* WUXGA */
+ "wuxga", 60, 1920, 1200, 6494, 80, 48, 26, 3, 32, 6,
+ FB_SYNC_HOR_HIGH_ACT, FB_VMODE_NONINTERLACED,
+ FB_MODE_IS_VESA
}
};
@@ -289,110 +263,188 @@ static const struct fb_videomode ps3fb_modedb[] = {
#define HEAD_A
#define HEAD_B
-#define X_OFF(i) (ps3fb_res[i].xoff) /* left/right margin (pixel) */
-#define Y_OFF(i) (ps3fb_res[i].yoff) /* top/bottom margin (pixel) */
-#define WIDTH(i) (ps3fb_res[i].xres) /* width of FB */
-#define HEIGHT(i) (ps3fb_res[i].yres) /* height of FB */
#define BPP 4 /* number of bytes per pixel */
-/* Start of the virtual frame buffer (relative to fullscreen ) */
-#define VP_OFF(i) ((WIDTH(i) * Y_OFF(i) + X_OFF(i)) * BPP)
-
static int ps3fb_mode;
module_param(ps3fb_mode, int, 0);
static char *mode_option __devinitdata;
-static int ps3fb_get_res_table(u32 xres, u32 yres, int mode)
+static int ps3fb_cmp_mode(const struct fb_videomode *vmode,
+ const struct fb_var_screeninfo *var)
{
- int full_mode;
- unsigned int i;
- u32 x, y, f;
-
- full_mode = (mode & PS3FB_FULL_MODE_BIT) ? PS3FB_RES_FULL : 0;
- for (i = 0;; i++) {
- x = ps3fb_res[i].xres;
- y = ps3fb_res[i].yres;
- f = ps3fb_res[i].type;
-
- if (!x) {
- pr_debug("ERROR: ps3fb_get_res_table()\n");
- return -1;
- }
+ long xres, yres, left_margin, right_margin, upper_margin, lower_margin;
+ long dx, dy;
+
+ /* maximum values */
+ if (var->xres > vmode->xres || var->yres > vmode->yres ||
+ var->pixclock > vmode->pixclock ||
+ var->hsync_len > vmode->hsync_len ||
+ var->vsync_len > vmode->vsync_len)
+ return -1;
- if (full_mode == PS3FB_RES_FULL && f != PS3FB_RES_FULL)
- continue;
+ /* progressive/interlaced must match */
+ if ((var->vmode & FB_VMODE_MASK) != vmode->vmode)
+ return -1;
- if (x == xres && (yres == 0 || y == yres))
- break;
+ /* minimum resolution */
+ xres = max(var->xres, 1U);
+ yres = max(var->yres, 1U);
+
+ /* minimum margins */
+ left_margin = max(var->left_margin, vmode->left_margin);
+ right_margin = max(var->right_margin, vmode->right_margin);
+ upper_margin = max(var->upper_margin, vmode->upper_margin);
+ lower_margin = max(var->lower_margin, vmode->lower_margin);
+
+ /* resolution + margins may not exceed native parameters */
+ dx = ((long)vmode->left_margin + (long)vmode->xres +
+ (long)vmode->right_margin) -
+ (left_margin + xres + right_margin);
+ if (dx < 0)
+ return -1;
- x = x - 2 * ps3fb_res[i].xoff;
- y = y - 2 * ps3fb_res[i].yoff;
- if (x == xres && (yres == 0 || y == yres))
- break;
+ dy = ((long)vmode->upper_margin + (long)vmode->yres +
+ (long)vmode->lower_margin) -
+ (upper_margin + yres + lower_margin);
+ if (dy < 0)
+ return -1;
+
+ /* exact match */
+ if (!dx && !dy)
+ return 0;
+
+ /* resolution difference */
+ return (vmode->xres - xres) * (vmode->yres - yres);
+}
+
+static const struct fb_videomode *ps3fb_native_vmode(enum ps3av_mode_num id)
+{
+ return &ps3fb_modedb[FIRST_NATIVE_MODE_INDEX + id - 1];
+}
+
+static const struct fb_videomode *ps3fb_vmode(int id)
+{
+ u32 mode = id & PS3AV_MODE_MASK;
+
+ if (mode < PS3AV_MODE_480I || mode > PS3AV_MODE_WUXGA)
+ return NULL;
+
+ if (mode <= PS3AV_MODE_1080P50 && !(id & PS3AV_MODE_FULL)) {
+ /* Non-fullscreen broadcast mode */
+ return &ps3fb_modedb[mode - 1];
}
- return i;
+
+ return ps3fb_native_vmode(mode);
}
-static unsigned int ps3fb_find_mode(const struct fb_var_screeninfo *var,
+static unsigned int ps3fb_find_mode(struct fb_var_screeninfo *var,
u32 *ddr_line_length, u32 *xdr_line_length)
{
- unsigned int i, mode;
-
- for (i = 0; i < ARRAY_SIZE(ps3fb_modedb); i++)
- if (var->xres == ps3fb_modedb[i].xres &&
- var->yres == ps3fb_modedb[i].yres &&
- var->pixclock == ps3fb_modedb[i].pixclock &&
- var->hsync_len == ps3fb_modedb[i].hsync_len &&
- var->vsync_len == ps3fb_modedb[i].vsync_len &&
- var->left_margin == ps3fb_modedb[i].left_margin &&
- var->right_margin == ps3fb_modedb[i].right_margin &&
- var->upper_margin == ps3fb_modedb[i].upper_margin &&
- var->lower_margin == ps3fb_modedb[i].lower_margin &&
- var->sync == ps3fb_modedb[i].sync &&
- (var->vmode & FB_VMODE_MASK) == ps3fb_modedb[i].vmode)
- goto found;
-
- pr_debug("ps3fb_find_mode: mode not found\n");
- return 0;
+ unsigned int id, best_id;
+ int diff, best_diff;
+ const struct fb_videomode *vmode;
+ long gap;
+
+ best_id = 0;
+ best_diff = INT_MAX;
+ pr_debug("%s: wanted %u [%u] %u x %u [%u] %u\n", __func__,
+ var->left_margin, var->xres, var->right_margin,
+ var->upper_margin, var->yres, var->lower_margin);
+ for (id = PS3AV_MODE_480I; id <= PS3AV_MODE_WUXGA; id++) {
+ vmode = ps3fb_native_vmode(id);
+ diff = ps3fb_cmp_mode(vmode, var);
+ pr_debug("%s: mode %u: %u [%u] %u x %u [%u] %u: diff = %d\n",
+ __func__, id, vmode->left_margin, vmode->xres,
+ vmode->right_margin, vmode->upper_margin,
+ vmode->yres, vmode->lower_margin, diff);
+ if (diff < 0)
+ continue;
+ if (diff < best_diff) {
+ best_id = id;
+ if (!diff)
+ break;
+ best_diff = diff;
+ }
+ }
-found:
- /* Cropped broadcast modes use the full line length */
- *ddr_line_length = ps3fb_modedb[i < 10 ? i + 13 : i].xres * BPP;
+ if (!best_id) {
+ pr_debug("%s: no suitable mode found\n", __func__);
+ return 0;
+ }
- if (ps3_compare_firmware_version(1, 9, 0) >= 0) {
- *xdr_line_length = GPU_ALIGN_UP(max(var->xres,
- var->xres_virtual) * BPP);
- if (*xdr_line_length > GPU_MAX_LINE_LENGTH)
- *xdr_line_length = GPU_MAX_LINE_LENGTH;
- } else
- *xdr_line_length = *ddr_line_length;
+ id = best_id;
+ vmode = ps3fb_native_vmode(id);
- /* Full broadcast modes have the full mode bit set */
- mode = i > 12 ? (i - 12) | PS3FB_FULL_MODE_BIT : i + 1;
+ *ddr_line_length = vmode->xres * BPP;
- pr_debug("ps3fb_find_mode: mode %u\n", mode);
+ /* minimum resolution */
+ if (!var->xres)
+ var->xres = 1;
+ if (!var->yres)
+ var->yres = 1;
- return mode;
-}
+ /* minimum virtual resolution */
+ if (var->xres_virtual < var->xres)
+ var->xres_virtual = var->xres;
+ if (var->yres_virtual < var->yres)
+ var->yres_virtual = var->yres;
-static const struct fb_videomode *ps3fb_default_mode(int id)
-{
- u32 mode = id & PS3AV_MODE_MASK;
- u32 flags;
+ /* minimum margins */
+ if (var->left_margin < vmode->left_margin)
+ var->left_margin = vmode->left_margin;
+ if (var->right_margin < vmode->right_margin)
+ var->right_margin = vmode->right_margin;
+ if (var->upper_margin < vmode->upper_margin)
+ var->upper_margin = vmode->upper_margin;
+ if (var->lower_margin < vmode->lower_margin)
+ var->lower_margin = vmode->lower_margin;
+
+ /* extra margins */
+ gap = ((long)vmode->left_margin + (long)vmode->xres +
+ (long)vmode->right_margin) -
+ ((long)var->left_margin + (long)var->xres +
+ (long)var->right_margin);
+ if (gap > 0) {
+ var->left_margin += gap/2;
+ var->right_margin += (gap+1)/2;
+ pr_debug("%s: rounded up H to %u [%u] %u\n", __func__,
+ var->left_margin, var->xres, var->right_margin);
+ }
- if (mode < 1 || mode > 13)
- return NULL;
+ gap = ((long)vmode->upper_margin + (long)vmode->yres +
+ (long)vmode->lower_margin) -
+ ((long)var->upper_margin + (long)var->yres +
+ (long)var->lower_margin);
+ if (gap > 0) {
+ var->upper_margin += gap/2;
+ var->lower_margin += (gap+1)/2;
+ pr_debug("%s: rounded up V to %u [%u] %u\n", __func__,
+ var->upper_margin, var->yres, var->lower_margin);
+ }
+
+ /* fixed fields */
+ var->pixclock = vmode->pixclock;
+ var->hsync_len = vmode->hsync_len;
+ var->vsync_len = vmode->vsync_len;
+ var->sync = vmode->sync;
- flags = id & ~PS3AV_MODE_MASK;
+ if (ps3_compare_firmware_version(1, 9, 0) >= 0) {
+ *xdr_line_length = GPU_ALIGN_UP(var->xres_virtual * BPP);
+ if (*xdr_line_length > GPU_MAX_LINE_LENGTH)
+ *xdr_line_length = GPU_MAX_LINE_LENGTH;
+ } else
+ *xdr_line_length = *ddr_line_length;
- if (mode <= 10 && flags & PS3FB_FULL_MODE_BIT) {
- /* Full broadcast mode */
- return &ps3fb_modedb[mode + 12];
+ if (vmode->sync & FB_SYNC_BROADCAST) {
+ /* Full broadcast modes have the full mode bit set */
+ if (vmode->xres == var->xres && vmode->yres == var->yres)
+ id |= PS3AV_MODE_FULL;
}
- return &ps3fb_modedb[mode - 1];
+ pr_debug("%s: mode %u\n", __func__, id);
+ return id;
}
static void ps3fb_sync_image(struct device *dev, u64 frame_offset,
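Annotation (not part of the patch), a concrete run of the new matching metric. Assume only xres/yres are filled in (timings, sync lengths and margins left zero, progressive scan) and a 1280x720 request:

	/*
	 * - against the native 720p entry:  dx == dy == 0, so diff == 0
	 *   (exact match; ps3fb_find_mode() stops searching right there)
	 * - against the native 1080p entry: diff == (1920 - 1280) * (1080 - 720)
	 *				     == 640 * 360 == 230400
	 * Modes with a different interlace setting, or with any of
	 * xres/yres/pixclock/hsync_len/vsync_len smaller than requested,
	 * return -1 from ps3fb_cmp_mode() and are skipped outright.
	 */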
@@ -439,8 +491,7 @@ static void ps3fb_sync_image(struct device *dev, u64 frame_offset,
static int ps3fb_sync(struct fb_info *info, u32 frame)
{
struct ps3fb_par *par = info->par;
- int i, error = 0;
- u32 ddr_line_length, xdr_line_length;
+ int error = 0;
u64 ddr_base, xdr_base;
if (frame > par->num_frames - 1) {
@@ -450,16 +501,13 @@ static int ps3fb_sync(struct fb_info *info, u32 frame)
goto out;
}
- i = par->res_index;
- xdr_line_length = info->fix.line_length;
- ddr_line_length = ps3fb_res[i].xres * BPP;
- xdr_base = frame * info->var.yres_virtual * xdr_line_length;
- ddr_base = frame * ps3fb_res[i].yres * ddr_line_length;
+ xdr_base = frame * par->xdr_frame_size;
+ ddr_base = frame * par->ddr_frame_size;
ps3fb_sync_image(info->device, ddr_base + par->full_offset,
ddr_base + par->fb_offset, xdr_base + par->pan_offset,
- par->width, par->height, ddr_line_length,
- xdr_line_length);
+ par->width, par->height, par->ddr_line_length,
+ info->fix.line_length);
out:
return error;
@@ -498,22 +546,11 @@ static int ps3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
u32 xdr_line_length, ddr_line_length;
int mode;
- dev_dbg(info->device, "var->xres:%u info->var.xres:%u\n", var->xres,
- info->var.xres);
- dev_dbg(info->device, "var->yres:%u info->var.yres:%u\n", var->yres,
- info->var.yres);
-
- /* FIXME For now we do exact matches only */
mode = ps3fb_find_mode(var, &ddr_line_length, &xdr_line_length);
if (!mode)
return -EINVAL;
/* Virtual screen */
- if (var->xres_virtual < var->xres)
- var->xres_virtual = var->xres;
- if (var->yres_virtual < var->yres)
- var->yres_virtual = var->yres;
-
if (var->xres_virtual > xdr_line_length / BPP) {
dev_dbg(info->device,
"Horizontal virtual screen size too large\n");
@@ -559,7 +596,7 @@ static int ps3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
}
/* Memory limit */
- if (var->yres_virtual * xdr_line_length > ps3fb.xdr_size) {
+ if (var->yres_virtual * xdr_line_length > info->fix.smem_len) {
dev_dbg(info->device, "Not enough memory\n");
return -ENOMEM;
}
@@ -578,39 +615,38 @@ static int ps3fb_set_par(struct fb_info *info)
{
struct ps3fb_par *par = info->par;
unsigned int mode, ddr_line_length, xdr_line_length, lines, maxlines;
- int i;
- unsigned long offset;
+ unsigned int ddr_xoff, ddr_yoff, offset;
+ const struct fb_videomode *vmode;
u64 dst;
- dev_dbg(info->device, "xres:%d xv:%d yres:%d yv:%d clock:%d\n",
- info->var.xres, info->var.xres_virtual,
- info->var.yres, info->var.yres_virtual, info->var.pixclock);
-
mode = ps3fb_find_mode(&info->var, &ddr_line_length, &xdr_line_length);
if (!mode)
return -EINVAL;
- i = ps3fb_get_res_table(info->var.xres, info->var.yres, mode);
- par->res_index = i;
+ vmode = ps3fb_native_vmode(mode & PS3AV_MODE_MASK);
- info->fix.smem_start = virt_to_abs(ps3fb.xdr_ea);
- info->fix.smem_len = ps3fb.xdr_size;
info->fix.xpanstep = info->var.xres_virtual > info->var.xres ? 1 : 0;
info->fix.ypanstep = info->var.yres_virtual > info->var.yres ? 1 : 0;
info->fix.line_length = xdr_line_length;
- info->screen_base = (char __iomem *)ps3fb.xdr_ea;
+ par->ddr_line_length = ddr_line_length;
+ par->ddr_frame_size = vmode->yres * ddr_line_length;
+ par->xdr_frame_size = info->var.yres_virtual * xdr_line_length;
- par->num_frames = ps3fb.xdr_size /
- max(ps3fb_res[i].yres * ddr_line_length,
- info->var.yres_virtual * xdr_line_length);
+ par->num_frames = info->fix.smem_len /
+ max(par->ddr_frame_size, par->xdr_frame_size);
/* Keep the special bits we cannot set using fb_var_screeninfo */
par->new_mode_id = (par->new_mode_id & ~PS3AV_MODE_MASK) | mode;
par->width = info->var.xres;
par->height = info->var.yres;
- offset = VP_OFF(i);
+
+ /* Start of the virtual frame buffer (relative to fullscreen) */
+ ddr_xoff = info->var.left_margin - vmode->left_margin;
+ ddr_yoff = info->var.upper_margin - vmode->upper_margin;
+ offset = ddr_yoff * ddr_line_length + ddr_xoff * BPP;
+
par->fb_offset = GPU_ALIGN_UP(offset);
par->full_offset = par->fb_offset - offset;
par->pan_offset = info->var.yoffset * xdr_line_length +
@@ -625,16 +661,16 @@ static int ps3fb_set_par(struct fb_info *info)
}
/* Clear XDR frame buffer memory */
- memset(ps3fb.xdr_ea, 0, ps3fb.xdr_size);
+ memset((void __force *)info->screen_base, 0, info->fix.smem_len);
/* Clear DDR frame buffer memory */
- lines = ps3fb_res[i].yres * par->num_frames;
+ lines = vmode->yres * par->num_frames;
if (par->full_offset)
lines++;
- maxlines = ps3fb.xdr_size / ddr_line_length;
+ maxlines = info->fix.smem_len / ddr_line_length;
for (dst = 0; lines; dst += maxlines * ddr_line_length) {
unsigned int l = min(lines, maxlines);
- ps3fb_sync_image(info->device, 0, dst, 0, ps3fb_res[i].xres, l,
+ ps3fb_sync_image(info->device, 0, dst, 0, vmode->xres, l,
ddr_line_length, ddr_line_length);
lines -= l;
}
@@ -797,7 +833,7 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
case PS3FB_IOCTL_SETMODE:
{
struct ps3fb_par *par = info->par;
- const struct fb_videomode *mode;
+ const struct fb_videomode *vmode;
struct fb_var_screeninfo var;
if (copy_from_user(&val, argp, sizeof(val)))
@@ -810,10 +846,10 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
}
dev_dbg(info->device, "PS3FB_IOCTL_SETMODE:%x\n", val);
retval = -EINVAL;
- mode = ps3fb_default_mode(val);
- if (mode) {
+ vmode = ps3fb_vmode(val);
+ if (vmode) {
var = info->var;
- fb_videomode_to_var(&var, mode);
+ fb_videomode_to_var(&var, vmode);
acquire_console_sem();
info->flags |= FBINFO_MISC_USEREVENT;
/* Force, in case only special bits changed */
@@ -975,10 +1011,9 @@ static int ps3fb_xdr_settings(u64 xdr_lpar, struct device *dev)
__func__, status);
return -ENXIO;
}
- dev_dbg(dev,
- "video:%p xdr_ea:%p ioif:%lx lpar:%lx phys:%lx size:%lx\n",
- ps3fb_videomemory.address, ps3fb.xdr_ea, GPU_IOIF, xdr_lpar,
- virt_to_abs(ps3fb.xdr_ea), ps3fb_videomemory.size);
+ dev_dbg(dev, "video:%p ioif:%lx lpar:%lx size:%lx\n",
+ ps3fb_videomemory.address, GPU_IOIF, xdr_lpar,
+ ps3fb_videomemory.size);
status = lv1_gpu_context_attribute(ps3fb.context_handle,
L1GPU_CONTEXT_ATTRIBUTE_FB_SETUP,
@@ -1055,14 +1090,14 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
struct fb_info *info;
struct ps3fb_par *par;
int retval = -ENOMEM;
- u32 xres, yres;
u64 ddr_lpar = 0;
u64 lpar_dma_control = 0;
u64 lpar_driver_info = 0;
u64 lpar_reports = 0;
u64 lpar_reports_size = 0;
u64 xdr_lpar;
- int status, res_index;
+ void *fb_start;
+ int status;
struct task_struct *task;
unsigned long max_ps3fb_size;
@@ -1080,14 +1115,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
if (!ps3fb_mode)
ps3fb_mode = ps3av_get_mode();
- dev_dbg(&dev->core, "ps3av_mode:%d\n", ps3fb_mode);
-
- if (ps3fb_mode > 0 &&
- !ps3av_video_mode2res(ps3fb_mode, &xres, &yres)) {
- res_index = ps3fb_get_res_table(xres, yres, ps3fb_mode);
- dev_dbg(&dev->core, "res_index:%d\n", res_index);
- } else
- res_index = GPU_RES_INDEX;
+ dev_dbg(&dev->core, "ps3fb_mode: %d\n", ps3fb_mode);
atomic_set(&ps3fb.f_count, -1); /* fbcon opens ps3fb */
atomic_set(&ps3fb.ext_flip, 0); /* for flip with vsync */
@@ -1124,7 +1152,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
}
/* vsync interrupt */
- ps3fb.dinfo = ioremap(lpar_driver_info, 128 * 1024);
+ ps3fb.dinfo = (void __force *)ioremap(lpar_driver_info, 128 * 1024);
if (!ps3fb.dinfo) {
dev_err(&dev->core, "%s: ioremap failed\n", __func__);
goto err_gpu_context_free;
@@ -1134,22 +1162,10 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
if (retval)
goto err_iounmap_dinfo;
- /* XDR frame buffer */
- ps3fb.xdr_ea = ps3fb_videomemory.address;
- xdr_lpar = ps3_mm_phys_to_lpar(__pa(ps3fb.xdr_ea));
-
/* Clear memory to prevent kernel info leakage into userspace */
- memset(ps3fb.xdr_ea, 0, ps3fb_videomemory.size);
-
- /*
- * The GPU command buffer is at the start of video memory
- * As we don't use the full command buffer, we can put the actual
- * frame buffer at offset GPU_FB_START and save some precious XDR
- * memory
- */
- ps3fb.xdr_ea += GPU_FB_START;
- ps3fb.xdr_size = ps3fb_videomemory.size - GPU_FB_START;
+ memset(ps3fb_videomemory.address, 0, ps3fb_videomemory.size);
+ xdr_lpar = ps3_mm_phys_to_lpar(__pa(ps3fb_videomemory.address));
retval = ps3fb_xdr_settings(xdr_lpar, &dev->core);
if (retval)
goto err_free_irq;
@@ -1161,15 +1177,22 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
par = info->par;
par->mode_id = ~ps3fb_mode; /* != ps3fb_mode, to trigger change */
par->new_mode_id = ps3fb_mode;
- par->res_index = res_index;
par->num_frames = 1;
- info->screen_base = (char __iomem *)ps3fb.xdr_ea;
info->fbops = &ps3fb_ops;
-
info->fix = ps3fb_fix;
- info->fix.smem_start = virt_to_abs(ps3fb.xdr_ea);
- info->fix.smem_len = ps3fb.xdr_size;
+
+ /*
+ * The GPU command buffer is at the start of video memory
+ * As we don't use the full command buffer, we can put the actual
+ * frame buffer at offset GPU_FB_START and save some precious XDR
+ * memory
+ */
+ fb_start = ps3fb_videomemory.address + GPU_FB_START;
+ info->screen_base = (char __force __iomem *)fb_start;
+ info->fix.smem_start = virt_to_abs(fb_start);
+ info->fix.smem_len = ps3fb_videomemory.size - GPU_FB_START;
+
info->pseudo_palette = par->pseudo_palette;
info->flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
@@ -1180,7 +1203,7 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
if (!fb_find_mode(&info->var, info, mode_option, ps3fb_modedb,
ARRAY_SIZE(ps3fb_modedb),
- ps3fb_default_mode(par->new_mode_id), 32)) {
+ ps3fb_vmode(par->new_mode_id), 32)) {
retval = -EINVAL;
goto err_fb_dealloc;
}
@@ -1194,9 +1217,9 @@ static int __devinit ps3fb_probe(struct ps3_system_bus_device *dev)
dev->core.driver_data = info;
- dev_info(info->device, "%s %s, using %lu KiB of video memory\n",
+ dev_info(info->device, "%s %s, using %u KiB of video memory\n",
dev_driver_string(info->dev), info->dev->bus_id,
- ps3fb.xdr_size >> 10);
+ info->fix.smem_len >> 10);
task = kthread_run(ps3fbd, info, DEVICE_NAME);
if (IS_ERR(task)) {
@@ -1219,7 +1242,7 @@ err_free_irq:
free_irq(ps3fb.irq_no, &dev->core);
ps3_irq_plug_destroy(ps3fb.irq_no);
err_iounmap_dinfo:
- iounmap((u8 __iomem *)ps3fb.dinfo);
+ iounmap((u8 __force __iomem *)ps3fb.dinfo);
err_gpu_context_free:
lv1_gpu_context_free(ps3fb.context_handle);
err_gpu_memory_free:
@@ -1254,7 +1277,7 @@ static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
framebuffer_release(info);
info = dev->core.driver_data = NULL;
}
- iounmap((u8 __iomem *)ps3fb.dinfo);
+ iounmap((u8 __force __iomem *)ps3fb.dinfo);
status = lv1_gpu_context_free(ps3fb.context_handle);
if (status)
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index b3c31d9dc591..71fa6edb5c47 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -110,6 +110,11 @@ static int debug = 0;
/* useful functions */
+static int is_s3c2412(struct s3c2410fb_info *fbi)
+{
+ return (fbi->drv_type == DRV_S3C2412);
+}
+
/* s3c2410fb_set_lcdaddr
*
* initialise lcd controller address pointers
@@ -501,7 +506,7 @@ static void schedule_palette_update(struct s3c2410fb_info *fbi,
{
unsigned long flags;
unsigned long irqen;
- void __iomem *regs = fbi->io;
+ void __iomem *irq_base = fbi->irq_base;
local_irq_save(flags);
@@ -511,9 +516,9 @@ static void schedule_palette_update(struct s3c2410fb_info *fbi,
fbi->palette_ready = 1;
/* enable IRQ */
- irqen = readl(regs + S3C2410_LCDINTMSK);
+ irqen = readl(irq_base + S3C24XX_LCDINTMSK);
irqen &= ~S3C2410_LCDINT_FRSYNC;
- writel(irqen, regs + S3C2410_LCDINTMSK);
+ writel(irqen, irq_base + S3C24XX_LCDINTMSK);
}
local_irq_restore(flags);
@@ -594,15 +599,17 @@ static int s3c2410fb_setcolreg(unsigned regno,
static int s3c2410fb_blank(int blank_mode, struct fb_info *info)
{
struct s3c2410fb_info *fbi = info->par;
- void __iomem *regs = fbi->io;
+ void __iomem *tpal_reg = fbi->io;
dprintk("blank(mode=%d, info=%p)\n", blank_mode, info);
+ tpal_reg += is_s3c2412(fbi) ? S3C2412_TPAL : S3C2410_TPAL;
+
if (blank_mode == FB_BLANK_UNBLANK)
- writel(0x0, regs + S3C2410_TPAL);
+ writel(0x0, tpal_reg);
else {
dprintk("setting TPAL to output 0x000000\n");
- writel(S3C2410_TPAL_EN, regs + S3C2410_TPAL);
+ writel(S3C2410_TPAL_EN, tpal_reg);
}
return 0;
@@ -663,7 +670,7 @@ static int __init s3c2410fb_map_video_memory(struct fb_info *info)
dma_addr_t map_dma;
unsigned map_size = PAGE_ALIGN(info->fix.smem_len);
- dprintk("map_video_memory(fbi=%p)\n", fbi);
+ dprintk("map_video_memory(fbi=%p) map_size %u\n", fbi, map_size);
info->screen_base = dma_alloc_writecombine(fbi->dev, map_size,
&map_dma, GFP_KERNEL);
@@ -672,7 +679,7 @@ static int __init s3c2410fb_map_video_memory(struct fb_info *info)
/* prevent initial garbage on screen */
dprintk("map_video_memory: clear %p:%08x\n",
info->screen_base, map_size);
- memset(info->screen_base, 0xf0, map_size);
+ memset(info->screen_base, 0x00, map_size);
info->fix.smem_start = map_dma;
@@ -709,6 +716,16 @@ static int s3c2410fb_init_registers(struct fb_info *info)
struct s3c2410fb_mach_info *mach_info = fbi->dev->platform_data;
unsigned long flags;
void __iomem *regs = fbi->io;
+ void __iomem *tpal;
+ void __iomem *lpcsel;
+
+ if (is_s3c2412(fbi)) {
+ tpal = regs + S3C2412_TPAL;
+ lpcsel = regs + S3C2412_TCONSEL;
+ } else {
+ tpal = regs + S3C2410_TPAL;
+ lpcsel = regs + S3C2410_LPCSEL;
+ }
/* Initialise LCD with values from haret */
@@ -724,12 +741,12 @@ static int s3c2410fb_init_registers(struct fb_info *info)
local_irq_restore(flags);
dprintk("LPCSEL = 0x%08lx\n", mach_info->lpcsel);
- writel(mach_info->lpcsel, regs + S3C2410_LPCSEL);
+ writel(mach_info->lpcsel, lpcsel);
- dprintk("replacing TPAL %08x\n", readl(regs + S3C2410_TPAL));
+ dprintk("replacing TPAL %08x\n", readl(tpal));
/* ensure temporary palette disabled */
- writel(0x00, regs + S3C2410_TPAL);
+ writel(0x00, tpal);
return 0;
}
@@ -763,15 +780,15 @@ static void s3c2410fb_write_palette(struct s3c2410fb_info *fbi)
static irqreturn_t s3c2410fb_irq(int irq, void *dev_id)
{
struct s3c2410fb_info *fbi = dev_id;
- void __iomem *regs = fbi->io;
- unsigned long lcdirq = readl(regs + S3C2410_LCDINTPND);
+ void __iomem *irq_base = fbi->irq_base;
+ unsigned long lcdirq = readl(irq_base + S3C24XX_LCDINTPND);
if (lcdirq & S3C2410_LCDINT_FRSYNC) {
if (fbi->palette_ready)
s3c2410fb_write_palette(fbi);
- writel(S3C2410_LCDINT_FRSYNC, regs + S3C2410_LCDINTPND);
- writel(S3C2410_LCDINT_FRSYNC, regs + S3C2410_LCDSRCPND);
+ writel(S3C2410_LCDINT_FRSYNC, irq_base + S3C24XX_LCDINTPND);
+ writel(S3C2410_LCDINT_FRSYNC, irq_base + S3C24XX_LCDSRCPND);
}
return IRQ_HANDLED;
@@ -779,7 +796,8 @@ static irqreturn_t s3c2410fb_irq(int irq, void *dev_id)
static char driver_name[] = "s3c2410fb";
-static int __init s3c2410fb_probe(struct platform_device *pdev)
+static int __init s3c24xxfb_probe(struct platform_device *pdev,
+ enum s3c_drv_type drv_type)
{
struct s3c2410fb_info *info;
struct s3c2410fb_display *display;
@@ -799,6 +817,12 @@ static int __init s3c2410fb_probe(struct platform_device *pdev)
return -EINVAL;
}
+ if (mach_info->default_display >= mach_info->num_displays) {
+ dev_err(&pdev->dev, "default is %d but only %d displays\n",
+ mach_info->default_display, mach_info->num_displays);
+ return -EINVAL;
+ }
+
display = mach_info->displays + mach_info->default_display;
irq = platform_get_irq(pdev, 0);
@@ -815,6 +839,7 @@ static int __init s3c2410fb_probe(struct platform_device *pdev)
info = fbinfo->par;
info->dev = &pdev->dev;
+ info->drv_type = drv_type;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
@@ -838,6 +863,8 @@ static int __init s3c2410fb_probe(struct platform_device *pdev)
goto release_mem;
}
+ info->irq_base = info->io + ((drv_type == DRV_S3C2412) ? S3C2412_LCDINTBASE : S3C2410_LCDINTBASE);
+
dprintk("devinit\n");
strcpy(fbinfo->fix.id, driver_name);
@@ -946,6 +973,16 @@ dealloc_fb:
return ret;
}
+static int __init s3c2410fb_probe(struct platform_device *pdev)
+{
+ return s3c24xxfb_probe(pdev, DRV_S3C2410);
+}
+
+static int __init s3c2412fb_probe(struct platform_device *pdev)
+{
+ return s3c24xxfb_probe(pdev, DRV_S3C2412);
+}
+
/* s3c2410fb_stop_lcd
*
* shutdown the lcd controller
@@ -1047,14 +1084,31 @@ static struct platform_driver s3c2410fb_driver = {
},
};
+static struct platform_driver s3c2412fb_driver = {
+ .probe = s3c2412fb_probe,
+ .remove = s3c2410fb_remove,
+ .suspend = s3c2410fb_suspend,
+ .resume = s3c2410fb_resume,
+ .driver = {
+ .name = "s3c2412-lcd",
+ .owner = THIS_MODULE,
+ },
+};
+
int __init s3c2410fb_init(void)
{
- return platform_driver_register(&s3c2410fb_driver);
+ int ret = platform_driver_register(&s3c2410fb_driver);
+
+ if (ret == 0)
+ ret = platform_driver_register(&s3c2412fb_driver);
+
+ return ret;
}
static void __exit s3c2410fb_cleanup(void)
{
platform_driver_unregister(&s3c2410fb_driver);
+ platform_driver_unregister(&s3c2412fb_driver);
}
module_init(s3c2410fb_init);
diff --git a/drivers/video/s3c2410fb.h b/drivers/video/s3c2410fb.h
index 6ce5dc26c5f7..dbb73b95e2ef 100644
--- a/drivers/video/s3c2410fb.h
+++ b/drivers/video/s3c2410fb.h
@@ -25,13 +25,20 @@
#ifndef __S3C2410FB_H
#define __S3C2410FB_H
+enum s3c_drv_type {
+ DRV_S3C2410,
+ DRV_S3C2412,
+};
+
struct s3c2410fb_info {
struct device *dev;
struct clk *clk;
struct resource *mem;
void __iomem *io;
+ void __iomem *irq_base;
+ enum s3c_drv_type drv_type;
struct s3c2410fb_hw regs;
unsigned int palette_ready;
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 93ae747440cb..73803624c131 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -427,7 +427,7 @@ sisfb_interpret_edid(struct sisfb_monitor *monitor, u8 *buffer)
monitor->feature = buffer[0x18];
- if(!buffer[0x14] & 0x80) {
+ if(!(buffer[0x14] & 0x80)) {
if(!(buffer[0x14] & 0x08)) {
printk(KERN_INFO
"sisfb: WARNING: Monitor does not support separate syncs\n");
@@ -4621,9 +4621,9 @@ sisfb_find_host_bridge(struct sis_video_info *ivideo, struct pci_dev *mypdev,
while((pdev = pci_get_class(PCI_CLASS_BRIDGE_HOST, pdev))) {
temp = pdev->vendor;
- pci_dev_put(pdev);
if(temp == pcivendor) {
ret = 1;
+ pci_dev_put(pdev);
break;
}
}
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 58f200c69be3..e83dfba7e636 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -641,6 +641,7 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
{
unsigned long control;
void __iomem *ctrl_reg = fbi->regs + SM501_DC_PANEL_CONTROL;
+ struct sm501_platdata_fbsub *pd = fbi->pdata->fb_pnl;
control = readl(ctrl_reg);
@@ -657,26 +658,34 @@ static void sm501fb_panel_power(struct sm501fb_info *fbi, int to)
sm501fb_sync_regs(fbi);
mdelay(10);
- control |= SM501_DC_PANEL_CONTROL_BIAS; /* VBIASEN */
- writel(control, ctrl_reg);
- sm501fb_sync_regs(fbi);
- mdelay(10);
-
- control |= SM501_DC_PANEL_CONTROL_FPEN;
- writel(control, ctrl_reg);
+ if (pd->flags & SM501FB_FLAG_PANEL_USE_VBIASEN) {
+ control |= SM501_DC_PANEL_CONTROL_BIAS; /* VBIASEN */
+ writel(control, ctrl_reg);
+ sm501fb_sync_regs(fbi);
+ mdelay(10);
+ }
+ if (pd->flags & SM501FB_FLAG_PANEL_USE_FPEN) {
+ control |= SM501_DC_PANEL_CONTROL_FPEN;
+ writel(control, ctrl_reg);
+ sm501fb_sync_regs(fbi);
+ mdelay(10);
+ }
} else if (!to && (control & SM501_DC_PANEL_CONTROL_VDD) != 0) {
/* disable panel power */
+ if (pd->flags & SM501FB_FLAG_PANEL_USE_FPEN) {
+ control &= ~SM501_DC_PANEL_CONTROL_FPEN;
+ writel(control, ctrl_reg);
+ sm501fb_sync_regs(fbi);
+ mdelay(10);
+ }
- control &= ~SM501_DC_PANEL_CONTROL_FPEN;
- writel(control, ctrl_reg);
- sm501fb_sync_regs(fbi);
- mdelay(10);
-
- control &= ~SM501_DC_PANEL_CONTROL_BIAS;
- writel(control, ctrl_reg);
- sm501fb_sync_regs(fbi);
- mdelay(10);
+ if (pd->flags & SM501FB_FLAG_PANEL_USE_VBIASEN) {
+ control &= ~SM501_DC_PANEL_CONTROL_BIAS;
+ writel(control, ctrl_reg);
+ sm501fb_sync_regs(fbi);
+ mdelay(10);
+ }
control &= ~SM501_DC_PANEL_CONTROL_DATA;
writel(control, ctrl_reg);
@@ -1267,6 +1276,7 @@ static int sm501fb_start(struct sm501fb_info *info,
{
struct resource *res;
struct device *dev;
+ int k;
int ret;
info->dev = dev = &pdev->dev;
@@ -1328,6 +1338,13 @@ static int sm501fb_start(struct sm501fb_info *info,
info->fbmem_len = (res->end - res->start)+1;
+ /* clear framebuffer memory - avoids garbage data on unused fb */
+ memset(info->fbmem, 0, info->fbmem_len);
+
+ /* clear palette ram - undefined at power on */
+ for (k = 0; k < (256 * 3); k++)
+ writel(0, info->regs + SM501_DC_PANEL_PALETTE + (k * 4));
+
/* enable display controller */
sm501_unit_power(dev->parent, SM501_GATE_DISPLAY, 1);
@@ -1681,6 +1698,15 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info,
if (par->screen.size == 0)
return 0;
+ /* blank the relevant interface to ensure unit power minimised */
+ (par->ops.fb_blank)(FB_BLANK_POWERDOWN, fbi);
+
+ /* tell console/fb driver we are suspending */
+
+ acquire_console_sem();
+ fb_set_suspend(fbi, 1);
+ release_console_sem();
+
/* backup copies in case chip is powered down over suspend */
par->store_fb = vmalloc(par->screen.size);
@@ -1700,12 +1726,6 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info,
memcpy_fromio(par->store_fb, par->screen.k_addr, par->screen.size);
memcpy_fromio(par->store_cursor, par->cursor.k_addr, par->cursor.size);
- /* blank the relevant interface to ensure unit power minimised */
- (par->ops.fb_blank)(FB_BLANK_POWERDOWN, fbi);
-
- acquire_console_sem();
- fb_set_suspend(fbi, 1);
- release_console_sem();
return 0;
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 057bdd593800..71e179ea5f95 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -1342,7 +1342,7 @@ out_err:
}
#ifndef MODULE
-static void tdfxfb_setup(char *options)
+static void __init tdfxfb_setup(char *options)
{
char *this_opt;
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index a14ef894d571..be27b9c1ed72 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -2003,12 +2003,12 @@ static void __devexit uvesafb_exit(void)
module_exit(uvesafb_exit);
-static inline int param_get_scroll(char *buffer, struct kernel_param *kp)
+static int param_get_scroll(char *buffer, struct kernel_param *kp)
{
return 0;
}
-static inline int param_set_scroll(const char *val, struct kernel_param *kp)
+static int param_set_scroll(const char *val, struct kernel_param *kp)
{
ypan = 0;
@@ -2022,11 +2022,11 @@ static inline int param_set_scroll(const char *val, struct kernel_param *kp)
return 0;
}
-#define param_check_scroll(name, p) __param_check(name, p, void);
+#define param_check_scroll(name, p) __param_check(name, p, void)
module_param_named(scroll, ypan, scroll, 0);
MODULE_PARM_DESC(scroll,
- "Scrolling mode, set to 'redraw', ''ypan' or 'ywrap'");
+ "Scrolling mode, set to 'redraw', 'ypan', or 'ywrap'");
module_param_named(vgapal, pmi_setpal, invbool, 0);
MODULE_PARM_DESC(vgapal, "Set palette using VGA registers");
module_param_named(pmipal, pmi_setpal, bool, 0);
diff --git a/drivers/video/vermilion/vermilion.c b/drivers/video/vermilion/vermilion.c
index 1c656667b937..2aa71eb67c2b 100644
--- a/drivers/video/vermilion/vermilion.c
+++ b/drivers/video/vermilion/vermilion.c
@@ -651,7 +651,7 @@ static int vmlfb_check_var_locked(struct fb_var_screeninfo *var,
return -EINVAL;
}
- pitch = __ALIGN_MASK((var->xres * var->bits_per_pixel) >> 3, 0x3F);
+ pitch = ALIGN((var->xres * var->bits_per_pixel) >> 3, 0x40);
mem = pitch * var->yres_virtual;
if (mem > vinfo->vram_contig_size) {
return -ENOMEM;
@@ -785,8 +785,7 @@ static int vmlfb_set_par_locked(struct vml_info *vinfo)
int clock;
vinfo->bytes_per_pixel = var->bits_per_pixel >> 3;
- vinfo->stride =
- __ALIGN_MASK(var->xres_virtual * vinfo->bytes_per_pixel, 0x3F);
+ vinfo->stride = ALIGN(var->xres_virtual * vinfo->bytes_per_pixel, 0x40);
info->fix.line_length = vinfo->stride;
if (!subsys)
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index e38d3b7c3ad7..7b3a8423f485 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -459,8 +459,8 @@ static int __devexit xilinxfb_of_remove(struct of_device *op)
}
/* Match table for of_platform binding */
-static struct of_device_id __devinit xilinxfb_of_match[] = {
- { .compatible = "xilinx,ml300-fb", },
+static struct of_device_id xilinxfb_of_match[] __devinitdata = {
+ { .compatible = "xlnx,plb-tft-cntlr-ref-1.00.a", },
{},
};
MODULE_DEVICE_TABLE(of, xilinxfb_of_match);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 622aece1acce..c8a4332d1132 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -23,6 +23,7 @@
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/delay.h>
struct virtio_balloon
{
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 8236d447adf5..c4493091c655 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -42,5 +42,15 @@ config W1_MASTER_DS1WM
in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
hx4700.
+config W1_MASTER_GPIO
+ tristate "GPIO 1-wire busmaster"
+ depends on GENERIC_GPIO
+ help
+ Say Y here if you want to communicate with your 1-wire devices using
+ GPIO pins. This driver uses the GPIO API to control the wire.
+
+ This support is also available as a module. If so, the module
+ will be called w1-gpio.ko.
+
endmenu
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index 11551b328186..1420b5bbdda8 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_W1_MASTER_MATROX) += matrox_w1.o
obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o
obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o
obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
+obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 5747997f8d7d..688e435b4d9a 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -361,11 +361,12 @@ static int ds1wm_probe(struct platform_device *pdev)
goto err1;
}
ds1wm_data->irq = res->start;
- ds1wm_data->active_high = (res->flags & IORESOURCE_IRQ_HIGHEDGE) ?
- 1 : 0;
+ ds1wm_data->active_high = plat->active_high;
- set_irq_type(ds1wm_data->irq, ds1wm_data->active_high ?
- IRQ_TYPE_EDGE_RISING : IRQ_TYPE_EDGE_FALLING);
+ if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
+ set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
+ if (res->flags & IORESOURCE_IRQ_LOWEDGE)
+ set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED,
"ds1wm", ds1wm_data);
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
new file mode 100644
index 000000000000..9e1138a75e8b
--- /dev/null
+++ b/drivers/w1/masters/w1-gpio.c
@@ -0,0 +1,124 @@
+/*
+ * w1-gpio - GPIO w1 bus master driver
+ *
+ * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/w1-gpio.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+
+#include <asm/gpio.h>
+
+static void w1_gpio_write_bit_dir(void *data, u8 bit)
+{
+ struct w1_gpio_platform_data *pdata = data;
+
+ if (bit)
+ gpio_direction_input(pdata->pin);
+ else
+ gpio_direction_output(pdata->pin, 0);
+}
+
+static void w1_gpio_write_bit_val(void *data, u8 bit)
+{
+ struct w1_gpio_platform_data *pdata = data;
+
+ gpio_set_value(pdata->pin, bit);
+}
+
+static u8 w1_gpio_read_bit(void *data)
+{
+ struct w1_gpio_platform_data *pdata = data;
+
+ return gpio_get_value(pdata->pin);
+}
+
+static int __init w1_gpio_probe(struct platform_device *pdev)
+{
+ struct w1_bus_master *master;
+ struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+
+ if (!pdata)
+ return -ENXIO;
+
+ master = kzalloc(sizeof(struct w1_bus_master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ err = gpio_request(pdata->pin, "w1");
+ if (err)
+ goto free_master;
+
+ master->data = pdata;
+ master->read_bit = w1_gpio_read_bit;
+
+ if (pdata->is_open_drain) {
+ gpio_direction_output(pdata->pin, 1);
+ master->write_bit = w1_gpio_write_bit_val;
+ } else {
+ gpio_direction_input(pdata->pin);
+ master->write_bit = w1_gpio_write_bit_dir;
+ }
+
+ err = w1_add_master_device(master);
+ if (err)
+ goto free_gpio;
+
+ platform_set_drvdata(pdev, master);
+
+ return 0;
+
+ free_gpio:
+ gpio_free(pdata->pin);
+ free_master:
+ kfree(master);
+
+ return err;
+}
+
+static int __exit w1_gpio_remove(struct platform_device *pdev)
+{
+ struct w1_bus_master *master = platform_get_drvdata(pdev);
+ struct w1_gpio_platform_data *pdata = pdev->dev.platform_data;
+
+ w1_remove_master_device(master);
+ gpio_free(pdata->pin);
+ kfree(master);
+
+ return 0;
+}
+
+static struct platform_driver w1_gpio_driver = {
+ .driver = {
+ .name = "w1-gpio",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(w1_gpio_remove),
+};
+
+static int __init w1_gpio_init(void)
+{
+ return platform_driver_probe(&w1_gpio_driver, w1_gpio_probe);
+}
+
+static void __exit w1_gpio_exit(void)
+{
+ platform_driver_unregister(&w1_gpio_driver);
+}
+
+module_init(w1_gpio_init);
+module_exit(w1_gpio_exit);
+
+MODULE_DESCRIPTION("GPIO w1 bus master driver");
+MODULE_AUTHOR("Ville Syrjala <syrjala@sci.fi>");
+MODULE_LICENSE("GPL");
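The bus master above is wired up purely through platform data. For reference, a minimal, hypothetical board-file sketch is shown below (the GPIO number and all "example_*" names are invented for illustration; it assumes struct w1_gpio_platform_data in <linux/w1-gpio.h> carries exactly the .pin and .is_open_drain members dereferenced by the probe routine):

	#include <linux/init.h>
	#include <linux/platform_device.h>
	#include <linux/w1-gpio.h>

	/* Arbitrary example pin; use whichever GPIO the 1-wire data line sits on. */
	static struct w1_gpio_platform_data example_w1_gpio_pdata = {
		.pin		= 23,
		.is_open_drain	= 0,	/* emulate open drain by flipping pin direction */
	};

	static struct platform_device example_w1_device = {
		.name	= "w1-gpio",	/* must match w1_gpio_driver.driver.name */
		.id	= -1,
		.dev	= {
			.platform_data = &example_w1_gpio_pdata,
		},
	};

	/* Called from the board's init code. */
	static int __init example_board_w1_init(void)
	{
		return platform_device_register(&example_w1_device);
	}

With .is_open_drain left at 0, the driver selects w1_gpio_write_bit_dir() and releases the line by switching the pin to input, relying on the external pull-up; setting it to 1 selects w1_gpio_write_bit_val() and drives the pin directly.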
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 112f4ec59035..fb28acaeed6c 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -92,6 +92,7 @@ struct w1_therm_family_converter
int (*convert)(u8 rom[9]);
};
+/* The return value is millidegrees Centigrade. */
static inline int w1_DS18B20_convert_temp(u8 rom[9]);
static inline int w1_DS18S20_convert_temp(u8 rom[9]);
@@ -113,7 +114,7 @@ static struct w1_therm_family_converter w1_therm_families[] = {
static inline int w1_DS18B20_convert_temp(u8 rom[9])
{
s16 t = (rom[1] << 8) | rom[0];
- t /= 16;
+ t = t*1000/16;
return t;
}
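A quick worked example of the new scaling (raw value chosen arbitrarily): a DS18B20 scratchpad word of 0x0191 gives t = 401 sixteenths of a degree. The old code returned 401 / 16 = 25 (whole degrees, the fraction was lost), while the new code returns 401 * 1000 / 16 = 25062, i.e. 25.062 degrees C expressed in millidegrees, matching the comment added above.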
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 33e50310e9e0..7293c9b11f91 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -675,7 +675,6 @@ static void w1_slave_found(void *data, u64 rn)
struct w1_slave *sl;
struct list_head *ent;
struct w1_reg_num *tmp;
- int family_found = 0;
struct w1_master *dev;
u64 rn_le = cpu_to_le64(rn);
@@ -698,9 +697,6 @@ static void w1_slave_found(void *data, u64 rn)
sl->reg_num.crc == tmp->crc) {
set_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags);
break;
- } else if (sl->reg_num.family == tmp->family) {
- family_found = 1;
- break;
}
slave_count++;
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index b364da70ff28..dfebdbe7440e 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -175,7 +175,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
if (!wnames)
return ERR_PTR(-ENOMEM);
- for (d = dentry, i = n; i >= 0; i--, d = d->d_parent)
+ for (d = dentry, i = (n-1); i >= 0; i--, d = d->d_parent)
wnames[i] = (char *) d->d_name.name;
clone = 1;
@@ -183,7 +183,7 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
while (i < n) {
l = min(n - i, P9_MAXWELEM);
fid = p9_client_walk(fid, l, &wnames[i], clone);
- if (!fid) {
+ if (IS_ERR(fid)) {
kfree(wnames);
return fid;
}
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index fbb12dadba83..9b0f0222e8bb 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -3,7 +3,7 @@
*
* This file contains functions assisting in mapping VFS to 9P2000
*
- * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
* This program is free software; you can redistribute it and/or modify
@@ -31,7 +31,6 @@
#include <linux/idr.h>
#include <net/9p/9p.h>
#include <net/9p/transport.h>
-#include <net/9p/conn.h>
#include <net/9p/client.h>
#include "v9fs.h"
#include "v9fs_vfs.h"
@@ -43,11 +42,11 @@
enum {
/* Options that take integer arguments */
- Opt_debug, Opt_msize, Opt_dfltuid, Opt_dfltgid, Opt_afid,
+ Opt_debug, Opt_dfltuid, Opt_dfltgid, Opt_afid,
/* String options */
Opt_uname, Opt_remotename, Opt_trans,
/* Options that take no arguments */
- Opt_legacy, Opt_nodevmap,
+ Opt_nodevmap,
/* Cache options */
Opt_cache_loose,
/* Access options */
@@ -58,14 +57,11 @@ enum {
static match_table_t tokens = {
{Opt_debug, "debug=%x"},
- {Opt_msize, "msize=%u"},
{Opt_dfltuid, "dfltuid=%u"},
{Opt_dfltgid, "dfltgid=%u"},
{Opt_afid, "afid=%u"},
{Opt_uname, "uname=%s"},
{Opt_remotename, "aname=%s"},
- {Opt_trans, "trans=%s"},
- {Opt_legacy, "noextend"},
{Opt_nodevmap, "nodevmap"},
{Opt_cache_loose, "cache=loose"},
{Opt_cache_loose, "loose"},
@@ -85,16 +81,14 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
char *options;
substring_t args[MAX_OPT_ARGS];
char *p;
- int option;
- int ret;
+ int option = 0;
char *s, *e;
+ int ret;
/* setup defaults */
- v9ses->maxdata = 8192;
v9ses->afid = ~0;
v9ses->debug = 0;
v9ses->cache = 0;
- v9ses->trans = v9fs_default_trans();
if (!v9ses->options)
return;
@@ -106,7 +100,8 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
continue;
token = match_token(p, tokens, args);
if (token < Opt_uname) {
- if ((ret = match_int(&args[0], &option)) < 0) {
+ ret = match_int(&args[0], &option);
+ if (ret < 0) {
P9_DPRINTK(P9_DEBUG_ERROR,
"integer field, but no integer?\n");
continue;
@@ -119,9 +114,7 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
p9_debug_level = option;
#endif
break;
- case Opt_msize:
- v9ses->maxdata = option;
- break;
+
case Opt_dfltuid:
v9ses->dfltuid = option;
break;
@@ -131,18 +124,12 @@ static void v9fs_parse_options(struct v9fs_session_info *v9ses)
case Opt_afid:
v9ses->afid = option;
break;
- case Opt_trans:
- v9ses->trans = v9fs_match_trans(&args[0]);
- break;
case Opt_uname:
match_strcpy(v9ses->uname, &args[0]);
break;
case Opt_remotename:
match_strcpy(v9ses->aname, &args[0]);
break;
- case Opt_legacy:
- v9ses->flags &= ~V9FS_EXTENDED;
- break;
case Opt_nodevmap:
v9ses->nodev = 1;
break;
@@ -185,7 +172,6 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
const char *dev_name, char *data)
{
int retval = -EINVAL;
- struct p9_trans *trans = NULL;
struct p9_fid *fid;
v9ses->uname = __getname();
@@ -207,24 +193,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
v9ses->options = kstrdup(data, GFP_KERNEL);
v9fs_parse_options(v9ses);
- if (v9ses->trans == NULL) {
- retval = -EPROTONOSUPPORT;
- P9_DPRINTK(P9_DEBUG_ERROR,
- "No transport defined or default transport\n");
- goto error;
- }
-
- trans = v9ses->trans->create(dev_name, v9ses->options);
- if (IS_ERR(trans)) {
- retval = PTR_ERR(trans);
- trans = NULL;
- goto error;
- }
- if ((v9ses->maxdata+P9_IOHDRSZ) > v9ses->trans->maxsize)
- v9ses->maxdata = v9ses->trans->maxsize-P9_IOHDRSZ;
-
- v9ses->clnt = p9_client_create(trans, v9ses->maxdata+P9_IOHDRSZ,
- v9fs_extended(v9ses));
+ v9ses->clnt = p9_client_create(dev_name, v9ses->options);
if (IS_ERR(v9ses->clnt)) {
retval = PTR_ERR(v9ses->clnt);
@@ -236,6 +205,8 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
if (!v9ses->clnt->dotu)
v9ses->flags &= ~V9FS_EXTENDED;
+ v9ses->maxdata = v9ses->clnt->msize;
+
/* for legacy mode, fall back to V9FS_ACCESS_ANY */
if (!v9fs_extended(v9ses) &&
((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) {
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index db4b4193f2e2..7d3a1018db52 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -1,7 +1,7 @@
/*
* V9FS definitions.
*
- * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
* This program is free software; you can redistribute it and/or modify
@@ -28,7 +28,6 @@
struct v9fs_session_info {
/* options */
- unsigned int maxdata;
unsigned char flags; /* session flags */
unsigned char nodev; /* set to 1 if no disable device mapping */
unsigned short debug; /* debug level */
@@ -38,10 +37,10 @@ struct v9fs_session_info {
char *options; /* copy of mount options */
char *uname; /* user name to mount as */
char *aname; /* name of remote hierarchy being mounted */
+ unsigned int maxdata; /* max data for client interface */
unsigned int dfltuid; /* default uid/muid for legacy support */
unsigned int dfltgid; /* default gid for legacy support */
u32 uid; /* if ACCESS_SINGLE, the uid that has access */
- struct p9_trans_module *trans; /* 9p transport */
struct p9_client *clnt; /* 9p client */
struct dentry *debugfs_dir;
};
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index ba4b1caa9c43..a616fff8906d 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -184,7 +184,7 @@ static const struct file_operations v9fs_cached_file_operations = {
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
- .mmap = generic_file_mmap,
+ .mmap = generic_file_readonly_mmap,
};
const struct file_operations v9fs_file_operations = {
@@ -194,5 +194,5 @@ const struct file_operations v9fs_file_operations = {
.open = v9fs_file_open,
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
- .mmap = generic_file_mmap,
+ .mmap = generic_file_readonly_mmap,
};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 23581bcb599b..6a28842052ea 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -77,6 +77,8 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
res |= P9_DMSETUID;
if ((mode & S_ISGID) == S_ISGID)
res |= P9_DMSETGID;
+ if ((mode & S_ISVTX) == S_ISVTX)
+ res |= P9_DMSETVTX;
if ((mode & P9_DMLINK))
res |= P9_DMLINK;
}
@@ -119,6 +121,9 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
if ((mode & P9_DMSETGID) == P9_DMSETGID)
res |= S_ISGID;
+
+ if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
+ res |= S_ISVTX;
}
return res;
@@ -568,7 +573,7 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
v9ses = v9fs_inode2v9ses(dir);
dfid = v9fs_fid_lookup(dentry->d_parent);
if (IS_ERR(dfid))
- return ERR_PTR(PTR_ERR(dfid));
+ return ERR_CAST(dfid);
name = (char *) dentry->d_name.name;
fid = p9_client_walk(dfid, 1, &name, 1);
diff --git a/fs/Kconfig b/fs/Kconfig
index 987b5d7cb21a..d7312825592b 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -463,40 +463,18 @@ config OCFS2_DEBUG_FS
this option for debugging only as it is likely to decrease
performance of the filesystem.
-config MINIX_FS
- tristate "Minix fs support"
- help
- Minix is a simple operating system used in many classes about OS's.
- The minix file system (method to organize files on a hard disk
- partition or a floppy disk) was the original file system for Linux,
- but has been superseded by the second extended file system ext2fs.
- You don't want to use the minix file system on your hard disk
- because of certain built-in restrictions, but it is sometimes found
- on older Linux floppy disks. This option will enlarge your kernel
- by about 28 KB. If unsure, say N.
+endif # BLOCK
- To compile this file system support as a module, choose M here: the
- module will be called minix. Note that the file system of your root
- partition (the one containing the directory /) cannot be compiled as
- a module.
-
-config ROMFS_FS
- tristate "ROM file system support"
- ---help---
- This is a very small read-only file system mainly intended for
- initial ram disks of installation disks, but it could be used for
- other read-only media as well. Read
- <file:Documentation/filesystems/romfs.txt> for details.
-
- To compile this file system support as a module, choose M here: the
- module will be called romfs. Note that the file system of your
- root partition (the one containing the directory /) cannot be a
- module.
-
- If you don't know whether you need it, then you don't need it:
- answer N.
+config DNOTIFY
+ bool "Dnotify support"
+ default y
+ help
+ Dnotify is a directory-based per-fd file change notification system
+ that uses signals to communicate events to user-space. There exist
+ superior alternatives, but some applications may still rely on
+ dnotify.
-endif
+ If unsure, say Y.
config INOTIFY
bool "Inotify file change notification support"
@@ -577,17 +555,6 @@ config QUOTACTL
depends on XFS_QUOTA || QUOTA
default y
-config DNOTIFY
- bool "Dnotify support"
- default y
- help
- Dnotify is a directory-based per-fd file change notification system
- that uses signals to communicate events to user-space. There exist
- superior alternatives, but some applications may still rely on
- dnotify.
-
- If unsure, say Y.
-
config AUTOFS_FS
tristate "Kernel automounter support"
help
@@ -713,7 +680,7 @@ config UDF_NLS
depends on (UDF_FS=m && NLS) || (UDF_FS=y && NLS=y)
endmenu
-endif
+endif # BLOCK
if BLOCK
menu "DOS/FAT/NT Filesystems"
@@ -896,7 +863,7 @@ config NTFS_RW
It is perfectly safe to say N here.
endmenu
-endif
+endif # BLOCK
menu "Pseudo filesystems"
@@ -1152,7 +1119,7 @@ config BEFS_DEBUG
depends on BEFS_FS
help
If you say Y here, you can use the 'debug' mount option to enable
- debugging output from the driver.
+ debugging output from the driver.
config BFS_FS
tristate "BFS file system support (EXPERIMENTAL)"
@@ -1263,7 +1230,7 @@ config JFFS2_FS_XATTR
Extended attributes are name:value pairs associated with inodes by
the kernel or by users (see the attr(5) manual page, or visit
<http://acl.bestbits.at/> for details).
-
+
If unsure, say N.
config JFFS2_FS_POSIX_ACL
@@ -1274,10 +1241,10 @@ config JFFS2_FS_POSIX_ACL
help
Posix Access Control Lists (ACLs) support permissions for users and
groups beyond the owner/group/world scheme.
-
+
To learn more about Access Control Lists, visit the Posix ACLs for
Linux website <http://acl.bestbits.at/>.
-
+
If you don't know what Access Control Lists are, say N
config JFFS2_FS_SECURITY
@@ -1289,7 +1256,7 @@ config JFFS2_FS_SECURITY
implemented by security modules like SELinux. This option
enables an extended attribute handler for file security
labels in the jffs2 filesystem.
-
+
If you are not using a security module that requires using
extended attributes for file security labels, say N.
@@ -1417,6 +1384,24 @@ config VXFS_FS
To compile this as a module, choose M here: the module will be
called freevxfs. If unsure, say N.
+config MINIX_FS
+ tristate "Minix file system support"
+ depends on BLOCK
+ help
+ Minix is a simple operating system used in many classes about OS's.
+ The minix file system (method to organize files on a hard disk
+ partition or a floppy disk) was the original file system for Linux,
+ but has been superseded by the second extended file system ext2fs.
+ You don't want to use the minix file system on your hard disk
+ because of certain built-in restrictions, but it is sometimes found
+ on older Linux floppy disks. This option will enlarge your kernel
+ by about 28 KB. If unsure, say N.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called minix. Note that the file system of your root
+ partition (the one containing the directory /) cannot be compiled as
+ a module.
+
config HPFS_FS
tristate "OS/2 HPFS file system support"
@@ -1434,7 +1419,6 @@ config HPFS_FS
module will be called hpfs. If unsure, say N.
-
config QNX4FS_FS
tristate "QNX4 file system support (read only)"
depends on BLOCK
@@ -1461,6 +1445,22 @@ config QNX4FS_RW
It's currently broken, so for now:
answer N.
+config ROMFS_FS
+ tristate "ROM file system support"
+ depends on BLOCK
+ ---help---
+ This is a very small read-only file system mainly intended for
+ initial ram disks of installation disks, but it could be used for
+ other read-only media as well. Read
+ <file:Documentation/filesystems/romfs.txt> for details.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called romfs. Note that the file system of your
+ root partition (the one containing the directory /) cannot be a
+ module.
+
+ If you don't know whether you need it, then you don't need it:
+ answer N.
config SYSV_FS
@@ -1501,7 +1501,6 @@ config SYSV_FS
If you haven't heard about all of this before, it's safe to say N.
-
config UFS_FS
tristate "UFS file system support (read only)"
depends on BLOCK
@@ -1779,12 +1778,9 @@ config SUNRPC_GSS
tristate
config SUNRPC_XPRT_RDMA
- tristate "RDMA transport for sunrpc (EXPERIMENTAL)"
+ tristate
depends on SUNRPC && INFINIBAND && EXPERIMENTAL
- default m
- help
- Adds a client RPC transport for supporting kernel NFS over RDMA
- mounts, including Infiniband and iWARP. Experimental.
+ default SUNRPC && INFINIBAND
config SUNRPC_BIND34
bool "Support for rpcbind versions 3 & 4 (EXPERIMENTAL)"
@@ -1835,7 +1831,7 @@ config RPCSEC_GSS_SPKM3
If unsure, say N.
config SMB_FS
- tristate "SMB file system support (to mount Windows shares etc.)"
+ tristate "SMB file system support (OBSOLETE, please use CIFS)"
depends on INET
select NLS
help
@@ -1858,8 +1854,8 @@ config SMB_FS
General information about how to connect Linux, Windows machines and
Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>.
- To compile the SMB support as a module, choose M here: the module will
- be called smbfs. Most people say N, however.
+ To compile the SMB support as a module, choose M here:
+ the module will be called smbfs. Most people say N, however.
config SMB_NLS_DEFAULT
bool "Use a default NLS"
@@ -1891,7 +1887,7 @@ config SMB_NLS_REMOTE
smbmount from samba 2.2.0 or later supports this.
config CIFS
- tristate "CIFS support (advanced network filesystem for Samba, Window and other CIFS compliant servers)"
+ tristate "CIFS support (advanced network filesystem, SMBFS successor)"
depends on INET
select NLS
help
@@ -1949,16 +1945,16 @@ config CIFS_WEAK_PW_HASH
LANMAN based servers such as OS/2 and Windows 95, but such
mounts may be less secure than mounts using NTLM or more recent
security mechanisms if you are on a public network. Unless you
- have a need to access old SMB servers (and are on a private
+ have a need to access old SMB servers (and are on a private
network) you probably want to say N. Even if this support
is enabled in the kernel build, LANMAN authentication will not be
used automatically. At runtime LANMAN mounts are disabled but
can be set to required (or optional) either in
/proc/fs/cifs (see fs/cifs/README for more detail) or via an
- option on the mount command. This support is disabled by
+ option on the mount command. This support is disabled by
default in order to reduce the possibility of a downgrade
attack.
-
+
If unsure, say N.
config CIFS_XATTR
@@ -1999,7 +1995,7 @@ config CIFS_DEBUG2
messages in some error paths, slowing performance. This
option can be turned off unless you are debugging
cifs problems. If unsure, say N.
-
+
config CIFS_EXPERIMENTAL
bool "CIFS Experimental Features (EXPERIMENTAL)"
depends on CIFS && EXPERIMENTAL
@@ -2090,7 +2086,7 @@ config CODA_FS_OLD_API
However this new API is not backward compatible with older
clients. If you really need to run the old Coda userspace
cache manager then say Y.
-
+
For most cases you probably want to say N.
config AFS_FS
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 232c69493683..d5bd497ab9cb 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -174,7 +174,8 @@ extern void affs_put_inode(struct inode *inode);
extern void affs_drop_inode(struct inode *inode);
extern void affs_delete_inode(struct inode *inode);
extern void affs_clear_inode(struct inode *inode);
-extern void affs_read_inode(struct inode *inode);
+extern struct inode *affs_iget(struct super_block *sb,
+ unsigned long ino);
extern int affs_write_inode(struct inode *inode, int);
extern int affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type);
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index f4de4b98004f..805573005de6 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -170,9 +170,11 @@ affs_remove_link(struct dentry *dentry)
if (!link_bh)
goto done;
- dir = iget(sb, be32_to_cpu(AFFS_TAIL(sb, link_bh)->parent));
- if (!dir)
+ dir = affs_iget(sb, be32_to_cpu(AFFS_TAIL(sb, link_bh)->parent));
+ if (IS_ERR(dir)) {
+ retval = PTR_ERR(dir);
goto done;
+ }
affs_lock_dir(dir);
affs_fix_dcache(dentry, link_ino);
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 4609a6c13fe9..27fe6cbe43ae 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -15,20 +15,25 @@
extern const struct inode_operations affs_symlink_inode_operations;
extern struct timezone sys_tz;
-void
-affs_read_inode(struct inode *inode)
+struct inode *affs_iget(struct super_block *sb, unsigned long ino)
{
- struct super_block *sb = inode->i_sb;
struct affs_sb_info *sbi = AFFS_SB(sb);
struct buffer_head *bh;
struct affs_head *head;
struct affs_tail *tail;
+ struct inode *inode;
u32 block;
u32 size;
u32 prot;
u16 id;
- pr_debug("AFFS: read_inode(%lu)\n",inode->i_ino);
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ pr_debug("AFFS: affs_iget(%lu)\n", inode->i_ino);
block = inode->i_ino;
bh = affs_bread(sb, block);
@@ -154,12 +159,13 @@ affs_read_inode(struct inode *inode)
sys_tz.tz_minuteswest * 60;
inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_atime.tv_nsec = 0;
affs_brelse(bh);
- return;
+ unlock_new_inode(inode);
+ return inode;
bad_inode:
- make_bad_inode(inode);
affs_brelse(bh);
- return;
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
int
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index a42143ca0169..2218f1ee71ce 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -208,9 +208,8 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
affs_lock_dir(dir);
bh = affs_find_entry(dir, dentry);
affs_unlock_dir(dir);
- if (IS_ERR(bh)) {
- return ERR_PTR(PTR_ERR(bh));
- }
+ if (IS_ERR(bh))
+ return ERR_CAST(bh);
if (bh) {
u32 ino = bh->b_blocknr;
@@ -223,10 +222,9 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
ino = be32_to_cpu(AFFS_TAIL(sb, bh)->original);
}
affs_brelse(bh);
- inode = iget(sb, ino);
- if (!inode) {
- return ERR_PTR(-EACCES);
- }
+ inode = affs_iget(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
}
dentry->d_op = AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations;
d_add(dentry, inode);
diff --git a/fs/affs/super.c b/fs/affs/super.c
index b53e5d0ec65c..3c45d49c0d26 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -113,7 +113,6 @@ static void destroy_inodecache(void)
static const struct super_operations affs_sops = {
.alloc_inode = affs_alloc_inode,
.destroy_inode = affs_destroy_inode,
- .read_inode = affs_read_inode,
.write_inode = affs_write_inode,
.put_inode = affs_put_inode,
.drop_inode = affs_drop_inode,
@@ -271,6 +270,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
unsigned long mount_flags;
int tmp_flags; /* fix remount prototype... */
u8 sig[4];
+ int ret = -EINVAL;
pr_debug("AFFS: read_super(%s)\n",data ? (const char *)data : "no options");
@@ -444,7 +444,12 @@ got_root:
/* set up enough so that it can read an inode */
- root_inode = iget(sb, root_block);
+ root_inode = affs_iget(sb, root_block);
+ if (IS_ERR(root_inode)) {
+ ret = PTR_ERR(root_inode);
+ goto out_error_noinode;
+ }
+
sb->s_root = d_alloc_root(root_inode);
if (!sb->s_root) {
printk(KERN_ERR "AFFS: Get root inode failed\n");
@@ -461,12 +466,13 @@ got_root:
out_error:
if (root_inode)
iput(root_inode);
+out_error_noinode:
kfree(sbi->s_bitmap);
affs_brelse(root_bh);
kfree(sbi->s_prefix);
kfree(sbi);
sb->s_fs_info = NULL;
- return -EINVAL;
+ return ret;
}
static int
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 0cc3597c1197..b58af8f18bc4 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -512,7 +512,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
key = afs_request_key(vnode->volume->cell);
if (IS_ERR(key)) {
_leave(" = %ld [key]", PTR_ERR(key));
- return ERR_PTR(PTR_ERR(key));
+ return ERR_CAST(key);
}
ret = afs_validate(vnode, key);
@@ -540,7 +540,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
key_put(key);
if (IS_ERR(inode)) {
_leave(" = %ld", PTR_ERR(inode));
- return ERR_PTR(PTR_ERR(inode));
+ return ERR_CAST(inode);
}
dentry->d_op = &afs_fs_dentry_operations;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 84750c8e9f95..08db82e1343a 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -196,10 +196,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
/* failure */
bad_inode:
- make_bad_inode(inode);
- unlock_new_inode(inode);
- iput(inode);
-
+ iget_failed(inode);
_leave(" = %d [bad]", ret);
return ERR_PTR(ret);
}
diff --git a/fs/afs/security.c b/fs/afs/security.c
index 566fe712c682..9446a1fd108a 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -95,7 +95,7 @@ static struct afs_vnode *afs_get_auth_inode(struct afs_vnode *vnode,
auth_inode = afs_iget(vnode->vfs_inode.i_sb, key,
&vnode->status.parent, NULL, NULL);
if (IS_ERR(auth_inode))
- return ERR_PTR(PTR_ERR(auth_inode));
+ return ERR_CAST(auth_inode);
}
auth_vnode = AFS_FS_I(auth_inode);
diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
index 8b4cca3c4705..901a3e67ec45 100644
--- a/fs/autofs/autofs_i.h
+++ b/fs/autofs/autofs_i.h
@@ -150,6 +150,7 @@ extern const struct file_operations autofs_root_operations;
int autofs_fill_super(struct super_block *, void *, int);
void autofs_kill_sb(struct super_block *sb);
+struct inode *autofs_iget(struct super_block *, unsigned long);
/* Queue management functions */
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 45f5992a0957..708bdb89fea1 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -52,10 +52,7 @@ out_kill_sb:
kill_anon_super(sb);
}
-static void autofs_read_inode(struct inode *inode);
-
static const struct super_operations autofs_sops = {
- .read_inode = autofs_read_inode,
.statfs = simple_statfs,
};
@@ -164,7 +161,9 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
s->s_time_gran = 1;
sbi->sb = s;
- root_inode = iget(s, AUTOFS_ROOT_INO);
+ root_inode = autofs_iget(s, AUTOFS_ROOT_INO);
+ if (IS_ERR(root_inode))
+ goto fail_free;
root = d_alloc_root(root_inode);
pipe = NULL;
@@ -230,11 +229,17 @@ fail_unlock:
return -EINVAL;
}
-static void autofs_read_inode(struct inode *inode)
+struct inode *autofs_iget(struct super_block *sb, unsigned long ino)
{
- ino_t ino = inode->i_ino;
unsigned int n;
- struct autofs_sb_info *sbi = autofs_sbi(inode->i_sb);
+ struct autofs_sb_info *sbi = autofs_sbi(sb);
+ struct inode *inode;
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
/* Initialize to the default case (stub directory) */
@@ -250,7 +255,7 @@ static void autofs_read_inode(struct inode *inode)
inode->i_op = &autofs_root_inode_operations;
inode->i_fop = &autofs_root_operations;
inode->i_uid = inode->i_gid = 0; /* Changed in read_super */
- return;
+ goto done;
}
inode->i_uid = inode->i_sb->s_root->d_inode->i_uid;
@@ -263,7 +268,7 @@ static void autofs_read_inode(struct inode *inode)
n = ino - AUTOFS_FIRST_SYMLINK;
if (n >= AUTOFS_MAX_SYMLINKS || !test_bit(n,sbi->symlink_bitmap)) {
printk("autofs: Looking for bad symlink inode %u\n", (unsigned int) ino);
- return;
+ goto done;
}
inode->i_op = &autofs_symlink_inode_operations;
@@ -275,4 +280,8 @@ static void autofs_read_inode(struct inode *inode)
inode->i_size = sl->len;
inode->i_nlink = 1;
}
+
+done:
+ unlock_new_inode(inode);
+ return inode;
}
diff --git a/fs/autofs/root.c b/fs/autofs/root.c
index 5efff3c0d886..8aacade56956 100644
--- a/fs/autofs/root.c
+++ b/fs/autofs/root.c
@@ -114,8 +114,8 @@ static int try_to_fill_dentry(struct dentry *dentry, struct super_block *sb, str
dentry->d_time = (unsigned long) ent;
if (!dentry->d_inode) {
- inode = iget(sb, ent->ino);
- if (!inode) {
+ inode = autofs_iget(sb, ent->ino);
+ if (IS_ERR(inode)) {
/* Failed, but leave pending for next time */
return 1;
}
@@ -274,6 +274,7 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
unsigned int n;
int slsize;
struct autofs_symlink *sl;
+ struct inode *inode;
DPRINTK(("autofs_root_symlink: %s <- ", symname));
autofs_say(dentry->d_name.name,dentry->d_name.len);
@@ -331,7 +332,12 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
ent->dentry = NULL; /* We don't keep the dentry for symlinks */
autofs_hash_insert(dh,ent);
- d_instantiate(dentry, iget(dir->i_sb,ent->ino));
+
+ inode = autofs_iget(dir->i_sb, ent->ino);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ d_instantiate(dentry, inode);
unlock_kernel();
return 0;
}
@@ -428,6 +434,7 @@ static int autofs_root_mkdir(struct inode *dir, struct dentry *dentry, int mode)
struct autofs_sb_info *sbi = autofs_sbi(dir->i_sb);
struct autofs_dirhash *dh = &sbi->dirhash;
struct autofs_dir_ent *ent;
+ struct inode *inode;
ino_t ino;
lock_kernel();
@@ -469,7 +476,14 @@ static int autofs_root_mkdir(struct inode *dir, struct dentry *dentry, int mode)
autofs_hash_insert(dh,ent);
inc_nlink(dir);
- d_instantiate(dentry, iget(dir->i_sb,ino));
+
+ inode = autofs_iget(dir->i_sb, ino);
+ if (IS_ERR(inode)) {
+ drop_nlink(dir);
+ return PTR_ERR(inode);
+ }
+
+ d_instantiate(dentry, inode);
unlock_kernel();
return 0;
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 521ff7caadbd..f1c2ea8342f5 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -359,3 +359,17 @@ int is_bad_inode(struct inode *inode)
}
EXPORT_SYMBOL(is_bad_inode);
+
+/**
+ * iget_failed - Mark an under-construction inode as dead and release it
+ * @inode: The inode to discard
+ *
+ * Mark an under-construction inode as dead and release it.
+ */
+void iget_failed(struct inode *inode)
+{
+ make_bad_inode(inode);
+ unlock_new_inode(inode);
+ iput(inode);
+}
+EXPORT_SYMBOL(iget_failed);
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index b28a20e61b80..403fe661c144 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -35,7 +35,7 @@ static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
static int befs_readpage(struct file *file, struct page *page);
static sector_t befs_bmap(struct address_space *mapping, sector_t block);
static struct dentry *befs_lookup(struct inode *, struct dentry *, struct nameidata *);
-static void befs_read_inode(struct inode *ino);
+static struct inode *befs_iget(struct super_block *, unsigned long);
static struct inode *befs_alloc_inode(struct super_block *sb);
static void befs_destroy_inode(struct inode *inode);
static int befs_init_inodecache(void);
@@ -52,7 +52,6 @@ static int befs_statfs(struct dentry *, struct kstatfs *);
static int parse_options(char *, befs_mount_options *);
static const struct super_operations befs_sops = {
- .read_inode = befs_read_inode, /* initialize & read inode */
.alloc_inode = befs_alloc_inode, /* allocate a new inode */
.destroy_inode = befs_destroy_inode, /* deallocate an inode */
.put_super = befs_put_super, /* uninit super */
@@ -198,9 +197,9 @@ befs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
return ERR_PTR(-ENODATA);
}
- inode = iget(dir->i_sb, (ino_t) offset);
- if (!inode)
- return ERR_PTR(-EACCES);
+ inode = befs_iget(dir->i_sb, (ino_t) offset);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
d_add(dentry, inode);
@@ -296,17 +295,23 @@ static void init_once(struct kmem_cache *cachep, void *foo)
inode_init_once(&bi->vfs_inode);
}
-static void
-befs_read_inode(struct inode *inode)
+static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
{
struct buffer_head *bh = NULL;
befs_inode *raw_inode = NULL;
- struct super_block *sb = inode->i_sb;
befs_sb_info *befs_sb = BEFS_SB(sb);
befs_inode_info *befs_ino = NULL;
+ struct inode *inode;
+ long ret = -EIO;
- befs_debug(sb, "---> befs_read_inode() " "inode = %lu", inode->i_ino);
+ befs_debug(sb, "---> befs_read_inode() " "inode = %lu", ino);
+
+ inode = iget_locked(sb, ino);
+ if (IS_ERR(inode))
+ return inode;
+ if (!(inode->i_state & I_NEW))
+ return inode;
befs_ino = BEFS_I(inode);
@@ -402,15 +407,16 @@ befs_read_inode(struct inode *inode)
brelse(bh);
befs_debug(sb, "<--- befs_read_inode()");
- return;
+ unlock_new_inode(inode);
+ return inode;
unacquire_bh:
brelse(bh);
unacquire_none:
- make_bad_inode(inode);
+ iget_failed(inode);
befs_debug(sb, "<--- befs_read_inode() - Bad inode");
- return;
+ return ERR_PTR(ret);
}
/* Initialize the inode cache. Called at fs setup.
@@ -752,6 +758,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
befs_sb_info *befs_sb;
befs_super_block *disk_sb;
struct inode *root;
+ long ret = -EINVAL;
const unsigned long sb_block = 0;
const off_t x86_sb_off = 512;
@@ -833,7 +840,11 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
/* Set real blocksize of fs */
sb_set_blocksize(sb, (ulong) befs_sb->block_size);
sb->s_op = (struct super_operations *) &befs_sops;
- root = iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir)));
+ root = befs_iget(sb, iaddr2blockno(sb, &(befs_sb->root_dir)));
+ if (IS_ERR(root)) {
+ ret = PTR_ERR(root);
+ goto unacquire_priv_sbp;
+ }
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
iput(root);
@@ -868,7 +879,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
unacquire_none:
sb->s_fs_info = NULL;
- return -EINVAL;
+ return ret;
}
static int
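
Several of the lookup conversions in this series (befs above, bfs and efs
below) return ERR_CAST(inode). If the macro is unfamiliar: it only re-types an
error-valued pointer while preserving the encoded errno, roughly as in this
illustrative snippet:

    #include <linux/err.h>

    /* For an error-valued inode pointer, ERR_CAST(inode) is equivalent to
     * ERR_PTR(PTR_ERR(inode)), without spelling the conversion out at every
     * call site.
     */
    static struct dentry *propagate_lookup_error(struct inode *inode)
    {
            if (IS_ERR(inode))
                    return ERR_CAST(inode);
            return NULL;    /* real lookup code would d_add() here as before */
    }
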
diff --git a/fs/bfs/bfs.h b/fs/bfs/bfs.h
index ac7a8b1d6c3a..71faf4d23908 100644
--- a/fs/bfs/bfs.h
+++ b/fs/bfs/bfs.h
@@ -44,6 +44,8 @@ static inline struct bfs_inode_info *BFS_I(struct inode *inode)
#define printf(format, args...) \
printk(KERN_ERR "BFS-fs: %s(): " format, __FUNCTION__, ## args)
+/* inode.c */
+extern struct inode *bfs_iget(struct super_block *sb, unsigned long ino);
/* file.c */
extern const struct inode_operations bfs_file_inops;
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 1fd056d0fc3d..034950cb3cbe 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -148,10 +148,10 @@ static struct dentry *bfs_lookup(struct inode *dir, struct dentry *dentry,
if (bh) {
unsigned long ino = (unsigned long)le16_to_cpu(de->ino);
brelse(bh);
- inode = iget(dir->i_sb, ino);
- if (!inode) {
+ inode = bfs_iget(dir->i_sb, ino);
+ if (IS_ERR(inode)) {
unlock_kernel();
- return ERR_PTR(-EACCES);
+ return ERR_CAST(inode);
}
}
unlock_kernel();
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index a64a71d444f5..8db623838b50 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -32,17 +32,22 @@ MODULE_LICENSE("GPL");
void dump_imap(const char *prefix, struct super_block *s);
-static void bfs_read_inode(struct inode *inode)
+struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
{
- unsigned long ino = inode->i_ino;
struct bfs_inode *di;
+ struct inode *inode;
struct buffer_head *bh;
int block, off;
+ inode = iget_locked(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
if ((ino < BFS_ROOT_INO) || (ino > BFS_SB(inode->i_sb)->si_lasti)) {
printf("Bad inode number %s:%08lx\n", inode->i_sb->s_id, ino);
- make_bad_inode(inode);
- return;
+ goto error;
}
block = (ino - BFS_ROOT_INO) / BFS_INODES_PER_BLOCK + 1;
@@ -50,8 +55,7 @@ static void bfs_read_inode(struct inode *inode)
if (!bh) {
printf("Unable to read inode %s:%08lx\n", inode->i_sb->s_id,
ino);
- make_bad_inode(inode);
- return;
+ goto error;
}
off = (ino - BFS_ROOT_INO) % BFS_INODES_PER_BLOCK;
@@ -85,6 +89,12 @@ static void bfs_read_inode(struct inode *inode)
inode->i_ctime.tv_nsec = 0;
brelse(bh);
+ unlock_new_inode(inode);
+ return inode;
+
+error:
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
static int bfs_write_inode(struct inode *inode, int unused)
@@ -276,7 +286,6 @@ static void destroy_inodecache(void)
static const struct super_operations bfs_sops = {
.alloc_inode = bfs_alloc_inode,
.destroy_inode = bfs_destroy_inode,
- .read_inode = bfs_read_inode,
.write_inode = bfs_write_inode,
.delete_inode = bfs_delete_inode,
.put_super = bfs_put_super,
@@ -312,6 +321,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
struct inode *inode;
unsigned i, imap_len;
struct bfs_sb_info *info;
+ long ret = -EINVAL;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
@@ -346,14 +356,16 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
set_bit(i, info->si_imap);
s->s_op = &bfs_sops;
- inode = iget(s, BFS_ROOT_INO);
- if (!inode) {
+ inode = bfs_iget(s, BFS_ROOT_INO);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
kfree(info->si_imap);
goto out;
}
s->s_root = d_alloc_root(inode);
if (!s->s_root) {
iput(inode);
+ ret = -ENOMEM;
kfree(info->si_imap);
goto out;
}
@@ -404,7 +416,7 @@ out:
brelse(bh);
kfree(info);
s->s_fs_info = NULL;
- return -EINVAL;
+ return ret;
}
static int bfs_get_sb(struct file_system_type *fs_type,
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 7596e1e94cde..7f65e71bf859 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -115,7 +115,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
current->flags |= PF_DUMPCORE;
strncpy(dump.u_comm, current->comm, sizeof(dump.u_comm));
#ifndef __sparc__
- dump.u_ar0 = (void *)(((unsigned long)(&dump.regs)) - ((unsigned long)(&dump)));
+ dump.u_ar0 = offsetof(struct user, regs);
#endif
dump.signal = signr;
dump_thread(regs, &dump);
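
The a.out change above swaps an open-coded address subtraction for offsetof();
both compute the same byte offset. A small userspace illustration (struct
user_example is a made-up stand-in for struct user):

    #include <stddef.h>
    #include <stdio.h>

    struct user_example {                   /* stand-in for struct user */
            long pad[8];
            long regs[16];
    };

    int main(void)
    {
            struct user_example dump;
            size_t open_coded =
                    (size_t)((unsigned long)&dump.regs - (unsigned long)&dump);

            /* offsetof() yields the same offset without needing an object */
            printf("%zu == %zu\n", open_coded,
                   offsetof(struct user_example, regs));
            return 0;
    }
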
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 4628c42ca892..111771d38e6e 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1077,7 +1077,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
current->mm->start_stack = bprm->p;
#ifdef arch_randomize_brk
- if (current->flags & PF_RANDOMIZE)
+ if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
current->mm->brk = current->mm->start_brk =
arch_randomize_brk(current->mm);
#endif
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e48a630ae266..e63067d25cdb 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -534,7 +534,6 @@ void __init bdev_cache_init(void)
if (err)
panic("Cannot register bdev pseudo-fs");
bd_mnt = kern_mount(&bd_type);
- err = PTR_ERR(bd_mnt);
if (IS_ERR(bd_mnt))
panic("Cannot create bdev pseudo-fs");
blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */
diff --git a/fs/buffer.c b/fs/buffer.c
index 456c9ab7705b..826baf4f04bc 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1798,7 +1798,7 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
start = max(from, block_start);
size = min(to, block_end) - start;
- zero_user_page(page, start, size, KM_USER0);
+ zero_user(page, start, size);
set_buffer_uptodate(bh);
}
@@ -1861,19 +1861,10 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
mark_buffer_dirty(bh);
continue;
}
- if (block_end > to || block_start < from) {
- void *kaddr;
-
- kaddr = kmap_atomic(page, KM_USER0);
- if (block_end > to)
- memset(kaddr+to, 0,
- block_end-to);
- if (block_start < from)
- memset(kaddr+block_start,
- 0, from-block_start);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- }
+ if (block_end > to || block_start < from)
+ zero_user_segments(page,
+ to, block_end,
+ block_start, from);
continue;
}
}
@@ -2104,8 +2095,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
SetPageError(page);
}
if (!buffer_mapped(bh)) {
- zero_user_page(page, i * blocksize, blocksize,
- KM_USER0);
+ zero_user(page, i * blocksize, blocksize);
if (!err)
set_buffer_uptodate(bh);
continue;
@@ -2218,7 +2208,7 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
&page, &fsdata);
if (err)
goto out;
- zero_user_page(page, zerofrom, len, KM_USER0);
+ zero_user(page, zerofrom, len);
err = pagecache_write_end(file, mapping, curpos, len, len,
page, fsdata);
if (err < 0)
@@ -2245,7 +2235,7 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
&page, &fsdata);
if (err)
goto out;
- zero_user_page(page, zerofrom, len, KM_USER0);
+ zero_user(page, zerofrom, len);
err = pagecache_write_end(file, mapping, curpos, len, len,
page, fsdata);
if (err < 0)
@@ -2422,7 +2412,6 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
unsigned block_in_page;
unsigned block_start, block_end;
sector_t block_in_file;
- char *kaddr;
int nr_reads = 0;
int ret = 0;
int is_mapped_to_disk = 1;
@@ -2493,13 +2482,8 @@ int nobh_write_begin(struct file *file, struct address_space *mapping,
continue;
}
if (buffer_new(bh) || !buffer_mapped(bh)) {
- kaddr = kmap_atomic(page, KM_USER0);
- if (block_start < from)
- memset(kaddr+block_start, 0, from-block_start);
- if (block_end > to)
- memset(kaddr + to, 0, block_end - to);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user_segments(page, block_start, from,
+ to, block_end);
continue;
}
if (buffer_uptodate(bh))
@@ -2636,7 +2620,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+ zero_user_segment(page, offset, PAGE_CACHE_SIZE);
out:
ret = mpage_writepage(page, get_block, wbc);
if (ret == -EAGAIN)
@@ -2709,7 +2693,7 @@ has_buffers:
if (page_has_buffers(page))
goto has_buffers;
}
- zero_user_page(page, offset, length, KM_USER0);
+ zero_user(page, offset, length);
set_page_dirty(page);
err = 0;
@@ -2785,7 +2769,7 @@ int block_truncate_page(struct address_space *mapping,
goto unlock;
}
- zero_user_page(page, offset, length, KM_USER0);
+ zero_user(page, offset, length);
mark_buffer_dirty(bh);
err = 0;
@@ -2831,7 +2815,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
- zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+ zero_user_segment(page, offset, PAGE_CACHE_SIZE);
return __block_write_full_page(inode, page, get_block, wbc);
}
@@ -3169,7 +3153,7 @@ static void recalc_bh_state(void)
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
- struct buffer_head *ret = kmem_cache_zalloc(bh_cachep,
+ struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
@@ -3257,12 +3241,24 @@ int bh_submit_read(struct buffer_head *bh)
}
EXPORT_SYMBOL(bh_submit_read);
+static void
+init_buffer_head(struct kmem_cache *cachep, void *data)
+{
+ struct buffer_head *bh = data;
+
+ memset(bh, 0, sizeof(*bh));
+ INIT_LIST_HEAD(&bh->b_assoc_buffers);
+}
+
void __init buffer_init(void)
{
int nrpages;
- bh_cachep = KMEM_CACHE(buffer_head,
- SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
+ bh_cachep = kmem_cache_create("buffer_head",
+ sizeof(struct buffer_head), 0,
+ (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+ SLAB_MEM_SPREAD),
+ init_buffer_head);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL
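
The zero_user*() conversions above, and the matching ones later in this diff
(direct-io, cifs, ecryptfs), all use the same small family of helpers from
<linux/highmem.h>. A rough guide to how the call sites map onto them, with all
offsets inside a single page:

    #include <linux/highmem.h>
    #include <linux/pagemap.h>

    static void zeroing_examples(struct page *page, unsigned from, unsigned to)
    {
            /* zero one range given start and length */
            zero_user(page, to, PAGE_CACHE_SIZE - to);

            /* the same range given start and end offsets */
            zero_user_segment(page, to, PAGE_CACHE_SIZE);

            /* zero two ranges, [0, from) and [to, PAGE_CACHE_SIZE), at once */
            zero_user_segments(page, 0, from, to, PAGE_CACHE_SIZE);
    }

Unlike the removed zero_user_page(), none of these take a KM_* slot; the
kmap_atomic() and flush_dcache_page() details are handled inside the helpers.
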
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index e9f4ec701092..fcc434227691 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -147,10 +147,11 @@ cifs_read_super(struct super_block *sb, void *data,
#endif
sb->s_blocksize = CIFS_MAX_MSGSIZE;
sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
- inode = iget(sb, ROOT_I);
+ inode = cifs_iget(sb, ROOT_I);
- if (!inode) {
- rc = -ENOMEM;
+ if (IS_ERR(inode)) {
+ rc = PTR_ERR(inode);
+ inode = NULL;
goto out_no_root;
}
@@ -520,7 +521,6 @@ static int cifs_remount(struct super_block *sb, int *flags, char *data)
}
static const struct super_operations cifs_super_ops = {
- .read_inode = cifs_read_inode,
.put_super = cifs_put_super,
.statfs = cifs_statfs,
.alloc_inode = cifs_alloc_inode,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 195b14de5567..68978306c3ca 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -44,6 +44,7 @@ extern void cifs_read_inode(struct inode *);
/* Functions related to inodes */
extern const struct inode_operations cifs_dir_inode_ops;
+extern struct inode *cifs_iget(struct super_block *, unsigned long);
extern int cifs_create(struct inode *, struct dentry *, int,
struct nameidata *);
extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index d9567ba2960b..b1a4a65eaa08 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -586,10 +586,18 @@ static const struct inode_operations cifs_ipc_inode_ops = {
};
/* gets root inode */
-void cifs_read_inode(struct inode *inode)
+struct inode *cifs_iget(struct super_block *sb, unsigned long ino)
{
- int xid, rc;
+ int xid;
struct cifs_sb_info *cifs_sb;
+ struct inode *inode;
+ long rc;
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
cifs_sb = CIFS_SB(inode->i_sb);
xid = GetXid();
@@ -606,10 +614,18 @@ void cifs_read_inode(struct inode *inode)
inode->i_fop = &simple_dir_operations;
inode->i_uid = cifs_sb->mnt_uid;
inode->i_gid = cifs_sb->mnt_gid;
+ _FreeXid(xid);
+ iget_failed(inode);
+ return ERR_PTR(rc);
}
- /* can not call macro FreeXid here since in a void func */
+ unlock_new_inode(inode);
+
+ /* can not call macro FreeXid here since in a void func
+ * TODO: This is no longer true
+ */
_FreeXid(xid);
+ return inode;
}
int cifs_unlink(struct inode *inode, struct dentry *direntry)
@@ -1386,7 +1402,7 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from)
if (!page)
return -ENOMEM;
- zero_user_page(page, offset, PAGE_CACHE_SIZE - offset, KM_USER0);
+ zero_user_segment(page, offset, PAGE_CACHE_SIZE);
unlock_page(page);
page_cache_release(page);
return rc;
diff --git a/fs/compat.c b/fs/compat.c
index 5216c3fd7517..ee80ff341d37 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -2083,51 +2083,6 @@ long asmlinkage compat_sys_nfsservctl(int cmd, void *notused, void *notused2)
#ifdef CONFIG_EPOLL
-#ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT
-asmlinkage long compat_sys_epoll_ctl(int epfd, int op, int fd,
- struct compat_epoll_event __user *event)
-{
- long err = 0;
- struct compat_epoll_event user;
- struct epoll_event __user *kernel = NULL;
-
- if (event) {
- if (copy_from_user(&user, event, sizeof(user)))
- return -EFAULT;
- kernel = compat_alloc_user_space(sizeof(struct epoll_event));
- err |= __put_user(user.events, &kernel->events);
- err |= __put_user(user.data, &kernel->data);
- }
-
- return err ? err : sys_epoll_ctl(epfd, op, fd, kernel);
-}
-
-
-asmlinkage long compat_sys_epoll_wait(int epfd,
- struct compat_epoll_event __user *events,
- int maxevents, int timeout)
-{
- long i, ret, err = 0;
- struct epoll_event __user *kbuf;
- struct epoll_event ev;
-
- if ((maxevents <= 0) ||
- (maxevents > (INT_MAX / sizeof(struct epoll_event))))
- return -EINVAL;
- kbuf = compat_alloc_user_space(sizeof(struct epoll_event) * maxevents);
- ret = sys_epoll_wait(epfd, kbuf, maxevents, timeout);
- for (i = 0; i < ret; i++) {
- err |= __get_user(ev.events, &kbuf[i].events);
- err |= __get_user(ev.data, &kbuf[i].data);
- err |= __put_user(ev.events, &events->events);
- err |= __put_user_unaligned(ev.data, &events->data);
- events++;
- }
-
- return err ? -EFAULT: ret;
-}
-#endif /* CONFIG_HAS_COMPAT_EPOLL_EVENT */
-
#ifdef TIF_RESTORE_SIGMASK
asmlinkage long compat_sys_epoll_pwait(int epfd,
struct compat_epoll_event __user *events,
@@ -2153,11 +2108,7 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
}
-#ifdef CONFIG_HAS_COMPAT_EPOLL_EVENT
- err = compat_sys_epoll_wait(epfd, events, maxevents, timeout);
-#else
err = sys_epoll_wait(epfd, events, maxevents, timeout);
-#endif
/*
* If we changed the signal mask, we need to restore the original one.
@@ -2206,19 +2157,41 @@ asmlinkage long compat_sys_signalfd(int ufd,
#ifdef CONFIG_TIMERFD
-asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags,
- const struct compat_itimerspec __user *utmr)
+asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
+ const struct compat_itimerspec __user *utmr,
+ struct compat_itimerspec __user *otmr)
{
+ int error;
struct itimerspec t;
struct itimerspec __user *ut;
if (get_compat_itimerspec(&t, utmr))
return -EFAULT;
- ut = compat_alloc_user_space(sizeof(*ut));
- if (copy_to_user(ut, &t, sizeof(t)))
+ ut = compat_alloc_user_space(2 * sizeof(struct itimerspec));
+ if (copy_to_user(&ut[0], &t, sizeof(t)))
return -EFAULT;
+ error = sys_timerfd_settime(ufd, flags, &ut[0], &ut[1]);
+ if (!error && otmr)
+ error = (copy_from_user(&t, &ut[1], sizeof(struct itimerspec)) ||
+ put_compat_itimerspec(otmr, &t)) ? -EFAULT: 0;
+
+ return error;
+}
+
+asmlinkage long compat_sys_timerfd_gettime(int ufd,
+ struct compat_itimerspec __user *otmr)
+{
+ int error;
+ struct itimerspec t;
+ struct itimerspec __user *ut;
- return sys_timerfd(ufd, clockid, flags, ut);
+ ut = compat_alloc_user_space(sizeof(struct itimerspec));
+ error = sys_timerfd_gettime(ufd, ut);
+ if (!error)
+ error = (copy_from_user(&t, ut, sizeof(struct itimerspec)) ||
+ put_compat_itimerspec(otmr, &t)) ? -EFAULT: 0;
+
+ return error;
}
#endif /* CONFIG_TIMERFD */
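
For context on what the reworked compat wrappers forward to: the old
sys_timerfd() call is split into timerfd_settime() and timerfd_gettime(). A
userspace sketch of the resulting API (illustrative only, not kernel code):

    #include <sys/timerfd.h>
    #include <time.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct itimerspec new_value = {
                    .it_value    = { .tv_sec = 5 },  /* first expiry in 5s */
                    .it_interval = { .tv_sec = 1 },  /* then every second */
            };
            struct itimerspec old_value, cur;
            uint64_t expirations;
            int fd = timerfd_create(CLOCK_MONOTONIC, 0);

            if (fd < 0)
                    return 1;
            timerfd_settime(fd, 0, &new_value, &old_value); /* arm, save old */
            timerfd_gettime(fd, &cur);              /* time until next expiry */
            printf("next expiry in %lds\n", (long)cur.it_value.tv_sec);
            read(fd, &expirations, sizeof(expirations));    /* blocks until it fires */
            close(fd);
            return 0;
    }
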
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index ffdc022cae64..614bd75b5a4a 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -2986,7 +2986,7 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
}
do_ioctl:
- error = vfs_ioctl(filp, fd, cmd, arg);
+ error = do_vfs_ioctl(filp, fd, cmd, arg);
out_fput:
fput_light(filp, fput_needed);
out:
diff --git a/fs/dcache.c b/fs/dcache.c
index d9ca1e5ceb92..44f6cf23b70e 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -89,7 +89,7 @@ static void d_free(struct dentry *dentry)
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
/* if dentry was never inserted into hash, immediate free is OK */
- if (dentry->d_hash.pprev == NULL)
+ if (hlist_unhashed(&dentry->d_hash))
__d_free(dentry);
else
call_rcu(&dentry->d_u.d_rcu, d_callback);
@@ -1408,9 +1408,6 @@ void d_delete(struct dentry * dentry)
if (atomic_read(&dentry->d_count) == 1) {
dentry_iput(dentry);
fsnotify_nameremove(dentry, isdir);
-
- /* remove this and other inotify debug checks after 2.6.18 */
- dentry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
return;
}
diff --git a/fs/direct-io.c b/fs/direct-io.c
index acf0da1bd257..9e81addbd6ea 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -878,8 +878,8 @@ do_holes:
page_cache_release(page);
goto out;
}
- zero_user_page(page, block_in_page << blkbits,
- 1 << blkbits, KM_USER0);
+ zero_user(page, block_in_page << blkbits,
+ 1 << blkbits);
dio->block_in_file++;
block_in_page++;
goto next_block;
diff --git a/fs/dquot.c b/fs/dquot.c
index cee7c6f428f0..def4e969df77 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -696,9 +696,8 @@ static int dqinit_needed(struct inode *inode, int type)
/* This routine is guarded by dqonoff_mutex mutex */
static void add_dquot_ref(struct super_block *sb, int type)
{
- struct inode *inode;
+ struct inode *inode, *old_inode = NULL;
-restart:
spin_lock(&inode_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
if (!atomic_read(&inode->i_writecount))
@@ -711,12 +710,18 @@ restart:
__iget(inode);
spin_unlock(&inode_lock);
+ iput(old_inode);
sb->dq_op->initialize(inode, type);
- iput(inode);
- /* As we may have blocked we had better restart... */
- goto restart;
+ /* We hold a reference to 'inode' so it couldn't have been
+ * removed from s_inodes list while we dropped the inode_lock.
+ * We cannot iput the inode now as we can be holding the last
+ * reference and we cannot iput it under inode_lock. So we
+ * keep the reference and iput it later. */
+ old_inode = inode;
+ spin_lock(&inode_lock);
}
spin_unlock(&inode_lock);
+ iput(old_inode);
}
/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
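
The reworked add_dquot_ref() above avoids the old restart-from-scratch loop by
holding a reference to the previous inode and releasing it only after
inode_lock has been dropped. The same walk pattern, condensed
(do_blocking_work() stands in for sb->dq_op->initialize()):

    #include <linux/fs.h>
    #include <linux/writeback.h>    /* inode_lock */

    static void do_blocking_work(struct inode *inode);  /* stand-in, may sleep */

    static void walk_sb_inodes(struct super_block *sb)
    {
            struct inode *inode, *old_inode = NULL;

            spin_lock(&inode_lock);
            list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                    __iget(inode);                  /* pin it across the unlock */
                    spin_unlock(&inode_lock);

                    iput(old_inode);                /* safe: inode_lock not held */
                    do_blocking_work(inode);

                    /* keep this reference; it may be the last one and iput()
                     * must not run under inode_lock */
                    old_inode = inode;
                    spin_lock(&inode_lock);
            }
            spin_unlock(&inode_lock);
            iput(old_inode);                        /* drop the final reference */
    }
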
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index f8ef0af919e7..a066e109ad9c 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -355,8 +355,11 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
}
/* Consider doing this once, when the file is opened */
mutex_lock(&crypt_stat->cs_tfm_mutex);
- rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
- crypt_stat->key_size);
+ if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
+ rc = crypto_blkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
+ crypt_stat->key_size);
+ crypt_stat->flags |= ECRYPTFS_KEY_SET;
+ }
if (rc) {
ecryptfs_printk(KERN_ERR, "Error setting key; rc = [%d]\n",
rc);
@@ -376,11 +379,10 @@ out:
*
* Convert an eCryptfs page index into a lower byte offset
*/
-void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num,
- struct ecryptfs_crypt_stat *crypt_stat)
+static void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num,
+ struct ecryptfs_crypt_stat *crypt_stat)
{
- (*offset) = ((crypt_stat->extent_size
- * crypt_stat->num_header_extents_at_front)
+ (*offset) = (crypt_stat->num_header_bytes_at_front
+ (crypt_stat->extent_size * extent_num));
}
@@ -842,15 +844,13 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat)
set_extent_mask_and_shift(crypt_stat);
crypt_stat->iv_bytes = ECRYPTFS_DEFAULT_IV_BYTES;
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
- crypt_stat->num_header_extents_at_front = 0;
+ crypt_stat->num_header_bytes_at_front = 0;
else {
if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)
- crypt_stat->num_header_extents_at_front =
- (ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE
- / crypt_stat->extent_size);
+ crypt_stat->num_header_bytes_at_front =
+ ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
else
- crypt_stat->num_header_extents_at_front =
- (PAGE_CACHE_SIZE / crypt_stat->extent_size);
+ crypt_stat->num_header_bytes_at_front = PAGE_CACHE_SIZE;
}
}
@@ -1128,7 +1128,7 @@ write_ecryptfs_flags(char *page_virt, struct ecryptfs_crypt_stat *crypt_stat,
struct ecryptfs_cipher_code_str_map_elem {
char cipher_str[16];
- u16 cipher_code;
+ u8 cipher_code;
};
/* Add support for additional ciphers by adding elements here. The
@@ -1152,10 +1152,10 @@ ecryptfs_cipher_code_str_map[] = {
*
* Returns zero on no match, or the cipher code on match
*/
-u16 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat)
+u8 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat)
{
int i;
- u16 code = 0;
+ u8 code = 0;
struct ecryptfs_cipher_code_str_map_elem *map =
ecryptfs_cipher_code_str_map;
@@ -1187,7 +1187,7 @@ u16 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat)
*
* Returns zero on success
*/
-int ecryptfs_cipher_code_to_string(char *str, u16 cipher_code)
+int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code)
{
int rc = 0;
int i;
@@ -1236,7 +1236,8 @@ ecryptfs_write_header_metadata(char *virt,
header_extent_size = (u32)crypt_stat->extent_size;
num_header_extents_at_front =
- (u16)crypt_stat->num_header_extents_at_front;
+ (u16)(crypt_stat->num_header_bytes_at_front
+ / crypt_stat->extent_size);
header_extent_size = cpu_to_be32(header_extent_size);
memcpy(virt, &header_extent_size, 4);
virt += 4;
@@ -1311,40 +1312,16 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t *size,
static int
ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat,
struct dentry *ecryptfs_dentry,
- char *page_virt)
+ char *virt)
{
- int current_header_page;
- int header_pages;
int rc;
- rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, page_virt,
- 0, PAGE_CACHE_SIZE);
- if (rc) {
+ rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt,
+ 0, crypt_stat->num_header_bytes_at_front);
+ if (rc)
printk(KERN_ERR "%s: Error attempting to write header "
"information to lower file; rc = [%d]\n", __FUNCTION__,
rc);
- goto out;
- }
- header_pages = ((crypt_stat->extent_size
- * crypt_stat->num_header_extents_at_front)
- / PAGE_CACHE_SIZE);
- memset(page_virt, 0, PAGE_CACHE_SIZE);
- current_header_page = 1;
- while (current_header_page < header_pages) {
- loff_t offset;
-
- offset = (((loff_t)current_header_page) << PAGE_CACHE_SHIFT);
- if ((rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode,
- page_virt, offset,
- PAGE_CACHE_SIZE))) {
- printk(KERN_ERR "%s: Error attempting to write header "
- "information to lower file; rc = [%d]\n",
- __FUNCTION__, rc);
- goto out;
- }
- current_header_page++;
- }
-out:
return rc;
}
@@ -1370,15 +1347,13 @@ ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry,
* retrieved via a prompt. Exactly what happens at this point should
* be policy-dependent.
*
- * TODO: Support header information spanning multiple pages
- *
* Returns zero on success; non-zero on error
*/
int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
{
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
- char *page_virt;
+ char *virt;
size_t size = 0;
int rc = 0;
@@ -1389,40 +1364,39 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
goto out;
}
} else {
+ printk(KERN_WARNING "%s: Encrypted flag not set\n",
+ __FUNCTION__);
rc = -EINVAL;
- ecryptfs_printk(KERN_WARNING,
- "Called with crypt_stat->encrypted == 0\n");
goto out;
}
/* Released in this function */
- page_virt = kmem_cache_zalloc(ecryptfs_header_cache_0, GFP_USER);
- if (!page_virt) {
- ecryptfs_printk(KERN_ERR, "Out of memory\n");
+ virt = kzalloc(crypt_stat->num_header_bytes_at_front, GFP_KERNEL);
+ if (!virt) {
+ printk(KERN_ERR "%s: Out of memory\n", __FUNCTION__);
rc = -ENOMEM;
goto out;
}
- rc = ecryptfs_write_headers_virt(page_virt, &size, crypt_stat,
- ecryptfs_dentry);
+ rc = ecryptfs_write_headers_virt(virt, &size, crypt_stat,
+ ecryptfs_dentry);
if (unlikely(rc)) {
- ecryptfs_printk(KERN_ERR, "Error whilst writing headers\n");
- memset(page_virt, 0, PAGE_CACHE_SIZE);
+ printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n",
+ __FUNCTION__, rc);
goto out_free;
}
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry,
- crypt_stat, page_virt,
- size);
+ crypt_stat, virt, size);
else
rc = ecryptfs_write_metadata_to_contents(crypt_stat,
- ecryptfs_dentry,
- page_virt);
+ ecryptfs_dentry, virt);
if (rc) {
- printk(KERN_ERR "Error writing metadata out to lower file; "
- "rc = [%d]\n", rc);
+ printk(KERN_ERR "%s: Error writing metadata out to lower file; "
+ "rc = [%d]\n", __FUNCTION__, rc);
goto out_free;
}
out_free:
- kmem_cache_free(ecryptfs_header_cache_0, page_virt);
+ memset(virt, 0, crypt_stat->num_header_bytes_at_front);
+ kfree(virt);
out:
return rc;
}
@@ -1442,16 +1416,16 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
virt += sizeof(u32);
memcpy(&num_header_extents_at_front, virt, sizeof(u16));
num_header_extents_at_front = be16_to_cpu(num_header_extents_at_front);
- crypt_stat->num_header_extents_at_front =
- (int)num_header_extents_at_front;
+ crypt_stat->num_header_bytes_at_front =
+ (((size_t)num_header_extents_at_front
+ * (size_t)header_extent_size));
(*bytes_read) = (sizeof(u32) + sizeof(u16));
if ((validate_header_size == ECRYPTFS_VALIDATE_HEADER_SIZE)
- && ((crypt_stat->extent_size
- * crypt_stat->num_header_extents_at_front)
+ && (crypt_stat->num_header_bytes_at_front
< ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE)) {
rc = -EINVAL;
- printk(KERN_WARNING "Invalid number of header extents: [%zd]\n",
- crypt_stat->num_header_extents_at_front);
+ printk(KERN_WARNING "Invalid header size: [%zd]\n",
+ crypt_stat->num_header_bytes_at_front);
}
return rc;
}
@@ -1466,7 +1440,8 @@ static int parse_header_metadata(struct ecryptfs_crypt_stat *crypt_stat,
*/
static void set_default_header_data(struct ecryptfs_crypt_stat *crypt_stat)
{
- crypt_stat->num_header_extents_at_front = 2;
+ crypt_stat->num_header_bytes_at_front =
+ ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
}
/**
@@ -1552,9 +1527,10 @@ int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode)
size = ecryptfs_getxattr_lower(lower_dentry, ECRYPTFS_XATTR_NAME,
page_virt, ECRYPTFS_DEFAULT_EXTENT_SIZE);
if (size < 0) {
- printk(KERN_ERR "Error attempting to read the [%s] "
- "xattr from the lower file; return value = [%zd]\n",
- ECRYPTFS_XATTR_NAME, size);
+ if (unlikely(ecryptfs_verbosity > 0))
+ printk(KERN_INFO "Error attempting to read the [%s] "
+ "xattr from the lower file; return value = "
+ "[%zd]\n", ECRYPTFS_XATTR_NAME, size);
rc = -EINVAL;
goto out;
}
@@ -1802,7 +1778,7 @@ out:
}
struct kmem_cache *ecryptfs_key_tfm_cache;
-struct list_head key_tfm_list;
+static struct list_head key_tfm_list;
struct mutex key_tfm_list_mutex;
int ecryptfs_init_crypto(void)
@@ -1812,6 +1788,11 @@ int ecryptfs_init_crypto(void)
return 0;
}
+/**
+ * ecryptfs_destroy_crypto - free all cached key_tfms on key_tfm_list
+ *
+ * Called only at module unload time
+ */
int ecryptfs_destroy_crypto(void)
{
struct ecryptfs_key_tfm *key_tfm, *key_tfm_tmp;
@@ -1835,6 +1816,8 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
struct ecryptfs_key_tfm *tmp_tfm;
int rc = 0;
+ BUG_ON(!mutex_is_locked(&key_tfm_list_mutex));
+
tmp_tfm = kmem_cache_alloc(ecryptfs_key_tfm_cache, GFP_KERNEL);
if (key_tfm != NULL)
(*key_tfm) = tmp_tfm;
@@ -1861,13 +1844,50 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
(*key_tfm) = NULL;
goto out;
}
- mutex_lock(&key_tfm_list_mutex);
list_add(&tmp_tfm->key_tfm_list, &key_tfm_list);
- mutex_unlock(&key_tfm_list_mutex);
out:
return rc;
}
+/**
+ * ecryptfs_tfm_exists - Search for existing tfm for cipher_name.
+ * @cipher_name: the name of the cipher to search for
+ * @key_tfm: set to corresponding tfm if found
+ *
+ * Searches for cached key_tfm matching @cipher_name
+ * Must be called with &key_tfm_list_mutex held
+ * Returns 1 if found, with @key_tfm set
+ * Returns 0 if not found, with @key_tfm set to NULL
+ */
+int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm)
+{
+ struct ecryptfs_key_tfm *tmp_key_tfm;
+
+ BUG_ON(!mutex_is_locked(&key_tfm_list_mutex));
+
+ list_for_each_entry(tmp_key_tfm, &key_tfm_list, key_tfm_list) {
+ if (strcmp(tmp_key_tfm->cipher_name, cipher_name) == 0) {
+ if (key_tfm)
+ (*key_tfm) = tmp_key_tfm;
+ return 1;
+ }
+ }
+ if (key_tfm)
+ (*key_tfm) = NULL;
+ return 0;
+}
+
+/**
+ * ecryptfs_get_tfm_and_mutex_for_cipher_name
+ *
+ * @tfm: set to cached tfm found, or new tfm created
+ * @tfm_mutex: set to mutex for cached tfm found, or new tfm created
+ * @cipher_name: the name of the cipher to search for and/or add
+ *
+ * Sets pointers to @tfm & @tfm_mutex matching @cipher_name.
+ * Searches for cached item first, and creates new if not found.
+ * Returns 0 on success, non-zero if adding new cipher failed
+ */
int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
struct mutex **tfm_mutex,
char *cipher_name)
@@ -1877,22 +1897,17 @@ int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
(*tfm) = NULL;
(*tfm_mutex) = NULL;
+
mutex_lock(&key_tfm_list_mutex);
- list_for_each_entry(key_tfm, &key_tfm_list, key_tfm_list) {
- if (strcmp(key_tfm->cipher_name, cipher_name) == 0) {
- (*tfm) = key_tfm->key_tfm;
- (*tfm_mutex) = &key_tfm->key_tfm_mutex;
- mutex_unlock(&key_tfm_list_mutex);
+ if (!ecryptfs_tfm_exists(cipher_name, &key_tfm)) {
+ rc = ecryptfs_add_new_key_tfm(&key_tfm, cipher_name, 0);
+ if (rc) {
+ printk(KERN_ERR "Error adding new key_tfm to list; "
+ "rc = [%d]\n", rc);
goto out;
}
}
mutex_unlock(&key_tfm_list_mutex);
- rc = ecryptfs_add_new_key_tfm(&key_tfm, cipher_name, 0);
- if (rc) {
- printk(KERN_ERR "Error adding new key_tfm to list; rc = [%d]\n",
- rc);
- goto out;
- }
(*tfm) = key_tfm->key_tfm;
(*tfm_mutex) = &key_tfm->key_tfm_mutex;
out:
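
The two helpers documented above assume one locking protocol: the caller holds
key_tfm_list_mutex across the exists-check and any addition, which is what
ecryptfs_get_tfm_and_mutex_for_cipher_name() here and ecryptfs_parse_options()
later in this diff both do. Reduced to the bare idiom (not a new function in
this patch):

    #include <linux/mutex.h>
    #include "ecryptfs_kernel.h"

    static int get_or_add_key_tfm(char *cipher_name,
                                  struct ecryptfs_key_tfm **key_tfm)
    {
            int rc = 0;

            mutex_lock(&key_tfm_list_mutex);
            if (!ecryptfs_tfm_exists(cipher_name, key_tfm))
                    rc = ecryptfs_add_new_key_tfm(key_tfm, cipher_name, 0);
            mutex_unlock(&key_tfm_list_mutex);
            return rc;
    }

Both helpers BUG_ON() when the mutex is not held, so new callers have to
follow the same pattern.
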
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index ce7a5d4aec36..5007f788da01 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -234,10 +234,11 @@ struct ecryptfs_crypt_stat {
#define ECRYPTFS_KEY_VALID 0x00000080
#define ECRYPTFS_METADATA_IN_XATTR 0x00000100
#define ECRYPTFS_VIEW_AS_ENCRYPTED 0x00000200
+#define ECRYPTFS_KEY_SET 0x00000400
u32 flags;
unsigned int file_version;
size_t iv_bytes;
- size_t num_header_extents_at_front;
+ size_t num_header_bytes_at_front;
size_t extent_size; /* Data extent size; default is 4096 */
size_t key_size;
size_t extent_shift;
@@ -322,7 +323,6 @@ struct ecryptfs_key_tfm {
unsigned char cipher_name[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
};
-extern struct list_head key_tfm_list;
extern struct mutex key_tfm_list_mutex;
/**
@@ -521,11 +521,9 @@ extern struct kmem_cache *ecryptfs_file_info_cache;
extern struct kmem_cache *ecryptfs_dentry_info_cache;
extern struct kmem_cache *ecryptfs_inode_info_cache;
extern struct kmem_cache *ecryptfs_sb_info_cache;
-extern struct kmem_cache *ecryptfs_header_cache_0;
extern struct kmem_cache *ecryptfs_header_cache_1;
extern struct kmem_cache *ecryptfs_header_cache_2;
extern struct kmem_cache *ecryptfs_xattr_cache;
-extern struct kmem_cache *ecryptfs_lower_page_cache;
extern struct kmem_cache *ecryptfs_key_record_cache;
extern struct kmem_cache *ecryptfs_key_sig_cache;
extern struct kmem_cache *ecryptfs_global_auth_tok_cache;
@@ -562,8 +560,8 @@ int ecryptfs_read_and_validate_header_region(char *data,
struct inode *ecryptfs_inode);
int ecryptfs_read_and_validate_xattr_region(char *page_virt,
struct dentry *ecryptfs_dentry);
-u16 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat);
-int ecryptfs_cipher_code_to_string(char *str, u16 cipher_code);
+u8 ecryptfs_code_for_cipher_string(struct ecryptfs_crypt_stat *crypt_stat);
+int ecryptfs_cipher_code_to_string(char *str, u8 cipher_code);
void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat);
int ecryptfs_generate_key_packet_set(char *dest_base,
struct ecryptfs_crypt_stat *crypt_stat,
@@ -576,8 +574,6 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length);
int ecryptfs_inode_test(struct inode *inode, void *candidate_lower_inode);
int ecryptfs_inode_set(struct inode *inode, void *lower_inode);
void ecryptfs_init_inode(struct inode *inode, struct inode *lower_inode);
-ssize_t ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value,
- size_t size);
ssize_t
ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
void *value, size_t size);
@@ -623,6 +619,7 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
size_t key_size);
int ecryptfs_init_crypto(void);
int ecryptfs_destroy_crypto(void);
+int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm);
int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
struct mutex **tfm_mutex,
char *cipher_name);
@@ -631,8 +628,6 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
char *sig);
int ecryptfs_write_zeros(struct file *file, pgoff_t index, int start,
int num_zeros);
-void ecryptfs_lower_offset_for_extent(loff_t *offset, loff_t extent_num,
- struct ecryptfs_crypt_stat *crypt_stat);
int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
loff_t offset, size_t size);
int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
@@ -646,8 +641,6 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
pgoff_t page_index,
size_t offset_in_page, size_t size,
struct inode *ecryptfs_inode);
-int ecryptfs_read(char *data, loff_t offset, size_t size,
- struct file *ecryptfs_file);
struct page *ecryptfs_get_locked_page(struct file *file, loff_t index);
#endif /* #ifndef ECRYPTFS_KERNEL_H */
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index c98c4690a771..2b8f5ed4adea 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -209,9 +209,10 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
if (!(mount_crypt_stat->flags
& ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
rc = -EIO;
- printk(KERN_WARNING "Attempt to read file that "
+ printk(KERN_WARNING "Either the lower file "
"is not in a valid eCryptfs format, "
- "and plaintext passthrough mode is not "
+ "or the key could not be retrieved. "
+ "Plaintext passthrough mode is not "
"enabled; returning -EIO\n");
mutex_unlock(&crypt_stat->cs_mutex);
goto out_free;
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 5a719180983c..edd1e44e9d47 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -365,8 +365,7 @@ static struct dentry *ecryptfs_lookup(struct inode *dir, struct dentry *dentry,
dentry->d_sb)->mount_crypt_stat;
if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED) {
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
- file_size = ((crypt_stat->extent_size
- * crypt_stat->num_header_extents_at_front)
+ file_size = (crypt_stat->num_header_bytes_at_front
+ i_size_read(lower_dentry->d_inode));
else
file_size = i_size_read(lower_dentry->d_inode);
@@ -685,7 +684,7 @@ ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
* @crypt_stat: Crypt_stat associated with file
* @upper_size: Size of the upper file
*
- * Calculate the requried size of the lower file based on the
+ * Calculate the required size of the lower file based on the
* specified size of the upper file. This calculation is based on the
* number of headers in the underlying file and the extent size.
*
@@ -697,8 +696,7 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
{
loff_t lower_size;
- lower_size = (crypt_stat->extent_size
- * crypt_stat->num_header_extents_at_front);
+ lower_size = crypt_stat->num_header_bytes_at_front;
if (upper_size != 0) {
loff_t num_extents;
@@ -875,11 +873,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
if (!(mount_crypt_stat->flags
& ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
rc = -EIO;
- printk(KERN_WARNING "Attempt to read file that "
+ printk(KERN_WARNING "Either the lower file "
"is not in a valid eCryptfs format, "
- "and plaintext passthrough mode is not "
+ "or the key could not be retrieved. "
+ "Plaintext passthrough mode is not "
"enabled; returning -EIO\n");
-
mutex_unlock(&crypt_stat->cs_mutex);
goto out;
}
@@ -954,7 +952,7 @@ out:
return rc;
}
-ssize_t
+static ssize_t
ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value,
size_t size)
{
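
A worked example of the byte-based size arithmetic that
upper_size_to_lower_size() now uses, assuming 4096-byte extents and an
8192-byte header region (plausible defaults; the real values come from
crypt_stat):

    #include <linux/kernel.h>       /* DIV_ROUND_UP */

    static unsigned long example_lower_size(unsigned long upper_size)
    {
            unsigned long extent_size = 4096;
            unsigned long header_bytes = 8192;  /* num_header_bytes_at_front */
            unsigned long num_extents = DIV_ROUND_UP(upper_size, extent_size);

            /* e.g. upper_size = 10000 -> 3 extents -> 8192 + 12288 = 20480 */
            return header_bytes + num_extents * extent_size;
    }
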
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index f458c1f35565..682b1b2482c2 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -189,7 +189,7 @@ out:
}
static int
-parse_tag_65_packet(struct ecryptfs_session_key *session_key, u16 *cipher_code,
+parse_tag_65_packet(struct ecryptfs_session_key *session_key, u8 *cipher_code,
struct ecryptfs_message *msg)
{
size_t i = 0;
@@ -275,7 +275,7 @@ out:
static int
-write_tag_66_packet(char *signature, size_t cipher_code,
+write_tag_66_packet(char *signature, u8 cipher_code,
struct ecryptfs_crypt_stat *crypt_stat, char **packet,
size_t *packet_len)
{
@@ -428,7 +428,7 @@ static int
decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
struct ecryptfs_crypt_stat *crypt_stat)
{
- u16 cipher_code = 0;
+ u8 cipher_code = 0;
struct ecryptfs_msg_ctx *msg_ctx;
struct ecryptfs_message *msg = NULL;
char *auth_tok_sig;
@@ -1537,7 +1537,7 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
struct scatterlist dst_sg;
struct scatterlist src_sg;
struct mutex *tfm_mutex = NULL;
- size_t cipher_code;
+ u8 cipher_code;
size_t packet_size_length;
size_t max_packet_size;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 0249aa4ae181..778c420e4cac 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -117,7 +117,7 @@ void __ecryptfs_printk(const char *fmt, ...)
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
+static int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry)
{
struct ecryptfs_inode_info *inode_info =
ecryptfs_inode_to_private(ecryptfs_dentry->d_inode);
@@ -226,17 +226,15 @@ out:
return rc;
}
-enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, ecryptfs_opt_debug,
- ecryptfs_opt_ecryptfs_debug, ecryptfs_opt_cipher,
- ecryptfs_opt_ecryptfs_cipher, ecryptfs_opt_ecryptfs_key_bytes,
+enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig,
+ ecryptfs_opt_cipher, ecryptfs_opt_ecryptfs_cipher,
+ ecryptfs_opt_ecryptfs_key_bytes,
ecryptfs_opt_passthrough, ecryptfs_opt_xattr_metadata,
ecryptfs_opt_encrypted_view, ecryptfs_opt_err };
static match_table_t tokens = {
{ecryptfs_opt_sig, "sig=%s"},
{ecryptfs_opt_ecryptfs_sig, "ecryptfs_sig=%s"},
- {ecryptfs_opt_debug, "debug=%u"},
- {ecryptfs_opt_ecryptfs_debug, "ecryptfs_debug=%u"},
{ecryptfs_opt_cipher, "cipher=%s"},
{ecryptfs_opt_ecryptfs_cipher, "ecryptfs_cipher=%s"},
{ecryptfs_opt_ecryptfs_key_bytes, "ecryptfs_key_bytes=%u"},
@@ -313,7 +311,6 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options)
substring_t args[MAX_OPT_ARGS];
int token;
char *sig_src;
- char *debug_src;
char *cipher_name_dst;
char *cipher_name_src;
char *cipher_key_bytes_src;
@@ -341,16 +338,6 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options)
}
sig_set = 1;
break;
- case ecryptfs_opt_debug:
- case ecryptfs_opt_ecryptfs_debug:
- debug_src = args[0].from;
- ecryptfs_verbosity =
- (int)simple_strtol(debug_src, &debug_src,
- 0);
- ecryptfs_printk(KERN_DEBUG,
- "Verbosity set to [%d]" "\n",
- ecryptfs_verbosity);
- break;
case ecryptfs_opt_cipher:
case ecryptfs_opt_ecryptfs_cipher:
cipher_name_src = args[0].from;
@@ -423,9 +410,13 @@ static int ecryptfs_parse_options(struct super_block *sb, char *options)
if (!cipher_key_bytes_set) {
mount_crypt_stat->global_default_cipher_key_size = 0;
}
- rc = ecryptfs_add_new_key_tfm(
- NULL, mount_crypt_stat->global_default_cipher_name,
- mount_crypt_stat->global_default_cipher_key_size);
+ mutex_lock(&key_tfm_list_mutex);
+ if (!ecryptfs_tfm_exists(mount_crypt_stat->global_default_cipher_name,
+ NULL))
+ rc = ecryptfs_add_new_key_tfm(
+ NULL, mount_crypt_stat->global_default_cipher_name,
+ mount_crypt_stat->global_default_cipher_key_size);
+ mutex_unlock(&key_tfm_list_mutex);
if (rc) {
printk(KERN_ERR "Error attempting to initialize cipher with "
"name = [%s] and key size = [%td]; rc = [%d]\n",
@@ -654,11 +645,6 @@ static struct ecryptfs_cache_info {
.size = sizeof(struct ecryptfs_sb_info),
},
{
- .cache = &ecryptfs_header_cache_0,
- .name = "ecryptfs_headers_0",
- .size = PAGE_CACHE_SIZE,
- },
- {
.cache = &ecryptfs_header_cache_1,
.name = "ecryptfs_headers_1",
.size = PAGE_CACHE_SIZE,
@@ -821,6 +807,10 @@ static int __init ecryptfs_init(void)
"rc = [%d]\n", rc);
goto out_release_messaging;
}
+ if (ecryptfs_verbosity > 0)
+ printk(KERN_CRIT "eCryptfs verbosity set to %d. Secret values "
+ "will be written to the syslog!\n", ecryptfs_verbosity);
+
goto out;
out_release_messaging:
ecryptfs_release_messaging(ecryptfs_transport);
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 32c5711d79a3..dc74b186145d 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -34,8 +34,6 @@
#include <linux/scatterlist.h>
#include "ecryptfs_kernel.h"
-struct kmem_cache *ecryptfs_lower_page_cache;
-
/**
* ecryptfs_get_locked_page
*
@@ -102,13 +100,14 @@ static void set_header_info(char *page_virt,
struct ecryptfs_crypt_stat *crypt_stat)
{
size_t written;
- int save_num_header_extents_at_front =
- crypt_stat->num_header_extents_at_front;
+ size_t save_num_header_bytes_at_front =
+ crypt_stat->num_header_bytes_at_front;
- crypt_stat->num_header_extents_at_front = 1;
+ crypt_stat->num_header_bytes_at_front =
+ ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE;
ecryptfs_write_header_metadata(page_virt + 20, crypt_stat, &written);
- crypt_stat->num_header_extents_at_front =
- save_num_header_extents_at_front;
+ crypt_stat->num_header_bytes_at_front =
+ save_num_header_bytes_at_front;
}
/**
@@ -134,8 +133,11 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
loff_t view_extent_num = ((((loff_t)page->index)
* num_extents_per_page)
+ extent_num_in_page);
+ size_t num_header_extents_at_front =
+ (crypt_stat->num_header_bytes_at_front
+ / crypt_stat->extent_size);
- if (view_extent_num < crypt_stat->num_header_extents_at_front) {
+ if (view_extent_num < num_header_extents_at_front) {
/* This is a header extent */
char *page_virt;
@@ -157,9 +159,8 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
} else {
/* This is an encrypted data extent */
loff_t lower_offset =
- ((view_extent_num -
- crypt_stat->num_header_extents_at_front)
- * crypt_stat->extent_size);
+ ((view_extent_num * crypt_stat->extent_size)
+ - crypt_stat->num_header_bytes_at_front);
rc = ecryptfs_read_lower_page_segment(
page, (lower_offset >> PAGE_CACHE_SHIFT),
@@ -257,8 +258,7 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to)
end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE;
if (to > end_byte_in_page)
end_byte_in_page = to;
- zero_user_page(page, end_byte_in_page,
- PAGE_CACHE_SIZE - end_byte_in_page, KM_USER0);
+ zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE);
out:
return 0;
}
@@ -307,7 +307,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page,
*/
if ((i_size_read(page->mapping->host) == prev_page_end_size) &&
(from != 0)) {
- zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+ zero_user(page, 0, PAGE_CACHE_SIZE);
}
out:
return rc;
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index 948f57624c05..0c4928623bbc 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -293,6 +293,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
return rc;
}
+#if 0
/**
* ecryptfs_read
* @data: The virtual address into which to write the data read (and
@@ -371,3 +372,4 @@ int ecryptfs_read(char *data, loff_t offset, size_t size,
out:
return rc;
}
+#endif /* 0 */
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 4859c4eecd65..c27ac2b358a1 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -156,32 +156,38 @@ static void ecryptfs_clear_inode(struct inode *inode)
/**
* ecryptfs_show_options
*
- * Prints the directory we are currently mounted over.
- * Returns zero on success; non-zero otherwise
+ * Prints the mount options for a given superblock.
+ * Returns zero; does not fail.
*/
static int ecryptfs_show_options(struct seq_file *m, struct vfsmount *mnt)
{
struct super_block *sb = mnt->mnt_sb;
- struct dentry *lower_root_dentry = ecryptfs_dentry_to_lower(sb->s_root);
- struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(sb->s_root);
- char *tmp_page;
- char *path;
- int rc = 0;
-
- tmp_page = (char *)__get_free_page(GFP_KERNEL);
- if (!tmp_page) {
- rc = -ENOMEM;
- goto out;
- }
- path = d_path(lower_root_dentry, lower_mnt, tmp_page, PAGE_SIZE);
- if (IS_ERR(path)) {
- rc = PTR_ERR(path);
- goto out;
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
+ &ecryptfs_superblock_to_private(sb)->mount_crypt_stat;
+ struct ecryptfs_global_auth_tok *walker;
+
+ mutex_lock(&mount_crypt_stat->global_auth_tok_list_mutex);
+ list_for_each_entry(walker,
+ &mount_crypt_stat->global_auth_tok_list,
+ mount_crypt_stat_list) {
+ seq_printf(m, ",ecryptfs_sig=%s", walker->sig);
}
- seq_printf(m, ",dir=%s", path);
- free_page((unsigned long)tmp_page);
-out:
- return rc;
+ mutex_unlock(&mount_crypt_stat->global_auth_tok_list_mutex);
+
+ seq_printf(m, ",ecryptfs_cipher=%s",
+ mount_crypt_stat->global_default_cipher_name);
+
+ if (mount_crypt_stat->global_default_cipher_key_size)
+ seq_printf(m, ",ecryptfs_key_bytes=%zd",
+ mount_crypt_stat->global_default_cipher_key_size);
+ if (mount_crypt_stat->flags & ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)
+ seq_printf(m, ",ecryptfs_passthrough");
+ if (mount_crypt_stat->flags & ECRYPTFS_XATTR_METADATA_ENABLED)
+ seq_printf(m, ",ecryptfs_xattr_metadata");
+ if (mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
+ seq_printf(m, ",ecryptfs_encrypted_view");
+
+ return 0;
}
const struct super_operations ecryptfs_sops = {
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 174696f9bf14..627c3026946d 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -45,17 +45,26 @@ static inline void extent_copy(efs_extent *src, efs_extent *dst) {
return;
}
-void efs_read_inode(struct inode *inode)
+struct inode *efs_iget(struct super_block *super, unsigned long ino)
{
int i, inode_index;
dev_t device;
u32 rdev;
struct buffer_head *bh;
- struct efs_sb_info *sb = SUPER_INFO(inode->i_sb);
- struct efs_inode_info *in = INODE_INFO(inode);
+ struct efs_sb_info *sb = SUPER_INFO(super);
+ struct efs_inode_info *in;
efs_block_t block, offset;
struct efs_dinode *efs_inode;
-
+ struct inode *inode;
+
+ inode = iget_locked(super, ino);
+ if (IS_ERR(inode))
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ in = INODE_INFO(inode);
+
/*
** EFS layout:
**
@@ -159,13 +168,13 @@ void efs_read_inode(struct inode *inode)
break;
}
- return;
+ unlock_new_inode(inode);
+ return inode;
read_inode_error:
printk(KERN_WARNING "EFS: failed to read inode %lu\n", inode->i_ino);
- make_bad_inode(inode);
-
- return;
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
static inline efs_block_t
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
index f7f407075be1..e26704742d41 100644
--- a/fs/efs/namei.c
+++ b/fs/efs/namei.c
@@ -66,9 +66,10 @@ struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct namei
lock_kernel();
inodenum = efs_find_entry(dir, dentry->d_name.name, dentry->d_name.len);
if (inodenum) {
- if (!(inode = iget(dir->i_sb, inodenum))) {
+ inode = efs_iget(dir->i_sb, inodenum);
+ if (IS_ERR(inode)) {
unlock_kernel();
- return ERR_PTR(-EACCES);
+ return ERR_CAST(inode);
}
}
unlock_kernel();
@@ -84,12 +85,11 @@ static struct inode *efs_nfs_get_inode(struct super_block *sb, u64 ino,
if (ino == 0)
return ERR_PTR(-ESTALE);
- inode = iget(sb, ino);
- if (inode == NULL)
- return ERR_PTR(-ENOMEM);
+ inode = efs_iget(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
- if (is_bad_inode(inode) ||
- (generation && inode->i_generation != generation)) {
+ if (generation && inode->i_generation != generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
@@ -116,7 +116,7 @@ struct dentry *efs_get_parent(struct dentry *child)
struct dentry *parent;
struct inode *inode;
efs_ino_t ino;
- int error;
+ long error;
lock_kernel();
@@ -125,10 +125,11 @@ struct dentry *efs_get_parent(struct dentry *child)
if (!ino)
goto fail;
- error = -EACCES;
- inode = iget(child->d_inode->i_sb, ino);
- if (!inode)
+ inode = efs_iget(child->d_inode->i_sb, ino);
+ if (IS_ERR(inode)) {
+ error = PTR_ERR(inode);
goto fail;
+ }
error = -ENOMEM;
parent = d_alloc_anon(inode);
diff --git a/fs/efs/super.c b/fs/efs/super.c
index c79bc627f107..14082405cdd1 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -107,7 +107,6 @@ static int efs_remount(struct super_block *sb, int *flags, char *data)
static const struct super_operations efs_superblock_operations = {
.alloc_inode = efs_alloc_inode,
.destroy_inode = efs_destroy_inode,
- .read_inode = efs_read_inode,
.put_super = efs_put_super,
.statfs = efs_statfs,
.remount_fs = efs_remount,
@@ -247,6 +246,7 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
struct efs_sb_info *sb;
struct buffer_head *bh;
struct inode *root;
+ int ret = -EINVAL;
sb = kzalloc(sizeof(struct efs_sb_info), GFP_KERNEL);
if (!sb)
@@ -303,12 +303,18 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
}
s->s_op = &efs_superblock_operations;
s->s_export_op = &efs_export_ops;
- root = iget(s, EFS_ROOTINODE);
+ root = efs_iget(s, EFS_ROOTINODE);
+ if (IS_ERR(root)) {
+ printk(KERN_ERR "EFS: get root inode failed\n");
+ ret = PTR_ERR(root);
+ goto out_no_fs;
+ }
+
s->s_root = d_alloc_root(root);
-
if (!(s->s_root)) {
- printk(KERN_ERR "EFS: get root inode failed\n");
+ printk(KERN_ERR "EFS: get root dentry failed\n");
iput(root);
+ ret = -ENOMEM;
goto out_no_fs;
}
@@ -318,7 +324,7 @@ out_no_fs_ul:
out_no_fs:
s->s_fs_info = NULL;
kfree(sb);
- return -EINVAL;
+ return ret;
}
static int efs_statfs(struct dentry *dentry, struct kstatfs *buf) {
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 2ce19c000d2a..a9f130cd50ac 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/eventfd.h>
+#include <linux/syscalls.h>
struct eventfd_ctx {
wait_queue_head_t wqh;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 81c04abfb1aa..a415f42d32cf 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -353,7 +353,7 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)
spin_unlock_irqrestore(&psw->lock, flags);
/* Do really wake up now */
- wake_up(wq);
+ wake_up_nested(wq, 1 + wake_nests);
/* Remove the current task from the list */
spin_lock_irqsave(&psw->lock, flags);
diff --git a/fs/exec.c b/fs/exec.c
index 282240afe99e..be923e4bc389 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -760,7 +760,7 @@ static int de_thread(struct task_struct *tsk)
*/
read_lock(&tasklist_lock);
spin_lock_irq(lock);
- if (sig->flags & SIGNAL_GROUP_EXIT) {
+ if (signal_group_exit(sig)) {
/*
* Another group action in progress, just
* return so that the signal is processed.
@@ -778,6 +778,7 @@ static int de_thread(struct task_struct *tsk)
if (unlikely(tsk->group_leader == task_child_reaper(tsk)))
task_active_pid_ns(tsk)->child_reaper = tsk;
+ sig->group_exit_task = tsk;
zap_other_threads(tsk);
read_unlock(&tasklist_lock);
@@ -802,7 +803,6 @@ static int de_thread(struct task_struct *tsk)
}
sig->notify_count = count;
- sig->group_exit_task = tsk;
while (atomic_read(&sig->count) > count) {
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irq(lock);
@@ -871,15 +871,10 @@ static int de_thread(struct task_struct *tsk)
leader->exit_state = EXIT_DEAD;
write_unlock_irq(&tasklist_lock);
- }
+ }
sig->group_exit_task = NULL;
sig->notify_count = 0;
- /*
- * There may be one thread left which is just exiting,
- * but it's safe to stop telling the group to kill themselves.
- */
- sig->flags = 0;
no_thread_group:
exit_itimers(sig);
@@ -947,12 +942,13 @@ static void flush_old_files(struct files_struct * files)
spin_unlock(&files->file_lock);
}
-void get_task_comm(char *buf, struct task_struct *tsk)
+char *get_task_comm(char *buf, struct task_struct *tsk)
{
/* buf must be at least sizeof(tsk->comm) in size */
task_lock(tsk);
strncpy(buf, tsk->comm, sizeof(tsk->comm));
task_unlock(tsk);
+ return buf;
}
void set_task_comm(struct task_struct *tsk, char *buf)
@@ -1548,7 +1544,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
int err = -EAGAIN;
spin_lock_irq(&tsk->sighand->siglock);
- if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
+ if (!signal_group_exit(tsk->signal)) {
tsk->signal->group_exit_code = exit_code;
zap_process(tsk);
err = 0;
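
Returning the buffer from get_task_comm() lets the call sit directly inside an
expression; a minimal sketch of the intended usage:

    #include <linux/kernel.h>
    #include <linux/sched.h>

    static void report_current_comm(void)
    {
            char comm[TASK_COMM_LEN];

            /* the returned pointer is just 'comm', filled in under task_lock() */
            printk(KERN_INFO "exec'ing task: %s (pid %d)\n",
                   get_task_comm(comm, current), current->pid);
    }
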
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 377ad172d74b..e7b2bafa1dd9 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -69,9 +69,53 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
return desc + offset;
}
+static int ext2_valid_block_bitmap(struct super_block *sb,
+ struct ext2_group_desc *desc,
+ unsigned int block_group,
+ struct buffer_head *bh)
+{
+ ext2_grpblk_t offset;
+ ext2_grpblk_t next_zero_bit;
+ ext2_fsblk_t bitmap_blk;
+ ext2_fsblk_t group_first_block;
+
+ group_first_block = ext2_group_first_block_no(sb, block_group);
+
+ /* check whether block bitmap block number is set */
+ bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
+ offset = bitmap_blk - group_first_block;
+ if (!ext2_test_bit(offset, bh->b_data))
+ /* bad block bitmap */
+ goto err_out;
+
+ /* check whether the inode bitmap block number is set */
+ bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
+ offset = bitmap_blk - group_first_block;
+ if (!ext2_test_bit(offset, bh->b_data))
+ /* bad block bitmap */
+ goto err_out;
+
+ /* check whether the inode table block number is set */
+ bitmap_blk = le32_to_cpu(desc->bg_inode_table);
+ offset = bitmap_blk - group_first_block;
+ next_zero_bit = ext2_find_next_zero_bit(bh->b_data,
+ offset + EXT2_SB(sb)->s_itb_per_group,
+ offset);
+ if (next_zero_bit >= offset + EXT2_SB(sb)->s_itb_per_group)
+ /* good bitmap for inode tables */
+ return 1;
+
+err_out:
+ ext2_error(sb, __FUNCTION__,
+ "Invalid block bitmap - "
+ "block_group = %d, block = %lu",
+ block_group, bitmap_blk);
+ return 0;
+}
+
/*
- * Read the bitmap for a given block_group, reading into the specified
- * slot in the superblock's bitmap cache.
+ * Read the bitmap for a given block_group, and validate that the
+ * bits for the block/inode/inode table blocks are set in the bitmap
*
* Return buffer_head on success or NULL in case of failure.
*/
@@ -80,17 +124,36 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
struct ext2_group_desc * desc;
struct buffer_head * bh = NULL;
-
- desc = ext2_get_group_desc (sb, block_group, NULL);
+ ext2_fsblk_t bitmap_blk;
+
+ desc = ext2_get_group_desc(sb, block_group, NULL);
if (!desc)
- goto error_out;
- bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
- if (!bh)
- ext2_error (sb, "read_block_bitmap",
+ return NULL;
+ bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
+ bh = sb_getblk(sb, bitmap_blk);
+ if (unlikely(!bh)) {
+ ext2_error(sb, __FUNCTION__,
+ "Cannot read block bitmap - "
+ "block_group = %d, block_bitmap = %u",
+ block_group, le32_to_cpu(desc->bg_block_bitmap));
+ return NULL;
+ }
+ if (likely(bh_uptodate_or_lock(bh)))
+ return bh;
+
+ if (bh_submit_read(bh) < 0) {
+ brelse(bh);
+ ext2_error(sb, __FUNCTION__,
"Cannot read block bitmap - "
"block_group = %d, block_bitmap = %u",
block_group, le32_to_cpu(desc->bg_block_bitmap));
-error_out:
+ return NULL;
+ }
+ if (!ext2_valid_block_bitmap(sb, desc, block_group, bh)) {
+ brelse(bh);
+ return NULL;
+ }
+
return bh;
}
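
The read_block_bitmap() rewrite above splits buffer lookup from I/O: sb_getblk() only pins the buffer, bh_uptodate_or_lock() either reports it already up to date or returns with the buffer locked, and bh_submit_read() issues the read on a locked buffer and waits for it. A condensed sketch of that idiom (read_meta_block is a hypothetical name, not from the patch):

static struct buffer_head *read_meta_block(struct super_block *sb,
					   sector_t blk)
{
	struct buffer_head *bh = sb_getblk(sb, blk);

	if (unlikely(!bh))
		return NULL;			/* no memory */
	if (bh_uptodate_or_lock(bh))
		return bh;			/* cached, no I/O needed */
	if (bh_submit_read(bh) < 0) {		/* submit read and wait */
		brelse(bh);
		return NULL;
	}
	return bh;
}

On top of that, ext2_valid_block_bitmap() sanity-checks the freshly read bitmap by verifying that the group's own metadata blocks (block bitmap, inode bitmap, inode table) are marked in use; the same pair of changes is applied to ext3 further down.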
@@ -474,11 +537,13 @@ do_more:
in_range (block, le32_to_cpu(desc->bg_inode_table),
sbi->s_itb_per_group) ||
in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
- sbi->s_itb_per_group))
+ sbi->s_itb_per_group)) {
ext2_error (sb, "ext2_free_blocks",
"Freeing blocks in system zones - "
"Block = %lu, count = %lu",
block, count);
+ goto error_return;
+ }
for (i = 0, group_freed = 0; i < count; i++) {
if (!ext2_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
@@ -1250,8 +1315,8 @@ retry_alloc:
smp_rmb();
/*
- * Now search the rest of the groups. We assume that
- * i and gdp correctly point to the last group visited.
+ * Now search the rest of the groups. We assume that
+ * group_no and gdp correctly point to the last group visited.
*/
for (bgi = 0; bgi < ngroups; bgi++) {
group_no++;
@@ -1311,11 +1376,13 @@ allocated:
in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
EXT2_SB(sb)->s_itb_per_group) ||
in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
- EXT2_SB(sb)->s_itb_per_group))
+ EXT2_SB(sb)->s_itb_per_group)) {
ext2_error(sb, "ext2_new_blocks",
"Allocating block in system zone - "
"blocks from "E2FSBLK", length %lu",
ret_block, num);
+ goto out;
+ }
performed_allocation = 1;
@@ -1466,9 +1533,6 @@ int ext2_bg_has_super(struct super_block *sb, int group)
*/
unsigned long ext2_bg_num_gdb(struct super_block *sb, int group)
{
- if (EXT2_HAS_RO_COMPAT_FEATURE(sb,EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
- !ext2_group_sparse(group))
- return 0;
- return EXT2_SB(sb)->s_gdb_count;
+ return ext2_bg_has_super(sb, group) ? EXT2_SB(sb)->s_gdb_count : 0;
}
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index d868e26c15eb..8dededd80fe2 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -703,7 +703,7 @@ const struct file_operations ext2_dir_operations = {
.llseek = generic_file_llseek,
.read = generic_read_dir,
.readdir = ext2_readdir,
- .ioctl = ext2_ioctl,
+ .unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
#endif
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index c87ae29c19cb..f1e5705e75f1 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -124,7 +124,7 @@ extern void ext2_check_inodes_bitmap (struct super_block *);
extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
/* inode.c */
-extern void ext2_read_inode (struct inode *);
+extern struct inode *ext2_iget (struct super_block *, unsigned long);
extern int ext2_write_inode (struct inode *, int);
extern void ext2_put_inode (struct inode *);
extern void ext2_delete_inode (struct inode *);
@@ -139,8 +139,7 @@ int __ext2_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata);
/* ioctl.c */
-extern int ext2_ioctl (struct inode *, struct file *, unsigned int,
- unsigned long);
+extern long ext2_ioctl(struct file *, unsigned int, unsigned long);
extern long ext2_compat_ioctl(struct file *, unsigned int, unsigned long);
/* namei.c */
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index c051798459a1..5f2fa9c36293 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -48,7 +48,7 @@ const struct file_operations ext2_file_operations = {
.write = do_sync_write,
.aio_read = generic_file_aio_read,
.aio_write = generic_file_aio_write,
- .ioctl = ext2_ioctl,
+ .unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
#endif
@@ -65,7 +65,7 @@ const struct file_operations ext2_xip_file_operations = {
.llseek = generic_file_llseek,
.read = xip_file_read,
.write = xip_file_write,
- .ioctl = ext2_ioctl,
+ .unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext2_compat_ioctl,
#endif
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index b1ab32ab5a77..c62006805427 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -286,15 +286,12 @@ static unsigned long ext2_find_near(struct inode *inode, Indirect *ind)
* ext2_find_goal - find a prefered place for allocation.
* @inode: owner
* @block: block we want
- * @chain: chain of indirect blocks
* @partial: pointer to the last triple within a chain
*
* Returns preferred place for a block (the goal).
*/
-static inline int ext2_find_goal(struct inode *inode,
- long block,
- Indirect chain[4],
+static inline int ext2_find_goal(struct inode *inode, long block,
Indirect *partial)
{
struct ext2_block_alloc_info *block_i;
@@ -569,7 +566,6 @@ static void ext2_splice_branch(struct inode *inode,
*
* `handle' can be NULL if create == 0.
*
- * The BKL may not be held on entry here. Be sure to take it early.
* return > 0, # of blocks mapped or allocated.
* return = 0, if plain lookup failed.
* return < 0, error case.
@@ -639,7 +635,7 @@ reread:
if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
ext2_init_block_alloc_info(inode);
- goal = ext2_find_goal(inode, iblock, chain, partial);
+ goal = ext2_find_goal(inode, iblock, partial);
/* the number of blocks need to allocate for [d,t]indirect blocks */
indirect_blks = (chain + depth) - partial - 1;
@@ -1185,22 +1181,33 @@ void ext2_get_inode_flags(struct ext2_inode_info *ei)
ei->i_flags |= EXT2_DIRSYNC_FL;
}
-void ext2_read_inode (struct inode * inode)
+struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
{
- struct ext2_inode_info *ei = EXT2_I(inode);
- ino_t ino = inode->i_ino;
+ struct ext2_inode_info *ei;
struct buffer_head * bh;
- struct ext2_inode * raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
+ struct ext2_inode *raw_inode;
+ struct inode *inode;
+ long ret = -EIO;
int n;
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ ei = EXT2_I(inode);
#ifdef CONFIG_EXT2_FS_POSIX_ACL
ei->i_acl = EXT2_ACL_NOT_CACHED;
ei->i_default_acl = EXT2_ACL_NOT_CACHED;
#endif
ei->i_block_alloc_info = NULL;
- if (IS_ERR(raw_inode))
+ raw_inode = ext2_get_inode(inode->i_sb, ino, &bh);
+ if (IS_ERR(raw_inode)) {
+ ret = PTR_ERR(raw_inode);
goto bad_inode;
+ }
inode->i_mode = le16_to_cpu(raw_inode->i_mode);
inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
@@ -1224,6 +1231,7 @@ void ext2_read_inode (struct inode * inode)
if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) {
/* this inode is deleted */
brelse (bh);
+ ret = -ESTALE;
goto bad_inode;
}
inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
@@ -1290,11 +1298,12 @@ void ext2_read_inode (struct inode * inode)
}
brelse (bh);
ext2_set_inode_flags(inode);
- return;
+ unlock_new_inode(inode);
+ return inode;
bad_inode:
- make_bad_inode(inode);
- return;
+ iget_failed(inode);
+ return ERR_PTR(ret);
}
static int ext2_update_inode(struct inode * inode, int do_sync)
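
This is the central conversion of the series: ->read_inode(), which could only signal failure through make_bad_inode(), becomes ext2_iget(), which hands back either the inode or an ERR_PTR. The same shape is repeated below for ext3, ext4 and freevxfs; a generic skeleton of the pattern (the foo_* names are illustrative, not from the patch):

struct inode *foo_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	long ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;			/* already cached and set up */

	ret = foo_fill_inode_from_disk(inode);	/* fs-specific part */
	if (ret) {
		iget_failed(inode);		/* mark bad, unlock, drop ref */
		return ERR_PTR(ret);
	}
	unlock_new_inode(inode);
	return inode;
}

Callers that used to pair iget() with is_bad_inode() now test IS_ERR() and can propagate a meaningful error (-ESTALE for a deleted inode, -EIO for a read failure) instead of the old catch-all -EACCES.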
diff --git a/fs/ext2/ioctl.c b/fs/ext2/ioctl.c
index 320b2cb3d4d2..b8ea11fee5c6 100644
--- a/fs/ext2/ioctl.c
+++ b/fs/ext2/ioctl.c
@@ -17,9 +17,9 @@
#include <asm/uaccess.h>
-int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
- unsigned long arg)
+long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ struct inode *inode = filp->f_dentry->d_inode;
struct ext2_inode_info *ei = EXT2_I(inode);
unsigned int flags;
unsigned short rsv_window_size;
@@ -141,9 +141,6 @@ int ext2_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
#ifdef CONFIG_COMPAT
long ext2_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct inode *inode = file->f_path.dentry->d_inode;
- int ret;
-
/* These are just misnamed, they actually get/put from/to user an int */
switch (cmd) {
case EXT2_IOC32_GETFLAGS:
@@ -161,9 +158,6 @@ long ext2_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
default:
return -ENOIOCTLCMD;
}
- lock_kernel();
- ret = ext2_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg));
- unlock_kernel();
- return ret;
+ return ext2_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index e69beed839ac..80c97fd8c571 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -63,9 +63,9 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str
ino = ext2_inode_by_name(dir, dentry);
inode = NULL;
if (ino) {
- inode = iget(dir->i_sb, ino);
- if (!inode)
- return ERR_PTR(-EACCES);
+ inode = ext2_iget(dir->i_sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
}
return d_splice_alias(inode, dentry);
}
@@ -83,10 +83,10 @@ struct dentry *ext2_get_parent(struct dentry *child)
ino = ext2_inode_by_name(child->d_inode, &dotdot);
if (!ino)
return ERR_PTR(-ENOENT);
- inode = iget(child->d_inode->i_sb, ino);
+ inode = ext2_iget(child->d_inode->i_sb, ino);
- if (!inode)
- return ERR_PTR(-EACCES);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
parent = d_alloc_anon(inode);
if (!parent) {
iput(inode);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 6abaf75163f0..22f1010bf79f 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -234,16 +234,16 @@ static int ext2_show_options(struct seq_file *seq, struct vfsmount *vfs)
le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) {
seq_printf(seq, ",resgid=%u", sbi->s_resgid);
}
- if (test_opt(sb, ERRORS_CONT)) {
+ if (test_opt(sb, ERRORS_RO)) {
int def_errors = le16_to_cpu(es->s_errors);
if (def_errors == EXT2_ERRORS_PANIC ||
- def_errors == EXT2_ERRORS_RO) {
- seq_puts(seq, ",errors=continue");
+ def_errors == EXT2_ERRORS_CONTINUE) {
+ seq_puts(seq, ",errors=remount-ro");
}
}
- if (test_opt(sb, ERRORS_RO))
- seq_puts(seq, ",errors=remount-ro");
+ if (test_opt(sb, ERRORS_CONT))
+ seq_puts(seq, ",errors=continue");
if (test_opt(sb, ERRORS_PANIC))
seq_puts(seq, ",errors=panic");
if (test_opt(sb, NO_UID32))
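
The net effect of swapping these two branches, together with the default flip in ext2_fill_super() further down in this file, is that errors=remount-ro becomes the kernel-side default and /proc/mounts only reports what actually differs from it. Two illustrative cases, each mounted without an explicit errors= option:

	on-disk s_errors        shown before the patch    shown after
	EXT2_ERRORS_RO          errors=remount-ro         (nothing)
	EXT2_ERRORS_CONTINUE    (nothing)                 errors=continue

The identical change is made for ext3 below.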
@@ -296,7 +296,6 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *da
static const struct super_operations ext2_sops = {
.alloc_inode = ext2_alloc_inode,
.destroy_inode = ext2_destroy_inode,
- .read_inode = ext2_read_inode,
.write_inode = ext2_write_inode,
.delete_inode = ext2_delete_inode,
.put_super = ext2_put_super,
@@ -326,11 +325,10 @@ static struct inode *ext2_nfs_get_inode(struct super_block *sb,
* it might be "neater" to call ext2_get_inode first and check
* if the inode is valid.....
*/
- inode = iget(sb, ino);
- if (inode == NULL)
- return ERR_PTR(-ENOMEM);
- if (is_bad_inode(inode) ||
- (generation && inode->i_generation != generation)) {
+ inode = ext2_iget(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (generation && inode->i_generation != generation) {
/* we didn't find the right inode.. */
iput(inode);
return ERR_PTR(-ESTALE);
@@ -617,27 +615,24 @@ static int ext2_setup_super (struct super_block * sb,
return res;
}
-static int ext2_check_descriptors (struct super_block * sb)
+static int ext2_check_descriptors(struct super_block *sb)
{
int i;
- int desc_block = 0;
struct ext2_sb_info *sbi = EXT2_SB(sb);
unsigned long first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
unsigned long last_block;
- struct ext2_group_desc * gdp = NULL;
ext2_debug ("Checking group descriptors");
- for (i = 0; i < sbi->s_groups_count; i++)
- {
+ for (i = 0; i < sbi->s_groups_count; i++) {
+ struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
+
if (i == sbi->s_groups_count - 1)
last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
else
last_block = first_block +
(EXT2_BLOCKS_PER_GROUP(sb) - 1);
- if ((i % EXT2_DESC_PER_BLOCK(sb)) == 0)
- gdp = (struct ext2_group_desc *) sbi->s_group_desc[desc_block++]->b_data;
if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
le32_to_cpu(gdp->bg_block_bitmap) > last_block)
{
@@ -667,7 +662,6 @@ static int ext2_check_descriptors (struct super_block * sb)
return 0;
}
first_block += EXT2_BLOCKS_PER_GROUP(sb);
- gdp++;
}
return 1;
}
@@ -750,6 +744,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
unsigned long logic_sb_block;
unsigned long offset = 0;
unsigned long def_mount_opts;
+ long ret = -EINVAL;
int blocksize = BLOCK_SIZE;
int db_count;
int i, j;
@@ -820,10 +815,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
- else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_RO)
- set_opt(sbi->s_mount_opt, ERRORS_RO);
- else
+ else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
set_opt(sbi->s_mount_opt, ERRORS_CONT);
+ else
+ set_opt(sbi->s_mount_opt, ERRORS_RO);
sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
@@ -868,8 +863,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
- if ((ext2_use_xip(sb)) && ((blocksize != PAGE_SIZE) ||
- (sb->s_blocksize != blocksize))) {
+ if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
if (!silent)
printk("XIP: Unsupported blocksize\n");
goto failed_mount;
@@ -1046,19 +1040,24 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &ext2_sops;
sb->s_export_op = &ext2_export_ops;
sb->s_xattr = ext2_xattr_handlers;
- root = iget(sb, EXT2_ROOT_INO);
- sb->s_root = d_alloc_root(root);
- if (!sb->s_root) {
- iput(root);
- printk(KERN_ERR "EXT2-fs: get root inode failed\n");
+ root = ext2_iget(sb, EXT2_ROOT_INO);
+ if (IS_ERR(root)) {
+ ret = PTR_ERR(root);
goto failed_mount3;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
- dput(sb->s_root);
- sb->s_root = NULL;
+ iput(root);
printk(KERN_ERR "EXT2-fs: corrupt root inode, run e2fsck\n");
goto failed_mount3;
}
+
+ sb->s_root = d_alloc_root(root);
+ if (!sb->s_root) {
+ iput(root);
+ printk(KERN_ERR "EXT2-fs: get root inode failed\n");
+ ret = -ENOMEM;
+ goto failed_mount3;
+ }
if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
ext2_warning(sb, __FUNCTION__,
"mounting ext3 filesystem as ext2");
@@ -1085,7 +1084,7 @@ failed_mount:
failed_sbi:
sb->s_fs_info = NULL;
kfree(sbi);
- return -EINVAL;
+ return ret;
}
static void ext2_commit_super (struct super_block * sb,
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index a8ba7e831278..a75713031105 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -80,13 +80,57 @@ struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
return desc + offset;
}
+static int ext3_valid_block_bitmap(struct super_block *sb,
+ struct ext3_group_desc *desc,
+ unsigned int block_group,
+ struct buffer_head *bh)
+{
+ ext3_grpblk_t offset;
+ ext3_grpblk_t next_zero_bit;
+ ext3_fsblk_t bitmap_blk;
+ ext3_fsblk_t group_first_block;
+
+ group_first_block = ext3_group_first_block_no(sb, block_group);
+
+ /* check whether block bitmap block number is set */
+ bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
+ offset = bitmap_blk - group_first_block;
+ if (!ext3_test_bit(offset, bh->b_data))
+ /* bad block bitmap */
+ goto err_out;
+
+ /* check whether the inode bitmap block number is set */
+ bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
+ offset = bitmap_blk - group_first_block;
+ if (!ext3_test_bit(offset, bh->b_data))
+ /* bad block bitmap */
+ goto err_out;
+
+ /* check whether the inode table block number is set */
+ bitmap_blk = le32_to_cpu(desc->bg_inode_table);
+ offset = bitmap_blk - group_first_block;
+ next_zero_bit = ext3_find_next_zero_bit(bh->b_data,
+ offset + EXT3_SB(sb)->s_itb_per_group,
+ offset);
+ if (next_zero_bit >= offset + EXT3_SB(sb)->s_itb_per_group)
+ /* good bitmap for inode tables */
+ return 1;
+
+err_out:
+ ext3_error(sb, __FUNCTION__,
+ "Invalid block bitmap - "
+ "block_group = %d, block = %lu",
+ block_group, bitmap_blk);
+ return 0;
+}
+
/**
* read_block_bitmap()
* @sb: super block
* @block_group: given block group
*
- * Read the bitmap for a given block_group, reading into the specified
- * slot in the superblock's bitmap cache.
+ * Read the bitmap for a given block_group, and validate that the
+ * bits for the block/inode/inode table blocks are set in the bitmap
*
* Return buffer_head on success or NULL in case of failure.
*/
@@ -95,17 +139,35 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
struct ext3_group_desc * desc;
struct buffer_head * bh = NULL;
+ ext3_fsblk_t bitmap_blk;
- desc = ext3_get_group_desc (sb, block_group, NULL);
+ desc = ext3_get_group_desc(sb, block_group, NULL);
if (!desc)
- goto error_out;
- bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
- if (!bh)
- ext3_error (sb, "read_block_bitmap",
+ return NULL;
+ bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
+ bh = sb_getblk(sb, bitmap_blk);
+ if (unlikely(!bh)) {
+ ext3_error(sb, __FUNCTION__,
"Cannot read block bitmap - "
"block_group = %d, block_bitmap = %u",
block_group, le32_to_cpu(desc->bg_block_bitmap));
-error_out:
+ return NULL;
+ }
+ if (likely(bh_uptodate_or_lock(bh)))
+ return bh;
+
+ if (bh_submit_read(bh) < 0) {
+ brelse(bh);
+ ext3_error(sb, __FUNCTION__,
+ "Cannot read block bitmap - "
+ "block_group = %d, block_bitmap = %u",
+ block_group, le32_to_cpu(desc->bg_block_bitmap));
+ return NULL;
+ }
+ if (!ext3_valid_block_bitmap(sb, desc, block_group, bh)) {
+ brelse(bh);
+ return NULL;
+ }
return bh;
}
/*
@@ -468,11 +530,13 @@ do_more:
in_range (block, le32_to_cpu(desc->bg_inode_table),
sbi->s_itb_per_group) ||
in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
- sbi->s_itb_per_group))
+ sbi->s_itb_per_group)) {
ext3_error (sb, "ext3_free_blocks",
"Freeing blocks in system zones - "
"Block = "E3FSBLK", count = %lu",
block, count);
+ goto error_return;
+ }
/*
* We are about to start releasing blocks in the bitmap,
@@ -1508,7 +1572,7 @@ retry_alloc:
/*
* Now search the rest of the groups. We assume that
- * i and gdp correctly point to the last group visited.
+ * group_no and gdp correctly point to the last group visited.
*/
for (bgi = 0; bgi < ngroups; bgi++) {
group_no++;
@@ -1575,11 +1639,13 @@ allocated:
in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
EXT3_SB(sb)->s_itb_per_group) ||
in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
- EXT3_SB(sb)->s_itb_per_group))
+ EXT3_SB(sb)->s_itb_per_group)) {
ext3_error(sb, "ext3_new_block",
"Allocating block in system zone - "
"blocks from "E3FSBLK", length %lu",
ret_block, num);
+ goto out;
+ }
performed_allocation = 1;
@@ -1782,11 +1848,7 @@ static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group)
static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group)
{
- if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
- EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
- !ext3_group_sparse(group))
- return 0;
- return EXT3_SB(sb)->s_gdb_count;
+ return ext3_bg_has_super(sb, group) ? EXT3_SB(sb)->s_gdb_count : 0;
}
/**
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 1bc8cd89c51d..58ae2f943f12 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -642,14 +642,15 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
unsigned long max_ino = le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count);
unsigned long block_group;
int bit;
- struct buffer_head *bitmap_bh = NULL;
+ struct buffer_head *bitmap_bh;
struct inode *inode = NULL;
+ long err = -EIO;
/* Error cases - e2fsck has already cleaned up for us */
if (ino > max_ino) {
ext3_warning(sb, __FUNCTION__,
"bad orphan ino %lu! e2fsck was run?", ino);
- goto out;
+ goto error;
}
block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
@@ -658,38 +659,49 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
if (!bitmap_bh) {
ext3_warning(sb, __FUNCTION__,
"inode bitmap error for orphan %lu", ino);
- goto out;
+ goto error;
}
/* Having the inode bit set should be a 100% indicator that this
* is a valid orphan (no e2fsck run on fs). Orphans also include
* inodes that were being truncated, so we can't check i_nlink==0.
*/
- if (!ext3_test_bit(bit, bitmap_bh->b_data) ||
- !(inode = iget(sb, ino)) || is_bad_inode(inode) ||
- NEXT_ORPHAN(inode) > max_ino) {
- ext3_warning(sb, __FUNCTION__,
- "bad orphan inode %lu! e2fsck was run?", ino);
- printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
- bit, (unsigned long long)bitmap_bh->b_blocknr,
- ext3_test_bit(bit, bitmap_bh->b_data));
- printk(KERN_NOTICE "inode=%p\n", inode);
- if (inode) {
- printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
- is_bad_inode(inode));
- printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
- NEXT_ORPHAN(inode));
- printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
- }
+ if (!ext3_test_bit(bit, bitmap_bh->b_data))
+ goto bad_orphan;
+
+ inode = ext3_iget(sb, ino);
+ if (IS_ERR(inode))
+ goto iget_failed;
+
+ if (NEXT_ORPHAN(inode) > max_ino)
+ goto bad_orphan;
+ brelse(bitmap_bh);
+ return inode;
+
+iget_failed:
+ err = PTR_ERR(inode);
+ inode = NULL;
+bad_orphan:
+ ext3_warning(sb, __FUNCTION__,
+ "bad orphan inode %lu! e2fsck was run?", ino);
+ printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
+ bit, (unsigned long long)bitmap_bh->b_blocknr,
+ ext3_test_bit(bit, bitmap_bh->b_data));
+ printk(KERN_NOTICE "inode=%p\n", inode);
+ if (inode) {
+ printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
+ is_bad_inode(inode));
+ printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
+ NEXT_ORPHAN(inode));
+ printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
/* Avoid freeing blocks if we got a bad deleted inode */
- if (inode && inode->i_nlink == 0)
+ if (inode->i_nlink == 0)
inode->i_blocks = 0;
iput(inode);
- inode = NULL;
}
-out:
brelse(bitmap_bh);
- return inode;
+error:
+ return ERR_PTR(err);
}
unsigned long ext3_count_free_inodes (struct super_block * sb)
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 9b162cd6c16c..eb95670a27eb 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -439,16 +439,14 @@ static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
* ext3_find_goal - find a prefered place for allocation.
* @inode: owner
* @block: block we want
- * @chain: chain of indirect blocks
* @partial: pointer to the last triple within a chain
- * @goal: place to store the result.
*
* Normally this function find the prefered place for block allocation,
- * stores it in *@goal and returns zero.
+ * returns it.
*/
static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
- Indirect chain[4], Indirect *partial)
+ Indirect *partial)
{
struct ext3_block_alloc_info *block_i;
@@ -884,7 +882,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
ext3_init_block_alloc_info(inode);
- goal = ext3_find_goal(inode, iblock, chain, partial);
+ goal = ext3_find_goal(inode, iblock, partial);
/* the number of blocks need to allocate for [d,t]indirect blocks */
indirect_blks = (chain + depth) - partial - 1;
@@ -941,55 +939,45 @@ out:
return err;
}
-#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
+/* Maximum number of blocks we map for direct IO at once. */
+#define DIO_MAX_BLOCKS 4096
+/*
+ * Number of credits we need for writing DIO_MAX_BLOCKS:
+ * We need sb + group descriptor + bitmap + inode -> 4
+ * For B blocks with A block pointers per block we need:
+ * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
+ * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
+ */
+#define DIO_CREDITS 25
static int ext3_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
handle_t *handle = ext3_journal_current_handle();
- int ret = 0;
+ int ret = 0, started = 0;
unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
- if (!create)
- goto get_block; /* A read */
-
- if (max_blocks == 1)
- goto get_block; /* A single block get */
-
- if (handle->h_transaction->t_state == T_LOCKED) {
- /*
- * Huge direct-io writes can hold off commits for long
- * periods of time. Let this commit run.
- */
- ext3_journal_stop(handle);
- handle = ext3_journal_start(inode, DIO_CREDITS);
- if (IS_ERR(handle))
+ if (create && !handle) { /* Direct IO write... */
+ if (max_blocks > DIO_MAX_BLOCKS)
+ max_blocks = DIO_MAX_BLOCKS;
+ handle = ext3_journal_start(inode, DIO_CREDITS +
+ 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb));
+ if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
- goto get_block;
- }
-
- if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
- /*
- * Getting low on buffer credits...
- */
- ret = ext3_journal_extend(handle, DIO_CREDITS);
- if (ret > 0) {
- /*
- * Couldn't extend the transaction. Start a new one.
- */
- ret = ext3_journal_restart(handle, DIO_CREDITS);
+ goto out;
}
+ started = 1;
}
-get_block:
- if (ret == 0) {
- ret = ext3_get_blocks_handle(handle, inode, iblock,
+ ret = ext3_get_blocks_handle(handle, inode, iblock,
max_blocks, bh_result, create, 0);
- if (ret > 0) {
- bh_result->b_size = (ret << inode->i_blkbits);
- ret = 0;
- }
+ if (ret > 0) {
+ bh_result->b_size = (ret << inode->i_blkbits);
+ ret = 0;
}
+ if (started)
+ ext3_journal_stop(handle);
+out:
return ret;
}
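
For the worst case the new DIO_CREDITS comment describes, the arithmetic works out as follows (B = 4096 blocks mapped at once, A = 256 block pointers per indirect block at a 1KB block size):

	indirect blocks:          B/A + 2   = 16 + 2 = 18
	doubly indirect blocks:   B/A/A + 2 =  0 + 2 =  2
	triply indirect block:                          1
	sb + group descriptor + bitmap + inode:         4
	                                     total   = 25

The handle started for a direct-IO write reserves 2 * EXT3_QUOTA_TRANS_BLOCKS credits on top of that, as the hunk shows.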
@@ -1680,7 +1668,8 @@ static int ext3_releasepage(struct page *page, gfp_t wait)
* if the machine crashes during the write.
*
* If the O_DIRECT write is intantiating holes inside i_size and the machine
- * crashes then stale disk data _may_ be exposed inside the file.
+ * crashes then stale disk data _may_ be exposed inside the file. But current
+ * VFS code falls back into buffered path in that case so we are safe.
*/
static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset,
@@ -1689,7 +1678,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct ext3_inode_info *ei = EXT3_I(inode);
- handle_t *handle = NULL;
+ handle_t *handle;
ssize_t ret;
int orphan = 0;
size_t count = iov_length(iov, nr_segs);
@@ -1697,17 +1686,21 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
if (rw == WRITE) {
loff_t final_size = offset + count;
- handle = ext3_journal_start(inode, DIO_CREDITS);
- if (IS_ERR(handle)) {
- ret = PTR_ERR(handle);
- goto out;
- }
if (final_size > inode->i_size) {
+ /* Credits for sb + inode write */
+ handle = ext3_journal_start(inode, 2);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out;
+ }
ret = ext3_orphan_add(handle, inode);
- if (ret)
- goto out_stop;
+ if (ret) {
+ ext3_journal_stop(handle);
+ goto out;
+ }
orphan = 1;
ei->i_disksize = inode->i_size;
+ ext3_journal_stop(handle);
}
}
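
Together with the next hunk, this stops ext3_direct_IO() from holding a journal handle across the whole blockdev_direct_IO() call. A condensed sketch of the new shape (simplified, most error handling elided):

	if (rw == WRITE && final_size > inode->i_size) {
		handle = ext3_journal_start(inode, 2);	/* sb + inode */
		ext3_orphan_add(handle, inode);		/* crash safety */
		ei->i_disksize = inode->i_size;
		ext3_journal_stop(handle);		/* nothing held during I/O */
	}
	ret = blockdev_direct_IO(...);			/* ext3_get_block() starts
							 * its own DIO_CREDITS
							 * handle when needed */
	if (orphan) {
		handle = ext3_journal_start(inode, 2);
		if (inode->i_nlink)
			ext3_orphan_del(handle, inode);
		/* ...and push i_size/i_disksize forward if we extended */
		ext3_journal_stop(handle);
	}

The old code instead opened one large DIO_CREDITS handle up front and relied on ext3_get_block() to extend or restart the transaction in the middle of the I/O.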
@@ -1715,18 +1708,21 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
offset, nr_segs,
ext3_get_block, NULL);
- /*
- * Reacquire the handle: ext3_get_block() can restart the transaction
- */
- handle = ext3_journal_current_handle();
-
-out_stop:
- if (handle) {
+ if (orphan) {
int err;
- if (orphan && inode->i_nlink)
+ /* Credits for sb + inode write */
+ handle = ext3_journal_start(inode, 2);
+ if (IS_ERR(handle)) {
+ /* This is really bad luck. We've written the data
+ * but cannot extend i_size. Bail out and pretend
+ * the write failed... */
+ ret = PTR_ERR(handle);
+ goto out;
+ }
+ if (inode->i_nlink)
ext3_orphan_del(handle, inode);
- if (orphan && ret > 0) {
+ if (ret > 0) {
loff_t end = offset + ret;
if (end > inode->i_size) {
ei->i_disksize = end;
@@ -1845,7 +1841,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
*/
if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
ext3_should_writeback_data(inode) && PageUptodate(page)) {
- zero_user_page(page, offset, length, KM_USER0);
+ zero_user(page, offset, length);
set_page_dirty(page);
goto unlock;
}
@@ -1898,7 +1894,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
goto unlock;
}
- zero_user_page(page, offset, length, KM_USER0);
+ zero_user(page, offset, length);
BUFFER_TRACE(bh, "zeroed end of block");
err = 0;
@@ -2658,21 +2654,31 @@ void ext3_get_inode_flags(struct ext3_inode_info *ei)
ei->i_flags |= EXT3_DIRSYNC_FL;
}
-void ext3_read_inode(struct inode * inode)
+struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
{
struct ext3_iloc iloc;
struct ext3_inode *raw_inode;
- struct ext3_inode_info *ei = EXT3_I(inode);
+ struct ext3_inode_info *ei;
struct buffer_head *bh;
+ struct inode *inode;
+ long ret;
int block;
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ ei = EXT3_I(inode);
#ifdef CONFIG_EXT3_FS_POSIX_ACL
ei->i_acl = EXT3_ACL_NOT_CACHED;
ei->i_default_acl = EXT3_ACL_NOT_CACHED;
#endif
ei->i_block_alloc_info = NULL;
- if (__ext3_get_inode_loc(inode, &iloc, 0))
+ ret = __ext3_get_inode_loc(inode, &iloc, 0);
+ if (ret < 0)
goto bad_inode;
bh = iloc.bh;
raw_inode = ext3_raw_inode(&iloc);
@@ -2703,6 +2709,7 @@ void ext3_read_inode(struct inode * inode)
!(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
/* this inode is deleted */
brelse (bh);
+ ret = -ESTALE;
goto bad_inode;
}
/* The only unlinked inodes we let through here have
@@ -2746,6 +2753,7 @@ void ext3_read_inode(struct inode * inode)
if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
EXT3_INODE_SIZE(inode->i_sb)) {
brelse (bh);
+ ret = -EIO;
goto bad_inode;
}
if (ei->i_extra_isize == 0) {
@@ -2787,11 +2795,12 @@ void ext3_read_inode(struct inode * inode)
}
brelse (iloc.bh);
ext3_set_inode_flags(inode);
- return;
+ unlock_new_inode(inode);
+ return inode;
bad_inode:
- make_bad_inode(inode);
- return;
+ iget_failed(inode);
+ return ERR_PTR(ret);
}
/*
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 4ab6f76e63d0..dec3e0d88ab1 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -860,14 +860,10 @@ static struct buffer_head * ext3_find_entry (struct dentry *dentry,
int nblocks, i, err;
struct inode *dir = dentry->d_parent->d_inode;
int namelen;
- const u8 *name;
- unsigned blocksize;
*res_dir = NULL;
sb = dir->i_sb;
- blocksize = sb->s_blocksize;
namelen = dentry->d_name.len;
- name = dentry->d_name.name;
if (namelen > EXT3_NAME_LEN)
return NULL;
if (is_dx(dir)) {
@@ -1041,17 +1037,11 @@ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, str
if (!ext3_valid_inum(dir->i_sb, ino)) {
ext3_error(dir->i_sb, "ext3_lookup",
"bad inode number: %lu", ino);
- inode = NULL;
- } else
- inode = iget(dir->i_sb, ino);
-
- if (!inode)
- return ERR_PTR(-EACCES);
-
- if (is_bad_inode(inode)) {
- iput(inode);
- return ERR_PTR(-ENOENT);
+ return ERR_PTR(-EIO);
}
+ inode = ext3_iget(dir->i_sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
}
return d_splice_alias(inode, dentry);
}
@@ -1080,18 +1070,13 @@ struct dentry *ext3_get_parent(struct dentry *child)
if (!ext3_valid_inum(child->d_inode->i_sb, ino)) {
ext3_error(child->d_inode->i_sb, "ext3_get_parent",
"bad inode number: %lu", ino);
- inode = NULL;
- } else
- inode = iget(child->d_inode->i_sb, ino);
-
- if (!inode)
- return ERR_PTR(-EACCES);
-
- if (is_bad_inode(inode)) {
- iput(inode);
- return ERR_PTR(-ENOENT);
+ return ERR_PTR(-EIO);
}
+ inode = ext3_iget(child->d_inode->i_sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
parent = d_alloc_anon(inode);
if (!parent) {
iput(inode);
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 44de1453c301..ebc05af7343a 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -795,12 +795,11 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
"No reserved GDT blocks, can't resize");
return -EPERM;
}
- inode = iget(sb, EXT3_RESIZE_INO);
- if (!inode || is_bad_inode(inode)) {
+ inode = ext3_iget(sb, EXT3_RESIZE_INO);
+ if (IS_ERR(inode)) {
ext3_warning(sb, __FUNCTION__,
"Error opening resize inode");
- iput(inode);
- return -ENOENT;
+ return PTR_ERR(inode);
}
}
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index f3675cc630e9..cf2a2c3660ec 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -575,16 +575,16 @@ static int ext3_show_options(struct seq_file *seq, struct vfsmount *vfs)
le16_to_cpu(es->s_def_resgid) != EXT3_DEF_RESGID) {
seq_printf(seq, ",resgid=%u", sbi->s_resgid);
}
- if (test_opt(sb, ERRORS_CONT)) {
+ if (test_opt(sb, ERRORS_RO)) {
int def_errors = le16_to_cpu(es->s_errors);
if (def_errors == EXT3_ERRORS_PANIC ||
- def_errors == EXT3_ERRORS_RO) {
- seq_puts(seq, ",errors=continue");
+ def_errors == EXT3_ERRORS_CONTINUE) {
+ seq_puts(seq, ",errors=remount-ro");
}
}
- if (test_opt(sb, ERRORS_RO))
- seq_puts(seq, ",errors=remount-ro");
+ if (test_opt(sb, ERRORS_CONT))
+ seq_puts(seq, ",errors=continue");
if (test_opt(sb, ERRORS_PANIC))
seq_puts(seq, ",errors=panic");
if (test_opt(sb, NO_UID32))
@@ -649,11 +649,10 @@ static struct inode *ext3_nfs_get_inode(struct super_block *sb,
* Currently we don't know the generation for parent directory, so
* a generation of 0 means "accept any"
*/
- inode = iget(sb, ino);
- if (inode == NULL)
- return ERR_PTR(-ENOMEM);
- if (is_bad_inode(inode) ||
- (generation && inode->i_generation != generation)) {
+ inode = ext3_iget(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (generation && inode->i_generation != generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
@@ -722,7 +721,6 @@ static struct quotactl_ops ext3_qctl_operations = {
static const struct super_operations ext3_sops = {
.alloc_inode = ext3_alloc_inode,
.destroy_inode = ext3_destroy_inode,
- .read_inode = ext3_read_inode,
.write_inode = ext3_write_inode,
.dirty_inode = ext3_dirty_inode,
.delete_inode = ext3_delete_inode,
@@ -1252,28 +1250,24 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
}
/* Called at mount-time, super-block is locked */
-static int ext3_check_descriptors (struct super_block * sb)
+static int ext3_check_descriptors(struct super_block *sb)
{
struct ext3_sb_info *sbi = EXT3_SB(sb);
ext3_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
ext3_fsblk_t last_block;
- struct ext3_group_desc * gdp = NULL;
- int desc_block = 0;
int i;
ext3_debug ("Checking group descriptors");
- for (i = 0; i < sbi->s_groups_count; i++)
- {
+ for (i = 0; i < sbi->s_groups_count; i++) {
+ struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);
+
if (i == sbi->s_groups_count - 1)
last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
else
last_block = first_block +
(EXT3_BLOCKS_PER_GROUP(sb) - 1);
- if ((i % EXT3_DESC_PER_BLOCK(sb)) == 0)
- gdp = (struct ext3_group_desc *)
- sbi->s_group_desc[desc_block++]->b_data;
if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
le32_to_cpu(gdp->bg_block_bitmap) > last_block)
{
@@ -1306,7 +1300,6 @@ static int ext3_check_descriptors (struct super_block * sb)
return 0;
}
first_block += EXT3_BLOCKS_PER_GROUP(sb);
- gdp++;
}
sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
@@ -1383,8 +1376,8 @@ static void ext3_orphan_cleanup (struct super_block * sb,
while (es->s_last_orphan) {
struct inode *inode;
- if (!(inode =
- ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan)))) {
+ inode = ext3_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
+ if (IS_ERR(inode)) {
es->s_last_orphan = 0;
break;
}
@@ -1513,6 +1506,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
int db_count;
int i;
int needs_recovery;
+ int ret = -EINVAL;
__le32 features;
int err;
@@ -1583,10 +1577,10 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
set_opt(sbi->s_mount_opt, ERRORS_PANIC);
- else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_RO)
- set_opt(sbi->s_mount_opt, ERRORS_RO);
- else
+ else if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_CONTINUE)
set_opt(sbi->s_mount_opt, ERRORS_CONT);
+ else
+ set_opt(sbi->s_mount_opt, ERRORS_RO);
sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
@@ -1882,19 +1876,24 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
* so we can safely mount the rest of the filesystem now.
*/
- root = iget(sb, EXT3_ROOT_INO);
- sb->s_root = d_alloc_root(root);
- if (!sb->s_root) {
+ root = ext3_iget(sb, EXT3_ROOT_INO);
+ if (IS_ERR(root)) {
printk(KERN_ERR "EXT3-fs: get root inode failed\n");
- iput(root);
+ ret = PTR_ERR(root);
goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
- dput(sb->s_root);
- sb->s_root = NULL;
+ iput(root);
printk(KERN_ERR "EXT3-fs: corrupt root inode, run e2fsck\n");
goto failed_mount4;
}
+ sb->s_root = d_alloc_root(root);
+ if (!sb->s_root) {
+ printk(KERN_ERR "EXT3-fs: get root dentry failed\n");
+ iput(root);
+ ret = -ENOMEM;
+ goto failed_mount4;
+ }
ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
/*
@@ -1946,7 +1945,7 @@ out_fail:
sb->s_fs_info = NULL;
kfree(sbi);
lock_kernel();
- return -EINVAL;
+ return ret;
}
/*
@@ -1982,8 +1981,8 @@ static journal_t *ext3_get_journal(struct super_block *sb,
* things happen if we iget() an unused inode, as the subsequent
* iput() will try to delete it. */
- journal_inode = iget(sb, journal_inum);
- if (!journal_inode) {
+ journal_inode = ext3_iget(sb, journal_inum);
+ if (IS_ERR(journal_inode)) {
printk(KERN_ERR "EXT3-fs: no journal found.\n");
return NULL;
}
@@ -1996,7 +1995,7 @@ static journal_t *ext3_get_journal(struct super_block *sb,
jbd_debug(2, "Journal inode found at %p: %Ld bytes\n",
journal_inode, journal_inode->i_size);
- if (is_bad_inode(journal_inode) || !S_ISREG(journal_inode->i_mode)) {
+ if (!S_ISREG(journal_inode->i_mode)) {
printk(KERN_ERR "EXT3-fs: invalid journal inode.\n");
iput(journal_inode);
return NULL;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index ac75ea953d83..0737e05ba3dd 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -1700,7 +1700,7 @@ retry_alloc:
/*
* Now search the rest of the groups. We assume that
- * i and gdp correctly point to the last group visited.
+ * group_no and gdp correctly point to the last group visited.
*/
for (bgi = 0; bgi < ngroups; bgi++) {
group_no++;
@@ -2011,11 +2011,7 @@ static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
ext4_group_t group)
{
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
- EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
- !ext4_group_sparse(group))
- return 0;
- return EXT4_SB(sb)->s_gdb_count;
+ return ext4_bg_has_super(sb, group) ? EXT4_SB(sb)->s_gdb_count : 0;
}
/**
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 575b5215c808..da18a74b966a 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -782,14 +782,15 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
ext4_group_t block_group;
int bit;
- struct buffer_head *bitmap_bh = NULL;
+ struct buffer_head *bitmap_bh;
struct inode *inode = NULL;
+ long err = -EIO;
/* Error cases - e2fsck has already cleaned up for us */
if (ino > max_ino) {
ext4_warning(sb, __FUNCTION__,
"bad orphan ino %lu! e2fsck was run?", ino);
- goto out;
+ goto error;
}
block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
@@ -798,38 +799,49 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
if (!bitmap_bh) {
ext4_warning(sb, __FUNCTION__,
"inode bitmap error for orphan %lu", ino);
- goto out;
+ goto error;
}
/* Having the inode bit set should be a 100% indicator that this
* is a valid orphan (no e2fsck run on fs). Orphans also include
* inodes that were being truncated, so we can't check i_nlink==0.
*/
- if (!ext4_test_bit(bit, bitmap_bh->b_data) ||
- !(inode = iget(sb, ino)) || is_bad_inode(inode) ||
- NEXT_ORPHAN(inode) > max_ino) {
- ext4_warning(sb, __FUNCTION__,
- "bad orphan inode %lu! e2fsck was run?", ino);
- printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
- bit, (unsigned long long)bitmap_bh->b_blocknr,
- ext4_test_bit(bit, bitmap_bh->b_data));
- printk(KERN_NOTICE "inode=%p\n", inode);
- if (inode) {
- printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
- is_bad_inode(inode));
- printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
- NEXT_ORPHAN(inode));
- printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
- }
+ if (!ext4_test_bit(bit, bitmap_bh->b_data))
+ goto bad_orphan;
+
+ inode = ext4_iget(sb, ino);
+ if (IS_ERR(inode))
+ goto iget_failed;
+
+ if (NEXT_ORPHAN(inode) > max_ino)
+ goto bad_orphan;
+ brelse(bitmap_bh);
+ return inode;
+
+iget_failed:
+ err = PTR_ERR(inode);
+ inode = NULL;
+bad_orphan:
+ ext4_warning(sb, __FUNCTION__,
+ "bad orphan inode %lu! e2fsck was run?", ino);
+ printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
+ bit, (unsigned long long)bitmap_bh->b_blocknr,
+ ext4_test_bit(bit, bitmap_bh->b_data));
+ printk(KERN_NOTICE "inode=%p\n", inode);
+ if (inode) {
+ printk(KERN_NOTICE "is_bad_inode(inode)=%d\n",
+ is_bad_inode(inode));
+ printk(KERN_NOTICE "NEXT_ORPHAN(inode)=%u\n",
+ NEXT_ORPHAN(inode));
+ printk(KERN_NOTICE "max_ino=%lu\n", max_ino);
/* Avoid freeing blocks if we got a bad deleted inode */
- if (inode && inode->i_nlink == 0)
+ if (inode->i_nlink == 0)
inode->i_blocks = 0;
iput(inode);
- inode = NULL;
}
-out:
brelse(bitmap_bh);
- return inode;
+error:
+ return ERR_PTR(err);
}
unsigned long ext4_count_free_inodes (struct super_block * sb)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index bb717cbb749c..f4e387452246 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -429,16 +429,13 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
* ext4_find_goal - find a prefered place for allocation.
* @inode: owner
* @block: block we want
- * @chain: chain of indirect blocks
* @partial: pointer to the last triple within a chain
- * @goal: place to store the result.
*
* Normally this function find the prefered place for block allocation,
- * stores it in *@goal and returns zero.
+ * returns it.
*/
-
static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
- Indirect chain[4], Indirect *partial)
+ Indirect *partial)
{
struct ext4_block_alloc_info *block_i;
@@ -839,7 +836,7 @@ int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
ext4_init_block_alloc_info(inode);
- goal = ext4_find_goal(inode, iblock, chain, partial);
+ goal = ext4_find_goal(inode, iblock, partial);
/* the number of blocks need to allocate for [d,t]indirect blocks */
indirect_blks = (chain + depth) - partial - 1;
@@ -1840,7 +1837,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
*/
if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
ext4_should_writeback_data(inode) && PageUptodate(page)) {
- zero_user_page(page, offset, length, KM_USER0);
+ zero_user(page, offset, length);
set_page_dirty(page);
goto unlock;
}
@@ -1893,7 +1890,7 @@ int ext4_block_truncate_page(handle_t *handle, struct page *page,
goto unlock;
}
- zero_user_page(page, offset, length, KM_USER0);
+ zero_user(page, offset, length);
BUFFER_TRACE(bh, "zeroed end of block");
@@ -2683,21 +2680,31 @@ static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
}
}
-void ext4_read_inode(struct inode * inode)
+struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
struct ext4_iloc iloc;
struct ext4_inode *raw_inode;
- struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_inode_info *ei;
struct buffer_head *bh;
+ struct inode *inode;
+ long ret;
int block;
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ ei = EXT4_I(inode);
#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
ei->i_acl = EXT4_ACL_NOT_CACHED;
ei->i_default_acl = EXT4_ACL_NOT_CACHED;
#endif
ei->i_block_alloc_info = NULL;
- if (__ext4_get_inode_loc(inode, &iloc, 0))
+ ret = __ext4_get_inode_loc(inode, &iloc, 0);
+ if (ret < 0)
goto bad_inode;
bh = iloc.bh;
raw_inode = ext4_raw_inode(&iloc);
@@ -2723,6 +2730,7 @@ void ext4_read_inode(struct inode * inode)
!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
/* this inode is deleted */
brelse (bh);
+ ret = -ESTALE;
goto bad_inode;
}
/* The only unlinked inodes we let through here have
@@ -2761,6 +2769,7 @@ void ext4_read_inode(struct inode * inode)
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
EXT4_INODE_SIZE(inode->i_sb)) {
brelse (bh);
+ ret = -EIO;
goto bad_inode;
}
if (ei->i_extra_isize == 0) {
@@ -2814,11 +2823,12 @@ void ext4_read_inode(struct inode * inode)
}
brelse (iloc.bh);
ext4_set_inode_flags(inode);
- return;
+ unlock_new_inode(inode);
+ return inode;
bad_inode:
- make_bad_inode(inode);
- return;
+ iget_failed(inode);
+ return ERR_PTR(ret);
}
static int ext4_inode_blocks_set(handle_t *handle,
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 67b6d8a1ceff..d153bb5922fc 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1039,17 +1039,11 @@ static struct dentry *ext4_lookup(struct inode * dir, struct dentry *dentry, str
if (!ext4_valid_inum(dir->i_sb, ino)) {
ext4_error(dir->i_sb, "ext4_lookup",
"bad inode number: %lu", ino);
- inode = NULL;
- } else
- inode = iget(dir->i_sb, ino);
-
- if (!inode)
- return ERR_PTR(-EACCES);
-
- if (is_bad_inode(inode)) {
- iput(inode);
- return ERR_PTR(-ENOENT);
+ return ERR_PTR(-EIO);
}
+ inode = ext4_iget(dir->i_sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
}
return d_splice_alias(inode, dentry);
}
@@ -1078,18 +1072,13 @@ struct dentry *ext4_get_parent(struct dentry *child)
if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
ext4_error(child->d_inode->i_sb, "ext4_get_parent",
"bad inode number: %lu", ino);
- inode = NULL;
- } else
- inode = iget(child->d_inode->i_sb, ino);
-
- if (!inode)
- return ERR_PTR(-EACCES);
-
- if (is_bad_inode(inode)) {
- iput(inode);
- return ERR_PTR(-ENOENT);
+ return ERR_PTR(-EIO);
}
+ inode = ext4_iget(child->d_inode->i_sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
parent = d_alloc_anon(inode);
if (!parent) {
iput(inode);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 4fbba60816f4..9477a2bd6ff2 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -779,12 +779,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
"No reserved GDT blocks, can't resize");
return -EPERM;
}
- inode = iget(sb, EXT4_RESIZE_INO);
- if (!inode || is_bad_inode(inode)) {
+ inode = ext4_iget(sb, EXT4_RESIZE_INO);
+ if (IS_ERR(inode)) {
ext4_warning(sb, __FUNCTION__,
"Error opening resize inode");
- iput(inode);
- return -ENOENT;
+ return PTR_ERR(inode);
}
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 055a0cd0168e..93beb865c20d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -777,11 +777,10 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
* Currently we don't know the generation for parent directory, so
* a generation of 0 means "accept any"
*/
- inode = iget(sb, ino);
- if (inode == NULL)
- return ERR_PTR(-ENOMEM);
- if (is_bad_inode(inode) ||
- (generation && inode->i_generation != generation)) {
+ inode = ext4_iget(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (generation && inode->i_generation != generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
@@ -850,7 +849,6 @@ static struct quotactl_ops ext4_qctl_operations = {
static const struct super_operations ext4_sops = {
.alloc_inode = ext4_alloc_inode,
.destroy_inode = ext4_destroy_inode,
- .read_inode = ext4_read_inode,
.write_inode = ext4_write_inode,
.dirty_inode = ext4_dirty_inode,
.delete_inode = ext4_delete_inode,
@@ -1458,7 +1456,7 @@ int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
}
/* Called at mount-time, super-block is locked */
-static int ext4_check_descriptors (struct super_block * sb)
+static int ext4_check_descriptors(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
@@ -1466,8 +1464,6 @@ static int ext4_check_descriptors (struct super_block * sb)
ext4_fsblk_t block_bitmap;
ext4_fsblk_t inode_bitmap;
ext4_fsblk_t inode_table;
- struct ext4_group_desc * gdp = NULL;
- int desc_block = 0;
int flexbg_flag = 0;
ext4_group_t i;
@@ -1476,17 +1472,15 @@ static int ext4_check_descriptors (struct super_block * sb)
ext4_debug ("Checking group descriptors");
- for (i = 0; i < sbi->s_groups_count; i++)
- {
+ for (i = 0; i < sbi->s_groups_count; i++) {
+ struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
+
if (i == sbi->s_groups_count - 1 || flexbg_flag)
last_block = ext4_blocks_count(sbi->s_es) - 1;
else
last_block = first_block +
(EXT4_BLOCKS_PER_GROUP(sb) - 1);
- if ((i % EXT4_DESC_PER_BLOCK(sb)) == 0)
- gdp = (struct ext4_group_desc *)
- sbi->s_group_desc[desc_block++]->b_data;
block_bitmap = ext4_block_bitmap(sb, gdp);
if (block_bitmap < first_block || block_bitmap > last_block)
{
@@ -1524,8 +1518,6 @@ static int ext4_check_descriptors (struct super_block * sb)
}
if (!flexbg_flag)
first_block += EXT4_BLOCKS_PER_GROUP(sb);
- gdp = (struct ext4_group_desc *)
- ((__u8 *)gdp + EXT4_DESC_SIZE(sb));
}
ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb));
@@ -1811,6 +1803,7 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
unsigned long journal_devnum = 0;
unsigned long def_mount_opts;
struct inode *root;
+ int ret = -EINVAL;
int blocksize;
int db_count;
int i;
@@ -2243,19 +2236,24 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
* so we can safely mount the rest of the filesystem now.
*/
- root = iget(sb, EXT4_ROOT_INO);
- sb->s_root = d_alloc_root(root);
- if (!sb->s_root) {
+ root = ext4_iget(sb, EXT4_ROOT_INO);
+ if (IS_ERR(root)) {
printk(KERN_ERR "EXT4-fs: get root inode failed\n");
- iput(root);
+ ret = PTR_ERR(root);
goto failed_mount4;
}
if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
- dput(sb->s_root);
- sb->s_root = NULL;
+ iput(root);
printk(KERN_ERR "EXT4-fs: corrupt root inode, run e2fsck\n");
goto failed_mount4;
}
+ sb->s_root = d_alloc_root(root);
+ if (!sb->s_root) {
+ printk(KERN_ERR "EXT4-fs: get root dentry failed\n");
+ iput(root);
+ ret = -ENOMEM;
+ goto failed_mount4;
+ }
ext4_setup_super (sb, es, sb->s_flags & MS_RDONLY);
@@ -2336,7 +2334,7 @@ out_fail:
sb->s_fs_info = NULL;
kfree(sbi);
lock_kernel();
- return -EINVAL;
+ return ret;
}
/*
@@ -2372,8 +2370,8 @@ static journal_t *ext4_get_journal(struct super_block *sb,
* things happen if we iget() an unused inode, as the subsequent
* iput() will try to delete it. */
- journal_inode = iget(sb, journal_inum);
- if (!journal_inode) {
+ journal_inode = ext4_iget(sb, journal_inum);
+ if (IS_ERR(journal_inode)) {
printk(KERN_ERR "EXT4-fs: no journal found.\n");
return NULL;
}
@@ -2386,7 +2384,7 @@ static journal_t *ext4_get_journal(struct super_block *sb,
jbd_debug(2, "Journal inode found at %p: %Ld bytes\n",
journal_inode, journal_inode->i_size);
- if (is_bad_inode(journal_inode) || !S_ISREG(journal_inode->i_mode)) {
+ if (!S_ISREG(journal_inode->i_mode)) {
printk(KERN_ERR "EXT4-fs: invalid journal inode.\n");
iput(journal_inode);
return NULL;
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 69a83b59dce8..c614175876e0 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -155,6 +155,42 @@ out:
return err;
}
+static int check_mode(const struct msdos_sb_info *sbi, mode_t mode)
+{
+ mode_t req = mode & ~S_IFMT;
+
+ /*
+ * Of the r and x bits, all (subject to umask) must be present. Of the
+ * w bits, either all (subject to umask) or none must be present.
+ */
+
+ if (S_ISREG(mode)) {
+ req &= ~sbi->options.fs_fmask;
+
+ if ((req & (S_IRUGO | S_IXUGO)) !=
+ ((S_IRUGO | S_IXUGO) & ~sbi->options.fs_fmask))
+ return -EPERM;
+
+ if ((req & S_IWUGO) != 0 &&
+ (req & S_IWUGO) != (S_IWUGO & ~sbi->options.fs_fmask))
+ return -EPERM;
+ } else if (S_ISDIR(mode)) {
+ req &= ~sbi->options.fs_dmask;
+
+ if ((req & (S_IRUGO | S_IXUGO)) !=
+ ((S_IRUGO | S_IXUGO) & ~sbi->options.fs_dmask))
+ return -EPERM;
+
+ if ((req & S_IWUGO) != 0 &&
+ (req & S_IWUGO) != (S_IWUGO & ~sbi->options.fs_dmask))
+ return -EPERM;
+ } else {
+ return -EPERM;
+ }
+
+ return 0;
+}
+
int fat_notify_change(struct dentry *dentry, struct iattr *attr)
{
struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
@@ -186,9 +222,7 @@ int fat_notify_change(struct dentry *dentry, struct iattr *attr)
if (((attr->ia_valid & ATTR_UID) &&
(attr->ia_uid != sbi->options.fs_uid)) ||
((attr->ia_valid & ATTR_GID) &&
- (attr->ia_gid != sbi->options.fs_gid)) ||
- ((attr->ia_valid & ATTR_MODE) &&
- (attr->ia_mode & ~MSDOS_VALID_MODE)))
+ (attr->ia_gid != sbi->options.fs_gid)))
error = -EPERM;
if (error) {
@@ -196,6 +230,13 @@ int fat_notify_change(struct dentry *dentry, struct iattr *attr)
error = 0;
goto out;
}
+
+ if (attr->ia_valid & ATTR_MODE) {
+ error = check_mode(sbi, attr->ia_mode);
+ if (error != 0 && !sbi->options.quiet)
+ goto out;
+ }
+
error = inode_setattr(inode, attr);
if (error)
goto out;
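
check_mode() above replaces the old blanket MSDOS_VALID_MODE test: FAT stores no per-file permission bits, so a chmod is accepted only if the requested mode is representable through the mount-time mask. A worked example (illustrative) for a regular file with fs_fmask = 022, i.e. files presented as 0755:

	chmod 0755   accepted   (exactly what the mask presents)
	chmod 0555   accepted   (write bits may be dropped as a whole)
	chmod 0700   rejected   (would drop group/other r and x)
	chmod 0654   rejected   (owner and other x bits missing)

A rejected request fails with -EPERM unless the filesystem is mounted quiet, in which case fat_notify_change() ignores the result, as it already did for uid/gid changes.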
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 920a576e1c25..085269e07fb3 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -634,8 +634,6 @@ static const struct super_operations fat_sops = {
.clear_inode = fat_clear_inode,
.remount_fs = fat_remount,
- .read_inode = make_bad_inode,
-
.show_options = fat_show_options,
};
@@ -663,8 +661,8 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb,
if (fh_len < 5 || fh_type != 3)
return NULL;
- inode = iget(sb, fh[0]);
- if (!inode || is_bad_inode(inode) || inode->i_generation != fh[1]) {
+ inode = ilookup(sb, fh[0]);
+ if (!inode || inode->i_generation != fh[1]) {
if (inode)
iput(inode);
inode = NULL;
@@ -760,7 +758,7 @@ static struct dentry *fat_get_parent(struct dentry *child)
inode = fat_build_inode(child->d_sb, de, i_pos);
brelse(bh);
if (IS_ERR(inode)) {
- parent = ERR_PTR(PTR_ERR(inode));
+ parent = ERR_CAST(inode);
goto out;
}
parent = d_alloc_anon(inode);
@@ -1295,10 +1293,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
fsinfo = (struct fat_boot_fsinfo *)fsinfo_bh->b_data;
if (!IS_FSINFO(fsinfo)) {
- printk(KERN_WARNING
- "FAT: Did not find valid FSINFO signature.\n"
- " Found signature1 0x%08x signature2 0x%08x"
- " (sector = %lu)\n",
+ printk(KERN_WARNING "FAT: Invalid FSINFO signature: "
+ "0x%08x, 0x%08x (sector = %lu)\n",
le32_to_cpu(fsinfo->signature1),
le32_to_cpu(fsinfo->signature2),
sbi->fsinfo_sector);
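
In the fat_fh_to_dentry() hunk above, iget() becomes ilookup() because FAT's ->read_inode was only ever make_bad_inode() and is now gone entirely: there is nothing left that could read an inode in by number. ilookup() only consults the inode cache and never touches the disk, so a miss is not an error here; sketched:

	inode = ilookup(sb, fh[0]);		/* cache hit or NULL, no I/O */
	if (!inode || inode->i_generation != fh[1]) {
		if (inode)
			iput(inode);
		inode = NULL;			/* fall back to the rest of
						 * the file handle */
	}

The is_bad_inode() test disappears for the same reason: ilookup() only returns inodes that were fully instantiated.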
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 308f2b6b5026..61f23511eacf 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -55,9 +55,8 @@ void fat_clusters_flush(struct super_block *sb)
fsinfo = (struct fat_boot_fsinfo *)bh->b_data;
/* Sanity check */
if (!IS_FSINFO(fsinfo)) {
- printk(KERN_ERR "FAT: Did not find valid FSINFO signature.\n"
- " Found signature1 0x%08x signature2 0x%08x"
- " (sector = %lu)\n",
+ printk(KERN_ERR "FAT: Invalid FSINFO signature: "
+ "0x%08x, 0x%08x (sector = %lu)\n",
le32_to_cpu(fsinfo->signature1),
le32_to_cpu(fsinfo->signature2),
sbi->fsinfo_sector);
diff --git a/fs/file.c b/fs/file.c
index c5575de01113..5110acb1c9ef 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -24,6 +24,8 @@ struct fdtable_defer {
struct fdtable *next;
};
+int sysctl_nr_open __read_mostly = 1024*1024;
+
/*
* We use this list to defer free fdtables that have vmalloced
* sets/arrays. By keeping a per-cpu list, we avoid having to embed
@@ -147,8 +149,8 @@ static struct fdtable * alloc_fdtable(unsigned int nr)
nr /= (1024 / sizeof(struct file *));
nr = roundup_pow_of_two(nr + 1);
nr *= (1024 / sizeof(struct file *));
- if (nr > NR_OPEN)
- nr = NR_OPEN;
+ if (nr > sysctl_nr_open)
+ nr = sysctl_nr_open;
fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
if (!fdt)
@@ -233,7 +235,7 @@ int expand_files(struct files_struct *files, int nr)
if (nr < fdt->max_fds)
return 0;
/* Can we expand? */
- if (nr >= NR_OPEN)
+ if (nr >= sysctl_nr_open)
return -EMFILE;
/* All good, so we try */
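
The compile-time NR_OPEN ceiling on the fd table becomes the runtime tunable sysctl_nr_open, defaulting to 1024*1024 entries; presumably this is the knob exposed as fs.nr_open. A worked example of alloc_fdtable()'s sizing on a 64-bit machine (sizeof(struct file *) == 8, so 128 pointers per KB):

	/* asking for 300 fds:  300 / 128 = 2,  roundup_pow_of_two(3) = 4,
	 * 4 * 128 = 512 slots allocated -- and only a result larger than
	 * sysctl_nr_open is clamped back down to that limit. */

expand_files() likewise now fails with -EMFILE once a process asks for a descriptor at or above sysctl_nr_open rather than the old hard NR_OPEN.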
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h
index 91ccee8723f7..2b46064f66b2 100644
--- a/fs/freevxfs/vxfs_extern.h
+++ b/fs/freevxfs/vxfs_extern.h
@@ -58,7 +58,7 @@ extern struct inode * vxfs_get_fake_inode(struct super_block *,
extern void vxfs_put_fake_inode(struct inode *);
extern struct vxfs_inode_info * vxfs_blkiget(struct super_block *, u_long, ino_t);
extern struct vxfs_inode_info * vxfs_stiget(struct super_block *, ino_t);
-extern void vxfs_read_inode(struct inode *);
+extern struct inode * vxfs_iget(struct super_block *, ino_t);
extern void vxfs_clear_inode(struct inode *);
/* vxfs_lookup.c */
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index d1f7c5b5b3c3..ad88d2364bc2 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -129,7 +129,7 @@ fail:
* Description:
* Search the for inode number @ino in the filesystem
* described by @sbp. Use the specified inode table (@ilistp).
- * Returns the matching VxFS inode on success, else a NULL pointer.
+ * Returns the matching VxFS inode on success, else an error code.
*/
static struct vxfs_inode_info *
__vxfs_iget(ino_t ino, struct inode *ilistp)
@@ -157,12 +157,12 @@ __vxfs_iget(ino_t ino, struct inode *ilistp)
}
printk(KERN_WARNING "vxfs: error on page %p\n", pp);
- return NULL;
+ return ERR_CAST(pp);
fail:
printk(KERN_WARNING "vxfs: unable to read inode %ld\n", (unsigned long)ino);
vxfs_put_page(pp);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
/**
@@ -178,7 +178,10 @@ fail:
struct vxfs_inode_info *
vxfs_stiget(struct super_block *sbp, ino_t ino)
{
- return __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_stilist);
+ struct vxfs_inode_info *vip;
+
+ vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_stilist);
+ return IS_ERR(vip) ? NULL : vip;
}
/**
@@ -282,23 +285,32 @@ vxfs_put_fake_inode(struct inode *ip)
}
/**
- * vxfs_read_inode - fill in inode information
- * @ip: inode pointer to fill
+ * vxfs_iget - get an inode
+ * @sbp: the superblock to get the inode for
+ * @ino: the number of the inode to get
*
* Description:
- * vxfs_read_inode reads the disk inode for @ip and fills
- * in all relevant fields in @ip.
+ * vxfs_iget creates an inode, reads the disk inode for @ino and fills
+ * in all relevant fields in the new inode.
*/
-void
-vxfs_read_inode(struct inode *ip)
+struct inode *
+vxfs_iget(struct super_block *sbp, ino_t ino)
{
- struct super_block *sbp = ip->i_sb;
struct vxfs_inode_info *vip;
const struct address_space_operations *aops;
- ino_t ino = ip->i_ino;
-
- if (!(vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_ilist)))
- return;
+ struct inode *ip;
+
+ ip = iget_locked(sbp, ino);
+ if (!ip)
+ return ERR_PTR(-ENOMEM);
+ if (!(ip->i_state & I_NEW))
+ return ip;
+
+ vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_ilist);
+ if (IS_ERR(vip)) {
+ iget_failed(ip);
+ return ERR_CAST(vip);
+ }
vxfs_iinit(ip, vip);
@@ -323,7 +335,8 @@ vxfs_read_inode(struct inode *ip)
} else
init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev));
- return;
+ unlock_new_inode(ip);
+ return ip;
}
/**
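[The vxfs conversion above, like the hfsplus, hostfs, hppfs and isofs conversions later in this series, follows one shape: drop ->read_inode from super_operations and expose an explicit iget helper that uses iget_locked(), fills the inode only when it is new, and reports failure through iget_failed()/ERR_PTR(). A schematic of that shape follows; examplefs_iget() and examplefs_read_inode() are placeholder names, not code from any patch here.]

    struct inode *examplefs_iget(struct super_block *sb, unsigned long ino)
    {
        struct inode *inode;
        int err;

        inode = iget_locked(sb, ino);
        if (!inode)
            return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
            return inode;                   /* found in the inode cache, already set up */

        err = examplefs_read_inode(inode);  /* filesystem-specific fill of the new inode */
        if (err) {
            iget_failed(inode);             /* unlock, unhash and mark the inode bad */
            return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
    }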
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index bf86e5444ea6..aee049cb9f84 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -213,10 +213,10 @@ vxfs_lookup(struct inode *dip, struct dentry *dp, struct nameidata *nd)
lock_kernel();
ino = vxfs_inode_by_name(dip, dp);
if (ino) {
- ip = iget(dip->i_sb, ino);
- if (!ip) {
+ ip = vxfs_iget(dip->i_sb, ino);
+ if (IS_ERR(ip)) {
unlock_kernel();
- return ERR_PTR(-EACCES);
+ return ERR_CAST(ip);
}
}
unlock_kernel();
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index 4f95572d2722..1dacda831577 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -60,7 +60,6 @@ static int vxfs_statfs(struct dentry *, struct kstatfs *);
static int vxfs_remount(struct super_block *, int *, char *);
static const struct super_operations vxfs_super_ops = {
- .read_inode = vxfs_read_inode,
.clear_inode = vxfs_clear_inode,
.put_super = vxfs_put_super,
.statfs = vxfs_statfs,
@@ -153,6 +152,7 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
struct buffer_head *bp = NULL;
u_long bsize;
struct inode *root;
+ int ret = -EINVAL;
sbp->s_flags |= MS_RDONLY;
@@ -219,7 +219,11 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
}
sbp->s_op = &vxfs_super_ops;
- root = iget(sbp, VXFS_ROOT_INO);
+ root = vxfs_iget(sbp, VXFS_ROOT_INO);
+ if (IS_ERR(root)) {
+ ret = PTR_ERR(root);
+ goto out;
+ }
sbp->s_root = d_alloc_root(root);
if (!sbp->s_root) {
iput(root);
@@ -236,7 +240,7 @@ out_free_ilist:
out:
brelse(bp);
kfree(infp);
- return -EINVAL;
+ return ret;
}
/*
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 300324bd563c..db80ce9eb1d0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -284,7 +284,17 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
* soon as the queue becomes uncongested.
*/
inode->i_state |= I_DIRTY_PAGES;
- requeue_io(inode);
+ if (wbc->nr_to_write <= 0) {
+ /*
+ * slice used up: queue for next turn
+ */
+ requeue_io(inode);
+ } else {
+ /*
+ * somehow blocked: retry later
+ */
+ redirty_tail(inode);
+ }
} else {
/*
* Otherwise fully redirty the inode so that
@@ -334,9 +344,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
WARN_ON(inode->i_state & I_WILL_FREE);
if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
- struct address_space *mapping = inode->i_mapping;
- int ret;
-
/*
* We're skipping this inode because it's locked, and we're not
* doing writeback-for-data-integrity. Move it to s_more_io so
@@ -345,15 +352,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* completed a full scan of s_io.
*/
requeue_io(inode);
-
- /*
- * Even if we don't actually write the inode itself here,
- * we can at least start some of the data writeout..
- */
- spin_unlock(&inode_lock);
- ret = do_writepages(mapping, wbc);
- spin_lock(&inode_lock);
- return ret;
+ return 0;
}
/*
@@ -479,8 +478,12 @@ sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
iput(inode);
cond_resched();
spin_lock(&inode_lock);
- if (wbc->nr_to_write <= 0)
+ if (wbc->nr_to_write <= 0) {
+ wbc->more_io = 1;
break;
+ }
+ if (!list_empty(&sb->s_more_io))
+ wbc->more_io = 1;
}
return; /* Leave any unwritten inodes on s_io */
}
@@ -512,8 +515,7 @@ writeback_inodes(struct writeback_control *wbc)
might_sleep();
spin_lock(&sb_lock);
restart:
- sb = sb_entry(super_blocks.prev);
- for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
+ list_for_each_entry_reverse(sb, &super_blocks, s_list) {
if (sb_has_dirty_inodes(sb)) {
/* we're making our own get_super here */
sb->s_count++;
@@ -578,10 +580,8 @@ static void set_sb_syncing(int val)
{
struct super_block *sb;
spin_lock(&sb_lock);
- sb = sb_entry(super_blocks.prev);
- for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
+ list_for_each_entry_reverse(sb, &super_blocks, s_list)
sb->s_syncing = val;
- }
spin_unlock(&sb_lock);
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index db534bcde45f..af639807524e 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -201,6 +201,55 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
}
}
+static unsigned len_args(unsigned numargs, struct fuse_arg *args)
+{
+ unsigned nbytes = 0;
+ unsigned i;
+
+ for (i = 0; i < numargs; i++)
+ nbytes += args[i].size;
+
+ return nbytes;
+}
+
+static u64 fuse_get_unique(struct fuse_conn *fc)
+{
+ fc->reqctr++;
+ /* zero is special */
+ if (fc->reqctr == 0)
+ fc->reqctr = 1;
+
+ return fc->reqctr;
+}
+
+static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
+{
+ req->in.h.unique = fuse_get_unique(fc);
+ req->in.h.len = sizeof(struct fuse_in_header) +
+ len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
+ list_add_tail(&req->list, &fc->pending);
+ req->state = FUSE_REQ_PENDING;
+ if (!req->waiting) {
+ req->waiting = 1;
+ atomic_inc(&fc->num_waiting);
+ }
+ wake_up(&fc->waitq);
+ kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+}
+
+static void flush_bg_queue(struct fuse_conn *fc)
+{
+ while (fc->active_background < FUSE_MAX_BACKGROUND &&
+ !list_empty(&fc->bg_queue)) {
+ struct fuse_req *req;
+
+ req = list_entry(fc->bg_queue.next, struct fuse_req, list);
+ list_del(&req->list);
+ fc->active_background++;
+ queue_request(fc, req);
+ }
+}
+
/*
* This function is called when a request is finished. Either a reply
* has arrived or it was aborted (and not yet sent) or some error
@@ -229,6 +278,8 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
clear_bdi_congested(&fc->bdi, WRITE);
}
fc->num_background--;
+ fc->active_background--;
+ flush_bg_queue(fc);
}
spin_unlock(&fc->lock);
wake_up(&req->waitq);
@@ -320,42 +371,6 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
}
}
-static unsigned len_args(unsigned numargs, struct fuse_arg *args)
-{
- unsigned nbytes = 0;
- unsigned i;
-
- for (i = 0; i < numargs; i++)
- nbytes += args[i].size;
-
- return nbytes;
-}
-
-static u64 fuse_get_unique(struct fuse_conn *fc)
- {
- fc->reqctr++;
- /* zero is special */
- if (fc->reqctr == 0)
- fc->reqctr = 1;
-
- return fc->reqctr;
-}
-
-static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
-{
- req->in.h.unique = fuse_get_unique(fc);
- req->in.h.len = sizeof(struct fuse_in_header) +
- len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
- list_add_tail(&req->list, &fc->pending);
- req->state = FUSE_REQ_PENDING;
- if (!req->waiting) {
- req->waiting = 1;
- atomic_inc(&fc->num_waiting);
- }
- wake_up(&fc->waitq);
- kill_fasync(&fc->fasync, SIGIO, POLL_IN);
-}
-
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
req->isreply = 1;
@@ -375,20 +390,26 @@ void request_send(struct fuse_conn *fc, struct fuse_req *req)
spin_unlock(&fc->lock);
}
+static void request_send_nowait_locked(struct fuse_conn *fc,
+ struct fuse_req *req)
+{
+ req->background = 1;
+ fc->num_background++;
+ if (fc->num_background == FUSE_MAX_BACKGROUND)
+ fc->blocked = 1;
+ if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
+ set_bdi_congested(&fc->bdi, READ);
+ set_bdi_congested(&fc->bdi, WRITE);
+ }
+ list_add_tail(&req->list, &fc->bg_queue);
+ flush_bg_queue(fc);
+}
+
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
spin_lock(&fc->lock);
if (fc->connected) {
- req->background = 1;
- fc->num_background++;
- if (fc->num_background == FUSE_MAX_BACKGROUND)
- fc->blocked = 1;
- if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
- set_bdi_congested(&fc->bdi, READ);
- set_bdi_congested(&fc->bdi, WRITE);
- }
-
- queue_request(fc, req);
+ request_send_nowait_locked(fc, req);
spin_unlock(&fc->lock);
} else {
req->out.h.error = -ENOTCONN;
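[The new bg_queue/flush_bg_queue() machinery above throttles background requests: they are parked on a holding list and promoted to the pending queue only while the active count is below the limit, and every completed request re-runs the flush. A tiny self-contained model of that throttle; the counters stand in for the real request lists and the limit value is illustrative.]

    #include <stdio.h>

    #define MAX_ACTIVE 12   /* illustrative limit, standing in for FUSE_MAX_BACKGROUND */

    static int active, queued;

    static void flush_bg_queue(void)
    {
        while (active < MAX_ACTIVE && queued > 0) {
            queued--;
            active++;       /* queue_request() in the real code */
        }
    }

    static void send_background(void)
    {
        queued++;           /* park on the holding queue first */
        flush_bg_queue();
    }

    static void request_end(void)
    {
        active--;           /* a slot opened up */
        flush_bg_queue();
    }

    int main(void)
    {
        for (int i = 0; i < 20; i++)
            send_background();
        printf("active=%d queued=%d\n", active, queued);  /* 12 / 8 */
        request_end();
        printf("active=%d queued=%d\n", active, queued);  /* 12 / 7 */
        return 0;
    }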
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 80d2f5292cf9..7fb514b6d852 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -269,12 +269,12 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
req = fuse_get_req(fc);
if (IS_ERR(req))
- return ERR_PTR(PTR_ERR(req));
+ return ERR_CAST(req);
forget_req = fuse_get_req(fc);
if (IS_ERR(forget_req)) {
fuse_put_request(fc, req);
- return ERR_PTR(PTR_ERR(forget_req));
+ return ERR_CAST(forget_req);
}
attr_version = fuse_get_attr_version(fc);
@@ -416,6 +416,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
fuse_put_request(fc, forget_req);
d_instantiate(entry, inode);
fuse_change_entry_timeout(entry, &outentry);
+ fuse_invalidate_attr(dir);
file = lookup_instantiate_filp(nd, entry, generic_file_open);
if (IS_ERR(file)) {
ff->fh = outopen.fh;
@@ -1005,7 +1006,7 @@ static char *read_link(struct dentry *dentry)
char *link;
if (IS_ERR(req))
- return ERR_PTR(PTR_ERR(req));
+ return ERR_CAST(req);
link = (char *) __get_free_page(GFP_KERNEL);
if (!link) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index bb05d227cf30..676b0bc8a86d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -77,8 +77,8 @@ static struct fuse_file *fuse_file_get(struct fuse_file *ff)
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
- dput(req->dentry);
- mntput(req->vfsmount);
+ dput(req->misc.release.dentry);
+ mntput(req->misc.release.vfsmount);
fuse_put_request(fc, req);
}
@@ -86,7 +86,8 @@ static void fuse_file_put(struct fuse_file *ff)
{
if (atomic_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
- struct fuse_conn *fc = get_fuse_conn(req->dentry->d_inode);
+ struct inode *inode = req->misc.release.dentry->d_inode;
+ struct fuse_conn *fc = get_fuse_conn(inode);
req->end = fuse_release_end;
request_send_background(fc, req);
kfree(ff);
@@ -137,7 +138,7 @@ int fuse_open_common(struct inode *inode, struct file *file, int isdir)
void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
{
struct fuse_req *req = ff->reserved_req;
- struct fuse_release_in *inarg = &req->misc.release_in;
+ struct fuse_release_in *inarg = &req->misc.release.in;
inarg->fh = ff->fh;
inarg->flags = flags;
@@ -153,13 +154,14 @@ int fuse_release_common(struct inode *inode, struct file *file, int isdir)
struct fuse_file *ff = file->private_data;
if (ff) {
struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_req *req = ff->reserved_req;
fuse_release_fill(ff, get_node_id(inode), file->f_flags,
isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);
/* Hold vfsmount and dentry until release is finished */
- ff->reserved_req->vfsmount = mntget(file->f_path.mnt);
- ff->reserved_req->dentry = dget(file->f_path.dentry);
+ req->misc.release.vfsmount = mntget(file->f_path.mnt);
+ req->misc.release.dentry = dget(file->f_path.dentry);
spin_lock(&fc->lock);
list_del(&ff->write_entry);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 3ab8a3048e8b..67aaf6ee38ea 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -215,7 +215,11 @@ struct fuse_req {
/** Data for asynchronous requests */
union {
struct fuse_forget_in forget_in;
- struct fuse_release_in release_in;
+ struct {
+ struct fuse_release_in in;
+ struct vfsmount *vfsmount;
+ struct dentry *dentry;
+ } release;
struct fuse_init_in init_in;
struct fuse_init_out init_out;
struct fuse_read_in read_in;
@@ -238,12 +242,6 @@ struct fuse_req {
/** File used in the request (or NULL) */
struct fuse_file *ff;
- /** vfsmount used in release */
- struct vfsmount *vfsmount;
-
- /** dentry used in release */
- struct dentry *dentry;
-
/** Request completion callback */
void (*end)(struct fuse_conn *, struct fuse_req *);
@@ -298,6 +296,12 @@ struct fuse_conn {
/** Number of requests currently in the background */
unsigned num_background;
+ /** Number of background requests currently queued for userspace */
+ unsigned active_background;
+
+ /** The list of background requests set aside for later queuing */
+ struct list_head bg_queue;
+
/** Pending interrupts */
struct list_head interrupts;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e5e80d1a4687..574707409bbf 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -76,11 +76,6 @@ static void fuse_destroy_inode(struct inode *inode)
kmem_cache_free(fuse_inode_cachep, inode);
}
-static void fuse_read_inode(struct inode *inode)
-{
- /* No op */
-}
-
void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
unsigned long nodeid, u64 nlookup)
{
@@ -465,6 +460,7 @@ static struct fuse_conn *new_conn(void)
INIT_LIST_HEAD(&fc->processing);
INIT_LIST_HEAD(&fc->io);
INIT_LIST_HEAD(&fc->interrupts);
+ INIT_LIST_HEAD(&fc->bg_queue);
atomic_set(&fc->num_waiting, 0);
fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
fc->bdi.unplug_io_fn = default_unplug_io_fn;
@@ -514,7 +510,6 @@ static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
static const struct super_operations fuse_super_operations = {
.alloc_inode = fuse_alloc_inode,
.destroy_inode = fuse_destroy_inode,
- .read_inode = fuse_read_inode,
.clear_inode = fuse_clear_inode,
.drop_inode = generic_delete_inode,
.remount_fs = fuse_remount_fs,
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index e4effc47abfc..e9456ebd3bb6 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -932,7 +932,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping)
if (!gfs2_is_writeback(ip))
gfs2_trans_add_bh(ip->i_gl, bh, 0);
- zero_user_page(page, offset, length, KM_USER0);
+ zero_user(page, offset, length);
unlock:
unlock_page(page);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 57e2ed932adc..c34709512b19 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1498,7 +1498,7 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
if (dent) {
if (IS_ERR(dent))
- return ERR_PTR(PTR_ERR(dent));
+ return ERR_CAST(dent);
inode = gfs2_inode_lookup(dir->i_sb,
be16_to_cpu(dent->de_type),
be64_to_cpu(dent->de_inum.no_addr),
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 80e09c50590a..7175a4d06435 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -334,7 +334,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
gl->gl_hash = hash;
- gl->gl_owner_pid = 0;
+ gl->gl_owner_pid = NULL;
gl->gl_ip = 0;
gl->gl_ops = glops;
gl->gl_req_gh = NULL;
@@ -399,7 +399,7 @@ void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
INIT_LIST_HEAD(&gh->gh_list);
gh->gh_gl = gl;
gh->gh_ip = (unsigned long)__builtin_return_address(0);
- gh->gh_owner_pid = current->pid;
+ gh->gh_owner_pid = get_pid(task_pid(current));
gh->gh_state = state;
gh->gh_flags = flags;
gh->gh_error = 0;
@@ -433,6 +433,7 @@ void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *
void gfs2_holder_uninit(struct gfs2_holder *gh)
{
+ put_pid(gh->gh_owner_pid);
gfs2_glock_put(gh->gh_gl);
gh->gh_gl = NULL;
gh->gh_ip = 0;
@@ -631,7 +632,7 @@ static void gfs2_glmutex_lock(struct gfs2_glock *gl)
wait_on_holder(&gh);
gfs2_holder_uninit(&gh);
} else {
- gl->gl_owner_pid = current->pid;
+ gl->gl_owner_pid = get_pid(task_pid(current));
gl->gl_ip = (unsigned long)__builtin_return_address(0);
spin_unlock(&gl->gl_spin);
}
@@ -652,7 +653,7 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
acquired = 0;
} else {
- gl->gl_owner_pid = current->pid;
+ gl->gl_owner_pid = get_pid(task_pid(current));
gl->gl_ip = (unsigned long)__builtin_return_address(0);
}
spin_unlock(&gl->gl_spin);
@@ -668,12 +669,17 @@ static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
+ struct pid *pid;
+
spin_lock(&gl->gl_spin);
clear_bit(GLF_LOCK, &gl->gl_flags);
- gl->gl_owner_pid = 0;
+ pid = gl->gl_owner_pid;
+ gl->gl_owner_pid = NULL;
gl->gl_ip = 0;
run_queue(gl);
spin_unlock(&gl->gl_spin);
+
+ put_pid(pid);
}
/**
@@ -1045,7 +1051,7 @@ static int glock_wait_internal(struct gfs2_holder *gh)
}
static inline struct gfs2_holder *
-find_holder_by_owner(struct list_head *head, pid_t pid)
+find_holder_by_owner(struct list_head *head, struct pid *pid)
{
struct gfs2_holder *gh;
@@ -1082,7 +1088,7 @@ static void add_to_queue(struct gfs2_holder *gh)
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_holder *existing;
- BUG_ON(!gh->gh_owner_pid);
+ BUG_ON(gh->gh_owner_pid == NULL);
if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
BUG();
@@ -1092,12 +1098,14 @@ static void add_to_queue(struct gfs2_holder *gh)
if (existing) {
print_symbol(KERN_WARNING "original: %s\n",
existing->gh_ip);
- printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
+ printk(KERN_INFO "pid : %d\n",
+ pid_nr(existing->gh_owner_pid));
printk(KERN_INFO "lock type : %d lock state : %d\n",
existing->gh_gl->gl_name.ln_type,
existing->gh_gl->gl_state);
print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
- printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
+ printk(KERN_INFO "pid : %d\n",
+ pid_nr(gh->gh_owner_pid));
printk(KERN_INFO "lock type : %d lock state : %d\n",
gl->gl_name.ln_type, gl->gl_state);
BUG();
@@ -1798,8 +1806,9 @@ static int dump_holder(struct glock_iter *gi, char *str,
print_dbg(gi, " %s\n", str);
if (gh->gh_owner_pid) {
- print_dbg(gi, " owner = %ld ", (long)gh->gh_owner_pid);
- gh_owner = find_task_by_pid(gh->gh_owner_pid);
+ print_dbg(gi, " owner = %ld ",
+ (long)pid_nr(gh->gh_owner_pid));
+ gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
if (gh_owner)
print_dbg(gi, "(%s)\n", gh_owner->comm);
else
@@ -1877,13 +1886,13 @@ static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
print_dbg(gi, " gl_state = %u\n", gl->gl_state);
if (gl->gl_owner_pid) {
- gl_owner = find_task_by_pid(gl->gl_owner_pid);
+ gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID);
if (gl_owner)
print_dbg(gi, " gl_owner = pid %d (%s)\n",
- gl->gl_owner_pid, gl_owner->comm);
+ pid_nr(gl->gl_owner_pid), gl_owner->comm);
else
print_dbg(gi, " gl_owner = %d (ended)\n",
- gl->gl_owner_pid);
+ pid_nr(gl->gl_owner_pid));
} else
print_dbg(gi, " gl_owner = -1\n");
print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index b16f604eea9f..2f9c6d136b37 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -36,11 +36,13 @@ static inline int gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
struct gfs2_holder *gh;
int locked = 0;
+ struct pid *pid;
/* Look in glock's list of holders for one with current task as owner */
spin_lock(&gl->gl_spin);
+ pid = task_pid(current);
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
- if (gh->gh_owner_pid == current->pid) {
+ if (gh->gh_owner_pid == pid) {
locked = 1;
break;
}
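[The gfs2 hunks above replace raw pid_t numbers with reference-counted struct pid pointers, which stay valid and unambiguous across PID reuse and namespaces for as long as the reference is held. Schematically the ownership pattern is the one below; example_holder is a made-up type, locking is omitted, and this is not code from the patch.]

    struct example_holder {
        struct pid *owner_pid;
    };

    static void holder_init(struct example_holder *h)
    {
        h->owner_pid = get_pid(task_pid(current));  /* take a reference on the owner's pid */
    }

    static void holder_uninit(struct example_holder *h)
    {
        put_pid(h->owner_pid);                      /* drop the reference again */
        h->owner_pid = NULL;
    }

    static int holder_is_mine(struct example_holder *h)
    {
        return h->owner_pid == task_pid(current);   /* compare struct pid pointers, not numbers */
    }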
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 513aaf0dc0ab..525dcae352d6 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -151,7 +151,7 @@ struct gfs2_holder {
struct list_head gh_list;
struct gfs2_glock *gh_gl;
- pid_t gh_owner_pid;
+ struct pid *gh_owner_pid;
unsigned int gh_state;
unsigned gh_flags;
@@ -182,7 +182,7 @@ struct gfs2_glock {
unsigned int gl_hash;
unsigned int gl_demote_state; /* state requested by remote node */
unsigned long gl_demote_time; /* time of first demote request */
- pid_t gl_owner_pid;
+ struct pid *gl_owner_pid;
unsigned long gl_ip;
struct list_head gl_holders;
struct list_head gl_waiters1; /* HIF_MUTEX */
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 728d3169e7bd..37725ade3c51 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -240,7 +240,7 @@ fail_put:
ip->i_gl->gl_object = NULL;
gfs2_glock_put(ip->i_gl);
fail:
- iput(inode);
+ iget_failed(inode);
return ERR_PTR(error);
}
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 38dbe99a30ed..ac772b6d9dbb 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -446,7 +446,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
* so we need to supply one here. It doesn't happen often.
*/
if (unlikely(page->index)) {
- zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+ zero_user(page, 0, PAGE_CACHE_SIZE);
return 0;
}
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index b9da62348a87..334c7f85351b 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -143,7 +143,7 @@ static struct dentry *gfs2_get_parent(struct dentry *child)
* have to return that as a(n invalid) pointer to dentry.
*/
if (IS_ERR(inode))
- return ERR_PTR(PTR_ERR(inode));
+ return ERR_CAST(inode);
dentry = d_alloc_anon(inode);
if (!dentry) {
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 9f71372c1757..e87412902bed 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -111,7 +111,7 @@ static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
inode = gfs2_lookupi(dir, &dentry->d_name, 0, nd);
if (inode && IS_ERR(inode))
- return ERR_PTR(PTR_ERR(inode));
+ return ERR_CAST(inode);
if (inode) {
struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index f8452a0eab56..4129cdb3f0d8 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -52,9 +52,9 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
rec = (e + b) / 2;
len = hfs_brec_lenoff(bnode, rec, &off);
keylen = hfs_brec_keylen(bnode, rec);
- if (keylen == HFS_BAD_KEYLEN) {
+ if (keylen == 0) {
res = -EINVAL;
- goto done;
+ goto fail;
}
hfs_bnode_read(bnode, fd->key, off, keylen);
cmpval = bnode->tree->keycmp(fd->key, fd->search_key);
@@ -71,9 +71,9 @@ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd)
if (rec != e && e >= 0) {
len = hfs_brec_lenoff(bnode, e, &off);
keylen = hfs_brec_keylen(bnode, e);
- if (keylen == HFS_BAD_KEYLEN) {
+ if (keylen == 0) {
res = -EINVAL;
- goto done;
+ goto fail;
}
hfs_bnode_read(bnode, fd->key, off, keylen);
}
@@ -83,6 +83,7 @@ done:
fd->keylength = keylen;
fd->entryoffset = off + keylen;
fd->entrylength = len - keylen;
+fail:
return res;
}
@@ -206,7 +207,7 @@ int hfs_brec_goto(struct hfs_find_data *fd, int cnt)
len = hfs_brec_lenoff(bnode, fd->record, &off);
keylen = hfs_brec_keylen(bnode, fd->record);
- if (keylen == HFS_BAD_KEYLEN) {
+ if (keylen == 0) {
res = -EINVAL;
goto out;
}
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 8626ee375ea8..878bf25dbc6a 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -49,14 +49,14 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
if (retval > node->tree->max_key_len + 2) {
printk(KERN_ERR "hfs: keylen %d too large\n",
retval);
- retval = HFS_BAD_KEYLEN;
+ retval = 0;
}
} else {
retval = (hfs_bnode_read_u8(node, recoff) | 1) + 1;
if (retval > node->tree->max_key_len + 1) {
printk(KERN_ERR "hfs: keylen %d too large\n",
retval);
- retval = HFS_BAD_KEYLEN;
+ retval = 0;
}
}
}
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 110dd3515dc8..24cf6fc43021 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -81,15 +81,23 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
goto fail_page;
if (!tree->node_count)
goto fail_page;
- if ((id == HFS_EXT_CNID) && (tree->max_key_len != HFS_MAX_EXT_KEYLEN)) {
- printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
- tree->max_key_len);
- goto fail_page;
- }
- if ((id == HFS_CAT_CNID) && (tree->max_key_len != HFS_MAX_CAT_KEYLEN)) {
- printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
- tree->max_key_len);
- goto fail_page;
+ switch (id) {
+ case HFS_EXT_CNID:
+ if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
+ printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
+ tree->max_key_len);
+ goto fail_page;
+ }
+ break;
+ case HFS_CAT_CNID:
+ if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
+ printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
+ tree->max_key_len);
+ goto fail_page;
+ }
+ break;
+ default:
+ BUG();
}
tree->node_size_shift = ffs(size) - 1;
diff --git a/fs/hfs/hfs.h b/fs/hfs/hfs.h
index c6aae61adfe6..6f194d0768b6 100644
--- a/fs/hfs/hfs.h
+++ b/fs/hfs/hfs.h
@@ -28,8 +28,6 @@
#define HFS_MAX_NAMELEN 128
#define HFS_MAX_VALENCE 32767U
-#define HFS_BAD_KEYLEN 0xFF
-
/* Meanings of the drAtrb field of the MDB,
* Reference: _Inside Macintosh: Files_ p. 2-61
*/
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 16cbd902f8b9..32de44ed0021 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -6,7 +6,7 @@
* This file may be distributed under the terms of the GNU General Public License.
*
* This file contains hfs_read_super(), some of the super_ops and
- * init_module() and cleanup_module(). The remaining super_ops are in
+ * init_hfs_fs() and exit_hfs_fs(). The remaining super_ops are in
* inode.c since they deal with inodes.
*
* Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 050d29c0a5b5..bb5433608a42 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -22,6 +22,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
struct hfs_btree *tree;
struct hfs_btree_header_rec *head;
struct address_space *mapping;
+ struct inode *inode;
struct page *page;
unsigned int size;
@@ -33,9 +34,10 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
spin_lock_init(&tree->hash_lock);
tree->sb = sb;
tree->cnid = id;
- tree->inode = iget(sb, id);
- if (!tree->inode)
+ inode = hfsplus_iget(sb, id);
+ if (IS_ERR(inode))
goto free_tree;
+ tree->inode = inode;
mapping = tree->inode->i_mapping;
page = read_mapping_page(mapping, 0, NULL);
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 1955ee61251c..29683645fa0a 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -97,9 +97,9 @@ again:
goto fail;
}
hfs_find_exit(&fd);
- inode = iget(dir->i_sb, cnid);
- if (!inode)
- return ERR_PTR(-EACCES);
+ inode = hfsplus_iget(dir->i_sb, cnid);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
if (S_ISREG(inode->i_mode))
HFSPLUS_I(inode).dev = linkid;
out:
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index d9f5eda6d039..d72d0a8b25aa 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -345,6 +345,9 @@ int hfsplus_parse_options(char *, struct hfsplus_sb_info *);
void hfsplus_fill_defaults(struct hfsplus_sb_info *);
int hfsplus_show_options(struct seq_file *, struct vfsmount *);
+/* super.c */
+struct inode *hfsplus_iget(struct super_block *, unsigned long);
+
/* tables.c */
extern u16 hfsplus_case_fold_table[];
extern u16 hfsplus_decompose_table[];
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index ecf70dafb643..b0f9ad362d1d 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -20,11 +20,18 @@ static void hfsplus_destroy_inode(struct inode *inode);
#include "hfsplus_fs.h"
-static void hfsplus_read_inode(struct inode *inode)
+struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
{
struct hfs_find_data fd;
struct hfsplus_vh *vhdr;
- int err;
+ struct inode *inode;
+ long err = -EIO;
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
init_MUTEX(&HFSPLUS_I(inode).extents_lock);
@@ -41,7 +48,7 @@ static void hfsplus_read_inode(struct inode *inode)
hfs_find_exit(&fd);
if (err)
goto bad_inode;
- return;
+ goto done;
}
vhdr = HFSPLUS_SB(inode->i_sb).s_vhdr;
switch(inode->i_ino) {
@@ -70,10 +77,13 @@ static void hfsplus_read_inode(struct inode *inode)
goto bad_inode;
}
- return;
+done:
+ unlock_new_inode(inode);
+ return inode;
- bad_inode:
- make_bad_inode(inode);
+bad_inode:
+ iget_failed(inode);
+ return ERR_PTR(err);
}
static int hfsplus_write_inode(struct inode *inode, int unused)
@@ -262,7 +272,6 @@ static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
static const struct super_operations hfsplus_sops = {
.alloc_inode = hfsplus_alloc_inode,
.destroy_inode = hfsplus_destroy_inode,
- .read_inode = hfsplus_read_inode,
.write_inode = hfsplus_write_inode,
.clear_inode = hfsplus_clear_inode,
.put_super = hfsplus_put_super,
@@ -278,7 +287,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
struct hfsplus_sb_info *sbi;
hfsplus_cat_entry entry;
struct hfs_find_data fd;
- struct inode *root;
+ struct inode *root, *inode;
struct qstr str;
struct nls_table *nls = NULL;
int err = -EINVAL;
@@ -366,18 +375,25 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
goto cleanup;
}
- HFSPLUS_SB(sb).alloc_file = iget(sb, HFSPLUS_ALLOC_CNID);
- if (!HFSPLUS_SB(sb).alloc_file) {
+ inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
+ if (IS_ERR(inode)) {
printk(KERN_ERR "hfs: failed to load allocation file\n");
+ err = PTR_ERR(inode);
goto cleanup;
}
+ HFSPLUS_SB(sb).alloc_file = inode;
/* Load the root directory */
- root = iget(sb, HFSPLUS_ROOT_CNID);
+ root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
+ if (IS_ERR(root)) {
+ printk(KERN_ERR "hfs: failed to load root directory\n");
+ err = PTR_ERR(root);
+ goto cleanup;
+ }
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
- printk(KERN_ERR "hfs: failed to load root directory\n");
iput(root);
+ err = -ENOMEM;
goto cleanup;
}
sb->s_root->d_op = &hfsplus_dentry_operations;
@@ -390,9 +406,12 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
hfs_find_exit(&fd);
if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
goto cleanup;
- HFSPLUS_SB(sb).hidden_dir = iget(sb, be32_to_cpu(entry.folder.id));
- if (!HFSPLUS_SB(sb).hidden_dir)
+ inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
goto cleanup;
+ }
+ HFSPLUS_SB(sb).hidden_dir = inode;
} else
hfs_find_exit(&fd);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 8966b050196e..2b9b35733aac 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -202,7 +202,7 @@ static char *follow_link(char *link)
return ERR_PTR(n);
}
-static int read_inode(struct inode *ino)
+static int hostfs_read_inode(struct inode *ino)
{
char *name;
int err = 0;
@@ -233,6 +233,25 @@ static int read_inode(struct inode *ino)
return err;
}
+static struct inode *hostfs_iget(struct super_block *sb)
+{
+ struct inode *inode;
+ long ret;
+
+ inode = iget_locked(sb, 0);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (inode->i_state & I_NEW) {
+ ret = hostfs_read_inode(inode);
+ if (ret < 0) {
+ iget_failed(inode);
+ return ERR_PTR(ret);
+ }
+ unlock_new_inode(inode);
+ }
+ return inode;
+}
+
int hostfs_statfs(struct dentry *dentry, struct kstatfs *sf)
{
/*
@@ -303,17 +322,11 @@ static void hostfs_destroy_inode(struct inode *inode)
kfree(HOSTFS_I(inode));
}
-static void hostfs_read_inode(struct inode *inode)
-{
- read_inode(inode);
-}
-
static const struct super_operations hostfs_sbops = {
.alloc_inode = hostfs_alloc_inode,
.drop_inode = generic_delete_inode,
.delete_inode = hostfs_delete_inode,
.destroy_inode = hostfs_destroy_inode,
- .read_inode = hostfs_read_inode,
.statfs = hostfs_statfs,
};
@@ -571,10 +584,11 @@ int hostfs_create(struct inode *dir, struct dentry *dentry, int mode,
char *name;
int error, fd;
- error = -ENOMEM;
- inode = iget(dir->i_sb, 0);
- if (inode == NULL)
+ inode = hostfs_iget(dir->i_sb);
+ if (IS_ERR(inode)) {
+ error = PTR_ERR(inode);
goto out;
+ }
error = init_inode(inode, dentry);
if (error)
@@ -615,10 +629,11 @@ struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
char *name;
int err;
- err = -ENOMEM;
- inode = iget(ino->i_sb, 0);
- if (inode == NULL)
+ inode = hostfs_iget(ino->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
goto out;
+ }
err = init_inode(inode, dentry);
if (err)
@@ -736,11 +751,13 @@ int hostfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
struct inode *inode;
char *name;
- int err = -ENOMEM;
+ int err;
- inode = iget(dir->i_sb, 0);
- if (inode == NULL)
+ inode = hostfs_iget(dir->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
goto out;
+ }
err = init_inode(inode, dentry);
if (err)
@@ -952,9 +969,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
sprintf(host_root_path, "%s/%s", root_ino, req_root);
- root_inode = iget(sb, 0);
- if (root_inode == NULL)
+ root_inode = hostfs_iget(sb);
+ if (IS_ERR(root_inode)) {
+ err = PTR_ERR(root_inode);
goto out_free;
+ }
err = init_inode(root_inode, NULL);
if (err)
@@ -972,7 +991,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
if (sb->s_root == NULL)
goto out_put;
- err = read_inode(root_inode);
+ err = hostfs_read_inode(root_inode);
if (err) {
/* No iput in this case because the dput does that for us */
dput(sb->s_root);
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 35c1a9f33f47..53fd0a67c11a 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -285,17 +285,17 @@ int set_attr(const char *file, struct hostfs_iattr *attrs, int fd)
return err;
times[0].tv_sec = atime_ts.tv_sec;
- times[0].tv_usec = atime_ts.tv_nsec * 1000;
+ times[0].tv_usec = atime_ts.tv_nsec / 1000;
times[1].tv_sec = mtime_ts.tv_sec;
- times[1].tv_usec = mtime_ts.tv_nsec * 1000;
+ times[1].tv_usec = mtime_ts.tv_nsec / 1000;
if (attrs->ia_valid & HOSTFS_ATTR_ATIME_SET) {
times[0].tv_sec = attrs->ia_atime.tv_sec;
- times[0].tv_usec = attrs->ia_atime.tv_nsec * 1000;
+ times[0].tv_usec = attrs->ia_atime.tv_nsec / 1000;
}
if (attrs->ia_valid & HOSTFS_ATTR_MTIME_SET) {
times[1].tv_sec = attrs->ia_mtime.tv_sec;
- times[1].tv_usec = attrs->ia_mtime.tv_nsec * 1000;
+ times[1].tv_usec = attrs->ia_mtime.tv_nsec / 1000;
}
if (fd >= 0) {
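[The hostfs_user.c hunk fixes a unit conversion: struct timeval's tv_usec holds microseconds, so a tv_nsec value must be divided by 1000, not multiplied. A two-line demonstration of why the old arithmetic produced out-of-range values:]

    #include <stdio.h>

    int main(void)
    {
        long tv_nsec = 123456789;                       /* 0.123456789 s */
        long long wrong = (long long)tv_nsec * 1000;    /* 123456789000: far outside 0..999999 */
        long right = tv_nsec / 1000;                    /* 123456 microseconds */

        printf("wrong=%lld right=%ld\n", wrong, right);
        return 0;
    }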
diff --git a/fs/hppfs/hppfs_kern.c b/fs/hppfs/hppfs_kern.c
index affb7412125e..a1e1f0f61aa5 100644
--- a/fs/hppfs/hppfs_kern.c
+++ b/fs/hppfs/hppfs_kern.c
@@ -155,6 +155,20 @@ static void hppfs_read_inode(struct inode *ino)
ino->i_blocks = proc_ino->i_blocks;
}
+static struct inode *hppfs_iget(struct super_block *sb)
+{
+ struct inode *inode;
+
+ inode = iget_locked(sb, 0);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (inode->i_state & I_NEW) {
+ hppfs_read_inode(inode);
+ unlock_new_inode(inode);
+ }
+ return inode;
+}
+
static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
struct nameidata *nd)
{
@@ -190,9 +204,11 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
if(IS_ERR(proc_dentry))
return(proc_dentry);
- inode = iget(ino->i_sb, 0);
- if(inode == NULL)
+ inode = hppfs_iget(ino->i_sb);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
goto out_dput;
+ }
err = init_inode(inode, proc_dentry);
if(err)
@@ -652,7 +668,6 @@ static void hppfs_destroy_inode(struct inode *inode)
static const struct super_operations hppfs_sbops = {
.alloc_inode = hppfs_alloc_inode,
.destroy_inode = hppfs_destroy_inode,
- .read_inode = hppfs_read_inode,
.delete_inode = hppfs_delete_inode,
.statfs = hppfs_statfs,
};
@@ -745,9 +760,11 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent)
sb->s_magic = HPPFS_SUPER_MAGIC;
sb->s_op = &hppfs_sbops;
- root_inode = iget(sb, 0);
- if(root_inode == NULL)
+ root_inode = hppfs_iget(sb);
+ if (IS_ERR(root_inode)) {
+ err = PTR_ERR(root_inode);
goto out;
+ }
err = init_inode(root_inode, proc_sb->s_root);
if(err)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 09ee07f02663..3b3cc28cdefc 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -768,7 +768,7 @@ hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
case Opt_mode:
if (match_octal(&args[0], &option))
goto bad_val;
- pconfig->mode = option & 0777U;
+ pconfig->mode = option & 01777U;
break;
case Opt_size: {
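[The hugetlbfs option-parsing change widens the mode mask from 0777 to 01777 so that a sticky bit given in the mount options survives. A quick check of the two masks; the sample value is arbitrary.]

    #include <stdio.h>

    int main(void)
    {
        unsigned long option = 01777;                   /* e.g. mode=1777 from the mount options */

        printf("old mask: %lo\n", option & 0777UL);     /* 777: sticky bit silently dropped */
        printf("new mask: %lo\n", option & 01777UL);    /* 1777: sticky bit preserved */
        return 0;
    }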
diff --git a/fs/inode.c b/fs/inode.c
index 276ffd6b6fdd..53245ffcf93d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -928,8 +928,6 @@ EXPORT_SYMBOL(ilookup);
* @set: callback used to initialize a new struct inode
* @data: opaque data pointer to pass to @test and @set
*
- * This is iget() without the read_inode() portion of get_new_inode().
- *
* iget5_locked() uses ifind() to search for the inode specified by @hashval
* and @data in the inode cache and if present it is returned with an increased
* reference count. This is a generalized version of iget_locked() for file
@@ -966,8 +964,6 @@ EXPORT_SYMBOL(iget5_locked);
* @sb: super block of file system
* @ino: inode number to get
*
- * This is iget() without the read_inode() portion of get_new_inode_fast().
- *
* iget_locked() uses ifind_fast() to search for the inode specified by @ino in
* the inode cache and if present it is returned with an increased reference
* count. This is for file systems where the inode number is sufficient for
diff --git a/fs/inotify.c b/fs/inotify.c
index 2c5b92152876..690e72595e6e 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -168,20 +168,14 @@ static void set_dentry_child_flags(struct inode *inode, int watched)
struct dentry *child;
list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
- if (!child->d_inode) {
- WARN_ON(child->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
+ if (!child->d_inode)
continue;
- }
+
spin_lock(&child->d_lock);
- if (watched) {
- WARN_ON(child->d_flags &
- DCACHE_INOTIFY_PARENT_WATCHED);
+ if (watched)
child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
- } else {
- WARN_ON(!(child->d_flags &
- DCACHE_INOTIFY_PARENT_WATCHED));
- child->d_flags&=~DCACHE_INOTIFY_PARENT_WATCHED;
- }
+ else
+ child->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
spin_unlock(&child->d_lock);
}
}
@@ -253,7 +247,6 @@ void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
if (!inode)
return;
- WARN_ON(entry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
spin_lock(&entry->d_lock);
parent = entry->d_parent;
if (parent->d_inode && inotify_inode_watched(parent->d_inode))
@@ -627,6 +620,7 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
struct inode *inode, u32 mask)
{
int ret = 0;
+ int newly_watched;
/* don't allow invalid bits: we don't want flags set */
mask &= IN_ALL_EVENTS | IN_ONESHOT;
@@ -653,12 +647,18 @@ s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
*/
watch->inode = igrab(inode);
- if (!inotify_inode_watched(inode))
- set_dentry_child_flags(inode, 1);
-
/* Add the watch to the handle's and the inode's list */
+ newly_watched = !inotify_inode_watched(inode);
list_add(&watch->h_list, &ih->watches);
list_add(&watch->i_list, &inode->inotify_watches);
+ /*
+ * Set child flags _after_ adding the watch, so there is no race
+ * window where newly instantiated children could miss their parent's
+ * watched flag.
+ */
+ if (newly_watched)
+ set_dentry_child_flags(inode, 1);
+
out:
mutex_unlock(&ih->mutex);
mutex_unlock(&inode->inotify_mutex);
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 5e009331c01f..a336c9709f3c 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -79,6 +79,7 @@ struct inotify_device {
atomic_t count; /* reference count */
struct user_struct *user; /* user who opened this dev */
struct inotify_handle *ih; /* inotify handle */
+ struct fasync_struct *fa; /* async notification */
unsigned int queue_size; /* size of the queue (bytes) */
unsigned int event_count; /* number of pending events */
unsigned int max_events; /* maximum number of events */
@@ -248,6 +249,19 @@ inotify_dev_get_event(struct inotify_device *dev)
}
/*
+ * inotify_dev_get_last_event - return the last event in the given dev's queue
+ *
+ * Caller must hold dev->ev_mutex.
+ */
+static inline struct inotify_kernel_event *
+inotify_dev_get_last_event(struct inotify_device *dev)
+{
+ if (list_empty(&dev->events))
+ return NULL;
+ return list_entry(dev->events.prev, struct inotify_kernel_event, list);
+}
+
+/*
* inotify_dev_queue_event - event handler registered with core inotify, adds
* a new event to the given device
*
@@ -273,7 +287,7 @@ static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
put_inotify_watch(w); /* final put */
/* coalescing: drop this event if it is a dupe of the previous */
- last = inotify_dev_get_event(dev);
+ last = inotify_dev_get_last_event(dev);
if (last && last->event.mask == mask && last->event.wd == wd &&
last->event.cookie == cookie) {
const char *lastname = last->name;
@@ -302,6 +316,7 @@ static void inotify_dev_queue_event(struct inotify_watch *w, u32 wd, u32 mask,
dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
list_add_tail(&kevent->list, &dev->events);
wake_up_interruptible(&dev->wq);
+ kill_fasync(&dev->fa, SIGIO, POLL_IN);
out:
mutex_unlock(&dev->ev_mutex);
@@ -490,6 +505,13 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
return ret;
}
+static int inotify_fasync(int fd, struct file *file, int on)
+{
+ struct inotify_device *dev = file->private_data;
+
+ return fasync_helper(fd, file, on, &dev->fa) >= 0 ? 0 : -EIO;
+}
+
static int inotify_release(struct inode *ignored, struct file *file)
{
struct inotify_device *dev = file->private_data;
@@ -502,6 +524,9 @@ static int inotify_release(struct inode *ignored, struct file *file)
inotify_dev_event_dequeue(dev);
mutex_unlock(&dev->ev_mutex);
+ if (file->f_flags & FASYNC)
+ inotify_fasync(-1, file, 0);
+
/* free this device: the put matching the get in inotify_init() */
put_inotify_dev(dev);
@@ -530,6 +555,7 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations inotify_fops = {
.poll = inotify_poll,
.read = inotify_read,
+ .fasync = inotify_fasync,
.release = inotify_release,
.unlocked_ioctl = inotify_ioctl,
.compat_ioctl = inotify_ioctl,
@@ -577,6 +603,7 @@ asmlinkage long sys_inotify_init(void)
goto out_free_dev;
}
dev->ih = ih;
+ dev->fa = NULL;
filp->f_op = &inotify_fops;
filp->f_path.mnt = mntget(inotify_mnt);
diff --git a/fs/ioctl.c b/fs/ioctl.c
index c2a773e8620b..683002fefa55 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -12,12 +12,24 @@
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
-#include <asm/uaccess.h>
#include <asm/ioctls.h>
-static long do_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+/**
+ * vfs_ioctl - call filesystem specific ioctl methods
+ * @filp: [in] open file to invoke ioctl method on
+ * @cmd: [in] ioctl command to execute
+ * @arg: [in/out] command-specific argument for ioctl
+ *
+ * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise
+ * invokes filesystem specific ->ioctl method. If neither method exists,
+ * returns -ENOTTY.
+ *
+ * Returns 0 on success, -errno on error.
+ */
+long vfs_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
{
int error = -ENOTTY;
@@ -40,123 +52,148 @@ static long do_ioctl(struct file *filp, unsigned int cmd,
return error;
}
+static int ioctl_fibmap(struct file *filp, int __user *p)
+{
+ struct address_space *mapping = filp->f_mapping;
+ int res, block;
+
+ /* do we support this mess? */
+ if (!mapping->a_ops->bmap)
+ return -EINVAL;
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ res = get_user(block, p);
+ if (res)
+ return res;
+ lock_kernel();
+ res = mapping->a_ops->bmap(mapping, block);
+ unlock_kernel();
+ return put_user(res, p);
+}
+
static int file_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
- int error;
- int block;
- struct inode * inode = filp->f_path.dentry->d_inode;
+ struct inode *inode = filp->f_path.dentry->d_inode;
int __user *p = (int __user *)arg;
switch (cmd) {
- case FIBMAP:
- {
- struct address_space *mapping = filp->f_mapping;
- int res;
- /* do we support this mess? */
- if (!mapping->a_ops->bmap)
- return -EINVAL;
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- if ((error = get_user(block, p)) != 0)
- return error;
+ case FIBMAP:
+ return ioctl_fibmap(filp, p);
+ case FIGETBSZ:
+ return put_user(inode->i_sb->s_blocksize, p);
+ case FIONREAD:
+ return put_user(i_size_read(inode) - filp->f_pos, p);
+ }
+ return vfs_ioctl(filp, cmd, arg);
+}
+
+static int ioctl_fionbio(struct file *filp, int __user *argp)
+{
+ unsigned int flag;
+ int on, error;
+
+ error = get_user(on, argp);
+ if (error)
+ return error;
+ flag = O_NONBLOCK;
+#ifdef __sparc__
+ /* SunOS compatibility item. */
+ if (O_NONBLOCK != O_NDELAY)
+ flag |= O_NDELAY;
+#endif
+ if (on)
+ filp->f_flags |= flag;
+ else
+ filp->f_flags &= ~flag;
+ return error;
+}
+
+static int ioctl_fioasync(unsigned int fd, struct file *filp,
+ int __user *argp)
+{
+ unsigned int flag;
+ int on, error;
+
+ error = get_user(on, argp);
+ if (error)
+ return error;
+ flag = on ? FASYNC : 0;
+
+ /* Did FASYNC state change ? */
+ if ((flag ^ filp->f_flags) & FASYNC) {
+ if (filp->f_op && filp->f_op->fasync) {
lock_kernel();
- res = mapping->a_ops->bmap(mapping, block);
+ error = filp->f_op->fasync(fd, filp, on);
unlock_kernel();
- return put_user(res, p);
- }
- case FIGETBSZ:
- return put_user(inode->i_sb->s_blocksize, p);
- case FIONREAD:
- return put_user(i_size_read(inode) - filp->f_pos, p);
+ } else
+ error = -ENOTTY;
}
+ if (error)
+ return error;
- return do_ioctl(filp, cmd, arg);
+ if (on)
+ filp->f_flags |= FASYNC;
+ else
+ filp->f_flags &= ~FASYNC;
+ return error;
}
/*
* When you add any new common ioctls to the switches above and below
* please update compat_sys_ioctl() too.
*
- * vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
+ * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
* It's just a simple helper for sys_ioctl and compat_sys_ioctl.
*/
-int vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, unsigned long arg)
+int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
+ unsigned long arg)
{
- unsigned int flag;
- int on, error = 0;
+ int error = 0;
+ int __user *argp = (int __user *)arg;
switch (cmd) {
- case FIOCLEX:
- set_close_on_exec(fd, 1);
- break;
-
- case FIONCLEX:
- set_close_on_exec(fd, 0);
- break;
-
- case FIONBIO:
- if ((error = get_user(on, (int __user *)arg)) != 0)
- break;
- flag = O_NONBLOCK;
-#ifdef __sparc__
- /* SunOS compatibility item. */
- if(O_NONBLOCK != O_NDELAY)
- flag |= O_NDELAY;
-#endif
- if (on)
- filp->f_flags |= flag;
- else
- filp->f_flags &= ~flag;
- break;
-
- case FIOASYNC:
- if ((error = get_user(on, (int __user *)arg)) != 0)
- break;
- flag = on ? FASYNC : 0;
-
- /* Did FASYNC state change ? */
- if ((flag ^ filp->f_flags) & FASYNC) {
- if (filp->f_op && filp->f_op->fasync) {
- lock_kernel();
- error = filp->f_op->fasync(fd, filp, on);
- unlock_kernel();
- }
- else error = -ENOTTY;
- }
- if (error != 0)
- break;
-
- if (on)
- filp->f_flags |= FASYNC;
- else
- filp->f_flags &= ~FASYNC;
- break;
-
- case FIOQSIZE:
- if (S_ISDIR(filp->f_path.dentry->d_inode->i_mode) ||
- S_ISREG(filp->f_path.dentry->d_inode->i_mode) ||
- S_ISLNK(filp->f_path.dentry->d_inode->i_mode)) {
- loff_t res = inode_get_bytes(filp->f_path.dentry->d_inode);
- error = copy_to_user((loff_t __user *)arg, &res, sizeof(res)) ? -EFAULT : 0;
- }
- else
- error = -ENOTTY;
- break;
- default:
- if (S_ISREG(filp->f_path.dentry->d_inode->i_mode))
- error = file_ioctl(filp, cmd, arg);
- else
- error = do_ioctl(filp, cmd, arg);
- break;
+ case FIOCLEX:
+ set_close_on_exec(fd, 1);
+ break;
+
+ case FIONCLEX:
+ set_close_on_exec(fd, 0);
+ break;
+
+ case FIONBIO:
+ error = ioctl_fionbio(filp, argp);
+ break;
+
+ case FIOASYNC:
+ error = ioctl_fioasync(fd, filp, argp);
+ break;
+
+ case FIOQSIZE:
+ if (S_ISDIR(filp->f_path.dentry->d_inode->i_mode) ||
+ S_ISREG(filp->f_path.dentry->d_inode->i_mode) ||
+ S_ISLNK(filp->f_path.dentry->d_inode->i_mode)) {
+ loff_t res =
+ inode_get_bytes(filp->f_path.dentry->d_inode);
+ error = copy_to_user((loff_t __user *)arg, &res,
+ sizeof(res)) ? -EFAULT : 0;
+ } else
+ error = -ENOTTY;
+ break;
+ default:
+ if (S_ISREG(filp->f_path.dentry->d_inode->i_mode))
+ error = file_ioctl(filp, cmd, arg);
+ else
+ error = vfs_ioctl(filp, cmd, arg);
+ break;
}
return error;
}
asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
{
- struct file * filp;
+ struct file *filp;
int error = -EBADF;
int fput_needed;
@@ -168,7 +205,7 @@ asmlinkage long sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
if (error)
goto out_fput;
- error = vfs_ioctl(filp, fd, cmd, arg);
+ error = do_vfs_ioctl(filp, fd, cmd, arg);
out_fput:
fput_light(filp, fput_needed);
out:
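[Besides renaming do_ioctl() to vfs_ioctl() and the old vfs_ioctl() to do_vfs_ioctl(), the fs/ioctl.c rework above pulls each generic command out into its own helper. The FIONBIO and FIOASYNC helpers both reduce to the same set-or-clear-a-flag idiom; a small standalone version of that idiom, with an illustrative flag value rather than the real O_NONBLOCK:]

    #include <stdio.h>

    #define EXAMPLE_NONBLOCK 0x800  /* illustrative bit, not the real O_NONBLOCK */

    /* the set-or-clear pattern used by ioctl_fionbio()/ioctl_fioasync() above */
    static unsigned int apply_flag(unsigned int flags, unsigned int flag, int on)
    {
        if (on)
            flags |= flag;
        else
            flags &= ~flag;
        return flags;
    }

    int main(void)
    {
        unsigned int f = 0;

        f = apply_flag(f, EXAMPLE_NONBLOCK, 1);
        printf("0x%x\n", f);    /* 0x800 */
        f = apply_flag(f, EXAMPLE_NONBLOCK, 0);
        printf("0x%x\n", f);    /* 0x0 */
        return 0;
    }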
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index 29f9753ae5e5..bb219138331a 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -26,11 +26,9 @@ isofs_export_iget(struct super_block *sb,
if (block == 0)
return ERR_PTR(-ESTALE);
inode = isofs_iget(sb, block, offset);
- if (inode == NULL)
- return ERR_PTR(-ENOMEM);
- if (is_bad_inode(inode)
- || (generation && inode->i_generation != generation))
- {
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ if (generation && inode->i_generation != generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
@@ -110,8 +108,10 @@ static struct dentry *isofs_export_get_parent(struct dentry *child)
parent_inode = isofs_iget(child_inode->i_sb,
parent_block,
parent_offset);
- if (parent_inode == NULL) {
- rv = ERR_PTR(-EACCES);
+ if (IS_ERR(parent_inode)) {
+ rv = ERR_CAST(parent_inode);
+ if (rv != ERR_PTR(-ENOMEM))
+ rv = ERR_PTR(-EACCES);
goto out;
}
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 09e3d306e96f..875d37fb6c70 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -54,7 +54,7 @@ static void isofs_put_super(struct super_block *sb)
return;
}
-static void isofs_read_inode(struct inode *);
+static int isofs_read_inode(struct inode *);
static int isofs_statfs (struct dentry *, struct kstatfs *);
static struct kmem_cache *isofs_inode_cachep;
@@ -107,7 +107,6 @@ static int isofs_remount(struct super_block *sb, int *flags, char *data)
static const struct super_operations isofs_sops = {
.alloc_inode = isofs_alloc_inode,
.destroy_inode = isofs_destroy_inode,
- .read_inode = isofs_read_inode,
.put_super = isofs_put_super,
.statfs = isofs_statfs,
.remount_fs = isofs_remount,
@@ -552,7 +551,7 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
int joliet_level = 0;
int iso_blknum, block;
int orig_zonesize;
- int table;
+ int table, error = -EINVAL;
unsigned int vol_desc_start;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
@@ -810,6 +809,8 @@ root_found:
* we then decide whether to use the Joliet descriptor.
*/
inode = isofs_iget(s, sbi->s_firstdatazone, 0);
+ if (IS_ERR(inode))
+ goto out_no_root;
/*
* If this disk has both Rock Ridge and Joliet on it, then we
@@ -829,6 +830,8 @@ root_found:
"ISOFS: changing to secondary root\n");
iput(inode);
inode = isofs_iget(s, sbi->s_firstdatazone, 0);
+ if (IS_ERR(inode))
+ goto out_no_root;
}
}
@@ -842,8 +845,6 @@ root_found:
sbi->s_joliet_level = joliet_level;
/* check the root inode */
- if (!inode)
- goto out_no_root;
if (!inode->i_op)
goto out_bad_root;
@@ -876,11 +877,14 @@ root_found:
*/
out_bad_root:
printk(KERN_WARNING "%s: root inode not initialized\n", __func__);
- goto out_iput;
-out_no_root:
- printk(KERN_WARNING "%s: get root inode failed\n", __func__);
out_iput:
iput(inode);
+ goto out_no_inode;
+out_no_root:
+ error = PTR_ERR(inode);
+ if (error != -ENOMEM)
+ printk(KERN_WARNING "%s: get root inode failed\n", __func__);
+out_no_inode:
#ifdef CONFIG_JOLIET
if (sbi->s_nls_iocharset)
unload_nls(sbi->s_nls_iocharset);
@@ -908,7 +912,7 @@ out_freesbi:
kfree(opt.iocharset);
kfree(sbi);
s->s_fs_info = NULL;
- return -EINVAL;
+ return error;
}
static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf)
@@ -930,7 +934,7 @@ static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf)
/*
* Get a set of blocks; filling in buffer_heads if already allocated
* or getblk() if they are not. Returns the number of blocks inserted
- * (0 == error.)
+ * (-ve == error.)
*/
int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
struct buffer_head **bh, unsigned long nblocks)
@@ -940,11 +944,12 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
unsigned int firstext;
unsigned long nextblk, nextoff;
long iblock = (long)iblock_s;
- int section, rv;
+ int section, rv, error;
struct iso_inode_info *ei = ISOFS_I(inode);
lock_kernel();
+ error = -EIO;
rv = 0;
if (iblock < 0 || iblock != iblock_s) {
printk(KERN_DEBUG "%s: block number too large\n", __func__);
@@ -983,8 +988,10 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
offset += sect_size;
ninode = isofs_iget(inode->i_sb, nextblk, nextoff);
- if (!ninode)
+ if (IS_ERR(ninode)) {
+ error = PTR_ERR(ninode);
goto abort;
+ }
firstext = ISOFS_I(ninode)->i_first_extent;
sect_size = ISOFS_I(ninode)->i_section_size >> ISOFS_BUFFER_BITS(ninode);
nextblk = ISOFS_I(ninode)->i_next_section_block;
@@ -1015,9 +1022,10 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock_s,
rv++;
}
+ error = 0;
abort:
unlock_kernel();
- return rv;
+ return rv != 0 ? rv : error;
}
/*
@@ -1026,12 +1034,15 @@ abort:
static int isofs_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
+ int ret;
+
if (create) {
printk(KERN_DEBUG "%s: Kernel tries to allocate a block\n", __func__);
return -EROFS;
}
- return isofs_get_blocks(inode, iblock, &bh_result, 1) ? 0 : -EIO;
+ ret = isofs_get_blocks(inode, iblock, &bh_result, 1);
+ return ret < 0 ? ret : 0;
}
static int isofs_bmap(struct inode *inode, sector_t block)
@@ -1186,7 +1197,7 @@ out_toomany:
goto out;
}
-static void isofs_read_inode(struct inode *inode)
+static int isofs_read_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct isofs_sb_info *sbi = ISOFS_SB(sb);
@@ -1199,6 +1210,7 @@ static void isofs_read_inode(struct inode *inode)
unsigned int de_len;
unsigned long offset;
struct iso_inode_info *ei = ISOFS_I(inode);
+ int ret = -EIO;
block = ei->i_iget5_block;
bh = sb_bread(inode->i_sb, block);
@@ -1216,6 +1228,7 @@ static void isofs_read_inode(struct inode *inode)
tmpde = kmalloc(de_len, GFP_KERNEL);
if (tmpde == NULL) {
printk(KERN_INFO "%s: out of memory\n", __func__);
+ ret = -ENOMEM;
goto fail;
}
memcpy(tmpde, bh->b_data + offset, frag1);
@@ -1259,8 +1272,10 @@ static void isofs_read_inode(struct inode *inode)
ei->i_section_size = isonum_733(de->size);
if (de->flags[-high_sierra] & 0x80) {
- if(isofs_read_level3_size(inode))
+ ret = isofs_read_level3_size(inode);
+ if (ret < 0)
goto fail;
+ ret = -EIO;
} else {
ei->i_next_section_block = 0;
ei->i_next_section_offset = 0;
@@ -1346,16 +1361,16 @@ static void isofs_read_inode(struct inode *inode)
/* XXX - parse_rock_ridge_inode() had already set i_rdev. */
init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ ret = 0;
out:
kfree(tmpde);
if (bh)
brelse(bh);
- return;
+ return ret;
out_badread:
printk(KERN_WARNING "ISOFS: unable to read i-node block\n");
fail:
- make_bad_inode(inode);
goto out;
}
@@ -1394,9 +1409,10 @@ struct inode *isofs_iget(struct super_block *sb,
unsigned long hashval;
struct inode *inode;
struct isofs_iget5_callback_data data;
+ long ret;
if (offset >= 1ul << sb->s_blocksize_bits)
- return NULL;
+ return ERR_PTR(-EINVAL);
data.block = block;
data.offset = offset;
@@ -1406,9 +1422,17 @@ struct inode *isofs_iget(struct super_block *sb,
inode = iget5_locked(sb, hashval, &isofs_iget5_test,
&isofs_iget5_set, &data);
- if (inode && (inode->i_state & I_NEW)) {
- sb->s_op->read_inode(inode);
- unlock_new_inode(inode);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (inode->i_state & I_NEW) {
+ ret = isofs_read_inode(inode);
+ if (ret < 0) {
+ iget_failed(inode);
+ inode = ERR_PTR(ret);
+ } else {
+ unlock_new_inode(inode);
+ }
}
return inode;
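
The isofs conversion above replaces NULL returns with ERR_PTR()-encoded errors, so callers can tell -ENOMEM from -EIO apart and forward the exact code with ERR_CAST(), as the namei.c and rock.c hunks below do. A rough user-space sketch of that convention (the names and the MAX_ERRNO constant mirror <linux/err.h>, but this is an illustration, not the kernel header):

/*
 * User-space sketch of the error-pointer convention: error codes are
 * folded into the top page of the address space, so one return value
 * can carry either a valid pointer or a -Exxx code.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
/* ERR_CAST re-types an error pointer without losing the code. */
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

static void *fake_iget(int fail)
{
	static int object = 42;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&object;
}

int main(void)
{
	void *p = fake_iget(1);

	if (IS_ERR(p))
		printf("iget failed: %ld\n", PTR_ERR(p));	/* -12 */
	else
		printf("got %d\n", *(int *)p);
	return 0;
}
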
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index e2b4dad39ca9..344b247bc29a 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -179,9 +179,9 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam
inode = NULL;
if (found) {
inode = isofs_iget(dir->i_sb, block, offset);
- if (!inode) {
+ if (IS_ERR(inode)) {
unlock_kernel();
- return ERR_PTR(-EACCES);
+ return ERR_CAST(inode);
}
}
unlock_kernel();
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index f3a1db3098de..6bd48f0a7047 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -474,8 +474,10 @@ repeat:
isofs_iget(inode->i_sb,
ISOFS_I(inode)->i_first_extent,
0);
- if (!reloc)
+ if (IS_ERR(reloc)) {
+ ret = PTR_ERR(reloc);
goto out;
+ }
inode->i_mode = reloc->i_mode;
inode->i_nlink = reloc->i_nlink;
inode->i_uid = reloc->i_uid;
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 5d14243499d4..3943a8905eb2 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -1457,7 +1457,7 @@ static const char *journal_dev_name(journal_t *journal, char *buffer)
* Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
* and don't attempt to make any other journal updates.
*/
-void __journal_abort_hard(journal_t *journal)
+static void __journal_abort_hard(journal_t *journal)
{
transaction_t *transaction;
char b[BDEVNAME_SIZE];
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index c5d9694b6a2f..2b8edf4d6eaa 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -354,7 +354,7 @@ static int do_one_pass(journal_t *journal,
struct buffer_head * obh;
struct buffer_head * nbh;
- cond_resched(); /* We're under lock_kernel() */
+ cond_resched();
/* If we already know where to stop the log traversal,
* check right now that we haven't gone past the end of
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 921680663fa2..d36356f7d222 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -397,7 +397,7 @@ static int do_one_pass(journal_t *journal,
struct buffer_head * obh;
struct buffer_head * nbh;
- cond_resched(); /* We're under lock_kernel() */
+ cond_resched();
/* If we already know where to stop the log traversal,
* check right now that we haven't gone past the end of
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 77fc5838609c..4c80404a9aba 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -176,7 +176,7 @@ static void jffs2_iset_acl(struct inode *inode, struct posix_acl **i_acl, struct
spin_unlock(&inode->i_lock);
}
-struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
+static struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct posix_acl *acl;
@@ -345,8 +345,10 @@ int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
if (!clone)
return -ENOMEM;
rc = posix_acl_create_masq(clone, (mode_t *)i_mode);
- if (rc < 0)
+ if (rc < 0) {
+ posix_acl_release(clone);
return rc;
+ }
if (rc > 0)
jffs2_iset_acl(inode, &f->i_acl_access, clone);
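
The jffs2_init_acl_pre() hunk plugs a reference leak: when posix_acl_create_masq() fails, the early return used to skip the posix_acl_release() of the clone taken a few lines earlier. A toy user-space analogue of the rule the fix enforces, namely that every exit path drops the reference it took (names and the refcounting scheme are illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct acl { int refcount; };

static struct acl *acl_clone(void)
{
	struct acl *a = malloc(sizeof(*a));

	if (a)
		a->refcount = 1;
	return a;
}

static void acl_release(struct acl *a)
{
	if (a && --a->refcount == 0)
		free(a);
}

/* Stand-in for posix_acl_create_masq(); negative means error. */
static int create_masq(struct acl *a, int fail) { (void)a; return fail ? -1 : 0; }

static int init_acl(int fail)
{
	struct acl *clone = acl_clone();
	int rc;

	if (!clone)
		return -1;
	rc = create_masq(clone, fail);
	if (rc < 0) {
		acl_release(clone);	/* the fix: drop the clone on error too */
		return rc;
	}
	/* ... use the clone ... */
	acl_release(clone);
	return 0;
}

int main(void)
{
	printf("ok=%d err=%d\n", init_acl(0), init_acl(1));
	return 0;
}
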
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index 76c6ebd1acd9..0bb7f003fd80 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -28,7 +28,6 @@ struct jffs2_acl_header {
#define JFFS2_ACL_NOT_CACHED ((void *)-1)
-extern struct posix_acl *jffs2_get_acl(struct inode *inode, int type);
extern int jffs2_permission(struct inode *, int, struct nameidata *);
extern int jffs2_acl_chmod(struct inode *);
extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
@@ -40,7 +39,6 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler;
#else
-#define jffs2_get_acl(inode, type) (NULL)
#define jffs2_permission (NULL)
#define jffs2_acl_chmod(inode) (0)
#define jffs2_init_acl_pre(dir_i,inode,mode) (0)
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 787e392ffd41..f948f7e6ec82 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -101,10 +101,10 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
ino = fd->ino;
up(&dir_f->sem);
if (ino) {
- inode = iget(dir_i->i_sb, ino);
- if (!inode) {
+ inode = jffs2_iget(dir_i->i_sb, ino);
+ if (IS_ERR(inode)) {
printk(KERN_WARNING "iget() failed for ino #%u\n", ino);
- return (ERR_PTR(-EIO));
+ return ERR_CAST(inode);
}
}
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index d2e06f7ea96f..e26ea78c7892 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -97,11 +97,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);
if (ivalid & ATTR_MODE)
- if (iattr->ia_mode & S_ISGID &&
- !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID))
- ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID);
- else
- ri->mode = cpu_to_jemode(iattr->ia_mode);
+ ri->mode = cpu_to_jemode(iattr->ia_mode);
else
ri->mode = cpu_to_jemode(inode->i_mode);
@@ -230,16 +226,23 @@ void jffs2_clear_inode (struct inode *inode)
jffs2_do_clear_inode(c, f);
}
-void jffs2_read_inode (struct inode *inode)
+struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
struct jffs2_inode_info *f;
struct jffs2_sb_info *c;
struct jffs2_raw_inode latest_node;
union jffs2_device_node jdev;
+ struct inode *inode;
dev_t rdev = 0;
int ret;
- D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino));
+ D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino));
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
f = JFFS2_INODE_INFO(inode);
c = JFFS2_SB_INFO(inode->i_sb);
@@ -250,9 +253,9 @@ void jffs2_read_inode (struct inode *inode)
ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
if (ret) {
- make_bad_inode(inode);
up(&f->sem);
- return;
+ iget_failed(inode);
+ return ERR_PTR(ret);
}
inode->i_mode = jemode_to_cpu(latest_node.mode);
inode->i_uid = je16_to_cpu(latest_node.uid);
@@ -303,19 +306,14 @@ void jffs2_read_inode (struct inode *inode)
if (f->metadata->size != sizeof(jdev.old) &&
f->metadata->size != sizeof(jdev.new)) {
printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
- up(&f->sem);
- jffs2_do_clear_inode(c, f);
- make_bad_inode(inode);
- return;
+ goto error_io;
}
D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
- if (jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size) < 0) {
+ ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
+ if (ret < 0) {
/* Eep */
printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
- up(&f->sem);
- jffs2_do_clear_inode(c, f);
- make_bad_inode(inode);
- return;
+ goto error;
}
if (f->metadata->size == sizeof(jdev.old))
rdev = old_decode_dev(je16_to_cpu(jdev.old));
@@ -335,6 +333,16 @@ void jffs2_read_inode (struct inode *inode)
up(&f->sem);
D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n"));
+ unlock_new_inode(inode);
+ return inode;
+
+error_io:
+ ret = -EIO;
+error:
+ up(&f->sem);
+ jffs2_do_clear_inode(c, f);
+ iget_failed(inode);
+ return ERR_PTR(ret);
}
void jffs2_dirty_inode(struct inode *inode)
@@ -522,15 +530,16 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
if ((ret = jffs2_do_mount_fs(c)))
goto out_inohash;
- ret = -EINVAL;
-
D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n"));
- root_i = iget(sb, 1);
- if (is_bad_inode(root_i)) {
+ root_i = jffs2_iget(sb, 1);
+ if (IS_ERR(root_i)) {
D1(printk(KERN_WARNING "get root inode failed\n"));
- goto out_root_i;
+ ret = PTR_ERR(root_i);
+ goto out_root;
}
+ ret = -ENOMEM;
+
D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
sb->s_root = d_alloc_root(root_i);
if (!sb->s_root)
@@ -546,6 +555,7 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
out_root_i:
iput(root_i);
+out_root:
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
if (jffs2_blocks_use_vmalloc(c))
@@ -615,9 +625,9 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
jffs2_do_unlink() would need the alloc_sem and we have it.
Just iget() it, and if read_inode() is necessary that's OK.
*/
- inode = iget(OFNI_BS_2SFFJ(c), inum);
- if (!inode)
- return ERR_PTR(-ENOMEM);
+ inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
}
if (is_bad_inode(inode)) {
printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n",
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 4bf86088b3ae..87c6f555e1a0 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -32,15 +32,18 @@ void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new
if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
/* Duplicate. Free one */
if (new->version < (*prev)->version) {
- dbg_dentlist("Eep! Marking new dirent node is obsolete, old is \"%s\", ino #%u\n",
+ dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n",
(*prev)->name, (*prev)->ino);
jffs2_mark_node_obsolete(c, new->raw);
jffs2_free_full_dirent(new);
} else {
- dbg_dentlist("marking old dirent \"%s\", ino #%u bsolete\n",
+ dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n",
(*prev)->name, (*prev)->ino);
new->next = (*prev)->next;
- jffs2_mark_node_obsolete(c, ((*prev)->raw));
+ /* It may have been a 'placeholder' deletion dirent,
+ if jffs2_can_mark_obsolete() (see jffs2_do_unlink()) */
+ if ((*prev)->raw)
+ jffs2_mark_node_obsolete(c, ((*prev)->raw));
jffs2_free_full_dirent(*prev);
*prev = new;
}
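
The NULL check on (*prev)->raw goes hand in hand with the jffs2_do_unlink() hunk further down: an unlinked name is no longer spliced out of dir_f->dents but turned into a node-less 'placeholder' deletion dirent (raw = NULL, ino = 0), so concurrent readdir()/seek() positions stay valid. A small user-space sketch of that tombstone idea (the list layout and names are illustrative only):

/* Toy singly linked directory list: instead of unlinking an entry, which
 * would shift the positions a concurrent readdir() relies on, the entry
 * becomes a tombstone that readers simply skip. */
#include <stdio.h>
#include <string.h>

struct dirent_node {
	struct dirent_node *next;
	unsigned long ino;		/* 0 == deleted placeholder */
	const char *name;
};

static void unlink_name(struct dirent_node *head, const char *name)
{
	struct dirent_node *d;

	for (d = head; d; d = d->next) {
		if (d->ino && !strcmp(d->name, name)) {
			d->ino = 0;	/* tombstone, keep list positions */
			break;
		}
	}
}

static void list_dir(struct dirent_node *head)
{
	for (; head; head = head->next)
		if (head->ino)
			printf("%lu %s\n", head->ino, head->name);
}

int main(void)
{
	struct dirent_node c = { NULL, 3, "c" };
	struct dirent_node b = { &c, 2, "b" };
	struct dirent_node a = { &b, 1, "a" };

	unlink_name(&a, "b");
	list_dir(&a);			/* prints "1 a" and "3 c" */
	return 0;
}
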
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index bf64686cf098..1b10d2594092 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -175,7 +175,7 @@ extern const struct inode_operations jffs2_symlink_inode_operations;
/* fs.c */
int jffs2_setattr (struct dentry *, struct iattr *);
int jffs2_do_setattr (struct inode *, struct iattr *);
-void jffs2_read_inode (struct inode *);
+struct inode *jffs2_iget(struct super_block *, unsigned long);
void jffs2_clear_inode (struct inode *);
void jffs2_dirty_inode(struct inode *inode);
struct inode *jffs2_new_inode (struct inode *dir_i, int mode,
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 6c1ba3566f58..e512a93d6249 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -37,23 +37,24 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
BUG_ON(tn->csize == 0);
- if (!jffs2_is_writebuffered(c))
- goto adj_acc;
-
/* Calculate how many bytes were already checked */
ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
- len = ofs % c->wbuf_pagesize;
- if (likely(len))
- len = c->wbuf_pagesize - len;
-
- if (len >= tn->csize) {
- dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
- ref_offset(ref), tn->csize, ofs);
- goto adj_acc;
- }
+ len = tn->csize;
+
+ if (jffs2_is_writebuffered(c)) {
+ int adj = ofs % c->wbuf_pagesize;
+ if (likely(adj))
+ adj = c->wbuf_pagesize - adj;
+
+ if (adj >= tn->csize) {
+ dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
+ ref_offset(ref), tn->csize, ofs);
+ goto adj_acc;
+ }
- ofs += len;
- len = tn->csize - len;
+ ofs += adj;
+ len -= adj;
+ }
dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);
@@ -63,7 +64,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
* adding and jffs2_flash_read_end() interface. */
if (c->mtd->point) {
err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
- if (!err && retlen < tn->csize) {
+ if (!err && retlen < len) {
JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
c->mtd->unpoint(c->mtd, buffer, ofs, retlen);
} else if (err)
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index ffa447511e6a..4677355996cc 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -65,7 +65,6 @@ static const struct super_operations jffs2_super_operations =
{
.alloc_inode = jffs2_alloc_inode,
.destroy_inode =jffs2_destroy_inode,
- .read_inode = jffs2_read_inode,
.put_super = jffs2_put_super,
.write_super = jffs2_write_super,
.statfs = jffs2_statfs,
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 147e2cbee9e4..776f13cbf2b5 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -177,7 +177,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
void *hold_err = fn->raw;
/* Release the full_dnode which is now useless, and return */
jffs2_free_full_dnode(fn);
- return ERR_PTR(PTR_ERR(hold_err));
+ return ERR_CAST(hold_err);
}
fn->ofs = je32_to_cpu(ri->offset);
fn->size = je32_to_cpu(ri->dsize);
@@ -313,7 +313,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
void *hold_err = fd->raw;
/* Release the full_dirent which is now useless, and return */
jffs2_free_full_dirent(fd);
- return ERR_PTR(PTR_ERR(hold_err));
+ return ERR_CAST(hold_err);
}
if (retried) {
@@ -582,7 +582,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
up(&dir_f->sem);
} else {
- struct jffs2_full_dirent **prev = &dir_f->dents;
+ struct jffs2_full_dirent *fd = dir_f->dents;
uint32_t nhash = full_name_hash(name, namelen);
/* We don't actually want to reserve any space, but we do
@@ -590,21 +590,22 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
down(&c->alloc_sem);
down(&dir_f->sem);
- while ((*prev) && (*prev)->nhash <= nhash) {
- if ((*prev)->nhash == nhash &&
- !memcmp((*prev)->name, name, namelen) &&
- !(*prev)->name[namelen]) {
- struct jffs2_full_dirent *this = *prev;
+ for (fd = dir_f->dents; fd; fd = fd->next) {
+ if (fd->nhash == nhash &&
+ !memcmp(fd->name, name, namelen) &&
+ !fd->name[namelen]) {
D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n",
- this->ino, ref_offset(this->raw)));
-
- *prev = this->next;
- jffs2_mark_node_obsolete(c, (this->raw));
- jffs2_free_full_dirent(this);
+ fd->ino, ref_offset(fd->raw)));
+ jffs2_mark_node_obsolete(c, fd->raw);
+ /* We don't want to remove it from the list immediately,
+ because that screws up getdents()/seek() semantics even
+ more than they're screwed already. Turn it into a
+ node-less deletion dirent instead -- a placeholder */
+ fd->raw = NULL;
+ fd->ino = 0;
break;
}
- prev = &((*prev)->next);
}
up(&dir_f->sem);
}
@@ -630,7 +631,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n",
fd->name, dead_f->inocache->ino));
}
- jffs2_mark_node_obsolete(c, fd->raw);
+ if (fd->raw)
+ jffs2_mark_node_obsolete(c, fd->raw);
jffs2_free_full_dirent(fd);
}
}

diff --git a/fs/jfs/file.c b/fs/jfs/file.c
index 87eb93694af7..7f6063acaa3b 100644
--- a/fs/jfs/file.c
+++ b/fs/jfs/file.c
@@ -112,5 +112,8 @@ const struct file_operations jfs_file_operations = {
.splice_write = generic_file_splice_write,
.fsync = jfs_fsync,
.release = jfs_release,
- .ioctl = jfs_ioctl,
+ .unlocked_ioctl = jfs_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = jfs_compat_ioctl,
+#endif
};
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 4672013802e1..210339784b56 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -31,11 +31,21 @@
#include "jfs_debug.h"
-void jfs_read_inode(struct inode *inode)
+struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
{
- if (diRead(inode)) {
- make_bad_inode(inode);
- return;
+ struct inode *inode;
+ int ret;
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ ret = diRead(inode);
+ if (ret < 0) {
+ iget_failed(inode);
+ return ERR_PTR(ret);
}
if (S_ISREG(inode->i_mode)) {
@@ -55,6 +65,8 @@ void jfs_read_inode(struct inode *inode)
inode->i_op = &jfs_file_inode_operations;
init_special_inode(inode, inode->i_mode, inode->i_rdev);
}
+ unlock_new_inode(inode);
+ return inode;
}
/*
diff --git a/fs/jfs/ioctl.c b/fs/jfs/ioctl.c
index dfda12a073e1..a1f8e375ad21 100644
--- a/fs/jfs/ioctl.c
+++ b/fs/jfs/ioctl.c
@@ -51,9 +51,9 @@ static long jfs_map_ext2(unsigned long flags, int from)
}
-int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
- unsigned long arg)
+long jfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ struct inode *inode = filp->f_dentry->d_inode;
struct jfs_inode_info *jfs_inode = JFS_IP(inode);
unsigned int flags;
@@ -82,6 +82,10 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
return -EPERM;
+
+ /* Lock against other parallel changes of flags */
+ mutex_lock(&inode->i_mutex);
+
jfs_get_inode_flags(jfs_inode);
oldflags = jfs_inode->mode2;
@@ -92,8 +96,10 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
if ((oldflags & JFS_IMMUTABLE_FL) ||
((flags ^ oldflags) &
(JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
- if (!capable(CAP_LINUX_IMMUTABLE))
+ if (!capable(CAP_LINUX_IMMUTABLE)) {
+ mutex_unlock(&inode->i_mutex);
return -EPERM;
+ }
}
flags = flags & JFS_FL_USER_MODIFIABLE;
@@ -101,6 +107,7 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
jfs_inode->mode2 = flags;
jfs_set_inode_flags(inode);
+ mutex_unlock(&inode->i_mutex);
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
return 0;
@@ -110,3 +117,21 @@ int jfs_ioctl(struct inode * inode, struct file * filp, unsigned int cmd,
}
}
+#ifdef CONFIG_COMPAT
+long jfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ /* These ioctl numbers are defined with a 'long' argument, so the
+ * 32-bit encodings differ from the native 64-bit ones, but the
+ * actual implementation only deals with ints and is compatible.
+ */
+ switch (cmd) {
+ case JFS_IOC_GETFLAGS32:
+ cmd = JFS_IOC_GETFLAGS;
+ break;
+ case JFS_IOC_SETFLAGS32:
+ cmd = JFS_IOC_SETFLAGS;
+ break;
+ }
+ return jfs_ioctl(filp, cmd, arg);
+}
+#endif
diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h
index c387540d3425..395c4c0d0f06 100644
--- a/fs/jfs/jfs_dinode.h
+++ b/fs/jfs/jfs_dinode.h
@@ -170,5 +170,7 @@ struct dinode {
#define JFS_IOC_GETFLAGS _IOR('f', 1, long)
#define JFS_IOC_SETFLAGS _IOW('f', 2, long)
+#define JFS_IOC_GETFLAGS32 _IOR('f', 1, int)
+#define JFS_IOC_SETFLAGS32 _IOW('f', 2, int)
#endif /*_H_JFS_DINODE */
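
JFS_IOC_GETFLAGS and JFS_IOC_SETFLAGS encode sizeof(long) in their numbers, so a 32-bit process generates different values than a 64-bit kernel expects; the *32 definitions above are exactly those 32-bit encodings, and jfs_compat_ioctl() remaps them before calling the common handler. A quick user-space check of that size-field difference (build natively and with -m32 to compare):

/* Prints the ioctl numbers for the 'long' and 'int' encodings.  On a
 * 64-bit build they differ only in the size field; with -m32 the two
 * are identical, which is why the compat handler can simply remap. */
#include <stdio.h>
#include <sys/ioctl.h>

#define JFS_IOC_GETFLAGS	_IOR('f', 1, long)
#define JFS_IOC_SETFLAGS	_IOW('f', 2, long)
#define JFS_IOC_GETFLAGS32	_IOR('f', 1, int)
#define JFS_IOC_SETFLAGS32	_IOW('f', 2, int)

int main(void)
{
	printf("GETFLAGS   %#lx\n", (unsigned long)JFS_IOC_GETFLAGS);
	printf("GETFLAGS32 %#lx\n", (unsigned long)JFS_IOC_GETFLAGS32);
	printf("SETFLAGS   %#lx\n", (unsigned long)JFS_IOC_SETFLAGS);
	printf("SETFLAGS32 %#lx\n", (unsigned long)JFS_IOC_SETFLAGS32);
	return 0;
}
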
diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h
index 8e2cf2cde185..adb2fafcc544 100644
--- a/fs/jfs/jfs_inode.h
+++ b/fs/jfs/jfs_inode.h
@@ -22,9 +22,9 @@ struct fid;
extern struct inode *ialloc(struct inode *, umode_t);
extern int jfs_fsync(struct file *, struct dentry *, int);
-extern int jfs_ioctl(struct inode *, struct file *,
- unsigned int, unsigned long);
-extern void jfs_read_inode(struct inode *);
+extern long jfs_ioctl(struct file *, unsigned int, unsigned long);
+extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long);
+extern struct inode *jfs_iget(struct super_block *, unsigned long);
extern int jfs_commit_inode(struct inode *, int);
extern int jfs_write_inode(struct inode*, int);
extern void jfs_delete_inode(struct inode *);
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index f8718de3505e..0ba6778edaa2 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1462,12 +1462,10 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc
}
}
- ip = iget(dip->i_sb, inum);
- if (ip == NULL || is_bad_inode(ip)) {
+ ip = jfs_iget(dip->i_sb, inum);
+ if (IS_ERR(ip)) {
jfs_err("jfs_lookup: iget failed on inum %d", (uint) inum);
- if (ip)
- iput(ip);
- return ERR_PTR(-EACCES);
+ return ERR_CAST(ip);
}
dentry = d_splice_alias(ip, dentry);
@@ -1485,12 +1483,11 @@ static struct inode *jfs_nfs_get_inode(struct super_block *sb,
if (ino == 0)
return ERR_PTR(-ESTALE);
- inode = iget(sb, ino);
- if (inode == NULL)
- return ERR_PTR(-ENOMEM);
+ inode = jfs_iget(sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
- if (is_bad_inode(inode) ||
- (generation && inode->i_generation != generation)) {
+ if (generation && inode->i_generation != generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
@@ -1521,17 +1518,14 @@ struct dentry *jfs_get_parent(struct dentry *dentry)
parent_ino =
le32_to_cpu(JFS_IP(dentry->d_inode)->i_dtroot.header.idotdot);
- inode = iget(sb, parent_ino);
- if (inode) {
- if (is_bad_inode(inode)) {
+ inode = jfs_iget(sb, parent_ino);
+ if (IS_ERR(inode)) {
+ parent = ERR_CAST(inode);
+ } else {
+ parent = d_alloc_anon(inode);
+ if (!parent) {
+ parent = ERR_PTR(-ENOMEM);
iput(inode);
- parent = ERR_PTR(-EACCES);
- } else {
- parent = d_alloc_anon(inode);
- if (!parent) {
- parent = ERR_PTR(-ENOMEM);
- iput(inode);
- }
}
}
@@ -1562,7 +1556,10 @@ const struct file_operations jfs_dir_operations = {
.read = generic_read_dir,
.readdir = jfs_readdir,
.fsync = jfs_fsync,
- .ioctl = jfs_ioctl,
+ .unlocked_ioctl = jfs_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = jfs_compat_ioctl,
+#endif
};
static int jfs_ci_hash(struct dentry *dir, struct qstr *this)
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 70a14001c98f..50ea65451732 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -414,7 +414,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
struct inode *inode;
int rc;
s64 newLVSize = 0;
- int flag;
+ int flag, ret = -EINVAL;
jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);
@@ -461,8 +461,10 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
* Initialize direct-mapping inode/address-space
*/
inode = new_inode(sb);
- if (inode == NULL)
+ if (inode == NULL) {
+ ret = -ENOMEM;
goto out_kfree;
+ }
inode->i_ino = 0;
inode->i_nlink = 1;
inode->i_size = sb->s_bdev->bd_inode->i_size;
@@ -494,9 +496,11 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_magic = JFS_SUPER_MAGIC;
- inode = iget(sb, ROOT_I);
- if (!inode || is_bad_inode(inode))
+ inode = jfs_iget(sb, ROOT_I);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
goto out_no_root;
+ }
sb->s_root = d_alloc_root(inode);
if (!sb->s_root)
goto out_no_root;
@@ -536,7 +540,7 @@ out_kfree:
if (sbi->nls_tab)
unload_nls(sbi->nls_tab);
kfree(sbi);
- return -EINVAL;
+ return ret;
}
static void jfs_write_super_lockfs(struct super_block *sb)
@@ -726,7 +730,6 @@ out:
static const struct super_operations jfs_super_operations = {
.alloc_inode = jfs_alloc_inode,
.destroy_inode = jfs_destroy_inode,
- .read_inode = jfs_read_inode,
.dirty_inode = jfs_dirty_inode,
.write_inode = jfs_write_inode,
.delete_inode = jfs_delete_inode,
diff --git a/fs/libfs.c b/fs/libfs.c
index 6e68b700958d..5523bde96387 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -341,13 +341,10 @@ int simple_prepare_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
if (!PageUptodate(page)) {
- if (to - from != PAGE_CACHE_SIZE) {
- void *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr, 0, from);
- memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- }
+ if (to - from != PAGE_CACHE_SIZE)
+ zero_user_segments(page,
+ 0, from,
+ to, PAGE_CACHE_SIZE);
}
return 0;
}
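
simple_prepare_write() is one of many call sites in this series converted from zero_user_page() to the new zero_user(), zero_user_segment() and zero_user_segments() helpers, which take byte offsets within the page instead of an offset/length pair plus an explicit KM_* slot. A rough single-segment sketch of what such a helper does, modelled on the open-coded kmap/memset/flush sequence visible in the ntfs_end_buffer_async_read() hunk later in this diff (the mainline helpers live in include/linux/highmem.h and may differ in detail):

/* Zero page bytes [start, end); a sketch, not the mainline helper. */
static inline void zero_user_segment_sketch(struct page *page,
					    unsigned start, unsigned end)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(end < start || end > PAGE_SIZE);
	memset(kaddr + start, 0, end - start);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
}
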
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index bf4cd316af81..84f6242ba6fc 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -18,7 +18,6 @@
#include <linux/highuid.h>
#include <linux/vfs.h>
-static void minix_read_inode(struct inode * inode);
static int minix_write_inode(struct inode * inode, int wait);
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf);
static int minix_remount (struct super_block * sb, int * flags, char * data);
@@ -96,7 +95,6 @@ static void destroy_inodecache(void)
static const struct super_operations minix_sops = {
.alloc_inode = minix_alloc_inode,
.destroy_inode = minix_destroy_inode,
- .read_inode = minix_read_inode,
.write_inode = minix_write_inode,
.delete_inode = minix_delete_inode,
.put_super = minix_put_super,
@@ -149,6 +147,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
unsigned long i, block;
struct inode *root_inode;
struct minix_sb_info *sbi;
+ int ret = -EINVAL;
sbi = kzalloc(sizeof(struct minix_sb_info), GFP_KERNEL);
if (!sbi)
@@ -246,10 +245,13 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
/* set up enough so that it can read an inode */
s->s_op = &minix_sops;
- root_inode = iget(s, MINIX_ROOT_INO);
- if (!root_inode || is_bad_inode(root_inode))
+ root_inode = minix_iget(s, MINIX_ROOT_INO);
+ if (IS_ERR(root_inode)) {
+ ret = PTR_ERR(root_inode);
goto out_no_root;
+ }
+ ret = -ENOMEM;
s->s_root = d_alloc_root(root_inode);
if (!s->s_root)
goto out_iput;
@@ -290,6 +292,7 @@ out_freemap:
goto out_release;
out_no_map:
+ ret = -ENOMEM;
if (!silent)
printk("MINIX-fs: can't allocate map\n");
goto out_release;
@@ -316,7 +319,7 @@ out_bad_sb:
out:
s->s_fs_info = NULL;
kfree(sbi);
- return -EINVAL;
+ return ret;
}
static int minix_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -409,7 +412,7 @@ void minix_set_inode(struct inode *inode, dev_t rdev)
/*
* The minix V1 function to read an inode.
*/
-static void V1_minix_read_inode(struct inode * inode)
+static struct inode *V1_minix_iget(struct inode *inode)
{
struct buffer_head * bh;
struct minix_inode * raw_inode;
@@ -418,8 +421,8 @@ static void V1_minix_read_inode(struct inode * inode)
raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
if (!raw_inode) {
- make_bad_inode(inode);
- return;
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
inode->i_mode = raw_inode->i_mode;
inode->i_uid = (uid_t)raw_inode->i_uid;
@@ -435,12 +438,14 @@ static void V1_minix_read_inode(struct inode * inode)
minix_inode->u.i1_data[i] = raw_inode->i_zone[i];
minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
brelse(bh);
+ unlock_new_inode(inode);
+ return inode;
}
/*
* The minix V2 function to read an inode.
*/
-static void V2_minix_read_inode(struct inode * inode)
+static struct inode *V2_minix_iget(struct inode *inode)
{
struct buffer_head * bh;
struct minix2_inode * raw_inode;
@@ -449,8 +454,8 @@ static void V2_minix_read_inode(struct inode * inode)
raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
if (!raw_inode) {
- make_bad_inode(inode);
- return;
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
inode->i_mode = raw_inode->i_mode;
inode->i_uid = (uid_t)raw_inode->i_uid;
@@ -468,17 +473,27 @@ static void V2_minix_read_inode(struct inode * inode)
minix_inode->u.i2_data[i] = raw_inode->i_zone[i];
minix_set_inode(inode, old_decode_dev(raw_inode->i_zone[0]));
brelse(bh);
+ unlock_new_inode(inode);
+ return inode;
}
/*
* The global function to read an inode.
*/
-static void minix_read_inode(struct inode * inode)
+struct inode *minix_iget(struct super_block *sb, unsigned long ino)
{
+ struct inode *inode;
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
if (INODE_VERSION(inode) == MINIX_V1)
- V1_minix_read_inode(inode);
+ return V1_minix_iget(inode);
else
- V2_minix_read_inode(inode);
+ return V2_minix_iget(inode);
}
/*
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index ac5d3a75cb0d..326edfe96108 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -45,6 +45,7 @@ struct minix_sb_info {
unsigned short s_version;
};
+extern struct inode *minix_iget(struct super_block *, unsigned long);
extern struct minix_inode * minix_V1_raw_inode(struct super_block *, ino_t, struct buffer_head **);
extern struct minix2_inode * minix_V2_raw_inode(struct super_block *, ino_t, struct buffer_head **);
extern struct inode * minix_new_inode(const struct inode * dir, int * error);
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index f4aa7a939040..102241bc9c79 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -54,10 +54,9 @@ static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, st
ino = minix_inode_by_name(dentry);
if (ino) {
- inode = iget(dir->i_sb, ino);
-
- if (!inode)
- return ERR_PTR(-EACCES);
+ inode = minix_iget(dir->i_sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
}
d_add(dentry, inode);
return NULL;
diff --git a/fs/mpage.c b/fs/mpage.c
index d54f8f897224..5df564366f36 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -276,9 +276,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
}
if (first_hole != blocks_per_page) {
- zero_user_page(page, first_hole << blkbits,
- PAGE_CACHE_SIZE - (first_hole << blkbits),
- KM_USER0);
+ zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
if (first_hole == 0) {
SetPageUptodate(page);
unlock_page(page);
@@ -571,8 +569,7 @@ page_is_mapped:
if (page->index > end_index || !offset)
goto confused;
- zero_user_page(page, offset, PAGE_CACHE_SIZE - offset,
- KM_USER0);
+ zero_user_segment(page, offset, PAGE_CACHE_SIZE);
}
/*
diff --git a/fs/namei.c b/fs/namei.c
index 73e2e665817a..241cff423653 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2188,6 +2188,7 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
+ fsnotify_link_count(dentry->d_inode);
d_delete(dentry);
}
@@ -2360,7 +2361,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
error = dir->i_op->link(old_dentry, dir, new_dentry);
mutex_unlock(&old_dentry->d_inode->i_mutex);
if (!error)
- fsnotify_create(dir, new_dentry);
+ fsnotify_link(dir, old_dentry->d_inode, new_dentry);
return error;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 61bf376e29e8..e9c10cd01e13 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -25,18 +25,21 @@
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
+#include <linux/log2.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
#include "internal.h"
+#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
+#define HASH_SIZE (1UL << HASH_SHIFT)
+
/* spinlock for vfsmount related operations, inplace of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
static int event;
static struct list_head *mount_hashtable __read_mostly;
-static int hash_mask __read_mostly, hash_bits __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;
@@ -48,8 +51,8 @@ static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
- tmp = tmp + (tmp >> hash_bits);
- return tmp & hash_mask;
+ tmp = tmp + (tmp >> HASH_SHIFT);
+ return tmp & (HASH_SIZE - 1);
}
struct vfsmount *alloc_vfsmnt(const char *name)
@@ -1813,9 +1816,7 @@ static void __init init_mount_tree(void)
void __init mnt_init(void)
{
- struct list_head *d;
- unsigned int nr_hash;
- int i;
+ unsigned u;
int err;
init_rwsem(&namespace_sem);
@@ -1828,35 +1829,11 @@ void __init mnt_init(void)
if (!mount_hashtable)
panic("Failed to allocate mount hash table\n");
- /*
- * Find the power-of-two list-heads that can fit into the allocation..
- * We don't guarantee that "sizeof(struct list_head)" is necessarily
- * a power-of-two.
- */
- nr_hash = PAGE_SIZE / sizeof(struct list_head);
- hash_bits = 0;
- do {
- hash_bits++;
- } while ((nr_hash >> hash_bits) != 0);
- hash_bits--;
+ printk("Mount-cache hash table entries: %lu\n", HASH_SIZE);
+
+ for (u = 0; u < HASH_SIZE; u++)
+ INIT_LIST_HEAD(&mount_hashtable[u]);
- /*
- * Re-calculate the actual number of entries and the mask
- * from the number of bits we can fit.
- */
- nr_hash = 1UL << hash_bits;
- hash_mask = nr_hash - 1;
-
- printk("Mount-cache hash table entries: %d\n", nr_hash);
-
- /* And initialize the newly allocated array */
- d = mount_hashtable;
- i = nr_hash;
- do {
- INIT_LIST_HEAD(d);
- d++;
- i--;
- } while (i);
err = sysfs_init();
if (err)
printk(KERN_WARNING "%s: sysfs_init error: %d\n",
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index e1cb70c643f8..eff1f18d034f 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -987,7 +987,7 @@ static struct file_system_type ncp_fs_type = {
static int __init init_ncp_fs(void)
{
int err;
- DPRINTK("ncpfs: init_module called\n");
+ DPRINTK("ncpfs: init_ncp_fs called\n");
err = init_inodecache();
if (err)
@@ -1004,7 +1004,7 @@ out1:
static void __exit exit_ncp_fs(void)
{
- DPRINTK("ncpfs: cleanup_module called\n");
+ DPRINTK("ncpfs: exit_ncp_fs called\n");
unregister_filesystem(&ncp_fs_type);
destroy_inodecache();
}
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index e6242cdbaf91..fae97196daad 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -96,7 +96,7 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh)
inode = nfs_fhget(sb, mntfh, fsinfo.fattr);
if (IS_ERR(inode)) {
dprintk("nfs_get_root: get root inode failed\n");
- return ERR_PTR(PTR_ERR(inode));
+ return ERR_CAST(inode);
}
error = nfs_superblock_set_dummy_root(sb, inode);
@@ -266,7 +266,7 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh)
inode = nfs_fhget(sb, mntfh, &fattr);
if (IS_ERR(inode)) {
dprintk("nfs_get_root: get root inode failed\n");
- return ERR_PTR(PTR_ERR(inode));
+ return ERR_CAST(inode);
}
error = nfs_superblock_set_dummy_root(sb, inode);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 8fd6dfbe1bc3..3d7d9631e125 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -79,7 +79,7 @@ void nfs_readdata_release(void *data)
static
int nfs_return_empty_page(struct page *page)
{
- zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+ zero_user(page, 0, PAGE_CACHE_SIZE);
SetPageUptodate(page);
unlock_page(page);
return 0;
@@ -103,10 +103,10 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
pglen = PAGE_CACHE_SIZE - base;
for (;;) {
if (remainder <= pglen) {
- zero_user_page(*pages, base, remainder, KM_USER0);
+ zero_user(*pages, base, remainder);
break;
}
- zero_user_page(*pages, base, pglen, KM_USER0);
+ zero_user(*pages, base, pglen);
pages++;
remainder -= pglen;
pglen = PAGE_CACHE_SIZE;
@@ -130,7 +130,7 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
return PTR_ERR(new);
}
if (len < PAGE_CACHE_SIZE)
- zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
+ zero_user_segment(page, len, PAGE_CACHE_SIZE);
nfs_list_add_request(new, &one_request);
if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
@@ -532,7 +532,7 @@ readpage_async_filler(void *data, struct page *page)
goto out_error;
if (len < PAGE_CACHE_SIZE)
- zero_user_page(page, len, PAGE_CACHE_SIZE - len, KM_USER0);
+ zero_user_segment(page, len, PAGE_CACHE_SIZE);
nfs_pageio_add_request(desc->pgio, new);
return 0;
out_error:
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 522efff3e2c5..f55c437124a2 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -665,9 +665,7 @@ zero_page:
* then we need to zero any uninitialised data. */
if (req->wb_pgbase == 0 && req->wb_bytes != PAGE_CACHE_SIZE
&& !PageUptodate(req->wb_page))
- zero_user_page(req->wb_page, req->wb_bytes,
- PAGE_CACHE_SIZE - req->wb_bytes,
- KM_USER0);
+ zero_user_segment(req->wb_page, req->wb_bytes, PAGE_CACHE_SIZE);
return req;
}
@@ -699,6 +697,17 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
}
/*
+ * If the page cache is marked as unsafe or invalid, then we can't rely on
+ * the PageUptodate() flag. In this case, we will need to turn off
+ * write optimisations that depend on the page contents being correct.
+ */
+static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
+{
+ return PageUptodate(page) &&
+ !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
+}
+
+/*
* Update and possibly write a cached page of an NFS file.
*
* XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
@@ -719,10 +728,13 @@ int nfs_updatepage(struct file *file, struct page *page,
(long long)(page_offset(page) +offset));
/* If we're not using byte range locks, and we know the page
- * is entirely in cache, it may be more efficient to avoid
- * fragmenting write requests.
+ * is up to date, it may be more efficient to extend the write
+ * to cover the entire page in order to avoid fragmentation
+ * inefficiencies.
*/
- if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_mode & O_SYNC)) {
+ if (nfs_write_pageuptodate(page, inode) &&
+ inode->i_flock == NULL &&
+ !(file->f_mode & O_SYNC)) {
count = max(count + offset, nfs_page_length(page));
offset = 0;
}
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 21928056e35e..d13403e33622 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -11,8 +11,6 @@
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/export.h>
-#define CAP_NFSD_MASK (CAP_FS_MASK|CAP_TO_MASK(CAP_SYS_RESOURCE))
-
int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
{
struct exp_flavor_info *f;
@@ -69,10 +67,12 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
ret = set_current_groups(cred.cr_group_info);
put_group_info(cred.cr_group_info);
if ((cred.cr_uid)) {
- cap_t(current->cap_effective) &= ~CAP_NFSD_MASK;
+ current->cap_effective =
+ cap_drop_nfsd_set(current->cap_effective);
} else {
- cap_t(current->cap_effective) |= (CAP_NFSD_MASK &
- current->cap_permitted);
+ current->cap_effective =
+ cap_raise_nfsd_set(current->cap_effective,
+ current->cap_permitted);
}
return ret;
}
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 79b4bf812960..346570f6d848 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1218,13 +1218,13 @@ static struct svc_export *exp_find(struct auth_domain *clp, int fsid_type,
struct svc_export *exp;
struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp);
if (IS_ERR(ek))
- return ERR_PTR(PTR_ERR(ek));
+ return ERR_CAST(ek);
exp = exp_get_by_name(clp, ek->ek_mnt, ek->ek_dentry, reqp);
cache_put(&ek->h, &svc_expkey_cache);
if (IS_ERR(exp))
- return ERR_PTR(PTR_ERR(exp));
+ return ERR_CAST(exp);
return exp;
}
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index ad87cb01299b..00e9ccde8e42 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -87,13 +87,17 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
/* Check for the current buffer head overflowing. */
if (unlikely(file_ofs + bh->b_size > init_size)) {
int ofs;
+ void *kaddr;
ofs = 0;
if (file_ofs < init_size)
ofs = init_size - file_ofs;
local_irq_save(flags);
- zero_user_page(page, bh_offset(bh) + ofs,
- bh->b_size - ofs, KM_BIO_SRC_IRQ);
+ kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+ memset(kaddr + bh_offset(bh) + ofs, 0,
+ bh->b_size - ofs);
+ flush_dcache_page(page);
+ kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
local_irq_restore(flags);
}
} else {
@@ -334,7 +338,7 @@ handle_hole:
bh->b_blocknr = -1UL;
clear_buffer_mapped(bh);
handle_zblock:
- zero_user_page(page, i * blocksize, blocksize, KM_USER0);
+ zero_user(page, i * blocksize, blocksize);
if (likely(!err))
set_buffer_uptodate(bh);
} while (i++, iblock++, (bh = bh->b_this_page) != head);
@@ -410,7 +414,7 @@ retry_readpage:
/* Is the page fully outside i_size? (truncate in progress) */
if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT)) {
- zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+ zero_user(page, 0, PAGE_CACHE_SIZE);
ntfs_debug("Read outside i_size - truncated?");
goto done;
}
@@ -459,7 +463,7 @@ retry_readpage:
* ok to ignore the compressed flag here.
*/
if (unlikely(page->index > 0)) {
- zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+ zero_user(page, 0, PAGE_CACHE_SIZE);
goto done;
}
if (!NInoAttr(ni))
@@ -788,8 +792,7 @@ lock_retry_remap:
if (err == -ENOENT || lcn == LCN_ENOENT) {
bh->b_blocknr = -1;
clear_buffer_dirty(bh);
- zero_user_page(page, bh_offset(bh), blocksize,
- KM_USER0);
+ zero_user(page, bh_offset(bh), blocksize);
set_buffer_uptodate(bh);
err = 0;
continue;
@@ -1414,8 +1417,7 @@ retry_writepage:
if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
/* The page straddles i_size. */
unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
- zero_user_page(page, ofs, PAGE_CACHE_SIZE - ofs,
- KM_USER0);
+ zero_user_segment(page, ofs, PAGE_CACHE_SIZE);
}
/* Handle mst protected attributes. */
if (NInoMstProtected(ni))
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index d1619d05eb23..33ff314cc507 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -565,7 +565,7 @@ int ntfs_read_compressed_block(struct page *page)
if (xpage >= max_page) {
kfree(bhs);
kfree(pages);
- zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
+ zero_user(page, 0, PAGE_CACHE_SIZE);
ntfs_debug("Compressed read outside i_size - truncated?");
SetPageUptodate(page);
unlock_page(page);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 6cd08dfdc2ed..3c5550cd11d6 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -607,8 +607,8 @@ do_next_page:
ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh;
} else {
- zero_user_page(page, bh_offset(bh),
- blocksize, KM_USER0);
+ zero_user(page, bh_offset(bh),
+ blocksize);
set_buffer_uptodate(bh);
}
}
@@ -683,9 +683,8 @@ map_buffer_cached:
ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh;
} else {
- zero_user_page(page,
- bh_offset(bh),
- blocksize, KM_USER0);
+ zero_user(page, bh_offset(bh),
+ blocksize);
set_buffer_uptodate(bh);
}
}
@@ -703,8 +702,8 @@ map_buffer_cached:
*/
if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh)) {
- zero_user_page(page, bh_offset(bh),
- blocksize, KM_USER0);
+ zero_user(page, bh_offset(bh),
+ blocksize);
set_buffer_uptodate(bh);
}
mark_buffer_dirty(bh);
@@ -743,8 +742,7 @@ map_buffer_cached:
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) {
- zero_user_page(page, bh_offset(bh), blocksize,
- KM_USER0);
+ zero_user(page, bh_offset(bh), blocksize);
set_buffer_uptodate(bh);
}
continue;
@@ -868,8 +866,8 @@ rl_not_mapped_enoent:
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) {
- zero_user_page(page, bh_offset(bh),
- blocksize, KM_USER0);
+ zero_user(page, bh_offset(bh),
+ blocksize);
set_buffer_uptodate(bh);
}
continue;
@@ -1128,8 +1126,8 @@ rl_not_mapped_enoent:
if (likely(bh_pos < initialized_size))
ofs = initialized_size - bh_pos;
- zero_user_page(page, bh_offset(bh) + ofs,
- blocksize - ofs, KM_USER0);
+ zero_user_segment(page, bh_offset(bh) + ofs,
+ blocksize);
}
} else /* if (unlikely(!buffer_uptodate(bh))) */
err = -EIO;
@@ -1269,8 +1267,8 @@ rl_not_mapped_enoent:
if (PageUptodate(page))
set_buffer_uptodate(bh);
else {
- zero_user_page(page, bh_offset(bh),
- blocksize, KM_USER0);
+ zero_user(page, bh_offset(bh),
+ blocksize);
set_buffer_uptodate(bh);
}
}
@@ -1330,7 +1328,7 @@ err_out:
len = PAGE_CACHE_SIZE;
if (len > bytes)
len = bytes;
- zero_user_page(*pages, 0, len, KM_USER0);
+ zero_user(*pages, 0, len);
}
goto out;
}
@@ -1451,7 +1449,7 @@ err_out:
len = PAGE_CACHE_SIZE;
if (len > bytes)
len = bytes;
- zero_user_page(*pages, 0, len, KM_USER0);
+ zero_user(*pages, 0, len);
}
goto out;
}
diff --git a/fs/ntfs/malloc.h b/fs/ntfs/malloc.h
index e38e402e4103..cd0be3f5c3cd 100644
--- a/fs/ntfs/malloc.h
+++ b/fs/ntfs/malloc.h
@@ -85,8 +85,7 @@ static inline void *ntfs_malloc_nofs_nofail(unsigned long size)
static inline void ntfs_free(void *addr)
{
- if (likely(((unsigned long)addr < VMALLOC_START) ||
- ((unsigned long)addr >= VMALLOC_END ))) {
+ if (!is_vmalloc_addr(addr)) {
kfree(addr);
/* free_page((unsigned long)addr); */
return;
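
ntfs_free() now relies on the generic is_vmalloc_addr() helper rather than open-coding the VMALLOC_START/VMALLOC_END range test; the removed condition is precisely what the helper encapsulates, negated. A sketch of the check as implied by the lines above (the mainline helper in linux/mm.h also covers the no-MMU case):

/* Sketch of the address-range test replacing the open-coded check in
 * ntfs_free(); mirrors the removed VMALLOC_START/VMALLOC_END comparison. */
static inline int is_vmalloc_addr_sketch(const void *x)
{
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
}
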
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 64713e149e46..447206eb5c2e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5670,7 +5670,7 @@ static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
mlog_errno(ret);
if (zero)
- zero_user_page(page, from, to - from, KM_USER0);
+ zero_user_segment(page, from, to);
/*
* Need to set the buffers we zero'd into uptodate
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index bc7b4cbbe8ec..82243127eebf 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -307,7 +307,7 @@ static int ocfs2_readpage(struct file *file, struct page *page)
* XXX sys_readahead() seems to get that wrong?
*/
if (start >= i_size_read(inode)) {
- zero_user_page(page, 0, PAGE_SIZE, KM_USER0);
+ zero_user(page, 0, PAGE_SIZE);
SetPageUptodate(page);
ret = 0;
goto out_alloc;
@@ -869,7 +869,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
if (block_start >= to)
break;
- zero_user_page(page, block_start, bh->b_size, KM_USER0);
+ zero_user(page, block_start, bh->b_size);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
@@ -1034,7 +1034,7 @@ static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to
start = max(from, block_start);
end = min(to, block_end);
- zero_user_page(page, start, end - start, KM_USER0);
+ zero_user_segment(page, start, end);
set_buffer_uptodate(bh);
}
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index b2e832aca567..d25b9af28500 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -38,6 +38,15 @@
* locking semantics of the file system using the protocol. It should
* be somewhere else, I'm sure, but right now it isn't.
*
+ * With version 11, we separate out the filesystem locking portion. The
+ * filesystem now has a major.minor version it negotiates. Version 11
+ * introduces this negotiation to the o2dlm protocol, and as such the
+ * version here in tcp_internal.h should not need to be bumped for
+ * filesystem locking changes.
+ *
+ * New in version 11
+ * - Negotiation of filesystem locking in the dlm join.
+ *
* New in version 10:
* - Meta/data locks combined
*
@@ -66,7 +75,7 @@
* - full 64 bit i_size in the metadata lock lvbs
* - introduction of "rw" lock and pushing meta/data locking down
*/
-#define O2NET_PROTOCOL_VERSION 10ULL
+#define O2NET_PROTOCOL_VERSION 11ULL
struct o2net_handshake {
__be64 protocol_version;
__be64 connector_id;
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index cfd5cb65cab0..b5786a787fab 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -193,7 +193,12 @@ enum dlm_status dlmunlock(struct dlm_ctxt *dlm,
dlm_astunlockfunc_t *unlockast,
void *data);
-struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key);
+struct dlm_protocol_version {
+ u8 pv_major;
+ u8 pv_minor;
+};
+struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key,
+ struct dlm_protocol_version *fs_proto);
void dlm_unregister_domain(struct dlm_ctxt *dlm);
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index e90b92f9ece1..9843ee17ea27 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -142,6 +142,12 @@ struct dlm_ctxt
spinlock_t work_lock;
struct list_head dlm_domain_handlers;
struct list_head dlm_eviction_callbacks;
+
+ /* The filesystem specifies this at domain registration. We
+ * cache it here to know what to tell other nodes. */
+ struct dlm_protocol_version fs_locking_proto;
+ /* This is the inter-dlm communication version */
+ struct dlm_protocol_version dlm_locking_proto;
};
static inline struct hlist_head *dlm_lockres_hash(struct dlm_ctxt *dlm, unsigned i)
@@ -589,10 +595,24 @@ struct dlm_proxy_ast
#define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)
#define DLM_MOD_KEY (0x666c6172)
-enum dlm_query_join_response {
+enum dlm_query_join_response_code {
JOIN_DISALLOW = 0,
JOIN_OK,
JOIN_OK_NO_MAP,
+ JOIN_PROTOCOL_MISMATCH,
+};
+
+union dlm_query_join_response {
+ u32 intval;
+ struct {
+ u8 code; /* Response code. dlm_minor and fs_minor
+ are only valid if this is JOIN_OK */
+ u8 dlm_minor; /* The minor version of the protocol the
+ dlm is speaking. */
+ u8 fs_minor; /* The minor version of the protocol the
+ filesystem is speaking. */
+ u8 reserved;
+ } packet;
};
struct dlm_lock_request
@@ -633,6 +653,8 @@ struct dlm_query_join_request
u8 node_idx;
u8 pad1[2];
u8 name_len;
+ struct dlm_protocol_version dlm_proto;
+ struct dlm_protocol_version fs_proto;
u8 domain[O2NM_MAX_NAME_LEN];
u8 node_map[BITS_TO_BYTES(O2NM_MAX_NODES)];
};
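
The join response grows from a bare enum into a union that overlays a four-byte packet (code, dlm_minor, fs_minor, reserved) on the u32 status value o2net already returns, so nodes that only understand the old enum still see a plain integer. A toy user-space version of that overlay (u8/u32 replaced by stdint types; wire byte order is ignored here):

/* Four one-byte fields packed into the same 32 bits that used to carry
 * just the enum value. */
#include <stdint.h>
#include <stdio.h>

union query_join_response {
	uint32_t intval;
	struct {
		uint8_t code;		/* JOIN_OK, JOIN_DISALLOW, ... */
		uint8_t dlm_minor;	/* negotiated dlm protocol minor */
		uint8_t fs_minor;	/* negotiated fs protocol minor */
		uint8_t reserved;
	} packet;
};

int main(void)
{
	union query_join_response r = { 0 };

	r.packet.code = 1;		/* JOIN_OK in the enum above */
	r.packet.dlm_minor = 0;
	r.packet.fs_minor = 2;
	printf("sizeof=%zu intval=%#x\n", sizeof(r), r.intval);
	return 0;
}
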
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 6954565b8ccb..638d2ebb892b 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -123,6 +123,17 @@ DEFINE_SPINLOCK(dlm_domain_lock);
LIST_HEAD(dlm_domains);
static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
+/*
+ * The supported protocol version for DLM communication. Running domains
+ * will have a negotiated version with the same major number and a minor
+ * number equal or smaller. The dlm_ctxt->dlm_locking_proto field should
+ * be used to determine what a running domain is actually using.
+ */
+static const struct dlm_protocol_version dlm_protocol = {
+ .pv_major = 1,
+ .pv_minor = 0,
+};
+
#define DLM_DOMAIN_BACKOFF_MS 200
static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -133,6 +144,8 @@ static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data);
static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data);
+static int dlm_protocol_compare(struct dlm_protocol_version *existing,
+ struct dlm_protocol_version *request);
static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
@@ -668,11 +681,45 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
}
EXPORT_SYMBOL_GPL(dlm_unregister_domain);
+static int dlm_query_join_proto_check(char *proto_type, int node,
+ struct dlm_protocol_version *ours,
+ struct dlm_protocol_version *request)
+{
+ int rc;
+ struct dlm_protocol_version proto = *request;
+
+ if (!dlm_protocol_compare(ours, &proto)) {
+ mlog(0,
+ "node %u wanted to join with %s locking protocol "
+ "%u.%u, we respond with %u.%u\n",
+ node, proto_type,
+ request->pv_major,
+ request->pv_minor,
+ proto.pv_major, proto.pv_minor);
+ request->pv_minor = proto.pv_minor;
+ rc = 0;
+ } else {
+ mlog(ML_NOTICE,
+ "Node %u wanted to join with %s locking "
+ "protocol %u.%u, but we have %u.%u, disallowing\n",
+ node, proto_type,
+ request->pv_major,
+ request->pv_minor,
+ ours->pv_major,
+ ours->pv_minor);
+ rc = 1;
+ }
+
+ return rc;
+}
+
static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
void **ret_data)
{
struct dlm_query_join_request *query;
- enum dlm_query_join_response response;
+ union dlm_query_join_response response = {
+ .packet.code = JOIN_DISALLOW,
+ };
struct dlm_ctxt *dlm = NULL;
u8 nodenum;
@@ -690,11 +737,11 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
mlog(0, "node %u is not in our live map yet\n",
query->node_idx);
- response = JOIN_DISALLOW;
+ response.packet.code = JOIN_DISALLOW;
goto respond;
}
- response = JOIN_OK_NO_MAP;
+ response.packet.code = JOIN_OK_NO_MAP;
spin_lock(&dlm_domain_lock);
dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
@@ -713,7 +760,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
mlog(0, "disallow join as node %u does not "
"have node %u in its nodemap\n",
query->node_idx, nodenum);
- response = JOIN_DISALLOW;
+ response.packet.code = JOIN_DISALLOW;
goto unlock_respond;
}
}
@@ -733,30 +780,48 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
/*If this is a brand new context and we
* haven't started our join process yet, then
* the other node won the race. */
- response = JOIN_OK_NO_MAP;
+ response.packet.code = JOIN_OK_NO_MAP;
} else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
/* Disallow parallel joins. */
- response = JOIN_DISALLOW;
+ response.packet.code = JOIN_DISALLOW;
} else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
mlog(0, "node %u trying to join, but recovery "
"is ongoing.\n", bit);
- response = JOIN_DISALLOW;
+ response.packet.code = JOIN_DISALLOW;
} else if (test_bit(bit, dlm->recovery_map)) {
mlog(0, "node %u trying to join, but it "
"still needs recovery.\n", bit);
- response = JOIN_DISALLOW;
+ response.packet.code = JOIN_DISALLOW;
} else if (test_bit(bit, dlm->domain_map)) {
mlog(0, "node %u trying to join, but it "
"is still in the domain! needs recovery?\n",
bit);
- response = JOIN_DISALLOW;
+ response.packet.code = JOIN_DISALLOW;
} else {
/* Alright we're fully a part of this domain
* so we keep some state as to who's joining
* and indicate to him that needs to be fixed
* up. */
- response = JOIN_OK;
- __dlm_set_joining_node(dlm, query->node_idx);
+
+ /* Make sure we speak compatible locking protocols. */
+ if (dlm_query_join_proto_check("DLM", bit,
+ &dlm->dlm_locking_proto,
+ &query->dlm_proto)) {
+ response.packet.code =
+ JOIN_PROTOCOL_MISMATCH;
+ } else if (dlm_query_join_proto_check("fs", bit,
+ &dlm->fs_locking_proto,
+ &query->fs_proto)) {
+ response.packet.code =
+ JOIN_PROTOCOL_MISMATCH;
+ } else {
+ response.packet.dlm_minor =
+ query->dlm_proto.pv_minor;
+ response.packet.fs_minor =
+ query->fs_proto.pv_minor;
+ response.packet.code = JOIN_OK;
+ __dlm_set_joining_node(dlm, query->node_idx);
+ }
}
spin_unlock(&dlm->spinlock);
@@ -765,9 +830,9 @@ unlock_respond:
spin_unlock(&dlm_domain_lock);
respond:
- mlog(0, "We respond with %u\n", response);
+ mlog(0, "We respond with %u\n", response.packet.code);
- return response;
+ return response.intval;
}
static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -899,10 +964,11 @@ static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
static int dlm_request_join(struct dlm_ctxt *dlm,
int node,
- enum dlm_query_join_response *response)
+ enum dlm_query_join_response_code *response)
{
- int status, retval;
+ int status;
struct dlm_query_join_request join_msg;
+ union dlm_query_join_response join_resp;
mlog(0, "querying node %d\n", node);
@@ -910,12 +976,15 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
join_msg.node_idx = dlm->node_num;
join_msg.name_len = strlen(dlm->name);
memcpy(join_msg.domain, dlm->name, join_msg.name_len);
+ join_msg.dlm_proto = dlm->dlm_locking_proto;
+ join_msg.fs_proto = dlm->fs_locking_proto;
/* copy live node map to join message */
byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
- sizeof(join_msg), node, &retval);
+ sizeof(join_msg), node,
+ &join_resp.intval);
if (status < 0 && status != -ENOPROTOOPT) {
mlog_errno(status);
goto bail;
@@ -928,14 +997,41 @@ static int dlm_request_join(struct dlm_ctxt *dlm,
if (status == -ENOPROTOOPT) {
status = 0;
*response = JOIN_OK_NO_MAP;
- } else if (retval == JOIN_DISALLOW ||
- retval == JOIN_OK ||
- retval == JOIN_OK_NO_MAP) {
- *response = retval;
+ } else if (join_resp.packet.code == JOIN_DISALLOW ||
+ join_resp.packet.code == JOIN_OK_NO_MAP) {
+ *response = join_resp.packet.code;
+ } else if (join_resp.packet.code == JOIN_PROTOCOL_MISMATCH) {
+ mlog(ML_NOTICE,
+ "This node requested DLM locking protocol %u.%u and "
+ "filesystem locking protocol %u.%u. At least one of "
+ "the protocol versions on node %d is not compatible, "
+ "disconnecting\n",
+ dlm->dlm_locking_proto.pv_major,
+ dlm->dlm_locking_proto.pv_minor,
+ dlm->fs_locking_proto.pv_major,
+ dlm->fs_locking_proto.pv_minor,
+ node);
+ status = -EPROTO;
+ *response = join_resp.packet.code;
+ } else if (join_resp.packet.code == JOIN_OK) {
+ *response = join_resp.packet.code;
+ /* Use the same locking protocol as the remote node */
+ dlm->dlm_locking_proto.pv_minor =
+ join_resp.packet.dlm_minor;
+ dlm->fs_locking_proto.pv_minor =
+ join_resp.packet.fs_minor;
+ mlog(0,
+ "Node %d responds JOIN_OK with DLM locking protocol "
+ "%u.%u and fs locking protocol %u.%u\n",
+ node,
+ dlm->dlm_locking_proto.pv_major,
+ dlm->dlm_locking_proto.pv_minor,
+ dlm->fs_locking_proto.pv_major,
+ dlm->fs_locking_proto.pv_minor);
} else {
status = -EINVAL;
- mlog(ML_ERROR, "invalid response %d from node %u\n", retval,
- node);
+ mlog(ML_ERROR, "invalid response %d from node %u\n",
+ join_resp.packet.code, node);
}
mlog(0, "status %d, node %d response is %d\n", status, node,
@@ -1008,7 +1104,7 @@ struct domain_join_ctxt {
static int dlm_should_restart_join(struct dlm_ctxt *dlm,
struct domain_join_ctxt *ctxt,
- enum dlm_query_join_response response)
+ enum dlm_query_join_response_code response)
{
int ret;
@@ -1034,7 +1130,7 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
{
int status = 0, tmpstat, node;
struct domain_join_ctxt *ctxt;
- enum dlm_query_join_response response = JOIN_DISALLOW;
+ enum dlm_query_join_response_code response = JOIN_DISALLOW;
mlog_entry("%p", dlm);
@@ -1450,10 +1546,38 @@ leave:
}
/*
- * dlm_register_domain: one-time setup per "domain"
+ * Compare a requested locking protocol version against the current one.
+ *
+ * If the major numbers are different, they are incompatible.
+ * If the current minor is greater than the request, they are incompatible.
+ * If the current minor is less than or equal to the request, they are
+ * compatible, and the requester should run at the current minor version.
+ */
+static int dlm_protocol_compare(struct dlm_protocol_version *existing,
+ struct dlm_protocol_version *request)
+{
+ if (existing->pv_major != request->pv_major)
+ return 1;
+
+ if (existing->pv_minor > request->pv_minor)
+ return 1;
+
+ if (existing->pv_minor < request->pv_minor)
+ request->pv_minor = existing->pv_minor;
+
+ return 0;
+}
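
A quick illustration of the rule in the comment above, as a hypothetical fragment rather than part of the patch:

	struct dlm_protocol_version existing = { .pv_major = 1, .pv_minor = 2 };
	struct dlm_protocol_version request  = { .pv_major = 1, .pv_minor = 5 };

	if (dlm_protocol_compare(&existing, &request) == 0) {
		/* Compatible: request.pv_minor has been lowered to 2, so the
		 * requester runs the 1.2 protocol. A request of 1.1 against an
		 * existing 1.2, or any 2.x request, would return 1 instead. */
	}
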
+
+/*
+ * dlm_register_domain: one-time setup per "domain".
+ *
+ * The filesystem passes in the requested locking version via proto.
+ * If registration was successful, proto will contain the negotiated
+ * locking protocol.
*/
struct dlm_ctxt * dlm_register_domain(const char *domain,
- u32 key)
+ u32 key,
+ struct dlm_protocol_version *fs_proto)
{
int ret;
struct dlm_ctxt *dlm = NULL;
@@ -1496,6 +1620,15 @@ retry:
goto retry;
}
+ if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
+ mlog(ML_ERROR,
+ "Requested locking protocol version is not "
+ "compatible with already registered domain "
+ "\"%s\"\n", domain);
+ ret = -EPROTO;
+ goto leave;
+ }
+
__dlm_get(dlm);
dlm->num_joins++;
@@ -1526,6 +1659,13 @@ retry:
list_add_tail(&dlm->list, &dlm_domains);
spin_unlock(&dlm_domain_lock);
+ /*
+ * Pass the locking protocol version into the join. If the join
+ * succeeds, it will have the negotiated protocol set.
+ */
+ dlm->dlm_locking_proto = dlm_protocol;
+ dlm->fs_locking_proto = *fs_proto;
+
ret = dlm_join_domain(dlm);
if (ret) {
mlog_errno(ret);
@@ -1533,6 +1673,9 @@ retry:
goto leave;
}
+ /* Tell the caller what locking protocol we negotiated */
+ *fs_proto = dlm->fs_locking_proto;
+
ret = 0;
leave:
if (new_ctxt)
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c
index 6639baab0798..61a000f8524c 100644
--- a/fs/ocfs2/dlm/dlmfs.c
+++ b/fs/ocfs2/dlm/dlmfs.c
@@ -60,6 +60,8 @@
#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
+#include "ocfs2_lockingver.h"
+
static const struct super_operations dlmfs_ops;
static const struct file_operations dlmfs_file_operations;
static const struct inode_operations dlmfs_dir_inode_operations;
@@ -70,6 +72,16 @@ static struct kmem_cache *dlmfs_inode_cache;
struct workqueue_struct *user_dlm_worker;
/*
+ * This is the userdlmfs locking protocol version.
+ *
+ * See fs/ocfs2/dlmglue.c for more details on locking versions.
+ */
+static const struct dlm_protocol_version user_locking_protocol = {
+ .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+ .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+};
+
+/*
* decodes a set of open flags into a valid lock level and a set of flags.
* returns < 0 if we have invalid flags
* flags which mean something to us:
@@ -416,6 +428,7 @@ static int dlmfs_mkdir(struct inode * dir,
struct qstr *domain = &dentry->d_name;
struct dlmfs_inode_private *ip;
struct dlm_ctxt *dlm;
+ struct dlm_protocol_version proto = user_locking_protocol;
mlog(0, "mkdir %.*s\n", domain->len, domain->name);
@@ -435,7 +448,7 @@ static int dlmfs_mkdir(struct inode * dir,
ip = DLMFS_I(inode);
- dlm = user_dlm_register_context(domain);
+ dlm = user_dlm_register_context(domain, &proto);
if (IS_ERR(dlm)) {
status = PTR_ERR(dlm);
mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n",
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c
index 7d2f578b267d..4cb1d3dae250 100644
--- a/fs/ocfs2/dlm/userdlm.c
+++ b/fs/ocfs2/dlm/userdlm.c
@@ -645,7 +645,8 @@ bail:
return status;
}
-struct dlm_ctxt *user_dlm_register_context(struct qstr *name)
+struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
+ struct dlm_protocol_version *proto)
{
struct dlm_ctxt *dlm;
u32 dlm_key;
@@ -661,7 +662,7 @@ struct dlm_ctxt *user_dlm_register_context(struct qstr *name)
snprintf(domain, name->len + 1, "%.*s", name->len, name->name);
- dlm = dlm_register_domain(domain, dlm_key);
+ dlm = dlm_register_domain(domain, dlm_key, proto);
if (IS_ERR(dlm))
mlog_errno(PTR_ERR(dlm));
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlm/userdlm.h
index c400e93bbf79..39ec27738499 100644
--- a/fs/ocfs2/dlm/userdlm.h
+++ b/fs/ocfs2/dlm/userdlm.h
@@ -83,7 +83,8 @@ void user_dlm_write_lvb(struct inode *inode,
void user_dlm_read_lvb(struct inode *inode,
char *val,
unsigned int len);
-struct dlm_ctxt *user_dlm_register_context(struct qstr *name);
+struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
+ struct dlm_protocol_version *proto);
void user_dlm_unregister_context(struct dlm_ctxt *dlm);
struct dlmfs_inode_private {
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 3867244fb144..351130c9b734 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -43,6 +43,7 @@
#include <cluster/masklog.h>
#include "ocfs2.h"
+#include "ocfs2_lockingver.h"
#include "alloc.h"
#include "dcache.h"
@@ -258,6 +259,31 @@ static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
.flags = 0,
};
+/*
+ * This is the filesystem locking protocol version.
+ *
+ * Whenever the filesystem does new things with locks (adds or removes a
+ * lock, orders them differently, does different things underneath a lock),
+ * the version must be changed. The protocol is negotiated when joining
+ * the dlm domain. A node may join the domain if its major version is
+ * identical to all other nodes and its minor version is greater than
+ * or equal to all other nodes. When its minor version is greater than
+ * the other nodes, it will run at the minor version specified by the
+ * other nodes.
+ *
+ * If a locking change is made that will not be compatible with older
+ * versions, the major number must be increased and the minor version set
+ * to zero. If a change merely adds a behavior that can be disabled when
+ * speaking to older versions, the minor version must be increased. If a
+ * change adds a fully backwards compatible change (eg, LVB changes that
+ * are just ignored by older versions), the version does not need to be
+ * updated.
+ */
+const struct dlm_protocol_version ocfs2_locking_protocol = {
+ .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
+ .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
+};
+
static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
return lockres->l_type == OCFS2_LOCK_TYPE_META ||
@@ -2506,7 +2532,8 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
dlm_key = crc32_le(0, osb->uuid_str, strlen(osb->uuid_str));
/* for now, uuid == domain */
- dlm = dlm_register_domain(osb->uuid_str, dlm_key);
+ dlm = dlm_register_domain(osb->uuid_str, dlm_key,
+ &osb->osb_locking_proto);
if (IS_ERR(dlm)) {
status = PTR_ERR(dlm);
mlog_errno(status);
diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
index 5f17243ba501..1d5b0699d0a9 100644
--- a/fs/ocfs2/dlmglue.h
+++ b/fs/ocfs2/dlmglue.h
@@ -116,4 +116,5 @@ void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb);
struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void);
void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
+extern const struct dlm_protocol_version ocfs2_locking_protocol;
#endif /* DLMGLUE_H */
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index d08480580470..e8b7292e0152 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -251,6 +251,7 @@ struct ocfs2_super
struct ocfs2_lock_res osb_rename_lockres;
struct dlm_eviction_cb osb_eviction_cb;
struct ocfs2_dlm_debug *osb_dlm_debug;
+ struct dlm_protocol_version osb_locking_proto;
struct dentry *osb_debug_root;
diff --git a/fs/ocfs2/ocfs2_lockingver.h b/fs/ocfs2/ocfs2_lockingver.h
new file mode 100644
index 000000000000..82d5eeac0fff
--- /dev/null
+++ b/fs/ocfs2/ocfs2_lockingver.h
@@ -0,0 +1,30 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * ocfs2_lockingver.h
+ *
+ * Defines OCFS2 Locking version values.
+ *
+ * Copyright (C) 2008 Oracle. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef OCFS2_LOCKINGVER_H
+#define OCFS2_LOCKINGVER_H
+
+/*
+ * The protocol version for ocfs2 cluster locking. See dlmglue.c for
+ * more details.
+ */
+#define OCFS2_LOCKING_PROTOCOL_MAJOR 1
+#define OCFS2_LOCKING_PROTOCOL_MINOR 0
+
+#endif /* OCFS2_LOCKINGVER_H */
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 01fe40ee5ea9..bec75aff3d9f 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1355,6 +1355,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
sb->s_fs_info = osb;
sb->s_op = &ocfs2_sops;
sb->s_export_op = &ocfs2_export_ops;
+ osb->osb_locking_proto = ocfs2_locking_protocol;
sb->s_time_gran = 1;
sb->s_flags |= MS_NOATIME;
/* this is needed to support O_LARGEFILE */
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 6b7ff1618945..d17b4fd204e1 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -38,6 +38,8 @@ struct op_inode_info {
union op_inode_data u;
};
+static struct inode *openprom_iget(struct super_block *sb, ino_t ino);
+
static inline struct op_inode_info *OP_I(struct inode *inode)
{
return container_of(inode, struct op_inode_info, vfs_inode);
@@ -226,10 +228,10 @@ static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry
return ERR_PTR(-ENOENT);
found:
- inode = iget(dir->i_sb, ino);
+ inode = openprom_iget(dir->i_sb, ino);
mutex_unlock(&op_mutex);
- if (!inode)
- return ERR_PTR(-EINVAL);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
ent_oi = OP_I(inode);
ent_oi->type = ent_type;
ent_oi->u = ent_data;
@@ -348,14 +350,23 @@ static void openprom_destroy_inode(struct inode *inode)
kmem_cache_free(op_inode_cachep, OP_I(inode));
}
-static void openprom_read_inode(struct inode * inode)
+static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
{
- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
- if (inode->i_ino == OPENPROM_ROOT_INO) {
- inode->i_op = &openprom_inode_operations;
- inode->i_fop = &openprom_operations;
- inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+ struct inode *inode;
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (inode->i_state & I_NEW) {
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ if (inode->i_ino == OPENPROM_ROOT_INO) {
+ inode->i_op = &openprom_inode_operations;
+ inode->i_fop = &openprom_operations;
+ inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+ }
+ unlock_new_inode(inode);
}
+ return inode;
}
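
This openpromfs change follows the same ->read_inode removal pattern used for proc, qnx4 and romfs later in this diff: callers get a filesystem-specific foo_iget() built on iget_locked(). A generic sketch of the pattern, with illustrative names only:

	static struct inode *example_iget(struct super_block *sb, unsigned long ino)
	{
		struct inode *inode = iget_locked(sb, ino);

		if (!inode)
			return ERR_PTR(-ENOMEM);
		if (!(inode->i_state & I_NEW))
			return inode;		/* already in the inode cache */

		/* ... read the on-disk inode and fill in *inode here;
		 * on failure: iget_failed(inode); return ERR_PTR(-EIO); */

		unlock_new_inode(inode);
		return inode;
	}
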
static int openprom_remount(struct super_block *sb, int *flags, char *data)
@@ -367,7 +378,6 @@ static int openprom_remount(struct super_block *sb, int *flags, char *data)
static const struct super_operations openprom_sops = {
.alloc_inode = openprom_alloc_inode,
.destroy_inode = openprom_destroy_inode,
- .read_inode = openprom_read_inode,
.statfs = simple_statfs,
.remount_fs = openprom_remount,
};
@@ -376,6 +386,7 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
{
struct inode *root_inode;
struct op_inode_info *oi;
+ int ret;
s->s_flags |= MS_NOATIME;
s->s_blocksize = 1024;
@@ -383,9 +394,11 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
s->s_magic = OPENPROM_SUPER_MAGIC;
s->s_op = &openprom_sops;
s->s_time_gran = 1;
- root_inode = iget(s, OPENPROM_ROOT_INO);
- if (!root_inode)
+ root_inode = openprom_iget(s, OPENPROM_ROOT_INO);
+ if (IS_ERR(root_inode)) {
+ ret = PTR_ERR(root_inode);
goto out_no_root;
+ }
oi = OP_I(root_inode);
oi->type = op_inode_node;
@@ -393,13 +406,15 @@ static int openprom_fill_super(struct super_block *s, void *data, int silent)
s->s_root = d_alloc_root(root_inode);
if (!s->s_root)
- goto out_no_root;
+ goto out_no_root_dentry;
return 0;
+out_no_root_dentry:
+ iput(root_inode);
+ ret = -ENOMEM;
out_no_root:
printk("openprom_fill_super: get root inode failed\n");
- iput(root_inode);
- return -ENOMEM;
+ return ret;
}
static int openprom_get_sb(struct file_system_type *fs_type,
diff --git a/fs/partitions/Kconfig b/fs/partitions/Kconfig
index a99acd8de353..cb5f0a3f1b03 100644
--- a/fs/partitions/Kconfig
+++ b/fs/partitions/Kconfig
@@ -198,7 +198,7 @@ config LDM_DEBUG
config SGI_PARTITION
bool "SGI partition support" if PARTITION_ADVANCED
- default y if (SGI_IP22 || SGI_IP27 || ((MACH_JAZZ || SNI_RM) && !CPU_LITTLE_ENDIAN))
+ default y if DEFAULT_SGI_PARTITION
help
Say Y here if you would like to be able to read the hard disk
partition table format used by SGI machines.
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 739da701ae7b..9a64045ff845 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -319,6 +319,14 @@ void delete_partition(struct gendisk *disk, int part)
put_device(&p->dev);
}
+static ssize_t whole_disk_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return 0;
+}
+static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
+ whole_disk_show, NULL);
+
void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len, int flags)
{
struct hd_struct *p;
@@ -352,13 +360,8 @@ void add_partition(struct gendisk *disk, int part, sector_t start, sector_t len,
device_add(&p->dev);
partition_sysfs_add_subdir(p);
p->dev.uevent_suppress = 0;
- if (flags & ADDPART_FLAG_WHOLEDISK) {
- static struct attribute addpartattr = {
- .name = "whole_disk",
- .mode = S_IRUSR | S_IRGRP | S_IROTH,
- };
- err = sysfs_create_file(&p->dev.kobj, &addpartattr);
- }
+ if (flags & ADDPART_FLAG_WHOLEDISK)
+ err = device_create_file(&p->dev, &dev_attr_whole_disk);
	/* suppress uevent if the disk suppresses it */
if (!disk->dev.uevent_suppress)
diff --git a/fs/pnode.c b/fs/pnode.c
index 89940f243fc2..05ba692bc540 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -83,6 +83,8 @@ void change_mnt_propagation(struct vfsmount *mnt, int type)
mnt->mnt_master = NULL;
if (type == MS_UNBINDABLE)
mnt->mnt_flags |= MNT_UNBINDABLE;
+ else
+ mnt->mnt_flags &= ~MNT_UNBINDABLE;
}
}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index b380313092bd..6ba2746e4517 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -281,14 +281,23 @@ static inline char *task_sig(struct task_struct *p, char *buffer)
return buffer;
}
+static char *render_cap_t(const char *header, kernel_cap_t *a, char *buffer)
+{
+ unsigned __capi;
+
+ buffer += sprintf(buffer, "%s", header);
+ CAP_FOR_EACH_U32(__capi) {
+ buffer += sprintf(buffer, "%08x",
+ a->cap[(_LINUX_CAPABILITY_U32S-1) - __capi]);
+ }
+ return buffer + sprintf(buffer, "\n");
+}
+
static inline char *task_cap(struct task_struct *p, char *buffer)
{
- return buffer + sprintf(buffer, "CapInh:\t%016x\n"
- "CapPrm:\t%016x\n"
- "CapEff:\t%016x\n",
- cap_t(p->cap_inheritable),
- cap_t(p->cap_permitted),
- cap_t(p->cap_effective));
+ buffer = render_cap_t("CapInh:\t", &p->cap_inheritable, buffer);
+ buffer = render_cap_t("CapPrm:\t", &p->cap_permitted, buffer);
+ return render_cap_t("CapEff:\t", &p->cap_effective, buffer);
}
static inline char *task_context_switch_counts(struct task_struct *p,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 33537487f5ab..c59852b38787 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -88,10 +88,6 @@
* in /proc for a task before it execs a suid executable.
*/
-
-/* Worst case buffer size needed for holding an integer. */
-#define PROC_NUMBUF 13
-
struct pid_entry {
char *name;
int len;
@@ -787,7 +783,7 @@ out_no_task:
}
#endif
-static loff_t mem_lseek(struct file * file, loff_t offset, int orig)
+loff_t mem_lseek(struct file *file, loff_t offset, int orig)
{
switch (orig) {
case 0:
@@ -935,42 +931,6 @@ static const struct file_operations proc_oom_adjust_operations = {
.write = oom_adjust_write,
};
-#ifdef CONFIG_MMU
-static ssize_t clear_refs_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct task_struct *task;
- char buffer[PROC_NUMBUF], *end;
- struct mm_struct *mm;
-
- memset(buffer, 0, sizeof(buffer));
- if (count > sizeof(buffer) - 1)
- count = sizeof(buffer) - 1;
- if (copy_from_user(buffer, buf, count))
- return -EFAULT;
- if (!simple_strtol(buffer, &end, 0))
- return -EINVAL;
- if (*end == '\n')
- end++;
- task = get_proc_task(file->f_path.dentry->d_inode);
- if (!task)
- return -ESRCH;
- mm = get_task_mm(task);
- if (mm) {
- clear_refs_smap(mm);
- mmput(mm);
- }
- put_task_struct(task);
- if (end - buffer == 0)
- return -EIO;
- return end - buffer;
-}
-
-static struct file_operations proc_clear_refs_operations = {
- .write = clear_refs_write,
-};
-#endif
-
#ifdef CONFIG_AUDITSYSCALL
#define TMPBUFLEN 21
static ssize_t proc_loginuid_read(struct file * file, char __user * buf,
@@ -2289,9 +2249,10 @@ static const struct pid_entry tgid_base_stuff[] = {
LNK("exe", exe),
REG("mounts", S_IRUGO, mounts),
REG("mountstats", S_IRUSR, mountstats),
-#ifdef CONFIG_MMU
+#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, clear_refs),
REG("smaps", S_IRUGO, smaps),
+ REG("pagemap", S_IRUSR, pagemap),
#endif
#ifdef CONFIG_SECURITY
DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
@@ -2360,7 +2321,8 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
name.len = snprintf(buf, sizeof(buf), "%d", pid);
dentry = d_hash_and_lookup(mnt->mnt_root, &name);
if (dentry) {
- shrink_dcache_parent(dentry);
+ if (!(current->flags & PF_EXITING))
+ shrink_dcache_parent(dentry);
d_drop(dentry);
dput(dentry);
}
@@ -2617,9 +2579,10 @@ static const struct pid_entry tid_base_stuff[] = {
LNK("root", root),
LNK("exe", exe),
REG("mounts", S_IRUGO, mounts),
-#ifdef CONFIG_MMU
+#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, clear_refs),
REG("smaps", S_IRUGO, smaps),
+ REG("pagemap", S_IRUSR, pagemap),
#endif
#ifdef CONFIG_SECURITY
DIR("attr", S_IRUGO|S_IXUGO, attr_dir),
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 1a551d92e1d8..6ecf6396f072 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -73,11 +73,6 @@ static void proc_delete_inode(struct inode *inode)
struct vfsmount *proc_mnt;
-static void proc_read_inode(struct inode * inode)
-{
- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
-}
-
static struct kmem_cache * proc_inode_cachep;
static struct inode *proc_alloc_inode(struct super_block *sb)
@@ -128,7 +123,6 @@ static int proc_remount(struct super_block *sb, int *flags, char *data)
static const struct super_operations proc_sops = {
.alloc_inode = proc_alloc_inode,
.destroy_inode = proc_destroy_inode,
- .read_inode = proc_read_inode,
.drop_inode = generic_delete_inode,
.delete_inode = proc_delete_inode,
.statfs = simple_statfs,
@@ -401,39 +395,41 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
if (de != NULL && !try_module_get(de->owner))
goto out_mod;
- inode = iget(sb, ino);
+ inode = iget_locked(sb, ino);
if (!inode)
goto out_ino;
-
- PROC_I(inode)->fd = 0;
- PROC_I(inode)->pde = de;
- if (de) {
- if (de->mode) {
- inode->i_mode = de->mode;
- inode->i_uid = de->uid;
- inode->i_gid = de->gid;
- }
- if (de->size)
- inode->i_size = de->size;
- if (de->nlink)
- inode->i_nlink = de->nlink;
- if (de->proc_iops)
- inode->i_op = de->proc_iops;
- if (de->proc_fops) {
- if (S_ISREG(inode->i_mode)) {
+ if (inode->i_state & I_NEW) {
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ PROC_I(inode)->fd = 0;
+ PROC_I(inode)->pde = de;
+ if (de) {
+ if (de->mode) {
+ inode->i_mode = de->mode;
+ inode->i_uid = de->uid;
+ inode->i_gid = de->gid;
+ }
+ if (de->size)
+ inode->i_size = de->size;
+ if (de->nlink)
+ inode->i_nlink = de->nlink;
+ if (de->proc_iops)
+ inode->i_op = de->proc_iops;
+ if (de->proc_fops) {
+ if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_COMPAT
- if (!de->proc_fops->compat_ioctl)
- inode->i_fop =
- &proc_reg_file_ops_no_compat;
- else
+ if (!de->proc_fops->compat_ioctl)
+ inode->i_fop =
+ &proc_reg_file_ops_no_compat;
+ else
#endif
- inode->i_fop = &proc_reg_file_ops;
+ inode->i_fop = &proc_reg_file_ops;
+ } else {
+ inode->i_fop = de->proc_fops;
+ }
}
- else
- inode->i_fop = de->proc_fops;
}
+ unlock_new_inode(inode);
}
-
return inode;
out_ino:
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 05b3e9006262..7d57e8069924 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -52,15 +52,13 @@ extern int proc_tid_stat(struct task_struct *, char *);
extern int proc_tgid_stat(struct task_struct *, char *);
extern int proc_pid_status(struct task_struct *, char *);
extern int proc_pid_statm(struct task_struct *, char *);
+extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
extern const struct file_operations proc_maps_operations;
extern const struct file_operations proc_numa_maps_operations;
extern const struct file_operations proc_smaps_operations;
-
-extern const struct file_operations proc_maps_operations;
-extern const struct file_operations proc_numa_maps_operations;
-extern const struct file_operations proc_smaps_operations;
-
+extern const struct file_operations proc_clear_refs_operations;
+extern const struct file_operations proc_pagemap_operations;
void free_proc_entry(struct proc_dir_entry *de);
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 1be73082edd3..7dd26e18cbfd 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -325,7 +325,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
if (m == NULL) {
if (clear_user(buffer, tsz))
return -EFAULT;
- } else if ((start >= VMALLOC_START) && (start < VMALLOC_END)) {
+ } else if (is_vmalloc_addr((void *)start)) {
char * elf_buf;
struct vm_struct *m;
unsigned long curstart = start;
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 3462bfde89f6..2686592dbcb2 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -29,6 +29,7 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
+#include <linux/interrupt.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -46,6 +47,7 @@
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/pid_namespace.h>
+#include <linux/bootmem.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
@@ -63,7 +65,6 @@
*/
extern int get_hardware_list(char *);
extern int get_stram_list(char *);
-extern int get_filesystem_list(char *);
extern int get_exec_domain_list(char *);
extern int get_dma_list(char *);
@@ -83,10 +84,15 @@ static int loadavg_read_proc(char *page, char **start, off_t off,
{
int a, b, c;
int len;
+ unsigned long seq;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ a = avenrun[0] + (FIXED_1/200);
+ b = avenrun[1] + (FIXED_1/200);
+ c = avenrun[2] + (FIXED_1/200);
+ } while (read_seqretry(&xtime_lock, seq));
- a = avenrun[0] + (FIXED_1/200);
- b = avenrun[1] + (FIXED_1/200);
- c = avenrun[2] + (FIXED_1/200);
len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
LOAD_INT(a), LOAD_FRAC(a),
LOAD_INT(b), LOAD_FRAC(b),
@@ -598,7 +604,6 @@ static void int_seq_stop(struct seq_file *f, void *v)
}
-extern int show_interrupts(struct seq_file *f, void *v); /* In arch code */
static struct seq_operations int_seq_ops = {
.start = int_seq_start,
.next = int_seq_next,
@@ -675,6 +680,137 @@ static const struct file_operations proc_sysrq_trigger_operations = {
};
#endif
+#ifdef CONFIG_PROC_PAGE_MONITOR
+#define KPMSIZE sizeof(u64)
+#define KPMMASK (KPMSIZE - 1)
+/* /proc/kpagecount - an array exposing page counts
+ *
+ * Each entry is a u64 representing the corresponding
+ * physical page count.
+ */
+static ssize_t kpagecount_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 __user *out = (u64 __user *)buf;
+ struct page *ppage;
+ unsigned long src = *ppos;
+ unsigned long pfn;
+ ssize_t ret = 0;
+ u64 pcount;
+
+ pfn = src / KPMSIZE;
+ count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
+ if (src & KPMMASK || count & KPMMASK)
+ return -EIO;
+
+ while (count > 0) {
+ ppage = NULL;
+ if (pfn_valid(pfn))
+ ppage = pfn_to_page(pfn);
+ pfn++;
+ if (!ppage)
+ pcount = 0;
+ else
+ pcount = atomic_read(&ppage->_count);
+
+ if (put_user(pcount, out++)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ count -= KPMSIZE;
+ }
+
+ *ppos += (char __user *)out - buf;
+ if (!ret)
+ ret = (char __user *)out - buf;
+ return ret;
+}
+
+static struct file_operations proc_kpagecount_operations = {
+ .llseek = mem_lseek,
+ .read = kpagecount_read,
+};
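
A hedged userspace sketch of how the interface above is meant to be consumed: every pfn owns one 8-byte slot, so the count for pfn N sits at file offset N * sizeof(u64). Illustrative only; error handling is trimmed.

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	static uint64_t read_kpagecount(unsigned long pfn)
	{
		uint64_t count = 0;
		int fd = open("/proc/kpagecount", O_RDONLY);

		if (fd >= 0) {
			pread(fd, &count, sizeof(count), pfn * sizeof(uint64_t));
			close(fd);
		}
		return count;	/* page reference count, or 0 for an invalid pfn */
	}
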
+
+/* /proc/kpageflags - an array exposing page flags
+ *
+ * Each entry is a u64 representing the corresponding
+ * physical page flags.
+ */
+
+/* These macros are used to decouple internal flags from exported ones */
+
+#define KPF_LOCKED 0
+#define KPF_ERROR 1
+#define KPF_REFERENCED 2
+#define KPF_UPTODATE 3
+#define KPF_DIRTY 4
+#define KPF_LRU 5
+#define KPF_ACTIVE 6
+#define KPF_SLAB 7
+#define KPF_WRITEBACK 8
+#define KPF_RECLAIM 9
+#define KPF_BUDDY 10
+
+#define kpf_copy_bit(flags, dstpos, srcpos) (((flags >> srcpos) & 1) << dstpos)
+
+static ssize_t kpageflags_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u64 __user *out = (u64 __user *)buf;
+ struct page *ppage;
+ unsigned long src = *ppos;
+ unsigned long pfn;
+ ssize_t ret = 0;
+ u64 kflags, uflags;
+
+ pfn = src / KPMSIZE;
+ count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
+ if (src & KPMMASK || count & KPMMASK)
+ return -EIO;
+
+ while (count > 0) {
+ ppage = NULL;
+ if (pfn_valid(pfn))
+ ppage = pfn_to_page(pfn);
+ pfn++;
+ if (!ppage)
+ kflags = 0;
+ else
+ kflags = ppage->flags;
+
+ uflags = kpf_copy_bit(kflags, KPF_LOCKED, PG_locked) |
+ kpf_copy_bit(kflags, KPF_ERROR, PG_error) |
+ kpf_copy_bit(kflags, KPF_REFERENCED, PG_referenced) |
+ kpf_copy_bit(kflags, KPF_UPTODATE, PG_uptodate) |
+ kpf_copy_bit(kflags, KPF_DIRTY, PG_dirty) |
+ kpf_copy_bit(kflags, KPF_LRU, PG_lru) |
+ kpf_copy_bit(kflags, KPF_ACTIVE, PG_active) |
+ kpf_copy_bit(kflags, KPF_SLAB, PG_slab) |
+ kpf_copy_bit(kflags, KPF_WRITEBACK, PG_writeback) |
+ kpf_copy_bit(kflags, KPF_RECLAIM, PG_reclaim) |
+ kpf_copy_bit(kflags, KPF_BUDDY, PG_buddy);
+
+ if (put_user(uflags, out++)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ count -= KPMSIZE;
+ }
+
+ *ppos += (char __user *)out - buf;
+ if (!ret)
+ ret = (char __user *)out - buf;
+ return ret;
+}
+
+static struct file_operations proc_kpageflags_operations = {
+ .llseek = mem_lseek,
+ .read = kpageflags_read,
+};
+#endif /* CONFIG_PROC_PAGE_MONITOR */
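
The KPF_* positions above are the exported bit layout, so a reader only needs shifts and masks on the same 8-byte-per-pfn slots as kpagecount. A small sketch under that assumption; a real userspace program would have to hard-code the numeric values, since these constants are not in an exported header:

	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* bit positions copied from the table above */
	#define KPF_DIRTY	4
	#define KPF_LRU		5
	#define KPF_SLAB	7

	static void describe_pfn(int fd, unsigned long pfn)	/* fd: /proc/kpageflags */
	{
		uint64_t flags = 0;

		pread(fd, &flags, sizeof(flags), pfn * sizeof(uint64_t));
		if ((flags >> KPF_SLAB) & 1)
			printf("pfn %lu: slab page\n", pfn);
		if (((flags >> KPF_LRU) & 1) && ((flags >> KPF_DIRTY) & 1))
			printf("pfn %lu: dirty LRU page\n", pfn);
	}
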
+
struct proc_dir_entry *proc_root_kcore;
void create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
@@ -755,6 +891,10 @@ void __init proc_misc_init(void)
(size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
}
#endif
+#ifdef CONFIG_PROC_PAGE_MONITOR
+ create_seq_entry("kpagecount", S_IRUSR, &proc_kpagecount_operations);
+ create_seq_entry("kpageflags", S_IRUSR, &proc_kpageflags_operations);
+#endif
#ifdef CONFIG_PROC_VMCORE
proc_vmcore = create_proc_entry("vmcore", S_IRUSR, NULL);
if (proc_vmcore)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 8043a3eab52c..38338ed98cc6 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -5,7 +5,10 @@
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
+#include <linux/ptrace.h>
#include <linux/mempolicy.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
#include <asm/elf.h>
#include <asm/uaccess.h>
@@ -114,24 +117,124 @@ static void pad_len_spaces(struct seq_file *m, int len)
seq_printf(m, "%*c", len, ' ');
}
-struct mem_size_stats
+static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
- unsigned long resident;
- unsigned long shared_clean;
- unsigned long shared_dirty;
- unsigned long private_clean;
- unsigned long private_dirty;
- unsigned long referenced;
-};
+ if (vma && vma != priv->tail_vma) {
+ struct mm_struct *mm = vma->vm_mm;
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ }
+}
-struct pmd_walker {
- struct vm_area_struct *vma;
- void *private;
- void (*action)(struct vm_area_struct *, pmd_t *, unsigned long,
- unsigned long, void *);
-};
+static void *m_start(struct seq_file *m, loff_t *pos)
+{
+ struct proc_maps_private *priv = m->private;
+ unsigned long last_addr = m->version;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma, *tail_vma = NULL;
+ loff_t l = *pos;
+
+ /* Clear the per syscall fields in priv */
+ priv->task = NULL;
+ priv->tail_vma = NULL;
+
+ /*
+ * We remember last_addr rather than next_addr to hit with
+ * mmap_cache most of the time. We have zero last_addr at
+ * the beginning and also after lseek. We will have -1 last_addr
+ * after the end of the vmas.
+ */
+
+ if (last_addr == -1UL)
+ return NULL;
+
+ priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
+ if (!priv->task)
+ return NULL;
+
+ mm = mm_for_maps(priv->task);
+ if (!mm)
+ return NULL;
+
+ tail_vma = get_gate_vma(priv->task);
+ priv->tail_vma = tail_vma;
+
+ /* Start with last addr hint */
+ vma = find_vma(mm, last_addr);
+ if (last_addr && vma) {
+ vma = vma->vm_next;
+ goto out;
+ }
+
+ /*
+ * Check the vma index is within the range and do
+ * sequential scan until m_index.
+ */
+ vma = NULL;
+ if ((unsigned long)l < mm->map_count) {
+ vma = mm->mmap;
+ while (l-- && vma)
+ vma = vma->vm_next;
+ goto out;
+ }
+
+ if (l != mm->map_count)
+ tail_vma = NULL; /* After gate vma */
+
+out:
+ if (vma)
+ return vma;
+
+ /* End of vmas has been reached */
+ m->version = (tail_vma != NULL)? 0: -1UL;
+ up_read(&mm->mmap_sem);
+ mmput(mm);
+ return tail_vma;
+}
-static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
+static void *m_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct proc_maps_private *priv = m->private;
+ struct vm_area_struct *vma = v;
+ struct vm_area_struct *tail_vma = priv->tail_vma;
+
+ (*pos)++;
+ if (vma && (vma != tail_vma) && vma->vm_next)
+ return vma->vm_next;
+ vma_stop(priv, vma);
+ return (vma != tail_vma)? tail_vma: NULL;
+}
+
+static void m_stop(struct seq_file *m, void *v)
+{
+ struct proc_maps_private *priv = m->private;
+ struct vm_area_struct *vma = v;
+
+ vma_stop(priv, vma);
+ if (priv->task)
+ put_task_struct(priv->task);
+}
+
+static int do_maps_open(struct inode *inode, struct file *file,
+ struct seq_operations *ops)
+{
+ struct proc_maps_private *priv;
+ int ret = -ENOMEM;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (priv) {
+ priv->pid = proc_pid(inode);
+ ret = seq_open(file, ops);
+ if (!ret) {
+ struct seq_file *m = file->private_data;
+ m->private = priv;
+ } else {
+ kfree(priv);
+ }
+ }
+ return ret;
+}
+
+static int show_map(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
struct task_struct *task = priv->task;
@@ -191,41 +294,71 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
}
seq_putc(m, '\n');
- if (mss)
- seq_printf(m,
- "Size: %8lu kB\n"
- "Rss: %8lu kB\n"
- "Shared_Clean: %8lu kB\n"
- "Shared_Dirty: %8lu kB\n"
- "Private_Clean: %8lu kB\n"
- "Private_Dirty: %8lu kB\n"
- "Referenced: %8lu kB\n",
- (vma->vm_end - vma->vm_start) >> 10,
- mss->resident >> 10,
- mss->shared_clean >> 10,
- mss->shared_dirty >> 10,
- mss->private_clean >> 10,
- mss->private_dirty >> 10,
- mss->referenced >> 10);
-
if (m->count < m->size) /* vma is copied successfully */
m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
return 0;
}
-static int show_map(struct seq_file *m, void *v)
+static struct seq_operations proc_pid_maps_op = {
+ .start = m_start,
+ .next = m_next,
+ .stop = m_stop,
+ .show = show_map
+};
+
+static int maps_open(struct inode *inode, struct file *file)
{
- return show_map_internal(m, v, NULL);
+ return do_maps_open(inode, file, &proc_pid_maps_op);
}
-static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- void *private)
+const struct file_operations proc_maps_operations = {
+ .open = maps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+/*
+ * Proportional Set Size (PSS): my share of RSS.
+ *
+ * PSS of a process is the count of pages it has in memory, where each
+ * page is divided by the number of processes sharing it. So if a
+ * process has 1000 pages all to itself, and 1000 shared with one other
+ * process, its PSS will be 1500.
+ *
+ * To keep accumulated division errors low, we use a 64-bit
+ * fixed-point pss counter, so (pss >> PSS_SHIFT) gives the real
+ * byte count.
+ *
+ * A shift of 12 before division means (assuming 4K page size):
+ * - 1M 3-user-pages add up to 8KB errors;
+ * - supports mapcount up to 2^24, or 16M;
+ * - supports PSS up to 2^52 bytes, or 4PB.
+ */
+#define PSS_SHIFT 12
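
A quick worked example of the fixed-point accounting described above, assuming 4 KB pages:

	/* One page shared by 4 processes adds (4096 << 12) / 4 = 4194304 to pss.
	 * show_smap() prints mss.pss >> (10 + 12), i.e. 4194304 >> 22 = 1 kB,
	 * which is the expected 4 kB / 4 share of that page. */
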
+
+#ifdef CONFIG_PROC_PAGE_MONITOR
+struct mem_size_stats
+{
+ struct vm_area_struct *vma;
+ unsigned long resident;
+ unsigned long shared_clean;
+ unsigned long shared_dirty;
+ unsigned long private_clean;
+ unsigned long private_dirty;
+ unsigned long referenced;
+ u64 pss;
+};
+
+static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ void *private)
{
struct mem_size_stats *mss = private;
+ struct vm_area_struct *vma = mss->vma;
pte_t *pte, ptent;
spinlock_t *ptl;
struct page *page;
+ int mapcount;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -242,26 +375,88 @@ static void smaps_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
/* Accumulate the size in pages that have been accessed. */
if (pte_young(ptent) || PageReferenced(page))
mss->referenced += PAGE_SIZE;
- if (page_mapcount(page) >= 2) {
+ mapcount = page_mapcount(page);
+ if (mapcount >= 2) {
if (pte_dirty(ptent))
mss->shared_dirty += PAGE_SIZE;
else
mss->shared_clean += PAGE_SIZE;
+ mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
} else {
if (pte_dirty(ptent))
mss->private_dirty += PAGE_SIZE;
else
mss->private_clean += PAGE_SIZE;
+ mss->pss += (PAGE_SIZE << PSS_SHIFT);
}
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
+ return 0;
}
-static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, unsigned long end,
- void *private)
+static struct mm_walk smaps_walk = { .pmd_entry = smaps_pte_range };
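
smaps, clear_refs and pagemap now all go through the walk_page_range() helper introduced earlier in this series: the caller fills a struct mm_walk with callbacks and passes its own cookie as the last argument. A minimal sketch of a new walker in that style (the counting logic is purely illustrative):

	static int count_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			       void *private)
	{
		unsigned long *pages = private;

		/* pages spanned by this present pmd, not necessarily mapped */
		*pages += (end - addr) >> PAGE_SHIFT;
		return 0;		/* non-zero would abort the walk */
	}

	static struct mm_walk count_walk = { .pmd_entry = count_range };

	/* with mmap_sem held for read:
	 *	walk_page_range(mm, vma->vm_start, vma->vm_end, &count_walk, &n);
	 */
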
+
+static int show_smap(struct seq_file *m, void *v)
{
+ struct vm_area_struct *vma = v;
+ struct mem_size_stats mss;
+ int ret;
+
+ memset(&mss, 0, sizeof mss);
+ mss.vma = vma;
+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
+ walk_page_range(vma->vm_mm, vma->vm_start, vma->vm_end,
+ &smaps_walk, &mss);
+
+ ret = show_map(m, v);
+ if (ret)
+ return ret;
+
+ seq_printf(m,
+ "Size: %8lu kB\n"
+ "Rss: %8lu kB\n"
+ "Pss: %8lu kB\n"
+ "Shared_Clean: %8lu kB\n"
+ "Shared_Dirty: %8lu kB\n"
+ "Private_Clean: %8lu kB\n"
+ "Private_Dirty: %8lu kB\n"
+ "Referenced: %8lu kB\n",
+ (vma->vm_end - vma->vm_start) >> 10,
+ mss.resident >> 10,
+ (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
+ mss.shared_clean >> 10,
+ mss.shared_dirty >> 10,
+ mss.private_clean >> 10,
+ mss.private_dirty >> 10,
+ mss.referenced >> 10);
+
+ return ret;
+}
+
+static struct seq_operations proc_pid_smaps_op = {
+ .start = m_start,
+ .next = m_next,
+ .stop = m_stop,
+ .show = show_smap
+};
+
+static int smaps_open(struct inode *inode, struct file *file)
+{
+ return do_maps_open(inode, file, &proc_pid_smaps_op);
+}
+
+const struct file_operations proc_smaps_operations = {
+ .open = smaps_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end, void *private)
+{
+ struct vm_area_struct *vma = private;
pte_t *pte, ptent;
spinlock_t *ptl;
struct page *page;
@@ -282,235 +477,248 @@ static void clear_refs_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
+ return 0;
}
-static inline void walk_pmd_range(struct pmd_walker *walker, pud_t *pud,
- unsigned long addr, unsigned long end)
+static struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range };
+
+static ssize_t clear_refs_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
- pmd_t *pmd;
- unsigned long next;
+ struct task_struct *task;
+ char buffer[PROC_NUMBUF], *end;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
- for (pmd = pmd_offset(pud, addr); addr != end;
- pmd++, addr = next) {
- next = pmd_addr_end(addr, end);
- if (pmd_none_or_clear_bad(pmd))
- continue;
- walker->action(walker->vma, pmd, addr, next, walker->private);
+ memset(buffer, 0, sizeof(buffer));
+ if (count > sizeof(buffer) - 1)
+ count = sizeof(buffer) - 1;
+ if (copy_from_user(buffer, buf, count))
+ return -EFAULT;
+ if (!simple_strtol(buffer, &end, 0))
+ return -EINVAL;
+ if (*end == '\n')
+ end++;
+ task = get_proc_task(file->f_path.dentry->d_inode);
+ if (!task)
+ return -ESRCH;
+ mm = get_task_mm(task);
+ if (mm) {
+ down_read(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ if (!is_vm_hugetlb_page(vma))
+ walk_page_range(mm, vma->vm_start, vma->vm_end,
+ &clear_refs_walk, vma);
+ flush_tlb_mm(mm);
+ up_read(&mm->mmap_sem);
+ mmput(mm);
}
+ put_task_struct(task);
+ if (end - buffer == 0)
+ return -EIO;
+ return end - buffer;
}
-static inline void walk_pud_range(struct pmd_walker *walker, pgd_t *pgd,
- unsigned long addr, unsigned long end)
-{
- pud_t *pud;
- unsigned long next;
+const struct file_operations proc_clear_refs_operations = {
+ .write = clear_refs_write,
+};
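
For reference, the write handler above accepts any non-zero decimal value, so resetting the referenced bits for a task is a single write; afterwards the "Referenced:" line in smaps starts accumulating again. A hypothetical userspace snippet (the path is made up):

	#include <fcntl.h>
	#include <unistd.h>

	static void clear_refs(const char *path)	/* e.g. "/proc/1234/clear_refs" */
	{
		int fd = open(path, O_WRONLY);

		if (fd >= 0) {
			write(fd, "1", 1);
			close(fd);
		}
	}
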
- for (pud = pud_offset(pgd, addr); addr != end;
- pud++, addr = next) {
- next = pud_addr_end(addr, end);
- if (pud_none_or_clear_bad(pud))
- continue;
- walk_pmd_range(walker, pud, addr, next);
+struct pagemapread {
+ char __user *out, *end;
+};
+
+#define PM_ENTRY_BYTES sizeof(u64)
+#define PM_RESERVED_BITS 3
+#define PM_RESERVED_OFFSET (64 - PM_RESERVED_BITS)
+#define PM_RESERVED_MASK (((1LL<<PM_RESERVED_BITS)-1) << PM_RESERVED_OFFSET)
+#define PM_SPECIAL(nr) (((nr) << PM_RESERVED_OFFSET) | PM_RESERVED_MASK)
+#define PM_NOT_PRESENT PM_SPECIAL(1LL)
+#define PM_SWAP PM_SPECIAL(2LL)
+#define PM_END_OF_BUFFER 1
+
+static int add_to_pagemap(unsigned long addr, u64 pfn,
+ struct pagemapread *pm)
+{
+ /*
+ * Make sure there's room in the buffer for an
+ * entire entry. Otherwise, only copy part of
+ * the pfn.
+ */
+ if (pm->out + PM_ENTRY_BYTES >= pm->end) {
+ if (copy_to_user(pm->out, &pfn, pm->end - pm->out))
+ return -EFAULT;
+ pm->out = pm->end;
+ return PM_END_OF_BUFFER;
}
+
+ if (put_user(pfn, pm->out))
+ return -EFAULT;
+ pm->out += PM_ENTRY_BYTES;
+ return 0;
}
-/*
- * walk_page_range - walk the page tables of a VMA with a callback
- * @vma - VMA to walk
- * @action - callback invoked for every bottom-level (PTE) page table
- * @private - private data passed to the callback function
- *
- * Recursively walk the page table for the memory area in a VMA, calling
- * a callback for every bottom-level (PTE) page table.
- */
-static inline void walk_page_range(struct vm_area_struct *vma,
- void (*action)(struct vm_area_struct *,
- pmd_t *, unsigned long,
- unsigned long, void *),
- void *private)
+static int pagemap_pte_hole(unsigned long start, unsigned long end,
+ void *private)
{
- unsigned long addr = vma->vm_start;
- unsigned long end = vma->vm_end;
- struct pmd_walker walker = {
- .vma = vma,
- .private = private,
- .action = action,
- };
- pgd_t *pgd;
- unsigned long next;
-
- for (pgd = pgd_offset(vma->vm_mm, addr); addr != end;
- pgd++, addr = next) {
- next = pgd_addr_end(addr, end);
- if (pgd_none_or_clear_bad(pgd))
- continue;
- walk_pud_range(&walker, pgd, addr, next);
+ struct pagemapread *pm = private;
+ unsigned long addr;
+ int err = 0;
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
+ if (err)
+ break;
}
+ return err;
}
-static int show_smap(struct seq_file *m, void *v)
+u64 swap_pte_to_pagemap_entry(pte_t pte)
{
- struct vm_area_struct *vma = v;
- struct mem_size_stats mss;
-
- memset(&mss, 0, sizeof mss);
- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
- walk_page_range(vma, smaps_pte_range, &mss);
- return show_map_internal(m, v, &mss);
+ swp_entry_t e = pte_to_swp_entry(pte);
+ return PM_SWAP | swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
-void clear_refs_smap(struct mm_struct *mm)
+static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ void *private)
{
- struct vm_area_struct *vma;
+ struct pagemapread *pm = private;
+ pte_t *pte;
+ int err = 0;
+
+ for (; addr != end; addr += PAGE_SIZE) {
+ u64 pfn = PM_NOT_PRESENT;
+ pte = pte_offset_map(pmd, addr);
+ if (is_swap_pte(*pte))
+ pfn = swap_pte_to_pagemap_entry(*pte);
+ else if (pte_present(*pte))
+ pfn = pte_pfn(*pte);
+ /* unmap so we're not in atomic when we copy to userspace */
+ pte_unmap(pte);
+ err = add_to_pagemap(addr, pfn, pm);
+ if (err)
+ return err;
+ }
- down_read(&mm->mmap_sem);
- for (vma = mm->mmap; vma; vma = vma->vm_next)
- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
- walk_page_range(vma, clear_refs_pte_range, NULL);
- flush_tlb_mm(mm);
- up_read(&mm->mmap_sem);
+ cond_resched();
+
+ return err;
}
-static void *m_start(struct seq_file *m, loff_t *pos)
+static struct mm_walk pagemap_walk = {
+ .pmd_entry = pagemap_pte_range,
+ .pte_hole = pagemap_pte_hole
+};
+
+/*
+ * /proc/pid/pagemap - an array mapping virtual pages to pfns
+ *
+ * For each page in the address space, this file contains one 64-bit
+ * entry representing the corresponding physical page frame number
+ * (PFN) if the page is present. If there is a swap entry for the
+ * physical page, then an encoding of the swap file number and the
+ * page's offset into the swap file are returned. If no page is
+ * present at all, PM_NOT_PRESENT is returned. This allows determining
+ * precisely which pages are mapped (or in swap) and comparing mapped
+ * pages between processes.
+ *
+ * Efficient users of this interface will use /proc/pid/maps to
+ * determine which areas of memory are actually mapped and llseek to
+ * skip over unmapped regions.
+ */
+static ssize_t pagemap_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- struct proc_maps_private *priv = m->private;
- unsigned long last_addr = m->version;
+ struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ struct page **pages, *page;
+ unsigned long uaddr, uend;
struct mm_struct *mm;
- struct vm_area_struct *vma, *tail_vma = NULL;
- loff_t l = *pos;
-
- /* Clear the per syscall fields in priv */
- priv->task = NULL;
- priv->tail_vma = NULL;
+ struct pagemapread pm;
+ int pagecount;
+ int ret = -ESRCH;
- /*
- * We remember last_addr rather than next_addr to hit with
- * mmap_cache most of the time. We have zero last_addr at
- * the beginning and also after lseek. We will have -1 last_addr
- * after the end of the vmas.
- */
+ if (!task)
+ goto out;
- if (last_addr == -1UL)
- return NULL;
+ ret = -EACCES;
+ if (!ptrace_may_attach(task))
+ goto out;
- priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
- if (!priv->task)
- return NULL;
+ ret = -EINVAL;
+ /* file position must be aligned */
+ if (*ppos % PM_ENTRY_BYTES)
+ goto out;
- mm = mm_for_maps(priv->task);
+ ret = 0;
+ mm = get_task_mm(task);
if (!mm)
- return NULL;
-
- priv->tail_vma = tail_vma = get_gate_vma(priv->task);
-
- /* Start with last addr hint */
- if (last_addr && (vma = find_vma(mm, last_addr))) {
- vma = vma->vm_next;
goto out;
- }
- /*
- * Check the vma index is within the range and do
- * sequential scan until m_index.
- */
- vma = NULL;
- if ((unsigned long)l < mm->map_count) {
- vma = mm->mmap;
- while (l-- && vma)
- vma = vma->vm_next;
- goto out;
- }
+ ret = -ENOMEM;
+ uaddr = (unsigned long)buf & PAGE_MASK;
+ uend = (unsigned long)(buf + count);
+ pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
+ pages = kmalloc(pagecount * sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ goto out_task;
- if (l != mm->map_count)
- tail_vma = NULL; /* After gate vma */
+ down_read(&current->mm->mmap_sem);
+ ret = get_user_pages(current, current->mm, uaddr, pagecount,
+ 1, 0, pages, NULL);
+ up_read(&current->mm->mmap_sem);
-out:
- if (vma)
- return vma;
+ if (ret < 0)
+ goto out_free;
- /* End of vmas has been reached */
- m->version = (tail_vma != NULL)? 0: -1UL;
- up_read(&mm->mmap_sem);
- mmput(mm);
- return tail_vma;
-}
+ pm.out = buf;
+ pm.end = buf + count;
-static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
-{
- if (vma && vma != priv->tail_vma) {
- struct mm_struct *mm = vma->vm_mm;
- up_read(&mm->mmap_sem);
- mmput(mm);
+ if (!ptrace_may_attach(task)) {
+ ret = -EIO;
+ } else {
+ unsigned long src = *ppos;
+ unsigned long svpfn = src / PM_ENTRY_BYTES;
+ unsigned long start_vaddr = svpfn << PAGE_SHIFT;
+ unsigned long end_vaddr = TASK_SIZE_OF(task);
+
+ /* watch out for wraparound */
+ if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
+ start_vaddr = end_vaddr;
+
+ /*
+ * The odds are that this will stop walking way
+ * before end_vaddr, because the length of the
+ * user buffer is tracked in "pm", and the walk
+ * will stop when we hit the end of the buffer.
+ */
+ ret = walk_page_range(mm, start_vaddr, end_vaddr,
+ &pagemap_walk, &pm);
+ if (ret == PM_END_OF_BUFFER)
+ ret = 0;
+ /* don't need mmap_sem for these, but this looks cleaner */
+ *ppos += pm.out - buf;
+ if (!ret)
+ ret = pm.out - buf;
}
-}
-
-static void *m_next(struct seq_file *m, void *v, loff_t *pos)
-{
- struct proc_maps_private *priv = m->private;
- struct vm_area_struct *vma = v;
- struct vm_area_struct *tail_vma = priv->tail_vma;
-
- (*pos)++;
- if (vma && (vma != tail_vma) && vma->vm_next)
- return vma->vm_next;
- vma_stop(priv, vma);
- return (vma != tail_vma)? tail_vma: NULL;
-}
-
-static void m_stop(struct seq_file *m, void *v)
-{
- struct proc_maps_private *priv = m->private;
- struct vm_area_struct *vma = v;
- vma_stop(priv, vma);
- if (priv->task)
- put_task_struct(priv->task);
-}
-
-static struct seq_operations proc_pid_maps_op = {
- .start = m_start,
- .next = m_next,
- .stop = m_stop,
- .show = show_map
-};
-
-static struct seq_operations proc_pid_smaps_op = {
- .start = m_start,
- .next = m_next,
- .stop = m_stop,
- .show = show_smap
-};
-
-static int do_maps_open(struct inode *inode, struct file *file,
- struct seq_operations *ops)
-{
- struct proc_maps_private *priv;
- int ret = -ENOMEM;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (priv) {
- priv->pid = proc_pid(inode);
- ret = seq_open(file, ops);
- if (!ret) {
- struct seq_file *m = file->private_data;
- m->private = priv;
- } else {
- kfree(priv);
- }
+ for (; pagecount; pagecount--) {
+ page = pages[pagecount-1];
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ page_cache_release(page);
}
+ mmput(mm);
+out_free:
+ kfree(pages);
+out_task:
+ put_task_struct(task);
+out:
return ret;
}
-static int maps_open(struct inode *inode, struct file *file)
-{
- return do_maps_open(inode, file, &proc_pid_maps_op);
-}
-
-const struct file_operations proc_maps_operations = {
- .open = maps_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_private,
+const struct file_operations proc_pagemap_operations = {
+ .llseek = mem_lseek, /* borrow this */
+ .read = pagemap_read,
};
+#endif /* CONFIG_PROC_PAGE_MONITOR */
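
A hedged sketch of the intended userspace pattern for pagemap, using only the layout defined in this patch: the file is indexed by virtual page number, one u64 per page, so a reader seeks to (vaddr >> PAGE_SHIFT) * 8 and interprets the entry as either a raw pfn or one of the PM_SPECIAL() values with the top bits set.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	static void show_mapping(pid_t pid, unsigned long vaddr)
	{
		char path[64];
		uint64_t entry = 0;
		long pagesize = sysconf(_SC_PAGESIZE);
		int fd;

		snprintf(path, sizeof(path), "/proc/%d/pagemap", (int)pid);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return;
		if (pread(fd, &entry, sizeof(entry),
			  (vaddr / pagesize) * sizeof(entry)) == sizeof(entry))
			printf("%#lx -> pagemap entry %#llx\n",
			       vaddr, (unsigned long long)entry);
		close(fd);
	}
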
#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);
@@ -545,15 +753,3 @@ const struct file_operations proc_numa_maps_operations = {
.release = seq_release_private,
};
#endif
-
-static int smaps_open(struct inode *inode, struct file *file)
-{
- return do_maps_open(inode, file, &proc_pid_smaps_op);
-}
-
-const struct file_operations proc_smaps_operations = {
- .open = smaps_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release_private,
-};
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 638bdb963213..b31ab78052b3 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -125,7 +125,6 @@ static int qnx4_write_inode(struct inode *inode, int unused)
static void qnx4_put_super(struct super_block *sb);
static struct inode *qnx4_alloc_inode(struct super_block *sb);
static void qnx4_destroy_inode(struct inode *inode);
-static void qnx4_read_inode(struct inode *);
static int qnx4_remount(struct super_block *sb, int *flags, char *data);
static int qnx4_statfs(struct dentry *, struct kstatfs *);
@@ -133,7 +132,6 @@ static const struct super_operations qnx4_sops =
{
.alloc_inode = qnx4_alloc_inode,
.destroy_inode = qnx4_destroy_inode,
- .read_inode = qnx4_read_inode,
.put_super = qnx4_put_super,
.statfs = qnx4_statfs,
.remount_fs = qnx4_remount,
@@ -357,6 +355,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
struct inode *root;
const char *errmsg;
struct qnx4_sb_info *qs;
+ int ret = -EINVAL;
qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
if (!qs)
@@ -396,12 +395,14 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
}
/* does root not have inode number QNX4_ROOT_INO ?? */
- root = iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK);
- if (!root) {
+ root = qnx4_iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK);
+ if (IS_ERR(root)) {
printk("qnx4: get inode failed\n");
+ ret = PTR_ERR(root);
goto out;
}
+ ret = -ENOMEM;
s->s_root = d_alloc_root(root);
if (s->s_root == NULL)
goto outi;
@@ -417,7 +418,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
outnobh:
kfree(qs);
s->s_fs_info = NULL;
- return -EINVAL;
+ return ret;
}
static void qnx4_put_super(struct super_block *sb)
@@ -462,29 +463,38 @@ static const struct address_space_operations qnx4_aops = {
.bmap = qnx4_bmap
};
-static void qnx4_read_inode(struct inode *inode)
+struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
{
struct buffer_head *bh;
struct qnx4_inode_entry *raw_inode;
- int block, ino;
- struct super_block *sb = inode->i_sb;
- struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode);
+ int block;
+ struct qnx4_inode_entry *qnx4_inode;
+ struct inode *inode;
- ino = inode->i_ino;
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ qnx4_inode = qnx4_raw_inode(inode);
inode->i_mode = 0;
QNX4DEBUG(("Reading inode : [%d]\n", ino));
if (!ino) {
- printk("qnx4: bad inode number on dev %s: %d is out of range\n",
+ printk(KERN_ERR "qnx4: bad inode number on dev %s: %lu is "
+ "out of range\n",
sb->s_id, ino);
- return;
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
block = ino / QNX4_INODES_PER_BLOCK;
if (!(bh = sb_bread(sb, block))) {
printk("qnx4: major problem: unable to read inode from dev "
"%s\n", sb->s_id);
- return;
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
raw_inode = ((struct qnx4_inode_entry *) bh->b_data) +
(ino % QNX4_INODES_PER_BLOCK);
@@ -515,9 +525,16 @@ static void qnx4_read_inode(struct inode *inode)
inode->i_op = &page_symlink_inode_operations;
inode->i_mapping->a_ops = &qnx4_aops;
qnx4_i(inode)->mmu_private = inode->i_size;
- } else
- printk("qnx4: bad inode %d on dev %s\n",ino,sb->s_id);
+ } else {
+ printk(KERN_ERR "qnx4: bad inode %lu on dev %s\n",
+ ino, sb->s_id);
+ iget_failed(inode);
+ brelse(bh);
+ return ERR_PTR(-EIO);
+ }
brelse(bh);
+ unlock_new_inode(inode);
+ return inode;
}
static struct kmem_cache *qnx4_inode_cachep;
diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c
index 733cdf01d645..775eed3a4085 100644
--- a/fs/qnx4/namei.c
+++ b/fs/qnx4/namei.c
@@ -128,10 +128,12 @@ struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nam
}
brelse(bh);
- if ((foundinode = iget(dir->i_sb, ino)) == NULL) {
+ foundinode = qnx4_iget(dir->i_sb, ino);
+ if (IS_ERR(foundinode)) {
unlock_kernel();
- QNX4DEBUG(("qnx4: lookup->iget -> NULL\n"));
- return ERR_PTR(-EACCES);
+ QNX4DEBUG(("qnx4: lookup->iget -> error %ld\n",
+ PTR_ERR(foundinode)));
+ return ERR_CAST(foundinode);
}
out:
unlock_kernel();
diff --git a/fs/quota.c b/fs/quota.c
index 99b24b52bfc8..84f28dd72116 100644
--- a/fs/quota.c
+++ b/fs/quota.c
@@ -341,11 +341,11 @@ static inline struct super_block *quotactl_block(const char __user *special)
char *tmp = getname(special);
if (IS_ERR(tmp))
- return ERR_PTR(PTR_ERR(tmp));
+ return ERR_CAST(tmp);
bdev = lookup_bdev(tmp);
putname(tmp);
if (IS_ERR(bdev))
- return ERR_PTR(PTR_ERR(bdev));
+ return ERR_CAST(bdev);
sb = get_super(bdev);
bdput(bdev);
if (!sb)
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 231fd5ccadc5..57917932212e 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1536,7 +1536,7 @@ static struct dentry *reiserfs_get_dentry(struct super_block *sb,
if (!inode)
inode = ERR_PTR(-ESTALE);
if (IS_ERR(inode))
- return ERR_PTR(PTR_ERR(inode));
+ return ERR_CAST(inode);
result = d_alloc_anon(inode);
if (!result) {
iput(inode);
@@ -2143,7 +2143,7 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
/* if we are not on a block boundary */
if (length) {
length = blocksize - length;
- zero_user_page(page, offset, length, KM_USER0);
+ zero_user(page, offset, length);
if (buffer_mapped(bh) && bh->b_blocknr != 0) {
mark_buffer_dirty(bh);
}
@@ -2367,7 +2367,7 @@ static int reiserfs_write_full_page(struct page *page,
unlock_page(page);
return 0;
}
- zero_user_page(page, last_offset, PAGE_CACHE_SIZE - last_offset, KM_USER0);
+ zero_user_segment(page, last_offset, PAGE_CACHE_SIZE);
}
bh = head;
block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 5e7388b32d02..740bb8c0c1ae 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -575,6 +575,8 @@ void print_block(struct buffer_head *bh, ...) //int print_mode, int first, int l
printk
("Block %llu contains unformatted data\n",
(unsigned long long)bh->b_blocknr);
+
+ va_end(args);
}
static char print_tb_buf[2048];
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 1597f6b649e0..eba037b3338f 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -155,7 +155,7 @@ static struct dentry *get_xa_file_dentry(const struct inode *inode,
xadir = open_xa_dir(inode, flags);
if (IS_ERR(xadir)) {
- return ERR_PTR(PTR_ERR(xadir));
+ return ERR_CAST(xadir);
} else if (xadir && !xadir->d_inode) {
dput(xadir);
return ERR_PTR(-ENODATA);
@@ -164,7 +164,7 @@ static struct dentry *get_xa_file_dentry(const struct inode *inode,
xafile = lookup_one_len(name, xadir, strlen(name));
if (IS_ERR(xafile)) {
dput(xadir);
- return ERR_PTR(PTR_ERR(xafile));
+ return ERR_CAST(xafile);
}
if (xafile->d_inode) { /* file exists */
@@ -1084,7 +1084,7 @@ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
}
/* This is the implementation for the xattr plugin infrastructure */
-static struct list_head xattr_handlers = LIST_HEAD_INIT(xattr_handlers);
+static LIST_HEAD(xattr_handlers);
static DEFINE_RWLOCK(handler_lock);
static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index a49cf5b9a195..00b6f0a518c8 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -84,6 +84,8 @@ struct romfs_inode_info {
struct inode vfs_inode;
};
+static struct inode *romfs_iget(struct super_block *, unsigned long);
+
/* instead of private superblock data */
static inline unsigned long romfs_maxsize(struct super_block *sb)
{
@@ -117,7 +119,7 @@ static int romfs_fill_super(struct super_block *s, void *data, int silent)
struct buffer_head *bh;
struct romfs_super_block *rsb;
struct inode *root;
- int sz;
+ int sz, ret = -EINVAL;
/* I would parse the options here, but there are none.. :) */
@@ -157,10 +159,13 @@ static int romfs_fill_super(struct super_block *s, void *data, int silent)
& ROMFH_MASK;
s->s_op = &romfs_ops;
- root = iget(s, sz);
- if (!root)
+ root = romfs_iget(s, sz);
+ if (IS_ERR(root)) {
+ ret = PTR_ERR(root);
goto out;
+ }
+ ret = -ENOMEM;
s->s_root = d_alloc_root(root);
if (!s->s_root)
goto outiput;
@@ -173,7 +178,7 @@ outiput:
out:
brelse(bh);
outnobh:
- return -EINVAL;
+ return ret;
}
/* That's simple too. */
@@ -389,8 +394,11 @@ romfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
if ((be32_to_cpu(ri.next) & ROMFH_TYPE) == ROMFH_HRD)
offset = be32_to_cpu(ri.spec) & ROMFH_MASK;
- if ((inode = iget(dir->i_sb, offset)))
- goto outi;
+ inode = romfs_iget(dir->i_sb, offset);
+ if (IS_ERR(inode)) {
+ res = PTR_ERR(inode);
+ goto out;
+ }
/*
* it's a bit funky, _lookup needs to return an error code
@@ -402,7 +410,7 @@ romfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
*/
out0: inode = NULL;
-outi: res = 0;
+ res = 0;
d_add (dentry, inode);
out: unlock_kernel();
@@ -478,20 +486,29 @@ static mode_t romfs_modemap[] =
S_IFBLK+0600, S_IFCHR+0600, S_IFSOCK+0644, S_IFIFO+0644
};
-static void
-romfs_read_inode(struct inode *i)
+static struct inode *
+romfs_iget(struct super_block *sb, unsigned long ino)
{
- int nextfh, ino;
+ int nextfh;
struct romfs_inode ri;
+ struct inode *i;
+
+ ino &= ROMFH_MASK;
+ i = iget_locked(sb, ino);
+ if (!i)
+ return ERR_PTR(-ENOMEM);
+ if (!(i->i_state & I_NEW))
+ return i;
- ino = i->i_ino & ROMFH_MASK;
i->i_mode = 0;
/* Loop for finding the real hard link */
for(;;) {
if (romfs_copyfrom(i, &ri, ino, ROMFH_SIZE) <= 0) {
- printk("romfs: read error for inode 0x%x\n", ino);
- return;
+ printk(KERN_ERR "romfs: read error for inode 0x%lx\n",
+ ino);
+ iget_failed(i);
+ return ERR_PTR(-EIO);
}
/* XXX: do romfs_checksum here too (with name) */
@@ -548,6 +565,8 @@ romfs_read_inode(struct inode *i)
init_special_inode(i, ino,
MKDEV(nextfh>>16,nextfh&0xffff));
}
+ unlock_new_inode(i);
+ return i;
}
static struct kmem_cache * romfs_inode_cachep;
@@ -599,7 +618,6 @@ static int romfs_remount(struct super_block *sb, int *flags, char *data)
static const struct super_operations romfs_ops = {
.alloc_inode = romfs_alloc_inode,
.destroy_inode = romfs_destroy_inode,
- .read_inode = romfs_read_inode,
.statfs = romfs_statfs,
.remount_fs = romfs_remount,
};
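The romfs change follows the tree-wide pattern of dropping ->read_inode() in favour of a filesystem-specific foo_iget(): callers switch from iget() to the new helper and test IS_ERR() instead of a NULL inode. A hedged skeleton of that pattern (fs_read_raw_inode() is a hypothetical stand-in for the on-disk read; error values are illustrative):

#include <linux/fs.h>
#include <linux/err.h>

static int fs_read_raw_inode(struct inode *inode);	/* hypothetical */

struct inode *foo_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* already cached and initialized */

	if (fs_read_raw_inode(inode)) {
		iget_failed(inode);
		return ERR_PTR(-EIO);
	}
	unlock_new_inode(inode);
	return inode;
}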
diff --git a/fs/select.c b/fs/select.c
index 47f47925aea2..5633fe980781 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -739,7 +739,7 @@ asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
timeout_jiffies = -1;
else
#endif
- timeout_jiffies = msecs_to_jiffies(timeout_msecs);
+ timeout_jiffies = msecs_to_jiffies(timeout_msecs) + 1;
} else {
/* Infinite (< 0) or no (0) timeout */
timeout_jiffies = timeout_msecs;
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 2d3e107da2d3..cb2b63ae0bf4 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/signalfd.h>
+#include <linux/syscalls.h>
struct signalfd_ctx {
sigset_t sigmask;
diff --git a/fs/smbfs/inode.c b/fs/smbfs/inode.c
index 9416ead0c7aa..4e5c22ca802e 100644
--- a/fs/smbfs/inode.c
+++ b/fs/smbfs/inode.c
@@ -500,6 +500,13 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent)
struct smb_fattr root;
int ver;
void *mem;
+ static int warn_count;
+
+ if (warn_count < 5) {
+ warn_count++;
+ printk(KERN_EMERG "smbfs is deprecated and will be removed"
+ "from the 2.6.27 kernel. Please migrate to cifs\n");
+ }
if (!raw_data)
goto out_no_data;
diff --git a/fs/smbfs/sock.c b/fs/smbfs/sock.c
index e48bd8235a8e..e37fe4deebd0 100644
--- a/fs/smbfs/sock.c
+++ b/fs/smbfs/sock.c
@@ -329,9 +329,8 @@ smb_receive(struct smb_sb_info *server, struct smb_request *req)
msg.msg_control = NULL;
/* Dont repeat bytes and count available bufferspace */
- rlen = smb_move_iov(&p, &num, iov, req->rq_bytes_recvd);
- if (req->rq_rlen < rlen)
- rlen = req->rq_rlen;
+ rlen = min_t(int, smb_move_iov(&p, &num, iov, req->rq_bytes_recvd),
+ (req->rq_rlen - req->rq_bytes_recvd));
result = kernel_recvmsg(sock, &msg, p, num, rlen, flags);
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 0871c3dadce1..477904915032 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -77,7 +77,12 @@ void sysfs_remove_group(struct kobject * kobj,
if (grp->name) {
sd = sysfs_get_dirent(dir_sd, grp->name);
- BUG_ON(!sd);
+ if (!sd) {
+ printk(KERN_WARNING "sysfs group %p not found for "
+ "kobject '%s'\n", grp, kobject_name(kobj));
+ WARN_ON(!sd);
+ return;
+ }
} else
sd = sysfs_get(dir_sd);
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 81ec6c548c07..c5d60de0658f 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -169,20 +169,27 @@ void sysv_set_inode(struct inode *inode, dev_t rdev)
init_special_inode(inode, inode->i_mode, rdev);
}
-static void sysv_read_inode(struct inode *inode)
+struct inode *sysv_iget(struct super_block *sb, unsigned int ino)
{
- struct super_block * sb = inode->i_sb;
struct sysv_sb_info * sbi = SYSV_SB(sb);
struct buffer_head * bh;
struct sysv_inode * raw_inode;
struct sysv_inode_info * si;
- unsigned int block, ino = inode->i_ino;
+ struct inode *inode;
+ unsigned int block;
if (!ino || ino > sbi->s_ninodes) {
printk("Bad inode number on dev %s: %d is out of range\n",
- inode->i_sb->s_id, ino);
- goto bad_inode;
+ sb->s_id, ino);
+ return ERR_PTR(-EIO);
}
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
raw_inode = sysv_raw_inode(sb, ino, &bh);
if (!raw_inode) {
printk("Major problem: unable to read inode from dev %s\n",
@@ -214,11 +221,12 @@ static void sysv_read_inode(struct inode *inode)
old_decode_dev(fs32_to_cpu(sbi, si->i_data[0])));
else
sysv_set_inode(inode, 0);
- return;
+ unlock_new_inode(inode);
+ return inode;
bad_inode:
- make_bad_inode(inode);
- return;
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
static struct buffer_head * sysv_update_inode(struct inode * inode)
@@ -328,7 +336,6 @@ static void init_once(struct kmem_cache *cachep, void *p)
const struct super_operations sysv_sops = {
.alloc_inode = sysv_alloc_inode,
.destroy_inode = sysv_destroy_inode,
- .read_inode = sysv_read_inode,
.write_inode = sysv_write_inode,
.delete_inode = sysv_delete_inode,
.put_super = sysv_put_super,
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 6bd850b7641a..a1f1ef33e81c 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -53,9 +53,9 @@ static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, st
ino = sysv_inode_by_name(dentry);
if (ino) {
- inode = iget(dir->i_sb, ino);
- if (!inode)
- return ERR_PTR(-EACCES);
+ inode = sysv_iget(dir->i_sb, ino);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
}
d_add(dentry, inode);
return NULL;
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 6f9707a1b954..5a903da54551 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -332,8 +332,8 @@ static int complete_read_super(struct super_block *sb, int silent, int size)
sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type;
/* set up enough so that it can read an inode */
sb->s_op = &sysv_sops;
- root_inode = iget(sb,SYSV_ROOT_INO);
- if (!root_inode || is_bad_inode(root_inode)) {
+ root_inode = sysv_iget(sb, SYSV_ROOT_INO);
+ if (IS_ERR(root_inode)) {
printk("SysV FS: get root inode failed\n");
return 0;
}
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 64c03bdf06a5..42d51d1c05cd 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -141,6 +141,7 @@ extern int __sysv_write_begin(struct file *file, struct address_space *mapping,
struct page **pagep, void **fsdata);
/* inode.c */
+extern struct inode *sysv_iget(struct super_block *, unsigned int);
extern int sysv_write_inode(struct inode *, int);
extern int sysv_sync_inode(struct inode *);
extern int sysv_sync_file(struct file *, struct dentry *, int);
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 61983f3b107c..10c80b59ec4b 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -25,13 +25,15 @@ struct timerfd_ctx {
struct hrtimer tmr;
ktime_t tintv;
wait_queue_head_t wqh;
+ u64 ticks;
int expired;
+ int clockid;
};
/*
* This gets called when the timer event triggers. We set the "expired"
* flag, but we do not re-arm the timer (in case it's necessary,
- * tintv.tv64 != 0) until the timer is read.
+ * tintv.tv64 != 0) until the timer is accessed.
*/
static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
{
@@ -40,13 +42,24 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
spin_lock_irqsave(&ctx->wqh.lock, flags);
ctx->expired = 1;
+ ctx->ticks++;
wake_up_locked(&ctx->wqh);
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
return HRTIMER_NORESTART;
}
-static void timerfd_setup(struct timerfd_ctx *ctx, int clockid, int flags,
+static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
+{
+ ktime_t now, remaining;
+
+ now = ctx->tmr.base->get_time();
+ remaining = ktime_sub(ctx->tmr.expires, now);
+
+ return remaining.tv64 < 0 ? ktime_set(0, 0) : remaining;
+}
+
+static void timerfd_setup(struct timerfd_ctx *ctx, int flags,
const struct itimerspec *ktmr)
{
enum hrtimer_mode htmode;
@@ -57,8 +70,9 @@ static void timerfd_setup(struct timerfd_ctx *ctx, int clockid, int flags,
texp = timespec_to_ktime(ktmr->it_value);
ctx->expired = 0;
+ ctx->ticks = 0;
ctx->tintv = timespec_to_ktime(ktmr->it_interval);
- hrtimer_init(&ctx->tmr, clockid, htmode);
+ hrtimer_init(&ctx->tmr, ctx->clockid, htmode);
ctx->tmr.expires = texp;
ctx->tmr.function = timerfd_tmrproc;
if (texp.tv64 != 0)
@@ -83,7 +97,7 @@ static unsigned int timerfd_poll(struct file *file, poll_table *wait)
poll_wait(file, &ctx->wqh, wait);
spin_lock_irqsave(&ctx->wqh.lock, flags);
- if (ctx->expired)
+ if (ctx->ticks)
events |= POLLIN;
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
@@ -102,11 +116,11 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
return -EINVAL;
spin_lock_irq(&ctx->wqh.lock);
res = -EAGAIN;
- if (!ctx->expired && !(file->f_flags & O_NONBLOCK)) {
+ if (!ctx->ticks && !(file->f_flags & O_NONBLOCK)) {
__add_wait_queue(&ctx->wqh, &wait);
for (res = 0;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (ctx->expired) {
+ if (ctx->ticks) {
res = 0;
break;
}
@@ -121,22 +135,21 @@ static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
__remove_wait_queue(&ctx->wqh, &wait);
__set_current_state(TASK_RUNNING);
}
- if (ctx->expired) {
- ctx->expired = 0;
- if (ctx->tintv.tv64 != 0) {
+ if (ctx->ticks) {
+ ticks = ctx->ticks;
+ if (ctx->expired && ctx->tintv.tv64) {
/*
* If tintv.tv64 != 0, this is a periodic timer that
* needs to be re-armed. We avoid doing it in the timer
* callback to avoid DoS attacks specifying a very
* short timer period.
*/
- ticks = (u64)
- hrtimer_forward(&ctx->tmr,
- hrtimer_cb_get_time(&ctx->tmr),
- ctx->tintv);
+ ticks += hrtimer_forward_now(&ctx->tmr,
+ ctx->tintv) - 1;
hrtimer_restart(&ctx->tmr);
- } else
- ticks = 1;
+ }
+ ctx->expired = 0;
+ ctx->ticks = 0;
}
spin_unlock_irq(&ctx->wqh.lock);
if (ticks)
@@ -150,76 +163,132 @@ static const struct file_operations timerfd_fops = {
.read = timerfd_read,
};
-asmlinkage long sys_timerfd(int ufd, int clockid, int flags,
- const struct itimerspec __user *utmr)
+static struct file *timerfd_fget(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (!file)
+ return ERR_PTR(-EBADF);
+ if (file->f_op != &timerfd_fops) {
+ fput(file);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return file;
+}
+
+asmlinkage long sys_timerfd_create(int clockid, int flags)
{
- int error;
+ int error, ufd;
struct timerfd_ctx *ctx;
struct file *file;
struct inode *inode;
- struct itimerspec ktmr;
-
- if (copy_from_user(&ktmr, utmr, sizeof(ktmr)))
- return -EFAULT;
+ if (flags)
+ return -EINVAL;
if (clockid != CLOCK_MONOTONIC &&
clockid != CLOCK_REALTIME)
return -EINVAL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ init_waitqueue_head(&ctx->wqh);
+ ctx->clockid = clockid;
+ hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS);
+
+ error = anon_inode_getfd(&ufd, &inode, &file, "[timerfd]",
+ &timerfd_fops, ctx);
+ if (error) {
+ kfree(ctx);
+ return error;
+ }
+
+ return ufd;
+}
+
+asmlinkage long sys_timerfd_settime(int ufd, int flags,
+ const struct itimerspec __user *utmr,
+ struct itimerspec __user *otmr)
+{
+ struct file *file;
+ struct timerfd_ctx *ctx;
+ struct itimerspec ktmr, kotmr;
+
+ if (copy_from_user(&ktmr, utmr, sizeof(ktmr)))
+ return -EFAULT;
+
if (!timespec_valid(&ktmr.it_value) ||
!timespec_valid(&ktmr.it_interval))
return -EINVAL;
- if (ufd == -1) {
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- init_waitqueue_head(&ctx->wqh);
-
- timerfd_setup(ctx, clockid, flags, &ktmr);
-
- /*
- * When we call this, the initialization must be complete, since
- * anon_inode_getfd() will install the fd.
- */
- error = anon_inode_getfd(&ufd, &inode, &file, "[timerfd]",
- &timerfd_fops, ctx);
- if (error)
- goto err_tmrcancel;
- } else {
- file = fget(ufd);
- if (!file)
- return -EBADF;
- ctx = file->private_data;
- if (file->f_op != &timerfd_fops) {
- fput(file);
- return -EINVAL;
- }
- /*
- * We need to stop the existing timer before reprogramming
- * it to the new values.
- */
- for (;;) {
- spin_lock_irq(&ctx->wqh.lock);
- if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
- break;
- spin_unlock_irq(&ctx->wqh.lock);
- cpu_relax();
- }
- /*
- * Re-program the timer to the new value ...
- */
- timerfd_setup(ctx, clockid, flags, &ktmr);
+ file = timerfd_fget(ufd);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ ctx = file->private_data;
+ /*
+ * We need to stop the existing timer before reprogramming
+ * it to the new values.
+ */
+ for (;;) {
+ spin_lock_irq(&ctx->wqh.lock);
+ if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
+ break;
spin_unlock_irq(&ctx->wqh.lock);
- fput(file);
+ cpu_relax();
}
- return ufd;
+ /*
+ * If the timer is expired and it's periodic, we need to advance it
+ * because the caller may want to know the previous expiration time.
+ * We do not update "ticks" and "expired" since the timer will be
+ * re-programmed again in the following timerfd_setup() call.
+ */
+ if (ctx->expired && ctx->tintv.tv64)
+ hrtimer_forward_now(&ctx->tmr, ctx->tintv);
-err_tmrcancel:
- hrtimer_cancel(&ctx->tmr);
- kfree(ctx);
- return error;
+ kotmr.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
+ kotmr.it_interval = ktime_to_timespec(ctx->tintv);
+
+ /*
+ * Re-program the timer to the new value ...
+ */
+ timerfd_setup(ctx, flags, &ktmr);
+
+ spin_unlock_irq(&ctx->wqh.lock);
+ fput(file);
+ if (otmr && copy_to_user(otmr, &kotmr, sizeof(kotmr)))
+ return -EFAULT;
+
+ return 0;
+}
+
+asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr)
+{
+ struct file *file;
+ struct timerfd_ctx *ctx;
+ struct itimerspec kotmr;
+
+ file = timerfd_fget(ufd);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+ ctx = file->private_data;
+
+ spin_lock_irq(&ctx->wqh.lock);
+ if (ctx->expired && ctx->tintv.tv64) {
+ ctx->expired = 0;
+ ctx->ticks +=
+ hrtimer_forward_now(&ctx->tmr, ctx->tintv) - 1;
+ hrtimer_restart(&ctx->tmr);
+ }
+ kotmr.it_value = ktime_to_timespec(timerfd_get_remaining(ctx));
+ kotmr.it_interval = ktime_to_timespec(ctx->tintv);
+ spin_unlock_irq(&ctx->wqh.lock);
+ fput(file);
+
+ return copy_to_user(otmr, &kotmr, sizeof(kotmr)) ? -EFAULT : 0;
}
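For context, the split of the old sys_timerfd() into timerfd_create()/timerfd_settime()/timerfd_gettime() maps onto a userspace sequence like the sketch below. It assumes the eventual <sys/timerfd.h> glibc wrappers; on a tree this old the raw syscall() interface may be needed instead.

#include <sys/timerfd.h>
#include <time.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct itimerspec its = {
		.it_value    = { .tv_sec = 1, .tv_nsec = 0 },	/* first expiry */
		.it_interval = { .tv_sec = 1, .tv_nsec = 0 },	/* then periodic */
	};
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0)
		return 1;
	/* read() blocks until at least one expiry and returns the expiry
	 * count, i.e. the ctx->ticks accumulated in timerfd_read() above. */
	if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
		printf("expired %llu times\n", (unsigned long long)expirations);
	close(fd);
	return 0;
}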
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 4320782761ae..489f26bc26d9 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -714,26 +714,30 @@ static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
return 0;
}
-void ufs_read_inode(struct inode * inode)
+struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
- struct ufs_inode_info *ufsi = UFS_I(inode);
- struct super_block * sb;
- struct ufs_sb_private_info * uspi;
+ struct ufs_inode_info *ufsi;
+ struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
struct buffer_head * bh;
+ struct inode *inode;
int err;
- UFSD("ENTER, ino %lu\n", inode->i_ino);
-
- sb = inode->i_sb;
- uspi = UFS_SB(sb)->s_uspi;
+ UFSD("ENTER, ino %lu\n", ino);
- if (inode->i_ino < UFS_ROOTINO ||
- inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
+ if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
- inode->i_ino);
- goto bad_inode;
+ ino);
+ return ERR_PTR(-EIO);
}
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ ufsi = UFS_I(inode);
+
bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
if (!bh) {
ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
@@ -765,10 +769,12 @@ void ufs_read_inode(struct inode * inode)
brelse(bh);
UFSD("EXIT\n");
- return;
+ unlock_new_inode(inode);
+ return inode;
bad_inode:
- make_bad_inode(inode);
+ iget_failed(inode);
+ return ERR_PTR(-EIO);
}
static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index d8bfbee2fe2b..747a4de6c695 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -57,10 +57,10 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru
lock_kernel();
ino = ufs_inode_by_name(dir, dentry);
if (ino) {
- inode = iget(dir->i_sb, ino);
- if (!inode) {
+ inode = ufs_iget(dir->i_sb, ino);
+ if (IS_ERR(inode)) {
unlock_kernel();
- return ERR_PTR(-EACCES);
+ return ERR_CAST(inode);
}
}
unlock_kernel();
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 0072cb33ebec..73deff475e63 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -633,6 +633,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
unsigned block_size, super_block_size;
unsigned flags;
unsigned super_block_offset;
+ int ret = -EINVAL;
uspi = NULL;
ubh = NULL;
@@ -1065,12 +1066,16 @@ magic_found:
uspi->s_maxsymlinklen =
fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_maxsymlinklen);
- inode = iget(sb, UFS_ROOTINO);
- if (!inode || is_bad_inode(inode))
+ inode = ufs_iget(sb, UFS_ROOTINO);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
goto failed;
+ }
sb->s_root = d_alloc_root(inode);
- if (!sb->s_root)
+ if (!sb->s_root) {
+ ret = -ENOMEM;
goto dalloc_failed;
+ }
ufs_setup_cstotal(sb);
/*
@@ -1092,7 +1097,7 @@ failed:
kfree(sbi);
sb->s_fs_info = NULL;
UFSD("EXIT (FAILED)\n");
- return -EINVAL;
+ return ret;
failed_nomem:
UFSD("EXIT (NOMEM)\n");
@@ -1326,7 +1331,6 @@ static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t,
static const struct super_operations ufs_super_ops = {
.alloc_inode = ufs_alloc_inode,
.destroy_inode = ufs_destroy_inode,
- .read_inode = ufs_read_inode,
.write_inode = ufs_write_inode,
.delete_inode = ufs_delete_inode,
.put_super = ufs_put_super,
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 7faa4cd71a27..fcb9231bb9ed 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -106,7 +106,7 @@ extern void ufs_free_inode (struct inode *inode);
extern struct inode * ufs_new_inode (struct inode *, int);
/* inode.c */
-extern void ufs_read_inode (struct inode *);
+extern struct inode *ufs_iget(struct super_block *, unsigned long);
extern void ufs_put_inode (struct inode *);
extern int ufs_write_inode (struct inode *, int);
extern int ufs_sync_inode (struct inode *);
diff --git a/fs/utimes.c b/fs/utimes.c
index b9912ecbee24..e5588cd8530e 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/utime.h>
+#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
diff --git a/fs/vfat/namei.c b/fs/vfat/namei.c
index c28add2fbe95..cd450bea9f1a 100644
--- a/fs/vfat/namei.c
+++ b/fs/vfat/namei.c
@@ -705,7 +705,7 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
brelse(sinfo.bh);
if (IS_ERR(inode)) {
unlock_kernel();
- return ERR_PTR(PTR_ERR(inode));
+ return ERR_CAST(inode);
}
alias = d_find_alias(inode);
if (alias) {
diff --git a/fs/xattr.c b/fs/xattr.c
index 6645b7313b33..f7c8f87bb390 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -105,6 +105,33 @@ out:
EXPORT_SYMBOL_GPL(vfs_setxattr);
ssize_t
+xattr_getsecurity(struct inode *inode, const char *name, void *value,
+ size_t size)
+{
+ void *buffer = NULL;
+ ssize_t len;
+
+ if (!value || !size) {
+ len = security_inode_getsecurity(inode, name, &buffer, false);
+ goto out_noalloc;
+ }
+
+ len = security_inode_getsecurity(inode, name, &buffer, true);
+ if (len < 0)
+ return len;
+ if (size < len) {
+ len = -ERANGE;
+ goto out;
+ }
+ memcpy(value, buffer, len);
+out:
+ security_release_secctx(buffer, len);
+out_noalloc:
+ return len;
+}
+EXPORT_SYMBOL_GPL(xattr_getsecurity);
+
+ssize_t
vfs_getxattr(struct dentry *dentry, char *name, void *value, size_t size)
{
struct inode *inode = dentry->d_inode;
@@ -118,23 +145,23 @@ vfs_getxattr(struct dentry *dentry, char *name, void *value, size_t size)
if (error)
return error;
- if (inode->i_op->getxattr)
- error = inode->i_op->getxattr(dentry, name, value, size);
- else
- error = -EOPNOTSUPP;
-
if (!strncmp(name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN)) {
const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
- int ret = security_inode_getsecurity(inode, suffix, value,
- size, error);
+ int ret = xattr_getsecurity(inode, suffix, value, size);
/*
* Only overwrite the return value if a security module
* is actually active.
*/
- if (ret != -EOPNOTSUPP)
- error = ret;
+ if (ret == -EOPNOTSUPP)
+ goto nolsm;
+ return ret;
}
+nolsm:
+ if (inode->i_op->getxattr)
+ error = inode->i_op->getxattr(dentry, name, value, size);
+ else
+ error = -EOPNOTSUPP;
return error;
}
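With the reordering in vfs_getxattr(), security.* names are now answered by the LSM first (via the new xattr_getsecurity()) and only fall back to the filesystem's ->getxattr() when the LSM returns -EOPNOTSUPP. From userspace this is the ordinary getxattr(2) path; a minimal sketch, where "security.selinux" is only an example attribute name:

#include <sys/types.h>
#include <sys/xattr.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	char value[256];
	ssize_t len = getxattr(argc > 1 ? argv[1] : "/", "security.selinux",
			       value, sizeof(value) - 1);

	if (len < 0) {
		perror("getxattr");
		return 1;
	}
	value[len] = '\0';	/* xattr values are not NUL-terminated */
	printf("%s\n", value);
	return 0;
}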
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index ed2b16dff914..e040f1ce1b6a 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -92,8 +92,7 @@ kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
void
kmem_free(void *ptr, size_t size)
{
- if (((unsigned long)ptr < VMALLOC_START) ||
- ((unsigned long)ptr >= VMALLOC_END)) {
+ if (!is_vmalloc_addr(ptr)) {
kfree(ptr);
} else {
vfree(ptr);
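is_vmalloc_addr() replaces the open-coded VMALLOC_START/VMALLOC_END range checks here and in mem_to_page() below. A generic free helper built on it looks like this sketch (illustrative only, not XFS code):

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

/* Free a buffer that may have come from either kmalloc() or vmalloc(). */
static void example_free(void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		kfree(ptr);
}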
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 302273f8e2a9..e347bfd47c91 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -700,8 +700,7 @@ static inline struct page *
mem_to_page(
void *addr)
{
- if (((unsigned long)addr < VMALLOC_START) ||
- ((unsigned long)addr >= VMALLOC_END)) {
+ if (!is_vmalloc_addr(addr)) {
return virt_to_page(addr);
} else {
return vmalloc_to_page(addr);
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 6c3a846a5267..166353388490 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -152,7 +152,7 @@ xfs_iozero(
if (status)
break;
- zero_user_page(page, offset, bytes, KM_USER0);
+ zero_user(page, offset, bytes);
status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
page, fsdata);
diff --git a/include/acpi/acglobal.h b/include/acpi/acglobal.h
index 347a911d8237..47a1fd8f2d8a 100644
--- a/include/acpi/acglobal.h
+++ b/include/acpi/acglobal.h
@@ -117,10 +117,6 @@ extern u32 acpi_dbg_layer;
extern u32 acpi_gbl_nesting_level;
-/* Event counters */
-
-ACPI_EXTERN u32 acpi_gpe_count;
-
/* Support for dynamic control method tracing mechanism */
ACPI_EXTERN u32 acpi_gbl_original_dbg_level;
diff --git a/include/acpi/acmacros.h b/include/acpi/acmacros.h
index 45662f6dbdb6..99d171c87c84 100644
--- a/include/acpi/acmacros.h
+++ b/include/acpi/acmacros.h
@@ -486,7 +486,7 @@
#define ACPI_FUNCTION_NAME(name)
#endif
-#ifdef DEBUG_FUNC_TRACE
+#ifdef CONFIG_ACPI_DEBUG_FUNC_TRACE
#define ACPI_FUNCTION_TRACE(a) ACPI_FUNCTION_NAME(a) \
acpi_ut_trace(ACPI_DEBUG_PARAMETERS)
@@ -565,7 +565,7 @@
#endif /* ACPI_SIMPLE_RETURN_MACROS */
-#else /* !DEBUG_FUNC_TRACE */
+#else /* !CONFIG_ACPI_DEBUG_FUNC_TRACE */
#define ACPI_FUNCTION_TRACE(a)
#define ACPI_FUNCTION_TRACE_PTR(a,b)
@@ -584,7 +584,7 @@
#define return_UINT32(s) return(s)
#define return_PTR(s) return(s)
-#endif /* DEBUG_FUNC_TRACE */
+#endif /* CONFIG_ACPI_DEBUG_FUNC_TRACE */
/* Conditional execution */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index fb7171b1bd22..2f1c68c7a727 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -321,6 +321,11 @@ struct acpi_bus_event {
extern struct kobject *acpi_kobj;
extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
+void acpi_bus_private_data_handler(acpi_handle, u32, void *);
+int acpi_bus_get_private_data(acpi_handle, void **);
+extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
+extern int register_acpi_notifier(struct notifier_block *);
+extern int unregister_acpi_notifier(struct notifier_block *);
/*
* External Functions
*/
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index f85f77a538aa..9757a040a505 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -48,6 +48,7 @@
#define ACPI_BUTTON_HID_SLEEPF "LNXSLPBN"
#define ACPI_VIDEO_HID "LNXVIDEO"
#define ACPI_BAY_HID "LNXIOBAY"
+#define ACPI_DOCK_HID "LNXDOCK"
/* --------------------------------------------------------------------------
PCI
@@ -73,7 +74,6 @@ struct pci_bus;
acpi_status acpi_get_pci_id(acpi_handle handle, struct acpi_pci_id *id);
int acpi_pci_bind(struct acpi_device *device);
-int acpi_pci_unbind(struct acpi_device *device);
int acpi_pci_bind_root(struct acpi_device *device, struct acpi_pci_id *id,
struct pci_bus *bus);
diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h
index 62c5ee4311da..173972672175 100644
--- a/include/acpi/acpi_numa.h
+++ b/include/acpi/acpi_numa.h
@@ -15,7 +15,6 @@ extern int pxm_to_node(int);
extern int node_to_pxm(int);
extern void __acpi_map_pxm_to_node(int, int);
extern int acpi_map_pxm_to_node(int);
-extern void __cpuinit acpi_unmap_pxm_to_node(int);
#endif /* CONFIG_ACPI_NUMA */
#endif /* __ACP_NUMA_H */
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index ca882b8e7d10..022a5fd80c8e 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -181,6 +181,9 @@ acpi_os_install_interrupt_handler(u32 gsi,
acpi_status
acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler service_routine);
+void acpi_os_gpe_count(u32 gpe_number);
+void acpi_os_fixed_event_count(u32 fixed_event_number);
+
/*
* Threads and Scheduling
*/
@@ -239,8 +242,8 @@ acpi_status acpi_os_validate_interface(char *interface);
acpi_status acpi_osi_invalidate(char* interface);
acpi_status
-acpi_os_validate_address(u8 space_id,
- acpi_physical_address address, acpi_size length);
+acpi_os_validate_address(u8 space_id, acpi_physical_address address,
+ acpi_size length, char *name);
u64 acpi_os_get_timer(void);
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 6e253b5b0f3b..cdc8004cfd12 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -4,7 +4,7 @@
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
-
+#include <linux/thermal.h>
#include <asm/acpi.h>
#define ACPI_PROCESSOR_BUSY_METRIC 10
@@ -34,6 +34,7 @@
#define ACPI_CSTATE_SYSTEMIO (0)
#define ACPI_CSTATE_FFH (1)
+#define ACPI_CSTATE_HALT (2)
/* Power Management */
@@ -64,7 +65,7 @@ struct acpi_processor_cx {
u8 valid;
u8 type;
u32 address;
- u8 space_id;
+ u8 entry_method;
u8 index;
u32 latency;
u32 latency_ticks;
@@ -176,6 +177,8 @@ struct acpi_processor_throttling {
u32 address;
u8 duty_offset;
u8 duty_width;
+ u8 tsd_valid_flag;
+ unsigned int shared_type;
struct acpi_processor_tx states[ACPI_PROCESSOR_MAX_THROTTLING];
};
@@ -218,7 +221,7 @@ struct acpi_processor {
struct acpi_processor_performance *performance;
struct acpi_processor_throttling throttling;
struct acpi_processor_limit limit;
-
+ struct thermal_cooling_device *cdev;
/* the _PDC objects for this processor, if any */
struct acpi_object_list *pdc;
};
@@ -316,7 +319,7 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
int acpi_processor_get_throttling_info(struct acpi_processor *pr);
extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
extern struct file_operations acpi_processor_throttling_fops;
-
+extern void acpi_processor_throttling_init(void);
/* in processor_idle.c */
int acpi_processor_power_init(struct acpi_processor *pr,
struct acpi_device *device);
@@ -330,7 +333,7 @@ extern struct cpuidle_driver acpi_idle_driver;
/* in processor_thermal.c */
int acpi_processor_get_limit_info(struct acpi_processor *pr);
extern struct file_operations acpi_processor_limit_fops;
-
+extern struct thermal_cooling_device_ops processor_cooling_ops;
#ifdef CONFIG_CPU_FREQ
void acpi_thermal_cpufreq_init(void);
void acpi_thermal_cpufreq_exit(void);
diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index f5cb7b878af2..ca88e54dec93 100644
--- a/include/asm-alpha/atomic.h
+++ b/include/asm-alpha/atomic.h
@@ -100,7 +100,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
/*
* Same as above, but return the result value
*/
-static __inline__ long atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t *v)
{
long temp, result;
smp_mb();
diff --git a/include/asm-alpha/elf.h b/include/asm-alpha/elf.h
index 4b518e3b952c..fc1002ea1e0c 100644
--- a/include/asm-alpha/elf.h
+++ b/include/asm-alpha/elf.h
@@ -144,8 +144,6 @@ extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task);
: amask (AMASK_CIX) ? "ev6" : "ev67"); \
})
-#ifdef __KERNEL__
-
#define SET_PERSONALITY(EX, IBCS2) \
set_personality(((EX).e_flags & EF_ALPHA_32BIT) \
? PER_LINUX_32BIT : (IBCS2) ? PER_SVR4 : PER_LINUX)
@@ -164,5 +162,4 @@ extern int alpha_l3_cacheshape;
NEW_AUX_ENT(AT_L3_CACHESHAPE, alpha_l3_cacheshape); \
} while (0)
-#endif /* __KERNEL__ */
#endif /* __ASM_ALPHA_ELF_H */
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index 8cc97bfd3789..05f09f997d82 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
@@ -1,8 +1,6 @@
#ifndef _ALPHA_PAGE_H
#define _ALPHA_PAGE_H
-#ifdef __KERNEL__
-
#include <linux/const.h>
#include <asm/pal.h>
@@ -98,5 +96,4 @@ typedef unsigned long pgprot_t;
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
#endif /* _ALPHA_PAGE_H */
diff --git a/include/asm-alpha/pci.h b/include/asm-alpha/pci.h
index 30ee7669b19f..d5b10ef64364 100644
--- a/include/asm-alpha/pci.h
+++ b/include/asm-alpha/pci.h
@@ -4,6 +4,7 @@
#ifdef __KERNEL__
#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
#include <asm/scatterlist.h>
#include <asm/machvec.h>
diff --git a/include/asm-alpha/pgalloc.h b/include/asm-alpha/pgalloc.h
index 471864e8d4c3..fdbedacc7375 100644
--- a/include/asm-alpha/pgalloc.h
+++ b/include/asm-alpha/pgalloc.h
@@ -31,7 +31,7 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
extern pgd_t *pgd_alloc(struct mm_struct *mm);
static inline void
-pgd_free(pgd_t *pgd)
+pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
@@ -44,7 +44,7 @@ pmd_alloc_one(struct mm_struct *mm, unsigned long address)
}
static inline void
-pmd_free(pmd_t *pmd)
+pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
free_page((unsigned long)pmd);
}
@@ -52,7 +52,7 @@ pmd_free(pmd_t *pmd)
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
static inline void
-pte_free_kernel(pte_t *pte)
+pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
@@ -67,7 +67,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
}
static inline void
-pte_free(struct page *page)
+pte_free(struct mm_struct *mm, struct page *page)
{
__free_page(page);
}
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index fd9dc889f36c..ed221d6408fc 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -681,13 +681,18 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
+#define cmpxchg64(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+ })
static inline unsigned long
__cmpxchg_u8_local(volatile char *m, long old, long new)
@@ -803,13 +808,19 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
return old;
}
-#define cmpxchg_local(ptr,o,n) \
+#define cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+
#endif /* __ASSEMBLY__ */
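The new cmpxchg64()/cmpxchg64_local() wrappers differ from cmpxchg() only by the BUILD_BUG_ON() size check, so they may only be applied to 64-bit objects. A hedged usage sketch (illustrative kernel code, not part of the patch):

#include <linux/types.h>
#include <asm/system.h>

/* Atomically increment a 64-bit counter using a cmpxchg64() retry loop. */
static void example_inc64(volatile u64 *counter)
{
	u64 old, new;

	do {
		old = *counter;
		new = old + 1;
	} while (cmpxchg64(counter, old, new) != old);
}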
diff --git a/include/asm-alpha/tlb.h b/include/asm-alpha/tlb.h
index aa91335533e0..c13636575fba 100644
--- a/include/asm-alpha/tlb.h
+++ b/include/asm-alpha/tlb.h
@@ -9,7 +9,7 @@
#include <asm-generic/tlb.h>
-#define __pte_free_tlb(tlb,pte) pte_free(pte)
-#define __pmd_free_tlb(tlb,pmd) pmd_free(pmd)
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
#endif
diff --git a/include/asm-alpha/tlbflush.h b/include/asm-alpha/tlbflush.h
index b9e9147226f7..9d87aaa08c0d 100644
--- a/include/asm-alpha/tlbflush.h
+++ b/include/asm-alpha/tlbflush.h
@@ -142,6 +142,10 @@ extern void flush_tlb_range(struct vm_area_struct *, unsigned long,
#endif /* CONFIG_SMP */
-#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
#endif /* _ALPHA_TLBFLUSH_H */
diff --git a/include/asm-alpha/unistd.h b/include/asm-alpha/unistd.h
index 29bf2fdc91c0..5b5c17485942 100644
--- a/include/asm-alpha/unistd.h
+++ b/include/asm-alpha/unistd.h
@@ -442,7 +442,6 @@
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
diff --git a/include/asm-alpha/user.h b/include/asm-alpha/user.h
index 7e417fc9d491..a4eb6a4ca8d1 100644
--- a/include/asm-alpha/user.h
+++ b/include/asm-alpha/user.h
@@ -39,7 +39,7 @@ struct user {
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
- struct regs * u_ar0; /* help gdb find registers */
+ unsigned long u_ar0; /* help gdb find registers */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
diff --git a/include/asm-arm/arch-iop13xx/adma.h b/include/asm-arm/arch-iop13xx/adma.h
index 04006c1c5fd7..efd9a5eb1008 100644
--- a/include/asm-arm/arch-iop13xx/adma.h
+++ b/include/asm-arm/arch-iop13xx/adma.h
@@ -247,7 +247,7 @@ static inline u32 iop_desc_get_src_count(struct iop_adma_desc_slot *desc,
}
static inline void
-iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
union {
@@ -257,13 +257,13 @@ iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
u_desc_ctrl.value = 0;
u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
- u_desc_ctrl.field.int_en = int_en;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
hw_desc->crc_addr = 0;
}
static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
union {
@@ -274,14 +274,15 @@ iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
u_desc_ctrl.value = 0;
u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
u_desc_ctrl.field.block_fill_en = 1;
- u_desc_ctrl.field.int_en = int_en;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
hw_desc->crc_addr = 0;
}
/* to do: support buffers larger than ADMA_MAX_BYTE_COUNT */
static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
union {
@@ -292,7 +293,7 @@ iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
u_desc_ctrl.value = 0;
u_desc_ctrl.field.src_select = src_cnt - 1;
u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
- u_desc_ctrl.field.int_en = int_en;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
hw_desc->crc_addr = 0;
@@ -301,7 +302,8 @@ iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
/* to do: support buffers larger than ADMA_MAX_BYTE_COUNT */
static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
{
struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc;
union {
@@ -314,7 +316,7 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */
u_desc_ctrl.field.zero_result = 1;
u_desc_ctrl.field.status_write_back_en = 1;
- u_desc_ctrl.field.int_en = int_en;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
hw_desc->crc_addr = 0;
diff --git a/include/asm-arm/arch-pxa/gpio.h b/include/asm-arm/arch-pxa/gpio.h
index 9dbc2dc794f7..bdbf5f9ffdd5 100644
--- a/include/asm-arm/arch-pxa/gpio.h
+++ b/include/asm-arm/arch-pxa/gpio.h
@@ -28,43 +28,35 @@
#include <asm/irq.h>
#include <asm/hardware.h>
-static inline int gpio_request(unsigned gpio, const char *label)
-{
- return 0;
-}
+#include <asm-generic/gpio.h>
-static inline void gpio_free(unsigned gpio)
-{
- return;
-}
-extern int gpio_direction_input(unsigned gpio);
-extern int gpio_direction_output(unsigned gpio, int value);
+/* NOTE: some PXAs have fewer on-chip GPIOs (like PXA255, with 85).
+ * Those cases currently cause holes in the GPIO number space.
+ */
+#define NR_BUILTIN_GPIO 128
-static inline int __gpio_get_value(unsigned gpio)
+static inline int gpio_get_value(unsigned gpio)
{
- return GPLR(gpio) & GPIO_bit(gpio);
+ if (__builtin_constant_p(gpio) && (gpio < NR_BUILTIN_GPIO))
+ return GPLR(gpio) & GPIO_bit(gpio);
+ else
+ return __gpio_get_value(gpio);
}
-#define gpio_get_value(gpio) \
- (__builtin_constant_p(gpio) ? \
- __gpio_get_value(gpio) : \
- pxa_gpio_get_value(gpio))
-
-static inline void __gpio_set_value(unsigned gpio, int value)
+static inline void gpio_set_value(unsigned gpio, int value)
{
- if (value)
- GPSR(gpio) = GPIO_bit(gpio);
- else
- GPCR(gpio) = GPIO_bit(gpio);
+ if (__builtin_constant_p(gpio) && (gpio < NR_BUILTIN_GPIO)) {
+ if (value)
+ GPSR(gpio) = GPIO_bit(gpio);
+ else
+ GPCR(gpio) = GPIO_bit(gpio);
+ } else {
+ __gpio_set_value(gpio, value);
+ }
}
-#define gpio_set_value(gpio,value) \
- (__builtin_constant_p(gpio) ? \
- __gpio_set_value(gpio, value) : \
- pxa_gpio_set_value(gpio, value))
-
-#include <asm-generic/gpio.h> /* cansleep wrappers */
+#define gpio_cansleep __gpio_cansleep
#define gpio_to_irq(gpio) IRQ_GPIO(gpio)
#define irq_to_gpio(irq) IRQ_TO_GPIO(irq)
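With the PXA header now routed through asm-generic/gpio.h, drivers use the standard gpiolib calls whether the pin is one of the NR_BUILTIN_GPIO on-chip lines or sits on an external expander. A minimal hedged driver-side sketch; the GPIO number is made up:

#include <asm/gpio.h>	/* pulls in the asm-generic/gpio.h wrappers above */

#define EXAMPLE_LED_GPIO 42	/* illustrative pin number only */

static int example_led_init(void)
{
	int err = gpio_request(EXAMPLE_LED_GPIO, "example-led");

	if (err)
		return err;
	/* Constant GPIO numbers below NR_BUILTIN_GPIO compile down to the
	 * direct GPSR/GPCR accesses shown in gpio_set_value() above. */
	err = gpio_direction_output(EXAMPLE_LED_GPIO, 0);
	if (err)
		gpio_free(EXAMPLE_LED_GPIO);
	return err;
}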
diff --git a/include/asm-arm/arch-pxa/pxa-regs.h b/include/asm-arm/arch-pxa/pxa-regs.h
index 16ed24dbda4e..ac175b4d10cb 100644
--- a/include/asm-arm/arch-pxa/pxa-regs.h
+++ b/include/asm-arm/arch-pxa/pxa-regs.h
@@ -1131,6 +1131,19 @@
* General Purpose I/O
*/
+#define GPIO0_BASE ((void __iomem *)io_p2v(0x40E00000))
+#define GPIO1_BASE ((void __iomem *)io_p2v(0x40E00004))
+#define GPIO2_BASE ((void __iomem *)io_p2v(0x40E00008))
+#define GPIO3_BASE ((void __iomem *)io_p2v(0x40E00100))
+
+#define GPLR_OFFSET 0x00
+#define GPDR_OFFSET 0x0C
+#define GPSR_OFFSET 0x18
+#define GPCR_OFFSET 0x24
+#define GRER_OFFSET 0x30
+#define GFER_OFFSET 0x3C
+#define GEDR_OFFSET 0x48
+
#define GPLR0 __REG(0x40E00000) /* GPIO Pin-Level Register GPIO<31:0> */
#define GPLR1 __REG(0x40E00004) /* GPIO Pin-Level Register GPIO<63:32> */
#define GPLR2 __REG(0x40E00008) /* GPIO Pin-Level Register GPIO<80:64> */
diff --git a/include/asm-arm/arch-pxa/pxa27x_keyboard.h b/include/asm-arm/arch-pxa/pxa27x_keyboard.h
deleted file mode 100644
index 3aaff923b2ca..000000000000
--- a/include/asm-arm/arch-pxa/pxa27x_keyboard.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#define PXAKBD_MAXROW 8
-#define PXAKBD_MAXCOL 8
-
-struct pxa27x_keyboard_platform_data {
- int nr_rows, nr_cols;
- int keycodes[PXAKBD_MAXROW][PXAKBD_MAXCOL];
- int gpio_modes[PXAKBD_MAXROW + PXAKBD_MAXCOL];
-
-#ifdef CONFIG_PM
- u32 reg_kpc;
- u32 reg_kprec;
-#endif
-};
diff --git a/include/asm-arm/arch-pxa/pxa27x_keypad.h b/include/asm-arm/arch-pxa/pxa27x_keypad.h
new file mode 100644
index 000000000000..644f7609b523
--- /dev/null
+++ b/include/asm-arm/arch-pxa/pxa27x_keypad.h
@@ -0,0 +1,56 @@
+#ifndef __ASM_ARCH_PXA27x_KEYPAD_H
+#define __ASM_ARCH_PXA27x_KEYPAD_H
+
+#include <linux/input.h>
+
+#define MAX_MATRIX_KEY_ROWS (8)
+#define MAX_MATRIX_KEY_COLS (8)
+
+/* pxa3xx keypad platform specific parameters
+ *
+ * NOTE:
+ * 1. direct_key_num indicates the number of keys in the direct keypad
+ * _plus_ the number of rotary-encoder sensor inputs, this can be
+ * left as 0 if only rotary encoders are enabled, the driver will
+ * automatically calculate this
+ *
+ * 2. direct_key_map is the key code map for the direct keys, if rotary
+ * encoder(s) are enabled, direct key 0/1(2/3) will be ignored
+ *
+ * 3. rotary can be either interpreted as a relative input event (e.g.
+ * REL_WHEEL/REL_HWHEEL) or specific keys (e.g. UP/DOWN/LEFT/RIGHT)
+ *
+ * 4. matrix key and direct key will use the same debounce_interval by
+ * default, which should be sufficient in most cases
+ */
+struct pxa27x_keypad_platform_data {
+
+ /* code map for the matrix keys */
+ unsigned int matrix_key_rows;
+ unsigned int matrix_key_cols;
+ unsigned int *matrix_key_map;
+ int matrix_key_map_size;
+
+ /* direct keys */
+ int direct_key_num;
+ unsigned int direct_key_map[8];
+
+ /* rotary encoders 0 */
+ int enable_rotary0;
+ int rotary0_rel_code;
+ int rotary0_up_key;
+ int rotary0_down_key;
+
+ /* rotary encoders 1 */
+ int enable_rotary1;
+ int rotary1_rel_code;
+ int rotary1_up_key;
+ int rotary1_down_key;
+
+ /* key debounce interval */
+ unsigned int debounce_interval;
+};
+
+#define KEY(row, col, val) (((row) << 28) | ((col) << 24) | (val))
+
+#endif /* __ASM_ARCH_PXA27x_KEYPAD_H */
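Board code fills in pxa27x_keypad_platform_data using the KEY(row, col, keycode) packing defined above. A hedged board-file sketch; the key choices, matrix size, debounce value, and the include path are illustrative for this tree, not taken from the patch:

#include <linux/kernel.h>
#include <linux/input.h>
#include <asm/arch/pxa27x_keypad.h>	/* path assumed for this tree */

static unsigned int example_matrix_keys[] = {
	KEY(0, 0, KEY_UP),
	KEY(0, 1, KEY_DOWN),
	KEY(1, 0, KEY_ENTER),
	KEY(1, 1, KEY_ESC),
};

static struct pxa27x_keypad_platform_data example_keypad_data = {
	.matrix_key_rows	= 2,
	.matrix_key_cols	= 2,
	.matrix_key_map		= example_matrix_keys,
	.matrix_key_map_size	= ARRAY_SIZE(example_matrix_keys),
	.debounce_interval	= 30,	/* illustrative value */
};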
diff --git a/include/asm-arm/arch-pxa/tosa.h b/include/asm-arm/arch-pxa/tosa.h
index c3364a2c4758..c05e4faf85a6 100644
--- a/include/asm-arm/arch-pxa/tosa.h
+++ b/include/asm-arm/arch-pxa/tosa.h
@@ -163,4 +163,34 @@
extern struct platform_device tosascoop_jc_device;
extern struct platform_device tosascoop_device;
+
+#define TOSA_KEY_SYNC KEY_102ND /* ??? */
+
+
+#ifndef CONFIG_KEYBOARD_TOSA_USE_EXT_KEYCODES
+#define TOSA_KEY_RECORD KEY_YEN
+#define TOSA_KEY_ADDRESSBOOK KEY_KATAKANA
+#define TOSA_KEY_CANCEL KEY_ESC
+#define TOSA_KEY_CENTER KEY_HIRAGANA
+#define TOSA_KEY_OK KEY_HENKAN
+#define TOSA_KEY_CALENDAR KEY_KATAKANAHIRAGANA
+#define TOSA_KEY_HOMEPAGE KEY_HANGEUL
+#define TOSA_KEY_LIGHT KEY_MUHENKAN
+#define TOSA_KEY_MENU KEY_HANJA
+#define TOSA_KEY_FN KEY_RIGHTALT
+#define TOSA_KEY_MAIL KEY_ZENKAKUHANKAKU
+#else
+#define TOSA_KEY_RECORD KEY_RECORD
+#define TOSA_KEY_ADDRESSBOOK KEY_ADDRESSBOOK
+#define TOSA_KEY_CANCEL KEY_CANCEL
+#define TOSA_KEY_CENTER KEY_SELECT /* ??? */
+#define TOSA_KEY_OK KEY_OK
+#define TOSA_KEY_CALENDAR KEY_CALENDAR
+#define TOSA_KEY_HOMEPAGE KEY_HOMEPAGE
+#define TOSA_KEY_LIGHT KEY_KBDILLUMTOGGLE
+#define TOSA_KEY_MENU KEY_MENU
+#define TOSA_KEY_FN KEY_FN
+#define TOSA_KEY_MAIL KEY_MAIL
+#endif
+
#endif /* _ASM_ARCH_TOSA_H_ */
diff --git a/include/asm-arm/arch-s3c2410/regs-lcd.h b/include/asm-arm/arch-s3c2410/regs-lcd.h
index 76fe5f693426..bd854845697f 100644
--- a/include/asm-arm/arch-s3c2410/regs-lcd.h
+++ b/include/asm-arm/arch-s3c2410/regs-lcd.h
@@ -147,7 +147,16 @@
#define S3C2412_FRCPAT(x) S3C2410_LCDREG(0xB4 + ((x)*4))
-#endif /* ___ASM_ARCH_REGS_LCD_H */
+/* general registers */
+
+/* base of the LCD registers, where INTPND, INTSRC and then INTMSK
+ * are available. */
+#define S3C2410_LCDINTBASE S3C2410_LCDREG(0x54)
+#define S3C2412_LCDINTBASE S3C2410_LCDREG(0x24)
+#define S3C24XX_LCDINTPND (0x00)
+#define S3C24XX_LCDSRCPND (0x04)
+#define S3C24XX_LCDINTMSK (0x08)
+#endif /* ___ASM_ARCH_REGS_LCD_H */
diff --git a/include/asm-arm/arch-s3c2410/spi-gpio.h b/include/asm-arm/arch-s3c2410/spi-gpio.h
index ba1dca88d480..73803731142a 100644
--- a/include/asm-arm/arch-s3c2410/spi-gpio.h
+++ b/include/asm-arm/arch-s3c2410/spi-gpio.h
@@ -13,9 +13,6 @@
#ifndef __ASM_ARCH_SPIGPIO_H
#define __ASM_ARCH_SPIGPIO_H __FILE__
-struct s3c2410_spigpio_info;
-struct spi_board_info;
-
struct s3c2410_spigpio_info {
unsigned long pin_clk;
unsigned long pin_mosi;
@@ -23,9 +20,6 @@ struct s3c2410_spigpio_info {
int bus_num;
- unsigned long board_size;
- struct spi_board_info *board_info;
-
void (*chip_select)(struct s3c2410_spigpio_info *spi, int cs);
};
diff --git a/include/asm-arm/arch-s3c2410/spi.h b/include/asm-arm/arch-s3c2410/spi.h
index 4029a1a1ab40..7ca0ed97a6d0 100644
--- a/include/asm-arm/arch-s3c2410/spi.h
+++ b/include/asm-arm/arch-s3c2410/spi.h
@@ -13,15 +13,9 @@
#ifndef __ASM_ARCH_SPI_H
#define __ASM_ARCH_SPI_H __FILE__
-struct s3c2410_spi_info;
-struct spi_board_info;
-
struct s3c2410_spi_info {
unsigned long pin_cs; /* simple gpio cs */
- unsigned long board_size;
- struct spi_board_info *board_info;
-
void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol);
};
diff --git a/include/asm-arm/elf.h b/include/asm-arm/elf.h
index ec1c685562ce..4ca751627489 100644
--- a/include/asm-arm/elf.h
+++ b/include/asm-arm/elf.h
@@ -41,7 +41,6 @@ typedef struct user_fp elf_fpregset_t;
#endif
#define ELF_ARCH EM_ARM
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
/*
* This yields a string that ld.so will use to load implementation
@@ -115,5 +114,3 @@ extern char elf_platform[];
} while (0)
#endif
-
-#endif
diff --git a/include/asm-arm/hardware/iop3xx-adma.h b/include/asm-arm/hardware/iop3xx-adma.h
index 10834b54f681..5c529e6a5e3b 100644
--- a/include/asm-arm/hardware/iop3xx-adma.h
+++ b/include/asm-arm/hardware/iop3xx-adma.h
@@ -414,7 +414,7 @@ static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
}
static inline void
-iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
{
struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
union {
@@ -425,14 +425,14 @@ iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, int int_en)
u_desc_ctrl.value = 0;
u_desc_ctrl.field.mem_to_mem_en = 1;
u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
- u_desc_ctrl.field.int_en = int_en;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
hw_desc->upper_pci_src_addr = 0;
hw_desc->crc_addr = 0;
}
static inline void
-iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
+iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
{
struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
union {
@@ -443,12 +443,13 @@ iop_desc_init_memset(struct iop_adma_desc_slot *desc, int int_en)
u_desc_ctrl.value = 0;
u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
u_desc_ctrl.field.dest_write_en = 1;
- u_desc_ctrl.field.int_en = int_en;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
}
static inline u32
-iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
+iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
+ unsigned long flags)
{
int i, shift;
u32 edcr;
@@ -509,21 +510,23 @@ iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt, int int_en)
u_desc_ctrl.field.dest_write_en = 1;
u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
- u_desc_ctrl.field.int_en = int_en;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
return u_desc_ctrl.value;
}
static inline void
-iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
{
- iop3xx_desc_init_xor(desc->hw_desc, src_cnt, int_en);
+ iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
}
/* return the number of operations */
static inline int
-iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
{
int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
@@ -538,10 +541,10 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
i += slots_per_op, j++) {
iter = iop_hw_desc_slot_idx(hw_desc, i);
- u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, int_en);
+ u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
u_desc_ctrl.field.dest_write_en = 0;
u_desc_ctrl.field.zero_result_en = 1;
- u_desc_ctrl.field.int_en = int_en;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
iter->desc_ctrl = u_desc_ctrl.value;
/* for the subsequent descriptors preserve the store queue
@@ -559,7 +562,8 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
}
static inline void
-iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
+iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
+ unsigned long flags)
{
struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
union {
@@ -591,7 +595,7 @@ iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, int int_en)
}
u_desc_ctrl.field.dest_write_en = 0;
- u_desc_ctrl.field.int_en = int_en;
+ u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
hw_desc->desc_ctrl = u_desc_ctrl.value;
}
diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h
index 7e85db77d99b..31ff12f4ffb7 100644
--- a/include/asm-arm/page.h
+++ b/include/asm-arm/page.h
@@ -10,9 +10,6 @@
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H
-
-#ifdef __KERNEL__
-
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
@@ -192,6 +189,4 @@ typedef unsigned long pgprot_t;
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
-
#endif
diff --git a/include/asm-arm/pgalloc.h b/include/asm-arm/pgalloc.h
index 4d4394552911..fb6c6e3222bd 100644
--- a/include/asm-arm/pgalloc.h
+++ b/include/asm-arm/pgalloc.h
@@ -27,14 +27,14 @@
* Since we have only two-level page tables, these are trivial
*/
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(pmd) do { } while (0)
+#define pmd_free(mm, pmd) do { } while (0)
#define pgd_populate(mm,pmd,pte) BUG()
extern pgd_t *get_pgd_slow(struct mm_struct *mm);
-extern void free_pgd_slow(pgd_t *pgd);
+extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
#define pgd_alloc(mm) get_pgd_slow(mm)
-#define pgd_free(pgd) free_pgd_slow(pgd)
+#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
/*
* Allocate one PTE table.
@@ -83,7 +83,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
/*
* Free one PTE table.
*/
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
if (pte) {
pte -= PTRS_PER_PTE;
@@ -91,7 +91,7 @@ static inline void pte_free_kernel(pte_t *pte)
}
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 28425c473e71..6335de9a2bb3 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -363,6 +363,21 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
extern void disable_hlt(void);
extern void enable_hlt(void);
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#ifndef CONFIG_SMP
+#include <asm-generic/cmpxchg.h>
+#endif
+
#endif /* __ASSEMBLY__ */
#define arch_align_stack(x) (x)
diff --git a/include/asm-arm/tlb.h b/include/asm-arm/tlb.h
index cb740025d413..36bd402a21cb 100644
--- a/include/asm-arm/tlb.h
+++ b/include/asm-arm/tlb.h
@@ -85,8 +85,8 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
}
#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb,ptep) pte_free(ptep)
-#define pmd_free_tlb(tlb,pmdp) pmd_free(pmdp)
+#define pte_free_tlb(tlb, ptep) pte_free((tlb)->mm, ptep)
+#define pmd_free_tlb(tlb, pmdp) pmd_free((tlb)->mm, pmdp)
#define tlb_migrate_finish(mm) do { } while (0)
diff --git a/include/asm-arm/user.h b/include/asm-arm/user.h
index 3e8b0f879159..825c1e7c582d 100644
--- a/include/asm-arm/user.h
+++ b/include/asm-arm/user.h
@@ -67,7 +67,7 @@ struct user{
esp register. */
long int signal; /* Signal that caused the core dump. */
int reserved; /* No longer used */
- struct pt_regs * u_ar0; /* Used by gdb to help find the values for */
+ unsigned long u_ar0; /* Used by gdb to help find the values for */
/* the registers. */
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
diff --git a/include/asm-avr32/arch-at32ap/at32ap700x.h b/include/asm-avr32/arch-at32ap/at32ap700x.h
index 99684d6f3967..31e48b0e7324 100644
--- a/include/asm-avr32/arch-at32ap/at32ap700x.h
+++ b/include/asm-avr32/arch-at32ap/at32ap700x.h
@@ -13,8 +13,6 @@
#define GPIO_PERIPH_A 0
#define GPIO_PERIPH_B 1
-#define NR_GPIO_CONTROLLERS 4
-
/*
* Pin numbers identifying specific GPIO pins on the chip. They can
* also be converted to IRQ numbers by passing them through
diff --git a/include/asm-avr32/arch-at32ap/gpio.h b/include/asm-avr32/arch-at32ap/gpio.h
index af7f9535bab3..0180f584ef03 100644
--- a/include/asm-avr32/arch-at32ap/gpio.h
+++ b/include/asm-avr32/arch-at32ap/gpio.h
@@ -5,20 +5,36 @@
#include <asm/irq.h>
-/* Arch-neutral GPIO API */
-int __must_check gpio_request(unsigned int gpio, const char *label);
-void gpio_free(unsigned int gpio);
+/* Some GPIO chips can manage IRQs; some can't. The exact numbers can
+ * be changed if needed, but for the moment they're not configurable.
+ */
+#define ARCH_NR_GPIOS (NR_GPIO_IRQS + 2 * 32)
-int gpio_direction_input(unsigned int gpio);
-int gpio_direction_output(unsigned int gpio, int value);
-int gpio_get_value(unsigned int gpio);
-void gpio_set_value(unsigned int gpio, int value);
-#include <asm-generic/gpio.h> /* cansleep wrappers */
+/* Arch-neutral GPIO API, supporting both "native" and external GPIOs. */
+#include <asm-generic/gpio.h>
+
+static inline int gpio_get_value(unsigned int gpio)
+{
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned int gpio, int value)
+{
+ __gpio_set_value(gpio, value);
+}
+
+static inline int gpio_cansleep(unsigned int gpio)
+{
+ return __gpio_cansleep(gpio);
+}
+
static inline int gpio_to_irq(unsigned int gpio)
{
- return gpio + GPIO_IRQ_BASE;
+ if (gpio < NR_GPIO_IRQS)
+ return gpio + GPIO_IRQ_BASE;
+ return -EINVAL;
}
static inline int irq_to_gpio(unsigned int irq)
diff --git a/include/asm-avr32/arch-at32ap/irq.h b/include/asm-avr32/arch-at32ap/irq.h
index 5adffab9a577..608e350368c7 100644
--- a/include/asm-avr32/arch-at32ap/irq.h
+++ b/include/asm-avr32/arch-at32ap/irq.h
@@ -3,11 +3,11 @@
#define EIM_IRQ_BASE NR_INTERNAL_IRQS
#define NR_EIM_IRQS 32
-
#define AT32_EXTINT(n) (EIM_IRQ_BASE + (n))
#define GPIO_IRQ_BASE (EIM_IRQ_BASE + NR_EIM_IRQS)
-#define NR_GPIO_IRQS (5 * 32)
+#define NR_GPIO_CTLR (5 /*internal*/ + 1 /*external*/)
+#define NR_GPIO_IRQS (NR_GPIO_CTLR * 32)
#define NR_IRQS (GPIO_IRQ_BASE + NR_GPIO_IRQS)
diff --git a/include/asm-avr32/delay.h b/include/asm-avr32/delay.h
index cc3b2e3343b3..a0ed9a9839a5 100644
--- a/include/asm-avr32/delay.h
+++ b/include/asm-avr32/delay.h
@@ -12,7 +12,7 @@ extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
#define udelay(n) (__builtin_constant_p(n) ? \
diff --git a/include/asm-avr32/elf.h b/include/asm-avr32/elf.h
index d334b4994d2d..64ce40ee1d58 100644
--- a/include/asm-avr32/elf.h
+++ b/include/asm-avr32/elf.h
@@ -103,8 +103,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
#define ELF_PLATFORM (NULL)
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
-#endif
#endif /* __ASM_AVR32_ELF_H */
diff --git a/include/asm-avr32/page.h b/include/asm-avr32/page.h
index 0f630b3e9932..ee23499cec34 100644
--- a/include/asm-avr32/page.h
+++ b/include/asm-avr32/page.h
@@ -8,8 +8,6 @@
#ifndef __ASM_AVR32_PAGE_H
#define __ASM_AVR32_PAGE_H
-#ifdef __KERNEL__
-
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#ifdef __ASSEMBLY__
@@ -107,6 +105,4 @@ static inline int get_order(unsigned long size)
*/
#define HIGHMEM_START 0x20000000UL
-#endif /* __KERNEL__ */
-
#endif /* __ASM_AVR32_PAGE_H */
diff --git a/include/asm-avr32/pgalloc.h b/include/asm-avr32/pgalloc.h
index 0e680f47209f..b77e364b4c44 100644
--- a/include/asm-avr32/pgalloc.h
+++ b/include/asm-avr32/pgalloc.h
@@ -30,7 +30,7 @@ static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
return kcalloc(USER_PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL);
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
kfree(pgd);
}
@@ -55,12 +55,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return pte;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
diff --git a/include/asm-avr32/system.h b/include/asm-avr32/system.h
index c600cc15cbcb..9702c2213e1e 100644
--- a/include/asm-avr32/system.h
+++ b/include/asm-avr32/system.h
@@ -145,6 +145,29 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
(unsigned long)(new), \
sizeof(*(ptr))))
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+#define cmpxchg_local(ptr, old, new) \
+ ((typeof(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(old), \
+ (unsigned long)(new), \
+ sizeof(*(ptr))))
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
struct pt_regs;
void NORET_TYPE die(const char *str, struct pt_regs *regs, long err);
void _exception(long signr, struct pt_regs *regs, int code,
diff --git a/include/asm-avr32/timex.h b/include/asm-avr32/timex.h
index 5e44ecb3ce0c..187dcf38b210 100644
--- a/include/asm-avr32/timex.h
+++ b/include/asm-avr32/timex.h
@@ -34,7 +34,6 @@ static inline cycles_t get_cycles (void)
return 0;
}
-extern int read_current_timer(unsigned long *timer_value);
-#define ARCH_HAS_READ_CURRENT_TIMER 1
+#define ARCH_HAS_READ_CURRENT_TIMER
#endif /* __ASM_AVR32_TIMEX_H */
diff --git a/include/asm-avr32/unistd.h b/include/asm-avr32/unistd.h
index de09009593f8..89861a27543e 100644
--- a/include/asm-avr32/unistd.h
+++ b/include/asm-avr32/unistd.h
@@ -297,7 +297,7 @@
#define __NR_utimensat 278
#define __NR_signalfd 279
-#define __NR_timerfd 280
+/* 280 was __NR_timerfd */
#define __NR_eventfd 281
#ifdef __KERNEL__
diff --git a/include/asm-avr32/user.h b/include/asm-avr32/user.h
index 060fb3acee49..7e9152f81f5e 100644
--- a/include/asm-avr32/user.h
+++ b/include/asm-avr32/user.h
@@ -51,7 +51,7 @@ struct user {
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
- struct regs * u_ar0; /* help gdb find registers */
+ unsigned long u_ar0; /* help gdb find registers */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
diff --git a/include/asm-blackfin/elf.h b/include/asm-blackfin/elf.h
index 5264b5536a70..30303fc8292c 100644
--- a/include/asm-blackfin/elf.h
+++ b/include/asm-blackfin/elf.h
@@ -120,8 +120,6 @@ do { \
#define ELF_PLATFORM (NULL)
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
-#endif
#endif
diff --git a/include/asm-blackfin/io.h b/include/asm-blackfin/io.h
index 1601d62f39a5..574fe56989d1 100644
--- a/include/asm-blackfin/io.h
+++ b/include/asm-blackfin/io.h
@@ -188,8 +188,6 @@ extern void blkfin_inv_cache_all(void);
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT)
-#define mm_ptov(vaddr) ((void *) (vaddr))
-#define mm_vtop(vaddr) ((unsigned long) (vaddr))
#define phys_to_virt(vaddr) ((void *) (vaddr))
#define virt_to_phys(vaddr) ((unsigned long) (vaddr))
diff --git a/include/asm-blackfin/page.h b/include/asm-blackfin/page.h
index 8bc86717021c..d5c9d1433781 100644
--- a/include/asm-blackfin/page.h
+++ b/include/asm-blackfin/page.h
@@ -11,8 +11,6 @@
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
-#ifdef __KERNEL__
-
#include <asm/setup.h>
#ifndef __ASSEMBLY__
@@ -88,6 +86,5 @@ extern unsigned long memory_end;
#include <asm-generic/page.h>
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
#endif /* _BLACKFIN_PAGE_H */
diff --git a/include/asm-blackfin/system.h b/include/asm-blackfin/system.h
index 4a927379ee1c..51494ef5bb41 100644
--- a/include/asm-blackfin/system.h
+++ b/include/asm-blackfin/system.h
@@ -183,55 +183,20 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
return tmp;
}
+#include <asm-generic/cmpxchg-local.h>
+
/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
*/
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long tmp = 0;
- unsigned long flags = 0;
-
- local_irq_save(flags);
-
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("%0 = b%3 (z);\n\t"
- "CC = %1 == %0;\n\t"
- "IF !CC JUMP 1f;\n\t"
- "b%3 = %2;\n\t"
- "1:\n\t"
- : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory");
- break;
- case 2:
- __asm__ __volatile__
- ("%0 = w%3 (z);\n\t"
- "CC = %1 == %0;\n\t"
- "IF !CC JUMP 1f;\n\t"
- "w%3 = %2;\n\t"
- "1:\n\t"
- : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory");
- break;
- case 4:
- __asm__ __volatile__
- ("%0 = %3;\n\t"
- "CC = %1 == %0;\n\t"
- "IF !CC JUMP 1f;\n\t"
- "%3 = %2;\n\t"
- "1:\n\t"
- : "=&d" (tmp) : "d" (old), "d" (new), "m" (*__xg(ptr)) : "memory");
- break;
- }
- local_irq_restore(flags);
- return tmp;
-}
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
+#ifndef CONFIG_SMP
+#include <asm-generic/cmpxchg.h>
+#endif
#define prepare_to_switch() do { } while(0)
diff --git a/include/asm-blackfin/user.h b/include/asm-blackfin/user.h
index abc34629bd59..afe6a0e1f7ce 100644
--- a/include/asm-blackfin/user.h
+++ b/include/asm-blackfin/user.h
@@ -75,7 +75,7 @@ struct user {
esp register. */
long int signal; /* Signal that caused the core dump. */
int reserved; /* No longer used */
- struct user_regs_struct *u_ar0;
+ unsigned long u_ar0;
/* Used by gdb to help find the values for */
/* the registers. */
unsigned long magic; /* To uniquely identify a core file */
diff --git a/include/asm-cris/bitops.h b/include/asm-cris/bitops.h
index e2f49c27ed29..75ea6e096483 100644
--- a/include/asm-cris/bitops.h
+++ b/include/asm-cris/bitops.h
@@ -24,13 +24,6 @@
#include <linux/compiler.h>
/*
- * Some hacks to defeat gcc over-optimizations..
- */
-struct __dummy { unsigned long a[100]; };
-#define ADDR (*(struct __dummy *) addr)
-#define CONST_ADDR (*(const struct __dummy *) addr)
-
-/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
diff --git a/include/asm-cris/elf.h b/include/asm-cris/elf.h
index 96a40c1de57c..001f64ad11e8 100644
--- a/include/asm-cris/elf.h
+++ b/include/asm-cris/elf.h
@@ -45,7 +45,6 @@ typedef unsigned long elf_fpregset_t;
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_CRIS
-#ifdef __KERNEL__
#include <asm/arch/elf.h>
/* The master for these definitions is {binutils}/include/elf/cris.h: */
@@ -91,6 +90,4 @@ typedef unsigned long elf_fpregset_t;
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
-#endif /* __KERNEL__ */
-
#endif
diff --git a/include/asm-cris/page.h b/include/asm-cris/page.h
index b84353ef6998..3b0156c46311 100644
--- a/include/asm-cris/page.h
+++ b/include/asm-cris/page.h
@@ -1,8 +1,6 @@
#ifndef _CRIS_PAGE_H
#define _CRIS_PAGE_H
-#ifdef __KERNEL__
-
#include <asm/arch/page.h>
#include <linux/const.h>
@@ -74,7 +72,5 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
-
#endif /* _CRIS_PAGE_H */
diff --git a/include/asm-cris/pgalloc.h b/include/asm-cris/pgalloc.h
index deaddfe79bbc..8ddd66f81773 100644
--- a/include/asm-cris/pgalloc.h
+++ b/include/asm-cris/pgalloc.h
@@ -16,7 +16,7 @@ static inline pgd_t *pgd_alloc (struct mm_struct *mm)
return (pgd_t *)get_zeroed_page(GFP_KERNEL);
}
-static inline void pgd_free (pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
@@ -34,12 +34,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
return pte;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
diff --git a/include/asm-cris/system.h b/include/asm-cris/system.h
index fea0e8d57cb5..5bcfe5a10907 100644
--- a/include/asm-cris/system.h
+++ b/include/asm-cris/system.h
@@ -66,6 +66,21 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
return x;
}
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#ifndef CONFIG_SMP
+#include <asm-generic/cmpxchg.h>
+#endif
+
#define arch_align_stack(x) (x)
void default_idle(void);
diff --git a/include/asm-cris/user.h b/include/asm-cris/user.h
index 2538e2a003df..73e60fcbcf38 100644
--- a/include/asm-cris/user.h
+++ b/include/asm-cris/user.h
@@ -38,7 +38,7 @@ struct user {
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
- struct regs * u_ar0; /* help gdb find registers */
+ unsigned long u_ar0; /* help gdb find registers */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
diff --git a/include/asm-frv/Kbuild b/include/asm-frv/Kbuild
index 966a9836d556..bc3f12c5b7e0 100644
--- a/include/asm-frv/Kbuild
+++ b/include/asm-frv/Kbuild
@@ -4,4 +4,3 @@ header-y += registers.h
unifdef-y += termios.h
unifdef-y += ptrace.h
-unifdef-y += page.h
diff --git a/include/asm-frv/dma-mapping.h b/include/asm-frv/dma-mapping.h
index bcb2df68496e..2e8966ca030d 100644
--- a/include/asm-frv/dma-mapping.h
+++ b/include/asm-frv/dma-mapping.h
@@ -17,16 +17,6 @@ void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle
void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
/*
- * These macros should be used after a pci_map_sg call has been done
- * to get bus addresses of each of the SG entries and their lengths.
- * You should only work with the number of sg entries pci_map_sg
- * returns, or alternatively stop on the first sg_dma_len(sg) which
- * is 0.
- */
-#define sg_dma_address(sg) ((sg)->dma_address)
-#define sg_dma_len(sg) ((sg)->length)
-
-/*
* Map a single buffer of the indicated size for DMA in streaming mode.
* The 32-bit bus address to use is returned.
*
diff --git a/include/asm-frv/elf.h b/include/asm-frv/elf.h
index 7df58a3e6e4a..9fb946bb7dc9 100644
--- a/include/asm-frv/elf.h
+++ b/include/asm-frv/elf.h
@@ -137,8 +137,6 @@ do { \
#define ELF_PLATFORM (NULL)
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
-#endif
#endif
diff --git a/include/asm-frv/page.h b/include/asm-frv/page.h
index 213d92fd652a..cacc045700de 100644
--- a/include/asm-frv/page.h
+++ b/include/asm-frv/page.h
@@ -1,8 +1,6 @@
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H
-#ifdef __KERNEL__
-
#include <asm/virtconvert.h>
#include <asm/mem-layout.h>
#include <asm/sections.h>
@@ -76,13 +74,7 @@ extern unsigned long max_pfn;
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_CONTIGUOUS_PAGE_ALLOC
-#define WANT_PAGE_VIRTUAL 1
-#endif
-
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
-
#endif /* _ASM_PAGE_H */
diff --git a/include/asm-frv/pgalloc.h b/include/asm-frv/pgalloc.h
index ce982a6c610f..e89620ef08ca 100644
--- a/include/asm-frv/pgalloc.h
+++ b/include/asm-frv/pgalloc.h
@@ -31,18 +31,18 @@ do { \
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
@@ -55,7 +55,7 @@ static inline void pte_free(struct page *pte)
* (In the PAE case we free the pmds as part of the pgd.)
*/
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *) 2); })
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
#endif /* CONFIG_MMU */
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index 3c402afb9e74..6c0682ed5fc9 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -226,7 +226,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
* inside the pgd, so has no extra memory associated with it.
*/
#define pud_alloc_one(mm, address) NULL
-#define pud_free(x) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x) do { } while (0)
/*
diff --git a/include/asm-frv/scatterlist.h b/include/asm-frv/scatterlist.h
index 2e7143b5a7ad..4bca8a28546c 100644
--- a/include/asm-frv/scatterlist.h
+++ b/include/asm-frv/scatterlist.h
@@ -31,6 +31,16 @@ struct scatterlist {
unsigned int length;
};
+/*
+ * These macros should be used after a pci_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) ((sg)->dma_address)
+#define sg_dma_len(sg) ((sg)->length)
+
#define ISA_DMA_THRESHOLD (0xffffffffUL)
#endif /* !_ASM_SCATTERLIST_H */
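
[Editor's note] The comment moved into this header describes how sg_dma_address()/sg_dma_len() are meant to be used after pci_map_sg(). A minimal sketch of that pattern for a flat (unchained) scatterlist; the device-programming step and every name are illustrative only:

#include <linux/pci.h>
#include <linux/scatterlist.h>

static void example_program_sg(struct pci_dev *pdev, struct scatterlist *sg,
                               int nents)
{
        int mapped = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
        int i;

        /* Only the first 'mapped' entries carry valid bus addresses. */
        for (i = 0; i < mapped; i++) {
                dma_addr_t addr = sg_dma_address(&sg[i]);
                unsigned int len = sg_dma_len(&sg[i]);

                /* ...program addr/len into the device's DMA engine here... */
                (void)addr;
                (void)len;
        }

        pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
}
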
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h
index 9f5663ba19f8..59be5443a68f 100644
--- a/include/asm-frv/system.h
+++ b/include/asm-frv/system.h
@@ -268,5 +268,29 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
#endif
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return cmpxchg(ptr, old, new);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif /* _ASM_SYSTEM_H */
diff --git a/include/asm-frv/unistd.h b/include/asm-frv/unistd.h
index cd84f1771e34..e8c986667532 100644
--- a/include/asm-frv/unistd.h
+++ b/include/asm-frv/unistd.h
@@ -328,7 +328,7 @@
#define __NR_epoll_pwait 319
#define __NR_utimensat 320
#define __NR_signalfd 321
-#define __NR_timerfd 322
+/* #define __NR_timerfd 322 removed */
#define __NR_eventfd 323
#define __NR_fallocate 324
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 7b88d3931e34..9d40e879f99e 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -28,7 +28,7 @@
#undef pud_free_tlb
#define pud_free_tlb(tlb, x) do { } while (0)
-#define pud_free(x) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x) do { } while (0)
#undef pud_addr_end
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 8fd81713cfc0..57ba60635959 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -27,8 +27,3 @@ unifdef-y += termbits.h
unifdef-y += termios.h
unifdef-y += types.h
unifdef-y += unistd.h
-unifdef-y += user.h
-
-# These probably shouldn't be exported
-unifdef-y += elf.h
-unifdef-y += page.h
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
new file mode 100644
index 000000000000..b2ba2fc8829a
--- /dev/null
+++ b/include/asm-generic/cmpxchg-local.h
@@ -0,0 +1,65 @@
+#ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H
+#define __ASM_GENERIC_CMPXCHG_LOCAL_H
+
+#include <linux/types.h>
+
+extern unsigned long wrong_size_cmpxchg(volatile void *ptr);
+
+/*
+ * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
+ * long parameter, supporting various types of architectures.
+ */
+static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ unsigned long flags, prev;
+
+ /*
+ * Sanity checking, compile-time.
+ */
+ if (size == 8 && sizeof(unsigned long) != 8)
+ wrong_size_cmpxchg(ptr);
+
+ local_irq_save(flags);
+ switch (size) {
+ case 1: prev = *(u8 *)ptr;
+ if (prev == old)
+ *(u8 *)ptr = (u8)new;
+ break;
+ case 2: prev = *(u16 *)ptr;
+ if (prev == old)
+ *(u16 *)ptr = (u16)new;
+ break;
+ case 4: prev = *(u32 *)ptr;
+ if (prev == old)
+ *(u32 *)ptr = (u32)new;
+ break;
+ case 8: prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = (u64)new;
+ break;
+ default:
+ wrong_size_cmpxchg(ptr);
+ }
+ local_irq_restore(flags);
+ return prev;
+}
+
+/*
+ * Generic version of __cmpxchg64_local. Takes an u64 parameter.
+ */
+static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
+ u64 old, u64 new)
+{
+ u64 prev;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = new;
+ local_irq_restore(flags);
+ return prev;
+}
+
+#endif
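
[Editor's note] A short illustration of the calling convention this header provides: the previous value is returned, and the store happened only if that value matched 'old'. It assumes the architecture defines cmpxchg_local() on top of __cmpxchg_local_generic(), as the per-arch hunks in this patch do; the function name is invented:

static int example_claim_slot(unsigned long *slot, unsigned long owner)
{
        /* Store 'owner' only if the slot still holds 0; report success. */
        return cmpxchg_local(slot, 0UL, owner) == 0UL;
}
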
diff --git a/include/asm-generic/cmpxchg.h b/include/asm-generic/cmpxchg.h
new file mode 100644
index 000000000000..213ac6e8fe39
--- /dev/null
+++ b/include/asm-generic/cmpxchg.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_GENERIC_CMPXCHG_H
+#define __ASM_GENERIC_CMPXCHG_H
+
+/*
+ * Generic cmpxchg
+ *
+ * Uses the local cmpxchg. Does not support SMP.
+ */
+#ifdef CONFIG_SMP
+#error "Cannot use generic cmpxchg on SMP"
+#endif
+
+/*
+ * Atomic compare and exchange.
+ *
+ * Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
+ * a cmpxchg primitive faster than repeated local irq save/restore exists.
+ */
+#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+
+#endif
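
[Editor's note] On the UP-only architectures that include this header, cmpxchg() becomes the interrupt-disabling local variant, so the usual compare-and-swap retry loop works unchanged. A sketch with invented names:

static unsigned long example_add_capped(unsigned long *p, unsigned long inc,
                                        unsigned long limit)
{
        unsigned long old, new;

        do {
                old = *p;
                new = (old + inc > limit) ? limit : old + inc;
        } while (cmpxchg(p, old, new) != old);  /* retry if an interrupt raced us */

        return new;
}
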
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 09204e40d663..1c1fa422d18a 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -18,6 +18,7 @@ typedef unsigned long cputime_t;
#define cputime_lt(__a, __b) ((__a) < (__b))
#define cputime_le(__a, __b) ((__a) <= (__b))
#define cputime_to_jiffies(__ct) (__ct)
+#define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__hz) (__hz)
typedef u64 cputime64_t;
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 2d0aab1d8611..f29a502f4a6c 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -1,6 +1,102 @@
#ifndef _ASM_GENERIC_GPIO_H
#define _ASM_GENERIC_GPIO_H
+#ifdef CONFIG_HAVE_GPIO_LIB
+
+/* Platforms may implement their GPIO interface with library code,
+ * at a small performance cost for non-inlined operations and some
+ * extra memory (for code and for per-GPIO table entries).
+ *
+ * While the GPIO programming interface defines valid GPIO numbers
+ * to be in the range 0..MAX_INT, this library restricts them to the
+ * smaller range 0..ARCH_NR_GPIOS.
+ */
+
+#ifndef ARCH_NR_GPIOS
+#define ARCH_NR_GPIOS 256
+#endif
+
+struct seq_file;
+
+/**
+ * struct gpio_chip - abstract a GPIO controller
+ * @label: for diagnostics
+ * @direction_input: configures signal "offset" as input, or returns error
+ * @get: returns value for signal "offset"; for output signals this
+ * returns either the value actually sensed, or zero
+ * @direction_output: configures signal "offset" as output, or returns error
+ * @set: assigns output value for signal "offset"
+ * @dbg_show: optional routine to show contents in debugfs; default code
+ * will be used when this is omitted, but custom code can show extra
+ * state (such as pullup/pulldown configuration).
+ * @base: identifies the first GPIO number handled by this chip; or, if
+ * negative during registration, requests dynamic ID allocation.
+ * @ngpio: the number of GPIOs handled by this controller; the last GPIO
+ * handled is (base + ngpio - 1).
+ * @can_sleep: flag must be set iff get()/set() methods sleep, as they
+ * must while accessing GPIO expander chips over I2C or SPI
+ *
+ * A gpio_chip can help platforms abstract various sources of GPIOs so
+ * they can all be accessed through a common programming interface.
+ * Example sources would be SOC controllers, FPGAs, multifunction
+ * chips, dedicated GPIO expanders, and so on.
+ *
+ * Each chip controls a number of signals, identified in method calls
+ * by "offset" values in the range 0..(@ngpio - 1). When those signals
+ * are referenced through calls like gpio_get_value(gpio), the offset
+ * is calculated by subtracting @base from the gpio number.
+ */
+struct gpio_chip {
+ char *label;
+
+ int (*direction_input)(struct gpio_chip *chip,
+ unsigned offset);
+ int (*get)(struct gpio_chip *chip,
+ unsigned offset);
+ int (*direction_output)(struct gpio_chip *chip,
+ unsigned offset, int value);
+ void (*set)(struct gpio_chip *chip,
+ unsigned offset, int value);
+ void (*dbg_show)(struct seq_file *s,
+ struct gpio_chip *chip);
+ int base;
+ u16 ngpio;
+ unsigned can_sleep:1;
+};
+
+extern const char *gpiochip_is_requested(struct gpio_chip *chip,
+ unsigned offset);
+
+/* add/remove chips */
+extern int gpiochip_add(struct gpio_chip *chip);
+extern int __must_check gpiochip_remove(struct gpio_chip *chip);
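
[Editor's note] A sketch of how a platform might register a bank of GPIOs through this interface. The register layout (data/output/direction at offsets 0x00/0x04/0x08) and every name below are invented for illustration; they are not part of the patch:

#include <linux/io.h>
#include <asm/gpio.h>

static void __iomem *example_regs;      /* assumed to be ioremap()ed elsewhere */

static int example_get(struct gpio_chip *chip, unsigned offset)
{
        return (readl(example_regs + 0x00) >> offset) & 1;
}

static void example_set(struct gpio_chip *chip, unsigned offset, int value)
{
        u32 out = readl(example_regs + 0x04);

        if (value)
                out |= 1 << offset;
        else
                out &= ~(1 << offset);
        writel(out, example_regs + 0x04);
}

static int example_dir_in(struct gpio_chip *chip, unsigned offset)
{
        writel(readl(example_regs + 0x08) & ~(1 << offset), example_regs + 0x08);
        return 0;
}

static int example_dir_out(struct gpio_chip *chip, unsigned offset, int value)
{
        example_set(chip, offset, value);
        writel(readl(example_regs + 0x08) | (1 << offset), example_regs + 0x08);
        return 0;
}

static struct gpio_chip example_chip = {
        .label            = "example-bank",
        .direction_input  = example_dir_in,
        .get              = example_get,
        .direction_output = example_dir_out,
        .set              = example_set,
        .base             = -1,         /* let gpiolib assign the numbers */
        .ngpio            = 32,
};

/* platform init code would then call: gpiochip_add(&example_chip); */

Because this controller is plain MMIO, can_sleep stays clear; an I2C or SPI expander would set it and be reached through the *_cansleep() calls declared below.
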
+
+
+/* Always use the library code for GPIO management calls,
+ * or when sleeping may be involved.
+ */
+extern int gpio_request(unsigned gpio, const char *label);
+extern void gpio_free(unsigned gpio);
+
+extern int gpio_direction_input(unsigned gpio);
+extern int gpio_direction_output(unsigned gpio, int value);
+
+extern int gpio_get_value_cansleep(unsigned gpio);
+extern void gpio_set_value_cansleep(unsigned gpio, int value);
+
+
+/* A platform's <asm/gpio.h> code may want to inline the I/O calls when
+ * the GPIO is constant and refers to some always-present controller,
+ * giving direct access to chip registers and tight bitbanging loops.
+ */
+extern int __gpio_get_value(unsigned gpio);
+extern void __gpio_set_value(unsigned gpio, int value);
+
+extern int __gpio_cansleep(unsigned gpio);
+
+
+#else
+
/* platforms that don't directly support access to GPIOs through I2C, SPI,
* or other blocking infrastructure can use these wrappers.
*/
@@ -22,4 +118,6 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value)
gpio_set_value(gpio, value);
}
+#endif
+
#endif /* _ASM_GENERIC_GPIO_H */
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index 29ff5d84d8c3..087325ede76c 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -54,7 +54,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
* inside the pud, so has no extra memory associated with it.
*/
#define pmd_alloc_one(mm, address) NULL
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
#undef pmd_addr_end
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index 566464500558..87cf449a6df3 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -51,7 +51,7 @@ static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
* inside the pgd, so has no extra memory associated with it.
*/
#define pud_alloc_one(mm, address) NULL
-#define pud_free(x) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
#define __pud_free_tlb(tlb, x) do { } while (0)
#undef pud_addr_end
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 962cad7cfbbd..8feeae1f2369 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -8,8 +8,6 @@ extern char _data[], _sdata[], _edata[];
extern char __bss_start[], __bss_stop[];
extern char __init_begin[], __init_end[];
extern char _sinittext[], _einittext[];
-extern char _sextratext[] __attribute__((weak));
-extern char _eextratext[] __attribute__((weak));
extern char _end[];
extern char __per_cpu_start[], __per_cpu_end[];
extern char __kprobes_text_start[], __kprobes_text_end[];
diff --git a/include/asm-h8300/elf.h b/include/asm-h8300/elf.h
index 7ba6a0af447c..26bfc7e641da 100644
--- a/include/asm-h8300/elf.h
+++ b/include/asm-h8300/elf.h
@@ -55,9 +55,7 @@ typedef unsigned long elf_fpregset_t;
#define ELF_PLATFORM (NULL)
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX)
-#endif
#define R_H8_NONE 0
#define R_H8_DIR32 1
diff --git a/include/asm-h8300/io.h b/include/asm-h8300/io.h
index 7543a57b4ea1..26dc6ccd9441 100644
--- a/include/asm-h8300/io.h
+++ b/include/asm-h8300/io.h
@@ -302,8 +302,6 @@ static __inline__ void ctrl_outl(unsigned long b, unsigned long addr)
/*
* Macros used for converting between virtual and physical mappings.
*/
-#define mm_ptov(vaddr) ((void *) (vaddr))
-#define mm_vtop(vaddr) ((unsigned long) (vaddr))
#define phys_to_virt(vaddr) ((void *) (vaddr))
#define virt_to_phys(vaddr) ((unsigned long) (vaddr))
diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h
index c8cc81a3aca5..a83492449130 100644
--- a/include/asm-h8300/page.h
+++ b/include/asm-h8300/page.h
@@ -1,8 +1,6 @@
#ifndef _H8300_PAGE_H
#define _H8300_PAGE_H
-#ifdef __KERNEL__
-
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT (12)
@@ -79,6 +77,4 @@ extern unsigned long memory_end;
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
-
#endif /* _H8300_PAGE_H */
diff --git a/include/asm-h8300/system.h b/include/asm-h8300/system.h
index 2c1e83f7b419..4b8e475908ae 100644
--- a/include/asm-h8300/system.h
+++ b/include/asm-h8300/system.h
@@ -138,6 +138,21 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
asm("jmp @@0"); \
})
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#ifndef CONFIG_SMP
+#include <asm-generic/cmpxchg.h>
+#endif
+
#define arch_align_stack(x) (x)
#endif /* _H8300_SYSTEM_H */
diff --git a/include/asm-h8300/user.h b/include/asm-h8300/user.h
index 6c64f99af3e1..14a9e18950f1 100644
--- a/include/asm-h8300/user.h
+++ b/include/asm-h8300/user.h
@@ -62,8 +62,7 @@ struct user{
esp register. */
long int signal; /* Signal that caused the core dump. */
int reserved; /* No longer used */
- struct user_regs_struct *u_ar0;
- /* Used by gdb to help find the values for */
+ unsigned long u_ar0; /* Used by gdb to help find the values for */
/* the registers. */
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
diff --git a/include/asm-h8300/virtconvert.h b/include/asm-h8300/virtconvert.h
index ee7d5ea10065..19cfd62b11c3 100644
--- a/include/asm-h8300/virtconvert.h
+++ b/include/asm-h8300/virtconvert.h
@@ -10,8 +10,6 @@
#include <asm/setup.h>
#include <asm/page.h>
-#define mm_ptov(vaddr) ((void *) (vaddr))
-#define mm_vtop(vaddr) ((unsigned long) (vaddr))
#define phys_to_virt(vaddr) ((void *) (vaddr))
#define virt_to_phys(vaddr) ((unsigned long) (vaddr))
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h
index a1b9719f5fbb..953d3df9dd22 100644
--- a/include/asm-ia64/bitops.h
+++ b/include/asm-ia64/bitops.h
@@ -122,38 +122,40 @@ clear_bit_unlock (int nr, volatile void *addr)
}
/**
- * __clear_bit_unlock - Non-atomically clear a bit with release
+ * __clear_bit_unlock - Non-atomically clears a bit in memory with release
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
*
- * This is like clear_bit_unlock, but the implementation uses a store
+ * Similarly to clear_bit_unlock, the implementation uses a store
* with release semantics. See also __raw_spin_unlock().
*/
static __inline__ void
-__clear_bit_unlock(int nr, volatile void *addr)
+__clear_bit_unlock(int nr, void *addr)
{
- __u32 mask, new;
- volatile __u32 *m;
+ __u32 * const m = (__u32 *) addr + (nr >> 5);
+ __u32 const new = *m & ~(1 << (nr & 31));
- m = (volatile __u32 *)addr + (nr >> 5);
- mask = ~(1 << (nr & 31));
- new = *m & mask;
- barrier();
ia64_st4_rel_nta(m, new);
}
/**
* __clear_bit - Clears a bit in memory (non-atomic version)
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
*/
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
- volatile __u32 *p = (__u32 *) addr + (nr >> 5);
- __u32 m = 1 << (nr & 31);
- *p &= ~m;
+ *((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
}
/**
* change_bit - Toggle a bit in memory
- * @nr: Bit to clear
+ * @nr: Bit to toggle
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
@@ -178,7 +180,7 @@ change_bit (int nr, volatile void *addr)
/**
* __change_bit - Toggle a bit in memory
- * @nr: the bit to set
+ * @nr: the bit to toggle
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
@@ -197,7 +199,7 @@ __change_bit (int nr, volatile void *addr)
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
*/
static __inline__ int
test_and_set_bit (int nr, volatile void *addr)
@@ -247,11 +249,11 @@ __test_and_set_bit (int nr, volatile void *addr)
/**
* test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
*/
static __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
@@ -272,7 +274,7 @@ test_and_clear_bit (int nr, volatile void *addr)
/**
* __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
@@ -292,11 +294,11 @@ __test_and_clear_bit(int nr, volatile void * addr)
/**
* test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to set
+ * @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
+ * It also implies the acquisition side of the memory barrier.
*/
static __inline__ int
test_and_change_bit (int nr, volatile void *addr)
@@ -315,8 +317,12 @@ test_and_change_bit (int nr, volatile void *addr)
return (old & bit) != 0;
}
-/*
- * WARNING: non atomic version.
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
*/
static __inline__ int
__test_and_change_bit (int nr, void *addr)
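
[Editor's note] The kernel-doc fixes above spell out which of these bit operations are atomic and which imply the acquire side of a barrier. The classic use of the atomic test-and-set form is a one-bit claim flag; a sketch with invented names:

#include <linux/bitops.h>
#include <linux/errno.h>

static unsigned long example_flags;             /* bit 0: device busy */
#define EXAMPLE_BUSY    0

static int example_try_claim(void)
{
        /* Atomic, implies acquire semantics; returns the previous bit value. */
        if (test_and_set_bit(EXAMPLE_BUSY, &example_flags))
                return -EBUSY;                  /* already claimed */
        return 0;
}

static void example_release(void)
{
        clear_bit_unlock(EXAMPLE_BUSY, &example_flags);
}
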
diff --git a/include/asm-ia64/elf.h b/include/asm-ia64/elf.h
index f10e29b60b00..f8e83eca67a2 100644
--- a/include/asm-ia64/elf.h
+++ b/include/asm-ia64/elf.h
@@ -177,7 +177,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
relevant until we have real hardware to play with... */
#define ELF_PLATFORM NULL
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX)
#define elf_read_implies_exec(ex, executable_stack) \
((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
@@ -248,6 +247,4 @@ do { \
} \
} while (0)
-#endif /* __KERNEL__ */
-
#endif /* _ASM_IA64_ELF_H */
diff --git a/include/asm-ia64/gcc_intrin.h b/include/asm-ia64/gcc_intrin.h
index 5b6665c754c9..de2ed2cbdd84 100644
--- a/include/asm-ia64/gcc_intrin.h
+++ b/include/asm-ia64/gcc_intrin.h
@@ -24,7 +24,9 @@
extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);
+#ifdef __KERNEL__
register unsigned long ia64_r13 asm ("r13") __used;
+#endif
#define ia64_setreg(regnum, val) \
({ \
diff --git a/include/asm-ia64/intrinsics.h b/include/asm-ia64/intrinsics.h
index 3a95aa432e99..f1135b5b94c3 100644
--- a/include/asm-ia64/intrinsics.h
+++ b/include/asm-ia64/intrinsics.h
@@ -153,11 +153,17 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
(__typeof__(old)) _r_; \
})
-#define cmpxchg_acq(ptr,o,n) ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
-#define cmpxchg_rel(ptr,o,n) ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_acq(ptr, o, n) \
+ ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_rel(ptr, o, n) \
+ ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
/* for compatibility with other platforms: */
-#define cmpxchg(ptr,o,n) cmpxchg_acq(ptr,o,n)
+#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+
+#define cmpxchg_local cmpxchg
+#define cmpxchg64_local cmpxchg64
#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
diff --git a/include/asm-ia64/mca.h b/include/asm-ia64/mca.h
index 823553bf12e6..f1663aa94a52 100644
--- a/include/asm-ia64/mca.h
+++ b/include/asm-ia64/mca.h
@@ -3,9 +3,9 @@
* Purpose: Machine check handling specific defines
*
* Copyright (C) 1999, 2004 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
- * Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
- * Copyright (C) Russ Anderson (rja@sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
+ * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
+ * Copyright (C) Russ Anderson <rja@sgi.com>
*/
#ifndef _ASM_IA64_MCA_H
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
index 76203f9a8718..dd2a5b134390 100644
--- a/include/asm-ia64/mca_asm.h
+++ b/include/asm-ia64/mca_asm.h
@@ -1,8 +1,9 @@
/*
* File: mca_asm.h
+ * Purpose: Machine check handling specific defines
*
* Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
+ * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
* Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
* Copyright (C) 2000 Hewlett-Packard Co.
* Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h
index d6345464a2b3..8a8aa3fd7cd4 100644
--- a/include/asm-ia64/page.h
+++ b/include/asm-ia64/page.h
@@ -7,8 +7,6 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
-# ifdef __KERNEL__
-
#include <asm/intrinsics.h>
#include <asm/types.h>
@@ -227,5 +225,4 @@ get_order (unsigned long size)
(((current->personality & READ_IMPLIES_EXEC) != 0) \
? VM_EXEC : 0))
-# endif /* __KERNEL__ */
#endif /* _ASM_IA64_PAGE_H */
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 67552cad5173..556d988123ac 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -27,7 +27,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pgd_free(pgd_t * pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
quicklist_free(0, NULL, pgd);
}
@@ -44,11 +44,11 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pud_free(pud_t * pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
quicklist_free(0, NULL, pud);
}
-#define __pud_free_tlb(tlb, pud) pud_free(pud)
+#define __pud_free_tlb(tlb, pud) pud_free((tlb)->mm, pud)
#endif /* CONFIG_PGTABLE_4 */
static inline void
@@ -62,12 +62,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pmd_free(pmd_t * pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
quicklist_free(0, NULL, pmd);
}
-#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
+#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
@@ -94,12 +94,12 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
quicklist_free_page(0, NULL, pte);
}
-static inline void pte_free_kernel(pte_t * pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
quicklist_free(0, NULL, pte);
}
@@ -109,6 +109,6 @@ static inline void check_pgt_cache(void)
quicklist_trim(0, NULL, 25, 16);
}
-#define __pte_free_tlb(tlb, pte) pte_free(pte)
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#endif /* _ASM_IA64_PGALLOC_H */
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index be3b0ae43270..741f7ecb986a 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -31,7 +31,8 @@
* each (assuming 8KB page size), for a total of 8TB of user virtual
* address space.
*/
-#define TASK_SIZE (current->thread.task_size)
+#define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)
+#define TASK_SIZE TASK_SIZE_OF(current)
/*
* This decides where the kernel will search for a free chunk of vm
@@ -472,7 +473,7 @@ ia64_set_psr (__u64 psr)
{
ia64_stop();
ia64_setreg(_IA64_REG_PSR_L, psr);
- ia64_srlz_d();
+ ia64_srlz_i();
}
/*
diff --git a/include/asm-ia64/sal.h b/include/asm-ia64/sal.h
index 1f5412d6f9bb..2251118894ae 100644
--- a/include/asm-ia64/sal.h
+++ b/include/asm-ia64/sal.h
@@ -649,17 +649,6 @@ typedef struct err_rec {
* Now define a couple of inline functions for improved type checking
* and convenience.
*/
-static inline long
-ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
- unsigned long *drift_info)
-{
- struct ia64_sal_retval isrv;
-
- SAL_CALL(isrv, SAL_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
- *ticks_per_second = isrv.v0;
- *drift_info = isrv.v1;
- return isrv.status;
-}
extern s64 ia64_sal_cache_flush (u64 cache_type);
extern void __init check_sal_cache_flush (void);
@@ -841,6 +830,9 @@ extern int ia64_sal_oemcall_nolock(struct ia64_sal_retval *, u64, u64, u64,
u64, u64, u64, u64, u64);
extern int ia64_sal_oemcall_reentrant(struct ia64_sal_retval *, u64, u64, u64,
u64, u64, u64, u64, u64);
+extern long
+ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
+ unsigned long *drift_info);
#ifdef CONFIG_HOTPLUG_CPU
/*
* System Abstraction Layer Specification
diff --git a/include/asm-ia64/user.h b/include/asm-ia64/user.h
index 78e5a20140aa..8b9821110348 100644
--- a/include/asm-ia64/user.h
+++ b/include/asm-ia64/user.h
@@ -44,7 +44,7 @@ struct user {
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
- struct regs * u_ar0; /* help gdb find registers */
+ unsigned long u_ar0; /* help gdb find registers */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
diff --git a/include/asm-m32r/delay.h b/include/asm-m32r/delay.h
index 164448d23850..9dd9e999ea69 100644
--- a/include/asm-m32r/delay.h
+++ b/include/asm-m32r/delay.h
@@ -12,7 +12,7 @@ extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
#define udelay(n) (__builtin_constant_p(n) ? \
diff --git a/include/asm-m32r/elf.h b/include/asm-m32r/elf.h
index bbee8b25d175..67bcd77494a5 100644
--- a/include/asm-m32r/elf.h
+++ b/include/asm-m32r/elf.h
@@ -129,8 +129,6 @@ typedef elf_fpreg_t elf_fpregset_t;
intent than poking at uname or /proc/cpuinfo. */
#define ELF_PLATFORM (NULL)
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX)
-#endif
#endif /* _ASM_M32R__ELF_H */
diff --git a/include/asm-m32r/irq.h b/include/asm-m32r/irq.h
index 2f93f4743add..242028b4d86a 100644
--- a/include/asm-m32r/irq.h
+++ b/include/asm-m32r/irq.h
@@ -3,7 +3,7 @@
#define _ASM_M32R_IRQ_H
-#if defined(CONFIG_PLAT_M32700UT_Alpha) || defined(CONFIG_PLAT_USRV)
+#if defined(CONFIG_PLAT_USRV)
/*
* IRQ definitions for M32700UT
* M32700 Chip: 64 interrupts
diff --git a/include/asm-m32r/local.h b/include/asm-m32r/local.h
index def29d095740..22256d138630 100644
--- a/include/asm-m32r/local.h
+++ b/include/asm-m32r/local.h
@@ -1,6 +1,366 @@
#ifndef __M32R_LOCAL_H
#define __M32R_LOCAL_H
-#include <asm-generic/local.h>
+/*
+ * linux/include/asm-m32r/local.h
+ *
+ * M32R version:
+ * Copyright (C) 2001, 2002 Hitoshi Yamamoto
+ * Copyright (C) 2004 Hirokazu Takata <takata at linux-m32r.org>
+ * Copyright (C) 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ */
+
+#include <linux/percpu.h>
+#include <asm/assembler.h>
+#include <asm/system.h>
+#include <asm/local.h>
+
+/*
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
+ */
+
+/*
+ * Make sure gcc doesn't try to be clever and move things around
+ * on us. We need to use _exactly_ the address the user gave us,
+ * not some alias that contains the same information.
+ */
+typedef struct { volatile int counter; } local_t;
+
+#define LOCAL_INIT(i) { (i) }
+
+/**
+ * local_read - read local variable
+ * @l: pointer of type local_t
+ *
+ * Atomically reads the value of @l.
+ */
+#define local_read(l) ((l)->counter)
+
+/**
+ * local_set - set local variable
+ * @l: pointer of type local_t
+ * @i: required value
+ *
+ * Atomically sets the value of @l to @i.
+ */
+#define local_set(l, i) (((l)->counter) = (i))
+
+/**
+ * local_add_return - add long to local variable and return it
+ * @i: long value to add
+ * @l: pointer of type local_t
+ *
+ * Atomically adds @i to @l and returns (@i + @l).
+ */
+static inline long local_add_return(long i, local_t *l)
+{
+ unsigned long flags;
+ long result;
+
+ local_irq_save(flags);
+ __asm__ __volatile__ (
+ "# local_add_return \n\t"
+ DCACHE_CLEAR("%0", "r4", "%1")
+ "ld %0, @%1; \n\t"
+ "add %0, %2; \n\t"
+ "st %0, @%1; \n\t"
+ : "=&r" (result)
+ : "r" (&l->counter), "r" (i)
+ : "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+ , "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+ );
+ local_irq_restore(flags);
+
+ return result;
+}
+
+/**
+ * local_sub_return - subtract long from local variable and return it
+ * @i: long value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l and returns (@l - @i).
+ */
+static inline long local_sub_return(long i, local_t *l)
+{
+ unsigned long flags;
+ long result;
+
+ local_irq_save(flags);
+ __asm__ __volatile__ (
+ "# local_sub_return \n\t"
+ DCACHE_CLEAR("%0", "r4", "%1")
+ "ld %0, @%1; \n\t"
+ "sub %0, %2; \n\t"
+ "st %0, @%1; \n\t"
+ : "=&r" (result)
+ : "r" (&l->counter), "r" (i)
+ : "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+ , "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+ );
+ local_irq_restore(flags);
+
+ return result;
+}
+
+/**
+ * local_add - add long to local variable
+ * @i: long value to add
+ * @l: pointer of type local_t
+ *
+ * Atomically adds @i to @l.
+ */
+#define local_add(i, l) ((void) local_add_return((i), (l)))
+
+/**
+ * local_sub - subtract long from local variable
+ * @i: long value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l.
+ */
+#define local_sub(i, l) ((void) local_sub_return((i), (l)))
+
+/**
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer of type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_sub_and_test(i, l) (local_sub_return((i), (l)) == 0)
+
+/**
+ * local_inc_return - increment local variable and return it
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1 and returns the result.
+ */
+static inline long local_inc_return(local_t *l)
+{
+ unsigned long flags;
+ long result;
+
+ local_irq_save(flags);
+ __asm__ __volatile__ (
+ "# local_inc_return \n\t"
+ DCACHE_CLEAR("%0", "r4", "%1")
+ "ld %0, @%1; \n\t"
+ "addi %0, #1; \n\t"
+ "st %0, @%1; \n\t"
+ : "=&r" (result)
+ : "r" (&l->counter)
+ : "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+ , "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+ );
+ local_irq_restore(flags);
+
+ return result;
+}
+
+/**
+ * local_dec_return - decrement local variable and return it
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1 and returns the result.
+ */
+static inline long local_dec_return(local_t *l)
+{
+ unsigned long flags;
+ long result;
+
+ local_irq_save(flags);
+ __asm__ __volatile__ (
+ "# local_dec_return \n\t"
+ DCACHE_CLEAR("%0", "r4", "%1")
+ "ld %0, @%1; \n\t"
+ "addi %0, #-1; \n\t"
+ "st %0, @%1; \n\t"
+ : "=&r" (result)
+ : "r" (&l->counter)
+ : "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+ , "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+ );
+ local_irq_restore(flags);
+
+ return result;
+}
+
+/**
+ * local_inc - increment local variable
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1.
+ */
+#define local_inc(l) ((void)local_inc_return(l))
+
+/**
+ * local_dec - decrement local variable
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1.
+ */
+#define local_dec(l) ((void)local_dec_return(l))
+
+/**
+ * local_inc_and_test - increment and test
+ * @l: pointer of type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define local_inc_and_test(l) (local_inc_return(l) == 0)
+
+/**
+ * local_dec_and_test - decrement and test
+ * @l: pointer of type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all
+ * other cases.
+ */
+#define local_dec_and_test(l) (local_dec_return(l) == 0)
+
+/**
+ * local_add_negative - add and test if negative
+ * @l: pointer of type local_t
+ * @i: integer value to add
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+#define local_add_negative(i, l) (local_add_return((i), (l)) < 0)
+
+#define local_cmpxchg(l, o, n) (cmpxchg_local(&((l)->counter), (o), (n)))
+#define local_xchg(l, n) (xchg_local(&((l)->counter), (n)))
+
+/**
+ * local_add_unless - add unless the number is a given value
+ * @l: pointer of type local_t
+ * @a: the amount to add to l...
+ * @u: ...unless l is equal to u.
+ *
+ * Atomically adds @a to @l, so long as it was not @u.
+ * Returns non-zero if @l was not @u, and zero otherwise.
+ */
+static inline int local_add_unless(local_t *l, long a, long u)
+{
+ long c, old;
+ c = local_read(l);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = local_cmpxchg((l), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
+#define local_inc_not_zero(l) local_add_unless((l), 1, 0)
+
+static inline void local_clear_mask(unsigned long mask, local_t *addr)
+{
+ unsigned long flags;
+ unsigned long tmp;
+
+ local_irq_save(flags);
+ __asm__ __volatile__ (
+ "# local_clear_mask \n\t"
+ DCACHE_CLEAR("%0", "r5", "%1")
+ "ld %0, @%1; \n\t"
+ "and %0, %2; \n\t"
+ "st %0, @%1; \n\t"
+ : "=&r" (tmp)
+ : "r" (addr), "r" (~mask)
+ : "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+ , "r5"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+ );
+ local_irq_restore(flags);
+}
+
+static inline void local_set_mask(unsigned long mask, local_t *addr)
+{
+ unsigned long flags;
+ unsigned long tmp;
+
+ local_irq_save(flags);
+ __asm__ __volatile__ (
+ "# local_set_mask \n\t"
+ DCACHE_CLEAR("%0", "r5", "%1")
+ "ld %0, @%1; \n\t"
+ "or %0, %2; \n\t"
+ "st %0, @%1; \n\t"
+ : "=&r" (tmp)
+ : "r" (addr), "r" (mask)
+ : "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+ , "r5"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+ );
+ local_irq_restore(flags);
+}
+
+/* Atomic operations are already serializing on m32r */
+#define smp_mb__before_local_dec() barrier()
+#define smp_mb__after_local_dec() barrier()
+#define smp_mb__before_local_inc() barrier()
+#define smp_mb__after_local_inc() barrier()
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable, not an address.
+ */
+
+#define __local_inc(l) ((l)->counter++)
+#define __local_dec(l) ((l)->counter--)
+#define __local_add(i, l) ((l)->counter += (i))
+#define __local_sub(i, l) ((l)->counter -= (i))
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable, not an address.
+ */
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+ still access a variable of a previous CPU in a non local way. */
+#define cpu_local_wrap_v(l) \
+ ({ local_t res__; \
+ preempt_disable(); \
+ res__ = (l); \
+ preempt_enable(); \
+ res__; })
+#define cpu_local_wrap(l) \
+ ({ preempt_disable(); \
+ l; \
+ preempt_enable(); }) \
+
+#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
+#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
+#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
+#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
+#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
+#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
+
+#define __cpu_local_inc(l) cpu_local_inc(l)
+#define __cpu_local_dec(l) cpu_local_dec(l)
+#define __cpu_local_add(i, l) cpu_local_add((i), (l))
+#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
#endif /* __M32R_LOCAL_H */
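
[Editor's note] A sketch of how these local_t primitives are meant to be used for per-CPU statistics; the counter name is invented for the example:

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, example_events) = LOCAL_INIT(0);

static void example_note_event(void)
{
        /* Safe against interrupts on this CPU; no cross-CPU atomicity. */
        cpu_local_inc(example_events);
}

static long example_read_cpu(int cpu)
{
        return local_read(&per_cpu(example_events, cpu));
}
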
diff --git a/include/asm-m32r/m32700ut/m32700ut_pld.h b/include/asm-m32r/m32700ut/m32700ut_pld.h
index d39121279a1a..57623beb44cb 100644
--- a/include/asm-m32r/m32700ut/m32700ut_pld.h
+++ b/include/asm-m32r/m32700ut/m32700ut_pld.h
@@ -13,9 +13,7 @@
* this archive for more details.
*/
-#if defined(CONFIG_PLAT_M32700UT_Alpha)
-#define PLD_PLAT_BASE 0x08c00000
-#elif defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV)
+#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_USRV)
#define PLD_PLAT_BASE 0x04c00000
#else
#error "no platform configuration"
diff --git a/include/asm-m32r/page.h b/include/asm-m32r/page.h
index 04fd183a2c58..05d43bbbf940 100644
--- a/include/asm-m32r/page.h
+++ b/include/asm-m32r/page.h
@@ -6,7 +6,6 @@
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
-#ifdef __KERNEL__
#ifndef __ASSEMBLY__
extern void clear_page(void *to);
@@ -87,5 +86,4 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
#endif /* _ASM_M32R_PAGE_H */
diff --git a/include/asm-m32r/pgalloc.h b/include/asm-m32r/pgalloc.h
index 943ba63c1ebc..e5921adfad1b 100644
--- a/include/asm-m32r/pgalloc.h
+++ b/include/asm-m32r/pgalloc.h
@@ -24,7 +24,7 @@ static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
return pgd;
}
-static __inline__ void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
@@ -46,17 +46,17 @@ static __inline__ struct page *pte_alloc_one(struct mm_struct *mm,
return pte;
}
-static __inline__ void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static __inline__ void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
-#define __pte_free_tlb(tlb, pte) pte_free((pte))
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
@@ -65,7 +65,7 @@ static __inline__ void pte_free(struct page *pte)
*/
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 2365de5c2955..70a57c8c002b 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -121,12 +121,13 @@ static inline void local_irq_disable(void)
#define nop() __asm__ __volatile__ ("nop" : : )
-#define xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+#define xchg_local(ptr, x) \
+ ((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr), \
+ sizeof(*(ptr))))
-#ifdef CONFIG_SMP
extern void __xchg_called_with_bad_pointer(void);
-#endif
#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr) \
@@ -146,7 +147,7 @@ extern void __xchg_called_with_bad_pointer(void);
#endif /* CONFIG_CHIP_M32700_TS1 */
static __always_inline unsigned long
-__xchg(unsigned long x, volatile void * ptr, int size)
+__xchg(unsigned long x, volatile void *ptr, int size)
{
unsigned long flags;
unsigned long tmp = 0;
@@ -186,9 +187,45 @@ __xchg(unsigned long x, volatile void * ptr, int size)
#endif /* CONFIG_CHIP_M32700_TS1 */
);
break;
+#endif /* CONFIG_SMP */
+ default:
+ __xchg_called_with_bad_pointer();
+ }
+
+ local_irq_restore(flags);
+
+ return (tmp);
+}
+
+static __always_inline unsigned long
+__xchg_local(unsigned long x, volatile void *ptr, int size)
+{
+ unsigned long flags;
+ unsigned long tmp = 0;
+
+ local_irq_save(flags);
+
+ switch (size) {
+ case 1:
+ __asm__ __volatile__ (
+ "ldb %0, @%2 \n\t"
+ "stb %1, @%2 \n\t"
+ : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+ break;
+ case 2:
+ __asm__ __volatile__ (
+ "ldh %0, @%2 \n\t"
+ "sth %1, @%2 \n\t"
+ : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+ break;
+ case 4:
+ __asm__ __volatile__ (
+ "ld %0, @%2 \n\t"
+ "st %1, @%2 \n\t"
+ : "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
+ break;
default:
__xchg_called_with_bad_pointer();
-#endif /* CONFIG_SMP */
}
local_irq_restore(flags);
@@ -228,6 +265,37 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
return retval;
}
+static inline unsigned long
+__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old,
+ unsigned int new)
+{
+ unsigned long flags;
+ unsigned int retval;
+
+ local_irq_save(flags);
+ __asm__ __volatile__ (
+ DCACHE_CLEAR("%0", "r4", "%1")
+ "ld %0, @%1; \n"
+ " bne %0, %2, 1f; \n"
+ "st %3, @%1; \n"
+ " bra 2f; \n"
+ " .fillinsn \n"
+ "1:"
+ "st %0, @%1; \n"
+ " .fillinsn \n"
+ "2:"
+ : "=&r" (retval)
+ : "r" (p), "r" (old), "r" (new)
+ : "cbit", "memory"
+#ifdef CONFIG_CHIP_M32700_TS1
+ , "r4"
+#endif /* CONFIG_CHIP_M32700_TS1 */
+ );
+ local_irq_restore(flags);
+
+ return retval;
+}
+
/* This function doesn't exist, so you'll get a linker error
if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
@@ -247,13 +315,34 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
return old;
}
-#define cmpxchg(ptr,o,n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_local_u32(ptr, old, new);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif /* __KERNEL__ */
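m32r gains xchg_local() and cmpxchg_local(), which are atomic only with respect to the current CPU (they rely on disabling local interrupts, not on excluding other CPUs). A hedged usage sketch, assuming flag points at per-CPU data:

  /* claim a per-CPU flag; cmpxchg_local() returns the old value */
  static inline int try_claim_local(unsigned int *flag)
  {
          return cmpxchg_local(flag, 0, 1) == 0;  /* claimed if it was 0 */
  }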
diff --git a/include/asm-m32r/unistd.h b/include/asm-m32r/unistd.h
index f467eac9ba70..cf701c933249 100644
--- a/include/asm-m32r/unistd.h
+++ b/include/asm-m32r/unistd.h
@@ -327,7 +327,7 @@
#define __NR_epoll_pwait 319
#define __NR_utimensat 320
#define __NR_signalfd 321
-#define __NR_timerfd 322
+/* #define __NR_timerfd 322 removed */
#define __NR_eventfd 323
#define __NR_fallocate 324
diff --git a/include/asm-m32r/user.h b/include/asm-m32r/user.h
index 035258d713d0..03b3c11c2aff 100644
--- a/include/asm-m32r/user.h
+++ b/include/asm-m32r/user.h
@@ -38,7 +38,7 @@ struct user {
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
- struct regs * u_ar0; /* help gdb find registers */
+ unsigned long u_ar0; /* help gdb find registers */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
diff --git a/include/asm-m68k/elf.h b/include/asm-m68k/elf.h
index eb63b85f9336..14ea42152b97 100644
--- a/include/asm-m68k/elf.h
+++ b/include/asm-m68k/elf.h
@@ -114,8 +114,6 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
#define ELF_PLATFORM (NULL)
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
-#endif
#endif
diff --git a/include/asm-m68k/macintosh.h b/include/asm-m68k/macintosh.h
index 27d11da2b479..28b0f49ee521 100644
--- a/include/asm-m68k/macintosh.h
+++ b/include/asm-m68k/macintosh.h
@@ -14,8 +14,6 @@ extern void mac_init_IRQ(void);
extern int mac_irq_pending(unsigned int);
extern void mac_identify(void);
extern void mac_report_hardware(void);
-extern void mac_debugging_penguin(int);
-extern void mac_boom(int);
/*
 * Floppy driver magic hook - probably shouldn't be here
diff --git a/include/asm-m68k/motorola_pgalloc.h b/include/asm-m68k/motorola_pgalloc.h
index 5158412cd54d..500ec9b8b189 100644
--- a/include/asm-m68k/motorola_pgalloc.h
+++ b/include/asm-m68k/motorola_pgalloc.h
@@ -22,7 +22,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
return pte;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
cache_page(pte);
free_page((unsigned long) pte);
@@ -47,7 +47,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
return page;
}
-static inline void pte_free(struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
{
cache_page(kmap(page));
kunmap(page);
@@ -67,7 +67,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
return get_pointer_table();
}
-static inline int pmd_free(pmd_t *pmd)
+static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
return free_pointer_table(pmd);
}
@@ -78,9 +78,9 @@ static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- pmd_free((pmd_t *)pgd);
+ pmd_free(mm, (pmd_t *)pgd);
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
diff --git a/include/asm-m68k/motorola_pgtable.h b/include/asm-m68k/motorola_pgtable.h
index d029b75bcf04..13135d4821d8 100644
--- a/include/asm-m68k/motorola_pgtable.h
+++ b/include/asm-m68k/motorola_pgtable.h
@@ -191,7 +191,8 @@ static inline pte_t pte_mkcache(pte_t pte)
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
/* to find an entry in a page-table-directory */
-static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
+static inline pgd_t *pgd_offset(const struct mm_struct *mm,
+ unsigned long address)
{
return mm->pgd + pgd_index(address);
}
diff --git a/include/asm-m68k/page.h b/include/asm-m68k/page.h
index 1431ea0b59e0..3f29e2a03a43 100644
--- a/include/asm-m68k/page.h
+++ b/include/asm-m68k/page.h
@@ -1,9 +1,6 @@
#ifndef _M68K_PAGE_H
#define _M68K_PAGE_H
-
-#ifdef __KERNEL__
-
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
@@ -230,6 +227,4 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
-
#endif /* _M68K_PAGE_H */
diff --git a/include/asm-m68k/pgtable.h b/include/asm-m68k/pgtable.h
index 778a4c538eb2..0b604f0f192d 100644
--- a/include/asm-m68k/pgtable.h
+++ b/include/asm-m68k/pgtable.h
@@ -107,8 +107,6 @@ extern void *empty_zero_page;
/* 64-bit machines, beware! SRB. */
#define SIZEOF_PTR_LOG2 2
-#define mm_end_of_chunk(addr, len) 0
-
extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);
/*
diff --git a/include/asm-m68k/sun3_pgalloc.h b/include/asm-m68k/sun3_pgalloc.h
index fd8241117649..a5a91e72714b 100644
--- a/include/asm-m68k/sun3_pgalloc.h
+++ b/include/asm-m68k/sun3_pgalloc.h
@@ -21,12 +21,12 @@ extern const char bad_pmd_string[];
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
-static inline void pte_free_kernel(pte_t * pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long) pte);
}
-static inline void pte_free(struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
{
__free_page(page);
}
@@ -72,10 +72,10 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
-static inline void pgd_free(pgd_t * pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long) pgd);
}
diff --git a/include/asm-m68k/system.h b/include/asm-m68k/system.h
index caa9b1663e45..dbb6515ffd5b 100644
--- a/include/asm-m68k/system.h
+++ b/include/asm-m68k/system.h
@@ -154,6 +154,10 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
}
#endif
+#include <asm-generic/cmpxchg-local.h>
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
@@ -185,9 +189,26 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
return old;
}
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#else
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+
+#ifndef CONFIG_SMP
+#include <asm-generic/cmpxchg.h>
+#endif
+
#endif
#define arch_align_stack(x) (x)
diff --git a/include/asm-m68k/user.h b/include/asm-m68k/user.h
index 8c56ccab4849..f1f478d6e050 100644
--- a/include/asm-m68k/user.h
+++ b/include/asm-m68k/user.h
@@ -72,8 +72,7 @@ struct user{
esp register. */
long int signal; /* Signal that caused the core dump. */
int reserved; /* No longer used */
- struct user_regs_struct *u_ar0;
- /* Used by gdb to help find the values for */
+ unsigned long u_ar0; /* Used by gdb to help find the values for */
/* the registers. */
struct user_m68kfp_struct* u_fpstate; /* Math Co-processor pointer. */
unsigned long magic; /* To uniquely identify a core file */
diff --git a/include/asm-m68knommu/elf.h b/include/asm-m68knommu/elf.h
index 40b1ed6827db..27f0ec70fba8 100644
--- a/include/asm-m68knommu/elf.h
+++ b/include/asm-m68knommu/elf.h
@@ -105,8 +105,6 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
#define ELF_PLATFORM (NULL)
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
-#endif
#endif
diff --git a/include/asm-m68knommu/io.h b/include/asm-m68knommu/io.h
index 653d9b2d7ddf..6adef1ee2082 100644
--- a/include/asm-m68knommu/io.h
+++ b/include/asm-m68knommu/io.h
@@ -172,8 +172,6 @@ extern void iounmap(void *addr);
/*
* Macros used for converting between virtual and physical mappings.
*/
-#define mm_ptov(vaddr) ((void *) (vaddr))
-#define mm_vtop(vaddr) ((unsigned long) (vaddr))
#define phys_to_virt(vaddr) ((void *) (vaddr))
#define virt_to_phys(vaddr) ((unsigned long) (vaddr))
diff --git a/include/asm-m68knommu/mcfne.h b/include/asm-m68knommu/mcfne.h
index c920ccdb61fe..431f63aadd0e 100644
--- a/include/asm-m68knommu/mcfne.h
+++ b/include/asm-m68knommu/mcfne.h
@@ -60,17 +60,6 @@
#define NE2000_BYTE volatile unsigned char
#endif
-#if defined(CONFIG_CFV240)
-#define NE2000_ADDR 0x40010000
-#define NE2000_ADDR1 0x40010001
-#define NE2000_ODDOFFSET 0x00000000
-#define NE2000_IRQ 1
-#define NE2000_IRQ_VECTOR 0x19
-#define NE2000_IRQ_PRIORITY 2
-#define NE2000_IRQ_LEVEL 1
-#define NE2000_BYTE volatile unsigned char
-#endif
-
#if defined(CONFIG_M5307C3)
#define NE2000_ADDR 0x40000300
#define NE2000_ODDOFFSET 0x00010000
@@ -173,13 +162,8 @@ void ne2000_outsw(unsigned int addr, void *vbuf, unsigned long len);
* On most NE2000 implementations on ColdFire boards the chip is
* mapped in kinda funny, due to its ISA heritage.
*/
-#ifdef CONFIG_CFV240
-#define NE2000_PTR(addr) (NE2000_ADDR + ((addr & 0x3f) << 1) + 1)
-#define NE2000_DATA_PTR(addr) (NE2000_ADDR + ((addr & 0x3f) << 1))
-#else
#define NE2000_PTR(addr) ((addr&0x1)?(NE2000_ODDOFFSET+addr-1):(addr))
#define NE2000_DATA_PTR(addr) (addr)
-#endif
void ne2000_outb(unsigned int val, unsigned int addr)
@@ -285,17 +269,6 @@ void ne2000_irqsetup(int irq)
}
#endif
-#if defined(CONFIG_CFV240)
-void ne2000_irqsetup(int irq)
-{
- volatile unsigned char *icrp;
-
- icrp = (volatile unsigned char *) (MCF_MBAR + MCFSIM_ICR1);
- *icrp = MCFSIM_ICR_LEVEL1 | MCFSIM_ICR_PRI2 | MCFSIM_ICR_AUTOVEC;
- mcf_setimr(mcf_getimr() & ~MCFSIM_IMR_EINT1);
-}
-#endif
-
#if defined(CONFIG_M5206e) && defined(CONFIG_NETtel)
void ne2000_irqsetup(int irq)
{
diff --git a/include/asm-m68knommu/mcfsim.h b/include/asm-m68knommu/mcfsim.h
index 1074ae717f74..da3f2ceff3a4 100644
--- a/include/asm-m68knommu/mcfsim.h
+++ b/include/asm-m68knommu/mcfsim.h
@@ -17,9 +17,7 @@
* Include 5204, 5206/e, 5235, 5249, 5270/5271, 5272, 5280/5282,
* 5307 or 5407 specific addresses.
*/
-#if defined(CONFIG_M5204)
-#include <asm/m5204sim.h>
-#elif defined(CONFIG_M5206) || defined(CONFIG_M5206e)
+#if defined(CONFIG_M5206) || defined(CONFIG_M5206e)
#include <asm/m5206sim.h>
#elif defined(CONFIG_M520x)
#include <asm/m520xsim.h>
diff --git a/include/asm-m68knommu/mcftimer.h b/include/asm-m68knommu/mcftimer.h
index 6f4d796e03db..0f90f6d2227a 100644
--- a/include/asm-m68knommu/mcftimer.h
+++ b/include/asm-m68knommu/mcftimer.h
@@ -16,7 +16,7 @@
/*
* Get address specific defines for this ColdFire member.
*/
-#if defined(CONFIG_M5204) || defined(CONFIG_M5206) || defined(CONFIG_M5206e)
+#if defined(CONFIG_M5206) || defined(CONFIG_M5206e)
#define MCFTIMER_BASE1 0x100 /* Base address of TIMER1 */
#define MCFTIMER_BASE2 0x120 /* Base address of TIMER2 */
#elif defined(CONFIG_M5272)
diff --git a/include/asm-m68knommu/mcfuart.h b/include/asm-m68knommu/mcfuart.h
index 8a7a67703ac3..ef2293873612 100644
--- a/include/asm-m68knommu/mcfuart.h
+++ b/include/asm-m68knommu/mcfuart.h
@@ -19,7 +19,7 @@
#if defined(CONFIG_M5272)
#define MCFUART_BASE1 0x100 /* Base address of UART1 */
#define MCFUART_BASE2 0x140 /* Base address of UART2 */
-#elif defined(CONFIG_M5204) || defined(CONFIG_M5206) || defined(CONFIG_M5206e)
+#elif defined(CONFIG_M5206) || defined(CONFIG_M5206e)
#if defined(CONFIG_NETtel)
#define MCFUART_BASE1 0x180 /* Base address of UART1 */
#define MCFUART_BASE2 0x140 /* Base address of UART2 */
diff --git a/include/asm-m68knommu/page.h b/include/asm-m68knommu/page.h
index 9efa0a9851b1..6af480c7f291 100644
--- a/include/asm-m68knommu/page.h
+++ b/include/asm-m68knommu/page.h
@@ -1,8 +1,6 @@
#ifndef _M68KNOMMU_PAGE_H
#define _M68KNOMMU_PAGE_H
-#ifdef __KERNEL__
-
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT (12)
@@ -78,6 +76,4 @@ extern unsigned long memory_end;
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
-
#endif /* _M68KNOMMU_PAGE_H */
diff --git a/include/asm-m68knommu/system.h b/include/asm-m68knommu/system.h
index 15b4c7d45c94..039ab3f81732 100644
--- a/include/asm-m68knommu/system.h
+++ b/include/asm-m68knommu/system.h
@@ -186,42 +186,19 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
}
#endif
+#include <asm-generic/cmpxchg-local.h>
+
/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
*/
-#define __HAVE_ARCH_CMPXCHG 1
-
-static __inline__ unsigned long
-cmpxchg(volatile int *p, int old, int new)
-{
- unsigned long flags;
- int prev;
-
- local_irq_save(flags);
- if ((prev = *p) == old)
- *p = new;
- local_irq_restore(flags);
- return(prev);
-}
-
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#ifdef CONFIG_M68332
-#define HARD_RESET_NOW() ({ \
- local_irq_disable(); \
- asm(" \
- movew #0x0000, 0xfffa6a; \
- reset; \
- /*movew #0x1557, 0xfffa44;*/ \
- /*movew #0x0155, 0xfffa46;*/ \
- moveal #0, %a0; \
- movec %a0, %vbr; \
- moveal 0, %sp; \
- moveal 4, %a0; \
- jmp (%a0); \
- "); \
-})
+#ifndef CONFIG_SMP
+#include <asm-generic/cmpxchg.h>
#endif
#if defined( CONFIG_M68328 ) || defined( CONFIG_M68EZ328 ) || \
diff --git a/include/asm-mips/cmpxchg.h b/include/asm-mips/cmpxchg.h
index a5ec0e5dc5b8..4a812c3ceb90 100644
--- a/include/asm-mips/cmpxchg.h
+++ b/include/asm-mips/cmpxchg.h
@@ -104,4 +104,21 @@ extern void __cmpxchg_called_with_bad_pointer(void);
#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_llsc_mb())
#define cmpxchg_local(ptr, old, new) __cmpxchg(ptr, old, new, )
+#define cmpxchg64(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+ })
+
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+#else
+#include <asm-generic/cmpxchg-local.h>
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
#endif /* __ASM_CMPXCHG_H */
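cmpxchg64() and cmpxchg64_local() only accept 8-byte objects; the BUILD_BUG_ON() turns a wrong-sized argument into a compile-time error instead of a silent truncation. Illustrative sketch (seq and prev are assumed u64 variables):

  u64 old = cmpxchg64(&seq, prev, prev + 1);      /* sizeof(seq) == 8: builds */
  /* cmpxchg64(&some_u32, o, n) would trip the BUILD_BUG_ON at compile time */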
diff --git a/include/asm-mips/elf.h b/include/asm-mips/elf.h
index 766f91ad5cd3..f69f7acba637 100644
--- a/include/asm-mips/elf.h
+++ b/include/asm-mips/elf.h
@@ -239,8 +239,6 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#endif /* !defined(ELF_ARCH) */
-#ifdef __KERNEL__
-
struct mips_abi;
extern struct mips_abi mips_abi;
@@ -328,8 +326,6 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) \
dump_task_fpu(tsk, elf_fpregs)
-#endif /* __KERNEL__ */
-
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index d2ea983bec06..635aa44d2290 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -9,9 +9,6 @@
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H
-
-#ifdef __KERNEL__
-
#include <spaces.h>
/*
@@ -190,6 +187,4 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
-#endif /* defined (__KERNEL__) */
-
#endif /* _ASM_PAGE_H */
diff --git a/include/asm-mips/pgalloc.h b/include/asm-mips/pgalloc.h
index 81b72122207a..c4efeced8396 100644
--- a/include/asm-mips/pgalloc.h
+++ b/include/asm-mips/pgalloc.h
@@ -58,7 +58,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return ret;
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_pages((unsigned long)pgd, PGD_ORDER);
}
@@ -85,12 +85,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return pte;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_pages((unsigned long)pte, PTE_ORDER);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_pages(pte, PTE_ORDER);
}
@@ -103,7 +103,7 @@ static inline void pte_free(struct page *pte)
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
#endif
@@ -120,12 +120,12 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
return pmd;
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
free_pages((unsigned long)pmd, PMD_ORDER);
}
-#define __pmd_free_tlb(tlb, x) pmd_free(x)
+#define __pmd_free_tlb(tlb, x) pmd_free((tlb)->mm, x)
#endif
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index 83bc94534084..36f42de59409 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -65,6 +65,8 @@ extern unsigned int vced_count, vcei_count;
#define TASK_UNMAPPED_BASE \
(test_thread_flag(TIF_32BIT_ADDR) ? \
PAGE_ALIGN(TASK_SIZE32 / 3) : PAGE_ALIGN(TASK_SIZE / 3))
+#define TASK_SIZE_OF(tsk) \
+ (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE)
#endif
#define NUM_FPU_REGS 32
diff --git a/include/asm-mips/user.h b/include/asm-mips/user.h
index 61f2a093b91b..afa83a4c1888 100644
--- a/include/asm-mips/user.h
+++ b/include/asm-mips/user.h
@@ -8,8 +8,6 @@
#ifndef _ASM_USER_H
#define _ASM_USER_H
-#ifdef __KERNEL__
-
#include <asm/page.h>
#include <asm/reg.h>
@@ -46,7 +44,7 @@ struct user {
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
- struct regs * u_ar0; /* help gdb find registers */
+ unsigned long u_ar0; /* help gdb find registers */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
@@ -57,6 +55,4 @@ struct user {
#define HOST_DATA_START_ADDR (u.start_data)
#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
-#endif /* __KERNEL__ */
-
#endif /* _ASM_USER_H */
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index e894ee35074b..57fcc4a5ebb4 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -122,6 +122,39 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
(unsigned long)_n_, sizeof(*(ptr))); \
})
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new_, int size)
+{
+ switch (size) {
+#ifdef CONFIG_64BIT
+ case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+ case 4: return __cmpxchg_u32(ptr, old, new_);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new_, size);
+ }
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+#else
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
*
diff --git a/include/asm-parisc/elf.h b/include/asm-parisc/elf.h
index 8e7946a141de..ce0c0d844c7d 100644
--- a/include/asm-parisc/elf.h
+++ b/include/asm-parisc/elf.h
@@ -237,14 +237,11 @@ typedef unsigned long elf_greg_t;
#define ELF_PLATFORM ("PARISC\0" /*+((boot_cpu_data.x86-3)*5) */)
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) \
current->personality = PER_LINUX; \
current->thread.map_base = DEFAULT_MAP_BASE; \
current->thread.task_size = DEFAULT_TASK_SIZE \
-#endif
-
/*
* Fill in general registers in a core dump. This saves pretty
* much the same registers as hp-ux, although in a different order.
diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h
index b59a1504fc7a..b08d9151c71e 100644
--- a/include/asm-parisc/page.h
+++ b/include/asm-parisc/page.h
@@ -1,8 +1,6 @@
#ifndef _PARISC_PAGE_H
#define _PARISC_PAGE_H
-#ifdef __KERNEL__
-
#include <linux/const.h>
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
@@ -175,6 +173,4 @@ extern int npmem_ranges;
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
-
#endif /* _PARISC_PAGE_H */
diff --git a/include/asm-parisc/pgalloc.h b/include/asm-parisc/pgalloc.h
index 1af1a41e0723..aab66f1bea14 100644
--- a/include/asm-parisc/pgalloc.h
+++ b/include/asm-parisc/pgalloc.h
@@ -43,7 +43,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return actual_pgd;
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_64BIT
pgd -= PTRS_PER_PGD;
@@ -70,7 +70,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
return pmd;
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
#ifdef CONFIG_64BIT
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
@@ -91,7 +91,7 @@ static inline void pmd_free(pmd_t *pmd)
*/
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#endif
@@ -130,12 +130,12 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
return pte;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-#define pte_free(page) pte_free_kernel(page_address(page))
+#define pte_free(mm, page) pte_free_kernel(page_address(page))
#define check_pgt_cache() do { } while (0)
diff --git a/include/asm-parisc/processor.h b/include/asm-parisc/processor.h
index 6b294fb07a23..3bb06e898fde 100644
--- a/include/asm-parisc/processor.h
+++ b/include/asm-parisc/processor.h
@@ -32,7 +32,8 @@
#endif
#define current_text_addr() ({ void *pc; current_ia(pc); pc; })
-#define TASK_SIZE (current->thread.task_size)
+#define TASK_SIZE_OF(tsk) ((tsk)->thread.task_size)
+#define TASK_SIZE TASK_SIZE_OF(current)
#define TASK_UNMAPPED_BASE (current->thread.map_base)
#define DEFAULT_TASK_SIZE32 (0xFFF00000UL)
diff --git a/include/asm-parisc/tlb.h b/include/asm-parisc/tlb.h
index 33107a248e1f..383b1db310ee 100644
--- a/include/asm-parisc/tlb.h
+++ b/include/asm-parisc/tlb.h
@@ -21,7 +21,7 @@ do { if (!(tlb)->fullmm) \
#include <asm-generic/tlb.h>
-#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
-#define __pte_free_tlb(tlb, pte) pte_free(pte)
+#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#endif
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 528ef183c221..1e79673b7316 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -46,7 +46,7 @@ enum powerpc_oprofile_type {
PPC_OPROFILE_RS64 = 1,
PPC_OPROFILE_POWER4 = 2,
PPC_OPROFILE_G4 = 3,
- PPC_OPROFILE_BOOKE = 4,
+ PPC_OPROFILE_FSL_EMB = 4,
PPC_OPROFILE_CELL = 5,
PPC_OPROFILE_PA6T = 6,
};
diff --git a/include/asm-powerpc/cputime.h b/include/asm-powerpc/cputime.h
index 310804485208..f42e623030ee 100644
--- a/include/asm-powerpc/cputime.h
+++ b/include/asm-powerpc/cputime.h
@@ -52,12 +52,26 @@ typedef u64 cputime64_t;
* Convert cputime <-> jiffies
*/
extern u64 __cputime_jiffies_factor;
+DECLARE_PER_CPU(unsigned long, cputime_last_delta);
+DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta);
static inline unsigned long cputime_to_jiffies(const cputime_t ct)
{
return mulhdu(ct, __cputime_jiffies_factor);
}
+/* Estimate the scaled cputime by scaling the real cputime based on
+ * the last scaled-to-real ratio */
+static inline cputime_t cputime_to_scaled(const cputime_t ct)
+{
+ if (cpu_has_feature(CPU_FTR_SPURR) &&
+ per_cpu(cputime_last_delta, smp_processor_id()))
+ return ct *
+ per_cpu(cputime_scaled_last_delta, smp_processor_id())/
+ per_cpu(cputime_last_delta, smp_processor_id());
+ return ct;
+}
+
static inline cputime_t jiffies_to_cputime(const unsigned long jif)
{
cputime_t ct;
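cputime_to_scaled() estimates SPURR-scaled time from raw time using the most recent scaled/raw delta ratio recorded in the per-CPU variables above. The arithmetic reduces to the sketch below (an illustration of the formula, not kernel source):

  /* estimate: scaled ~ raw * (last scaled delta / last raw delta) */
  static unsigned long scaled_estimate(unsigned long raw,
                                       unsigned long scaled_delta,
                                       unsigned long raw_delta)
  {
          return raw_delta ? raw * scaled_delta / raw_delta : raw;
  }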
diff --git a/include/asm-powerpc/dcr-native.h b/include/asm-powerpc/dcr-native.h
index af5fb31af559..be6c879e8760 100644
--- a/include/asm-powerpc/dcr-native.h
+++ b/include/asm-powerpc/dcr-native.h
@@ -59,25 +59,36 @@ do { \
/* R/W of indirect DCRs make use of standard naming conventions for DCRs */
extern spinlock_t dcr_ind_lock;
-#define mfdcri(base, reg) \
-({ \
- unsigned long flags; \
- unsigned int val; \
- spin_lock_irqsave(&dcr_ind_lock, flags); \
- mtdcr(DCRN_ ## base ## _CONFIG_ADDR, reg); \
- val = mfdcr(DCRN_ ## base ## _CONFIG_DATA); \
- spin_unlock_irqrestore(&dcr_ind_lock, flags); \
- val; \
-})
+static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
+{
+ unsigned long flags;
+ unsigned int val;
-#define mtdcri(base, reg, data) \
-do { \
- unsigned long flags; \
- spin_lock_irqsave(&dcr_ind_lock, flags); \
- mtdcr(DCRN_ ## base ## _CONFIG_ADDR, reg); \
- mtdcr(DCRN_ ## base ## _CONFIG_DATA, data); \
- spin_unlock_irqrestore(&dcr_ind_lock, flags); \
-} while (0)
+ spin_lock_irqsave(&dcr_ind_lock, flags);
+ __mtdcr(base_addr, reg);
+ val = __mfdcr(base_data);
+ spin_unlock_irqrestore(&dcr_ind_lock, flags);
+ return val;
+}
+
+static inline void __mtdcri(int base_addr, int base_data, int reg,
+ unsigned val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcr_ind_lock, flags);
+ __mtdcr(base_addr, reg);
+ __mtdcr(base_data, val);
+ spin_unlock_irqrestore(&dcr_ind_lock, flags);
+}
+
+#define mfdcri(base, reg) __mfdcri(DCRN_ ## base ## _CONFIG_ADDR, \
+ DCRN_ ## base ## _CONFIG_DATA, \
+ reg)
+
+#define mtdcri(base, reg, data) __mtdcri(DCRN_ ## base ## _CONFIG_ADDR, \
+ DCRN_ ## base ## _CONFIG_DATA, \
+ reg, data)
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
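mfdcri()/mtdcri() keep their macro interface, but the bodies become locked inline helpers instead of open-coded statement expressions. Call sites do not change; a hedged example (the SDRAM0 base name is an assumption for illustration, not part of this patch):

  u32 cfg = mfdcri(SDRAM0, 0x20);         /* indirect read under dcr_ind_lock */
  mtdcri(SDRAM0, 0x20, cfg | 0x1);        /* indirect write, same locking */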
diff --git a/include/asm-powerpc/dma.h b/include/asm-powerpc/dma.h
index 7a4374bdbef4..a7e06e25c708 100644
--- a/include/asm-powerpc/dma.h
+++ b/include/asm-powerpc/dma.h
@@ -93,16 +93,6 @@
*
*/
-/* see prep_setup_arch() for detailed informations */
-#if defined(CONFIG_SOUND_CS4232) && defined(CONFIG_PPC_PREP)
-extern long ppc_cs4232_dma, ppc_cs4232_dma2;
-#define SND_DMA1 ppc_cs4232_dma
-#define SND_DMA2 ppc_cs4232_dma2
-#else
-#define SND_DMA1 -1
-#define SND_DMA2 -1
-#endif
-
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
@@ -269,24 +259,15 @@ static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
dma_outb(pagenr >> 8, DMA_HI_PAGE_3);
break;
case 5:
- if (SND_DMA1 == 5 || SND_DMA2 == 5)
- dma_outb(pagenr, DMA_LO_PAGE_5);
- else
- dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
dma_outb(pagenr >> 8, DMA_HI_PAGE_5);
break;
case 6:
- if (SND_DMA1 == 6 || SND_DMA2 == 6)
- dma_outb(pagenr, DMA_LO_PAGE_6);
- else
- dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
dma_outb(pagenr >> 8, DMA_HI_PAGE_6);
break;
case 7:
- if (SND_DMA1 == 7 || SND_DMA2 == 7)
- dma_outb(pagenr, DMA_LO_PAGE_7);
- else
- dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
+ dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
dma_outb(pagenr >> 8, DMA_HI_PAGE_7);
break;
}
@@ -302,12 +283,6 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
((dmanr & 3) << 1) + IO_DMA1_BASE);
dma_outb((phys >> 8) & 0xff,
((dmanr & 3) << 1) + IO_DMA1_BASE);
- } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
- dma_outb(phys & 0xff,
- ((dmanr & 3) << 2) + IO_DMA2_BASE);
- dma_outb((phys >> 8) & 0xff,
- ((dmanr & 3) << 2) + IO_DMA2_BASE);
- dma_outb((dmanr & 3), DMA2_EXT_REG);
} else {
dma_outb((phys >> 1) & 0xff,
((dmanr & 3) << 2) + IO_DMA2_BASE);
@@ -334,11 +309,6 @@ static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
dma_outb((count >> 8) & 0xff,
((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
- } else if (dmanr == SND_DMA1 || dmanr == SND_DMA2) {
- dma_outb(count & 0xff,
- ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
- dma_outb((count >> 8) & 0xff,
- ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
} else {
dma_outb((count >> 1) & 0xff,
((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
@@ -368,8 +338,7 @@ static __inline__ int get_dma_residue(unsigned int dmanr)
count = 1 + dma_inb(io_port);
count += dma_inb(io_port) << 8;
- return (dmanr <= 3 || dmanr == SND_DMA1 || dmanr == SND_DMA2)
- ? count : (count << 1);
+ return (dmanr <= 3) ? count : (count << 1);
}
/* These are in kernel/dma.c: */
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 6bd07ef78ac4..9080d85cb9d0 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -165,8 +165,10 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
+#define compat_elf_check_arch(x) ((x)->e_machine == EM_PPC)
#define USE_ELF_CORE_DUMP
+#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h
index 7a3cef785abd..852e15f51a1e 100644
--- a/include/asm-powerpc/iommu.h
+++ b/include/asm-powerpc/iommu.h
@@ -79,19 +79,19 @@ extern void iommu_free_table(struct iommu_table *tbl, const char *node_name);
extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
int nid);
-extern int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
+extern int iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, unsigned long mask,
enum dma_data_direction direction);
extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction);
-extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, unsigned long mask,
- gfp_t flag, int node);
+extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
+ size_t size, dma_addr_t *dma_handle,
+ unsigned long mask, gfp_t flag, int node);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle);
-extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
- size_t size, unsigned long mask,
+extern dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
+ void *vaddr, size_t size, unsigned long mask,
enum dma_data_direction direction);
extern void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction);
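iommu_map_sg(), iommu_alloc_coherent() and iommu_map_single() now receive the struct device being mapped for, in addition to (or in place of) the table pointer. A sketch of the new call shape only, with dev, tbl, vaddr, len and mask as assumed locals:

  dma_addr_t handle = iommu_map_single(dev, tbl, vaddr, len,
                                       mask, DMA_TO_DEVICE);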
diff --git a/include/asm-powerpc/mediabay.h b/include/asm-powerpc/mediabay.h
index 9daa3252d7b6..de83fe196309 100644
--- a/include/asm-powerpc/mediabay.h
+++ b/include/asm-powerpc/mediabay.h
@@ -18,14 +18,14 @@
#define MB_NO 7 /* media bay contains nothing */
int check_media_bay(struct device_node *which_bay, int what);
-int check_media_bay_by_base(unsigned long base, int what);
/* Number of bays in the machine or 0 */
extern int media_bay_count;
-/* called by pmac-ide.c to register IDE controller for media bay */
-extern int media_bay_set_ide_infos(struct device_node* which_bay,
- unsigned long base, int irq, int index);
+int check_media_bay_by_base(unsigned long base, int what);
+/* called by IDE PMAC host driver to register IDE controller for media bay */
+int media_bay_set_ide_infos(struct device_node *which_bay, unsigned long base,
+ int irq, int index);
#endif /* __KERNEL__ */
#endif /* _PPC_MEDIABAY_H */
diff --git a/include/asm-powerpc/mpc512x.h b/include/asm-powerpc/mpc512x.h
new file mode 100644
index 000000000000..c48a1658eeac
--- /dev/null
+++ b/include/asm-powerpc/mpc512x.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author: John Rigby, <jrigby@freescale.com>, Friday Apr 13 2007
+ *
+ * Description:
+ * MPC5121 Prototypes and definitions
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __ASM_POWERPC_MPC512x_H__
+#define __ASM_POWERPC_MPC512x_H__
+
+extern unsigned long mpc512x_find_ips_freq(struct device_node *node);
+
+#endif /* __ASM_POWERPC_MPC512x_H__ */
+
diff --git a/include/asm-powerpc/mpc52xx_psc.h b/include/asm-powerpc/mpc52xx_psc.h
index bea42b95390f..710c5d36efaa 100644
--- a/include/asm-powerpc/mpc52xx_psc.h
+++ b/include/asm-powerpc/mpc52xx_psc.h
@@ -190,5 +190,53 @@ struct mpc52xx_psc_fifo {
u16 tflwfptr; /* PSC + 0x9e */
};
+#define MPC512x_PSC_FIFO_RESET_SLICE 0x80
+#define MPC512x_PSC_FIFO_ENABLE_SLICE 0x01
+#define MPC512x_PSC_FIFO_ENABLE_DMA 0x04
+
+#define MPC512x_PSC_FIFO_EMPTY 0x1
+#define MPC512x_PSC_FIFO_FULL 0x2
+#define MPC512x_PSC_FIFO_ALARM 0x4
+#define MPC512x_PSC_FIFO_URERR 0x8
+#define MPC512x_PSC_FIFO_ORERR 0x01
+#define MPC512x_PSC_FIFO_MEMERROR 0x02
+
+struct mpc512x_psc_fifo {
+ u32 reserved1[10];
+ u32 txcmd; /* PSC + 0x80 */
+ u32 txalarm; /* PSC + 0x84 */
+ u32 txsr; /* PSC + 0x88 */
+ u32 txisr; /* PSC + 0x8c */
+ u32 tximr; /* PSC + 0x90 */
+ u32 txcnt; /* PSC + 0x94 */
+ u32 txptr; /* PSC + 0x98 */
+ u32 txsz; /* PSC + 0x9c */
+ u32 reserved2[7];
+ union {
+ u8 txdata_8;
+ u16 txdata_16;
+ u32 txdata_32;
+ } txdata; /* PSC + 0xbc */
+#define txdata_8 txdata.txdata_8
+#define txdata_16 txdata.txdata_16
+#define txdata_32 txdata.txdata_32
+ u32 rxcmd; /* PSC + 0xc0 */
+ u32 rxalarm; /* PSC + 0xc4 */
+ u32 rxsr; /* PSC + 0xc8 */
+ u32 rxisr; /* PSC + 0xcc */
+ u32 rximr; /* PSC + 0xd0 */
+ u32 rxcnt; /* PSC + 0xd4 */
+ u32 rxptr; /* PSC + 0xd8 */
+ u32 rxsz; /* PSC + 0xdc */
+ u32 reserved3[7];
+ union {
+ u8 rxdata_8;
+ u16 rxdata_16;
+ u32 rxdata_32;
+ } rxdata; /* PSC + 0xfc */
+#define rxdata_8 rxdata.rxdata_8
+#define rxdata_16 rxdata.rxdata_16
+#define rxdata_32 rxdata.rxdata_32
+};
#endif /* __ASM_MPC52xx_PSC_H__ */
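The MPC5121 PSC carries its own FIFO block; the unions plus accessor #defines let a driver pick an 8-, 16- or 32-bit access at the data registers. A hedged sketch of an 8-bit transmit, assuming fifo is an ioremapped struct mpc512x_psc_fifo __iomem * and c holds the byte to send:

  out_be32(&fifo->txcmd, MPC512x_PSC_FIFO_ENABLE_SLICE); /* enable the TX slice */
  out_8(&fifo->txdata_8, c);                             /* 8-bit write at PSC + 0xbc */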
diff --git a/include/asm-powerpc/nvram.h b/include/asm-powerpc/nvram.h
index 4e7059cc6113..efde5ac82f7b 100644
--- a/include/asm-powerpc/nvram.h
+++ b/include/asm-powerpc/nvram.h
@@ -58,6 +58,9 @@ struct nvram_header {
};
#ifdef __KERNEL__
+
+#include <linux/list.h>
+
struct nvram_partition {
struct list_head partition;
struct nvram_header header;
diff --git a/include/asm-powerpc/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h
index 938fefb4c4bc..95035c602ba6 100644
--- a/include/asm-powerpc/oprofile_impl.h
+++ b/include/asm-powerpc/oprofile_impl.h
@@ -54,7 +54,7 @@ struct op_powerpc_model {
int num_counters;
};
-extern struct op_powerpc_model op_model_fsl_booke;
+extern struct op_powerpc_model op_model_fsl_emb;
extern struct op_powerpc_model op_model_rs64;
extern struct op_powerpc_model op_model_power4;
extern struct op_powerpc_model op_model_7450;
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index f6dfce025adf..748b35ab37b5 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -115,8 +115,6 @@ struct paca_struct {
u64 system_time; /* accumulated system TB ticks */
u64 startpurr; /* PURR/TB value snapshot */
u64 startspurr; /* SPURR value snapshot */
- u64 purrdelta; /* FIXME: document */
- u64 spurrdelta; /* FIXME: document */
};
extern struct paca_struct paca[];
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h
index 236a9210e5fc..61e3725bbd37 100644
--- a/include/asm-powerpc/page.h
+++ b/include/asm-powerpc/page.h
@@ -10,7 +10,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifdef __KERNEL__
#include <asm/asm-compat.h>
#include <asm/kdump.h>
@@ -194,6 +193,4 @@ struct vm_area_struct;
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_POWERPC_PAGE_H */
diff --git a/include/asm-powerpc/page_32.h b/include/asm-powerpc/page_32.h
index 17110aff26e7..65ea19eec956 100644
--- a/include/asm-powerpc/page_32.h
+++ b/include/asm-powerpc/page_32.h
@@ -1,6 +1,5 @@
#ifndef _ASM_POWERPC_PAGE_32_H
#define _ASM_POWERPC_PAGE_32_H
-#ifdef __KERNEL__
#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
@@ -32,5 +31,4 @@ extern void copy_page(void *to, void *from);
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PAGE_32_H */
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 4ee82c61e4d7..67834eae5702 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -1,6 +1,5 @@
#ifndef _ASM_POWERPC_PAGE_64_H
#define _ASM_POWERPC_PAGE_64_H
-#ifdef __KERNEL__
/*
* Copyright (C) 2001 PPC64 Team, IBM Corp
@@ -183,5 +182,4 @@ do { \
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PAGE_64_H */
diff --git a/include/asm-powerpc/pgalloc-32.h b/include/asm-powerpc/pgalloc-32.h
index e1307432163c..c162a4c37b39 100644
--- a/include/asm-powerpc/pgalloc-32.h
+++ b/include/asm-powerpc/pgalloc-32.h
@@ -6,14 +6,14 @@
extern void __bad_pte(pmd_t *pmd);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
/*
* We don't have any real pmd's, and this code never triggers because
* the pgd will always be present..
*/
/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
/* #define pgd_populate(mm, pmd, pte) BUG() */
@@ -31,10 +31,10 @@ extern void pgd_free(pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-extern void pte_free_kernel(pte_t *pte);
-extern void pte_free(struct page *pte);
+extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
+extern void pte_free(struct mm_struct *mm, struct page *pte);
-#define __pte_free_tlb(tlb, pte) pte_free((pte))
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#define check_pgt_cache() do { } while (0)
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
index 43214c8085b7..5afae8593931 100644
--- a/include/asm-powerpc/pgalloc-64.h
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -29,7 +29,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
subpage_prot_free(pgd);
kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
@@ -45,7 +45,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
GFP_KERNEL|__GFP_REPEAT);
}
-static inline void pud_free(pud_t *pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
}
@@ -81,7 +81,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
GFP_KERNEL|__GFP_REPEAT);
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
}
@@ -99,12 +99,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return pte ? virt_to_page(pte) : NULL;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *ptepage)
+static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
{
__free_page(ptepage);
}
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index dba7c948189d..1f4765d6546f 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -99,8 +99,9 @@ extern struct task_struct *last_task_used_spe;
*/
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
TASK_SIZE_USER32 : TASK_SIZE_USER64)
+#define TASK_SIZE TASK_SIZE_OF(current)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
diff --git a/include/asm-powerpc/ps3av.h b/include/asm-powerpc/ps3av.h
index 967930b82ed3..fda98715cd35 100644
--- a/include/asm-powerpc/ps3av.h
+++ b/include/asm-powerpc/ps3av.h
@@ -310,19 +310,25 @@
#define PS3AV_MONITOR_TYPE_HDMI 1 /* HDMI */
#define PS3AV_MONITOR_TYPE_DVI 2 /* DVI */
-#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_60 2 /* 480p */
-#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_60 1 /* 480i */
-#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_50 7 /* 576p */
-#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_50 6 /* 576i */
-
-#define PS3AV_REGION_60 0x01
-#define PS3AV_REGION_50 0x02
-#define PS3AV_REGION_RGB 0x10
-
-#define get_status(buf) (((__u32 *)buf)[2])
-#define PS3AV_HDR_SIZE 4 /* version + size */
/* for video mode */
+enum ps3av_mode_num {
+ PS3AV_MODE_AUTO = 0,
+ PS3AV_MODE_480I = 1,
+ PS3AV_MODE_480P = 2,
+ PS3AV_MODE_720P60 = 3,
+ PS3AV_MODE_1080I60 = 4,
+ PS3AV_MODE_1080P60 = 5,
+ PS3AV_MODE_576I = 6,
+ PS3AV_MODE_576P = 7,
+ PS3AV_MODE_720P50 = 8,
+ PS3AV_MODE_1080I50 = 9,
+ PS3AV_MODE_1080P50 = 10,
+ PS3AV_MODE_WXGA = 11,
+ PS3AV_MODE_SXGA = 12,
+ PS3AV_MODE_WUXGA = 13,
+};
+
#define PS3AV_MODE_MASK 0x000F
#define PS3AV_MODE_HDCP_OFF 0x1000 /* Retail PS3 product doesn't support this */
#define PS3AV_MODE_DITHER 0x0800
@@ -333,6 +339,19 @@
#define PS3AV_MODE_RGB 0x0020
+#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_60 PS3AV_MODE_480P
+#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_60 PS3AV_MODE_480I
+#define PS3AV_DEFAULT_HDMI_MODE_ID_REG_50 PS3AV_MODE_576P
+#define PS3AV_DEFAULT_AVMULTI_MODE_ID_REG_50 PS3AV_MODE_576I
+
+#define PS3AV_REGION_60 0x01
+#define PS3AV_REGION_50 0x02
+#define PS3AV_REGION_RGB 0x10
+
+#define get_status(buf) (((__u32 *)buf)[2])
+#define PS3AV_HDR_SIZE 4 /* version + size */
+
+
/** command packet structure **/
struct ps3av_send_hdr {
u16 version;
@@ -713,8 +732,6 @@ extern int ps3av_set_video_mode(u32);
extern int ps3av_set_audio_mode(u32, u32, u32, u32, u32);
extern int ps3av_get_auto_mode(void);
extern int ps3av_get_mode(void);
-extern int ps3av_get_scanmode(int);
-extern int ps3av_get_refresh_rate(int);
extern int ps3av_video_mode2res(u32, u32 *, u32 *);
extern int ps3av_video_mute(int);
extern int ps3av_audio_mute(int);
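Video mode ids become a named enum, so the default-mode #defines and callers of ps3av_set_video_mode() can use symbolic names instead of bare numbers. Illustrative only:

  ps3av_set_video_mode(PS3AV_MODE_720P60);        /* previously: ps3av_set_video_mode(3) */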
diff --git a/include/asm-powerpc/ptrace.h b/include/asm-powerpc/ptrace.h
index ffc150f602b8..891d68932f39 100644
--- a/include/asm-powerpc/ptrace.h
+++ b/include/asm-powerpc/ptrace.h
@@ -55,6 +55,8 @@ struct pt_regs {
#ifdef __powerpc64__
+#define __ARCH_WANT_COMPAT_SYS_PTRACE
+
#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
/* Size of dummy stack frame allocated when calling signal handler. */
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index 2408a29507e5..0d6238987df8 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -18,6 +18,10 @@
#include <asm/reg_booke.h>
#endif /* CONFIG_BOOKE || CONFIG_40x */
+#ifdef CONFIG_FSL_EMB_PERFMON
+#include <asm/reg_fsl_emb.h>
+#endif
+
#ifdef CONFIG_8xx
#include <asm/reg_8xx.h>
#endif /* CONFIG_8xx */
diff --git a/include/asm-powerpc/reg_booke.h b/include/asm-powerpc/reg_booke.h
index 0405ef479814..cf54a3f31753 100644
--- a/include/asm-powerpc/reg_booke.h
+++ b/include/asm-powerpc/reg_booke.h
@@ -9,68 +9,6 @@
#ifndef __ASM_POWERPC_REG_BOOKE_H__
#define __ASM_POWERPC_REG_BOOKE_H__
-#ifndef __ASSEMBLY__
-/* Performance Monitor Registers */
-#define mfpmr(rn) ({unsigned int rval; \
- asm volatile("mfpmr %0," __stringify(rn) \
- : "=r" (rval)); rval;})
-#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
-#endif /* __ASSEMBLY__ */
-
-/* Freescale Book E Performance Monitor APU Registers */
-#define PMRN_PMC0 0x010 /* Performance Monitor Counter 0 */
-#define PMRN_PMC1 0x011 /* Performance Monitor Counter 1 */
-#define PMRN_PMC2 0x012 /* Performance Monitor Counter 1 */
-#define PMRN_PMC3 0x013 /* Performance Monitor Counter 1 */
-#define PMRN_PMLCA0 0x090 /* PM Local Control A0 */
-#define PMRN_PMLCA1 0x091 /* PM Local Control A1 */
-#define PMRN_PMLCA2 0x092 /* PM Local Control A2 */
-#define PMRN_PMLCA3 0x093 /* PM Local Control A3 */
-
-#define PMLCA_FC 0x80000000 /* Freeze Counter */
-#define PMLCA_FCS 0x40000000 /* Freeze in Supervisor */
-#define PMLCA_FCU 0x20000000 /* Freeze in User */
-#define PMLCA_FCM1 0x10000000 /* Freeze when PMM==1 */
-#define PMLCA_FCM0 0x08000000 /* Freeze when PMM==0 */
-#define PMLCA_CE 0x04000000 /* Condition Enable */
-
-#define PMLCA_EVENT_MASK 0x007f0000 /* Event field */
-#define PMLCA_EVENT_SHIFT 16
-
-#define PMRN_PMLCB0 0x110 /* PM Local Control B0 */
-#define PMRN_PMLCB1 0x111 /* PM Local Control B1 */
-#define PMRN_PMLCB2 0x112 /* PM Local Control B2 */
-#define PMRN_PMLCB3 0x113 /* PM Local Control B3 */
-
-#define PMLCB_THRESHMUL_MASK 0x0700 /* Threshhold Multiple Field */
-#define PMLCB_THRESHMUL_SHIFT 8
-
-#define PMLCB_THRESHOLD_MASK 0x003f /* Threshold Field */
-#define PMLCB_THRESHOLD_SHIFT 0
-
-#define PMRN_PMGC0 0x190 /* PM Global Control 0 */
-
-#define PMGC0_FAC 0x80000000 /* Freeze all Counters */
-#define PMGC0_PMIE 0x40000000 /* Interrupt Enable */
-#define PMGC0_FCECE 0x20000000 /* Freeze countes on
- Enabled Condition or
- Event */
-
-#define PMRN_UPMC0 0x000 /* User Performance Monitor Counter 0 */
-#define PMRN_UPMC1 0x001 /* User Performance Monitor Counter 1 */
-#define PMRN_UPMC2 0x002 /* User Performance Monitor Counter 1 */
-#define PMRN_UPMC3 0x003 /* User Performance Monitor Counter 1 */
-#define PMRN_UPMLCA0 0x080 /* User PM Local Control A0 */
-#define PMRN_UPMLCA1 0x081 /* User PM Local Control A1 */
-#define PMRN_UPMLCA2 0x082 /* User PM Local Control A2 */
-#define PMRN_UPMLCA3 0x083 /* User PM Local Control A3 */
-#define PMRN_UPMLCB0 0x100 /* User PM Local Control B0 */
-#define PMRN_UPMLCB1 0x101 /* User PM Local Control B1 */
-#define PMRN_UPMLCB2 0x102 /* User PM Local Control B2 */
-#define PMRN_UPMLCB3 0x103 /* User PM Local Control B3 */
-#define PMRN_UPMGC0 0x180 /* User PM Global Control 0 */
-
-
/* Machine State Register (MSR) Fields */
#define MSR_UCLE (1<<26) /* User-mode cache lock enable */
#define MSR_SPE (1<<25) /* Enable SPE */
diff --git a/include/asm-powerpc/reg_fsl_emb.h b/include/asm-powerpc/reg_fsl_emb.h
new file mode 100644
index 000000000000..1e180a594589
--- /dev/null
+++ b/include/asm-powerpc/reg_fsl_emb.h
@@ -0,0 +1,72 @@
+/*
+ * Contains register definitions for the Freescale Embedded Performance
+ * Monitor.
+ */
+#ifdef __KERNEL__
+#ifndef __ASM_POWERPC_REG_FSL_EMB_H__
+#define __ASM_POWERPC_REG_FSL_EMB_H__
+
+#ifndef __ASSEMBLY__
+/* Performance Monitor Registers */
+#define mfpmr(rn) ({unsigned int rval; \
+ asm volatile("mfpmr %0," __stringify(rn) \
+ : "=r" (rval)); rval;})
+#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
+#endif /* __ASSEMBLY__ */
+
+/* Freescale Book E Performance Monitor APU Registers */
+#define PMRN_PMC0 0x010 /* Performance Monitor Counter 0 */
+#define PMRN_PMC1 0x011 /* Performance Monitor Counter 1 */
+#define PMRN_PMC2 0x012 /* Performance Monitor Counter 2 */
+#define PMRN_PMC3 0x013 /* Performance Monitor Counter 3 */
+#define PMRN_PMLCA0 0x090 /* PM Local Control A0 */
+#define PMRN_PMLCA1 0x091 /* PM Local Control A1 */
+#define PMRN_PMLCA2 0x092 /* PM Local Control A2 */
+#define PMRN_PMLCA3 0x093 /* PM Local Control A3 */
+
+#define PMLCA_FC 0x80000000 /* Freeze Counter */
+#define PMLCA_FCS 0x40000000 /* Freeze in Supervisor */
+#define PMLCA_FCU 0x20000000 /* Freeze in User */
+#define PMLCA_FCM1 0x10000000 /* Freeze when PMM==1 */
+#define PMLCA_FCM0 0x08000000 /* Freeze when PMM==0 */
+#define PMLCA_CE 0x04000000 /* Condition Enable */
+
+#define PMLCA_EVENT_MASK 0x007f0000 /* Event field */
+#define PMLCA_EVENT_SHIFT 16
+
+#define PMRN_PMLCB0 0x110 /* PM Local Control B0 */
+#define PMRN_PMLCB1 0x111 /* PM Local Control B1 */
+#define PMRN_PMLCB2 0x112 /* PM Local Control B2 */
+#define PMRN_PMLCB3 0x113 /* PM Local Control B3 */
+
+#define PMLCB_THRESHMUL_MASK 0x0700 /* Threshold Multiple Field */
+#define PMLCB_THRESHMUL_SHIFT 8
+
+#define PMLCB_THRESHOLD_MASK 0x003f /* Threshold Field */
+#define PMLCB_THRESHOLD_SHIFT 0
+
+#define PMRN_PMGC0 0x190 /* PM Global Control 0 */
+
+#define PMGC0_FAC 0x80000000 /* Freeze all Counters */
+#define PMGC0_PMIE 0x40000000 /* Interrupt Enable */
+#define PMGC0_FCECE 0x20000000 /* Freeze counters on
+ Enabled Condition or
+ Event */
+
+#define PMRN_UPMC0 0x000 /* User Performance Monitor Counter 0 */
+#define PMRN_UPMC1 0x001 /* User Performance Monitor Counter 1 */
+#define PMRN_UPMC2 0x002 /* User Performance Monitor Counter 2 */
+#define PMRN_UPMC3 0x003 /* User Performance Monitor Counter 3 */
+#define PMRN_UPMLCA0 0x080 /* User PM Local Control A0 */
+#define PMRN_UPMLCA1 0x081 /* User PM Local Control A1 */
+#define PMRN_UPMLCA2 0x082 /* User PM Local Control A2 */
+#define PMRN_UPMLCA3 0x083 /* User PM Local Control A3 */
+#define PMRN_UPMLCB0 0x100 /* User PM Local Control B0 */
+#define PMRN_UPMLCB1 0x101 /* User PM Local Control B1 */
+#define PMRN_UPMLCB2 0x102 /* User PM Local Control B2 */
+#define PMRN_UPMLCB3 0x103 /* User PM Local Control B3 */
+#define PMRN_UPMGC0 0x180 /* User PM Global Control 0 */
+
+
+#endif /* __ASM_POWERPC_REG_FSL_EMB_H__ */
+#endif /* __KERNEL__ */
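The Freescale embedded performance-monitor register definitions move out of reg_booke.h into this header, pulled in by reg.h under CONFIG_FSL_EMB_PERFMON. A minimal sketch of the accessors (kernel context on an FSL embedded core assumed):

  mtpmr(PMRN_PMGC0, PMGC0_FAC);           /* freeze all counters */
  unsigned int c0 = mfpmr(PMRN_PMC0);     /* read counter 0 */
  mtpmr(PMRN_PMGC0, 0);                   /* unfreeze */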
diff --git a/include/asm-powerpc/systbl.h b/include/asm-powerpc/systbl.h
index 0c8b0d679139..e996521fb3a6 100644
--- a/include/asm-powerpc/systbl.h
+++ b/include/asm-powerpc/systbl.h
@@ -309,7 +309,7 @@ SYSCALL_SPU(getcpu)
COMPAT_SYS(epoll_pwait)
COMPAT_SYS_SPU(utimensat)
COMPAT_SYS_SPU(signalfd)
-COMPAT_SYS_SPU(timerfd)
+SYSCALL(ni_syscall)
SYSCALL_SPU(eventfd)
COMPAT_SYS_SPU(sync_file_range2)
COMPAT_SYS(fallocate)
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index bc9739dff5e7..29552ff182aa 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -65,7 +65,7 @@
struct task_struct;
struct pt_regs;
-#ifdef CONFIG_DEBUGGER
+#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
@@ -463,7 +463,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -472,7 +472,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
})
-#define cmpxchg_local(ptr,o,n) \
+#define cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -492,6 +492,20 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
*/
#define NET_IP_ALIGN 0
#define NET_SKB_PAD L1_CACHE_BYTES
+
+#define cmpxchg64(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+ })
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+#else
+#include <asm-generic/cmpxchg-local.h>
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
#define arch_align_stack(x) (x)
diff --git a/include/asm-powerpc/user.h b/include/asm-powerpc/user.h
index e59ade4b3dfb..3fd4545dd74e 100644
--- a/include/asm-powerpc/user.h
+++ b/include/asm-powerpc/user.h
@@ -1,8 +1,6 @@
#ifndef _ASM_POWERPC_USER_H
#define _ASM_POWERPC_USER_H
-#ifdef __KERNEL__
-
#include <asm/ptrace.h>
#include <asm/page.h>
@@ -40,7 +38,7 @@ struct user {
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
- struct regs * u_ar0; /* help gdb find registers */
+ unsigned long u_ar0; /* help gdb find registers */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
@@ -50,6 +48,4 @@ struct user {
#define HOST_TEXT_START_ADDR (u.start_code)
#define HOST_DATA_START_ADDR (u.start_data)
#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
-
-#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_USER_H */
diff --git a/include/asm-powerpc/vio.h b/include/asm-powerpc/vio.h
index 9204c15839c5..56512a968dab 100644
--- a/include/asm-powerpc/vio.h
+++ b/include/asm-powerpc/vio.h
@@ -66,7 +66,7 @@ extern void __devinit vio_unregister_device(struct vio_dev *dev);
struct device_node;
-extern struct vio_dev * __devinit vio_register_device_node(
+extern struct vio_dev *vio_register_device_node(
struct device_node *node_vdev);
extern const void *vio_get_attribute(struct vio_dev *vdev, char *which,
int *length);
diff --git a/include/asm-ppc/pgalloc.h b/include/asm-ppc/pgalloc.h
index 44d88a98e87c..7c39a95829c7 100644
--- a/include/asm-ppc/pgalloc.h
+++ b/include/asm-ppc/pgalloc.h
@@ -7,14 +7,14 @@
extern void __bad_pte(pmd_t *pmd);
extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
/*
* We don't have any real pmd's, and this code never triggers because
* the pgd will always be present..
*/
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
@@ -32,10 +32,10 @@ extern void pgd_free(pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
-extern void pte_free_kernel(pte_t *pte);
-extern void pte_free(struct page *pte);
+extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
+extern void pte_free(struct mm_struct *mm, struct page *pte);
-#define __pte_free_tlb(tlb, pte) pte_free((pte))
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, (pte))
#define check_pgt_cache() do { } while (0)
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index 51df94c73846..0593cb889d45 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -209,12 +209,34 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
return prev;
}
+static inline unsigned long
+__cmpxchg_u32_local(volatile unsigned int *p, unsigned int old,
+ unsigned int new)
+{
+ unsigned int prev;
+
+ __asm__ __volatile__ ("\n\
+1: lwarx %0,0,%2 \n\
+ cmpw 0,%0,%3 \n\
+ bne 2f \n"
+ PPC405_ERR77(0,%2)
+" stwcx. %4,0,%2 \n\
+ bne- 1b\n"
+"2:"
+ : "=&r" (prev), "=m" (*p)
+ : "r" (p), "r" (old), "r" (new), "m" (*p)
+ : "cc", "memory");
+
+ return prev;
+}
+
/* This function doesn't exist, so you'll get a linker error
if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);
static __inline__ unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
+ unsigned int size)
{
switch (size) {
case 4:
@@ -228,7 +250,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -236,6 +258,31 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
(unsigned long)_n_, sizeof(*(ptr))); \
})
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32_local(ptr, old, new);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */
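For illustration (not part of the patch): the new __cmpxchg_u32_local() and the __cmpxchg_local_generic() fallback both follow the usual cmpxchg calling convention, i.e. the new value is stored only when the current value equals the expected one, and the previous value is always returned so the caller can tell whether the swap happened. A plain-C model of that convention, leaving out the atomicity that lwarx/stwcx. or interrupt disabling provides:

#include <stdio.h>

static unsigned long cmpxchg_model(unsigned long *ptr,
				   unsigned long old, unsigned long new)
{
	unsigned long prev = *ptr;	/* value before the operation */

	if (prev == old)
		*ptr = new;		/* swap only on a match */
	return prev;			/* caller compares this with 'old' */
}

int main(void)
{
	unsigned long v = 5;

	if (cmpxchg_model(&v, 5, 9) == 5)
		printf("swapped, v = %lu\n", v);	/* v = 9 */
	if (cmpxchg_model(&v, 5, 7) != 5)
		printf("no swap, v = %lu\n", v);	/* still 9 */
	return 0;
}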
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h
index dba6fecad0be..882db054110c 100644
--- a/include/asm-s390/bitops.h
+++ b/include/asm-s390/bitops.h
@@ -440,242 +440,256 @@ __constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
__test_bit((nr),(addr)) )
/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
+ * Optimized find bit helper functions.
*/
-static inline unsigned long ffz(unsigned long word)
+
+/**
+ * __ffz_word_loop - find byte offset of first long != -1UL
+ * @addr: pointer to array of unsigned long
+ * @size: size of the array in bits
+ */
+static inline unsigned long __ffz_word_loop(const unsigned long *addr,
+ unsigned long size)
{
- unsigned long bit = 0;
+ typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
+ unsigned long bytes = 0;
+ asm volatile(
+#ifndef __s390x__
+ " ahi %1,31\n"
+ " srl %1,5\n"
+ "0: c %2,0(%0,%3)\n"
+ " jne 1f\n"
+ " la %0,4(%0)\n"
+ " brct %1,0b\n"
+ "1:\n"
+#else
+ " aghi %1,63\n"
+ " srlg %1,%1,6\n"
+ "0: cg %2,0(%0,%3)\n"
+ " jne 1f\n"
+ " la %0,8(%0)\n"
+ " brct %1,0b\n"
+ "1:\n"
+#endif
+ : "+a" (bytes), "+d" (size)
+ : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
+ : "cc" );
+ return bytes;
+}
+
+/**
+ * __ffs_word_loop - find byte offset of first long != 0UL
+ * @addr: pointer to array of unsigned long
+ * @size: size of the array in bits
+ */
+static inline unsigned long __ffs_word_loop(const unsigned long *addr,
+ unsigned long size)
+{
+ typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
+ unsigned long bytes = 0;
+
+ asm volatile(
+#ifndef __s390x__
+ " ahi %1,31\n"
+ " srl %1,5\n"
+ "0: c %2,0(%0,%3)\n"
+ " jne 1f\n"
+ " la %0,4(%0)\n"
+ " brct %1,0b\n"
+ "1:\n"
+#else
+ " aghi %1,63\n"
+ " srlg %1,%1,6\n"
+ "0: cg %2,0(%0,%3)\n"
+ " jne 1f\n"
+ " la %0,8(%0)\n"
+ " brct %1,0b\n"
+ "1:\n"
+#endif
+ : "+a" (bytes), "+a" (size)
+ : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
+ : "cc" );
+ return bytes;
+}
+
+/**
+ * __ffz_word - add number of the first unset bit
+ * @nr: base value the bit number is added to
+ * @word: the word that is searched for unset bits
+ */
+static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
+{
#ifdef __s390x__
if (likely((word & 0xffffffff) == 0xffffffff)) {
word >>= 32;
- bit += 32;
+ nr += 32;
}
#endif
if (likely((word & 0xffff) == 0xffff)) {
word >>= 16;
- bit += 16;
+ nr += 16;
}
if (likely((word & 0xff) == 0xff)) {
word >>= 8;
- bit += 8;
+ nr += 8;
}
- return bit + _zb_findmap[word & 0xff];
+ return nr + _zb_findmap[(unsigned char) word];
}
-/*
- * __ffs = find first bit in word. Undefined if no bit exists,
- * so code should check against 0UL first..
+/**
+ * __ffs_word - add number of the first set bit
+ * @nr: base value the bit number is added to
+ * @word: the word that is searched for set bits
*/
-static inline unsigned long __ffs (unsigned long word)
+static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
- unsigned long bit = 0;
-
#ifdef __s390x__
if (likely((word & 0xffffffff) == 0)) {
word >>= 32;
- bit += 32;
+ nr += 32;
}
#endif
if (likely((word & 0xffff) == 0)) {
word >>= 16;
- bit += 16;
+ nr += 16;
}
if (likely((word & 0xff) == 0)) {
word >>= 8;
- bit += 8;
+ nr += 8;
}
- return bit + _sb_findmap[word & 0xff];
+ return nr + _sb_findmap[(unsigned char) word];
}
-/*
- * Find-bit routines..
- */
-#ifndef __s390x__
+/**
+ * __load_ulong_be - load big endian unsigned long
+ * @p: pointer to array of unsigned long
+ * @offset: byte offset of source value in the array
+ */
+static inline unsigned long __load_ulong_be(const unsigned long *p,
+ unsigned long offset)
+{
+ p = (unsigned long *)((unsigned long) p + offset);
+ return *p;
+}
-static inline int
-find_first_zero_bit(const unsigned long * addr, unsigned long size)
+/**
+ * __load_ulong_le - load little endian unsigned long
+ * @p: pointer to array of unsigned long
+ * @offset: byte offset of source value in the array
+ */
+static inline unsigned long __load_ulong_le(const unsigned long *p,
+ unsigned long offset)
{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long cmp, count;
- unsigned int res;
+ unsigned long word;
- if (!size)
- return 0;
+ p = (unsigned long *)((unsigned long) p + offset);
+#ifndef __s390x__
asm volatile(
- " lhi %1,-1\n"
- " lr %2,%3\n"
- " slr %0,%0\n"
- " ahi %2,31\n"
- " srl %2,5\n"
- "0: c %1,0(%0,%4)\n"
- " jne 1f\n"
- " la %0,4(%0)\n"
- " brct %2,0b\n"
- " lr %0,%3\n"
- " j 4f\n"
- "1: l %2,0(%0,%4)\n"
- " sll %0,3\n"
- " lhi %1,0xff\n"
- " tml %2,0xffff\n"
- " jno 2f\n"
- " ahi %0,16\n"
- " srl %2,16\n"
- "2: tml %2,0x00ff\n"
- " jno 3f\n"
- " ahi %0,8\n"
- " srl %2,8\n"
- "3: nr %2,%1\n"
- " ic %2,0(%2,%5)\n"
- " alr %0,%2\n"
- "4:"
- : "=&a" (res), "=&d" (cmp), "=&a" (count)
- : "a" (size), "a" (addr), "a" (&_zb_findmap),
- "m" (*(addrtype *) addr) : "cc");
- return (res < size) ? res : size;
+ " ic %0,0(%1)\n"
+ " icm %0,2,1(%1)\n"
+ " icm %0,4,2(%1)\n"
+ " icm %0,8,3(%1)"
+ : "=&d" (word) : "a" (p), "m" (*p) : "cc");
+#else
+ asm volatile(
+ " lrvg %0,%1"
+ : "=d" (word) : "m" (*p) );
+#endif
+ return word;
}
-static inline int
-find_first_bit(const unsigned long * addr, unsigned long size)
+/*
+ * The various find bit functions.
+ */
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static inline unsigned long ffz(unsigned long word)
{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long cmp, count;
- unsigned int res;
+ return __ffz_word(0, word);
+}
- if (!size)
- return 0;
- asm volatile(
- " slr %1,%1\n"
- " lr %2,%3\n"
- " slr %0,%0\n"
- " ahi %2,31\n"
- " srl %2,5\n"
- "0: c %1,0(%0,%4)\n"
- " jne 1f\n"
- " la %0,4(%0)\n"
- " brct %2,0b\n"
- " lr %0,%3\n"
- " j 4f\n"
- "1: l %2,0(%0,%4)\n"
- " sll %0,3\n"
- " lhi %1,0xff\n"
- " tml %2,0xffff\n"
- " jnz 2f\n"
- " ahi %0,16\n"
- " srl %2,16\n"
- "2: tml %2,0x00ff\n"
- " jnz 3f\n"
- " ahi %0,8\n"
- " srl %2,8\n"
- "3: nr %2,%1\n"
- " ic %2,0(%2,%5)\n"
- " alr %0,%2\n"
- "4:"
- : "=&a" (res), "=&d" (cmp), "=&a" (count)
- : "a" (size), "a" (addr), "a" (&_sb_findmap),
- "m" (*(addrtype *) addr) : "cc");
- return (res < size) ? res : size;
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs (unsigned long word)
+{
+ return __ffs_word(0, word);
}
-#else /* __s390x__ */
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as
+ * the libc and compiler builtin ffs routines, therefore
+ * differs in spirit from the above ffz (man ffs).
+ */
+static inline int ffs(int x)
+{
+ if (!x)
+ return 0;
+ return __ffs_word(1, x);
+}
-static inline unsigned long
-find_first_zero_bit(const unsigned long * addr, unsigned long size)
+/**
+ * find_first_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first zero bit, not the number of the byte
+ * containing a bit.
+ */
+static inline unsigned long find_first_zero_bit(const unsigned long *addr,
+ unsigned long size)
{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long res, cmp, count;
+ unsigned long bytes, bits;
if (!size)
return 0;
- asm volatile(
- " lghi %1,-1\n"
- " lgr %2,%3\n"
- " slgr %0,%0\n"
- " aghi %2,63\n"
- " srlg %2,%2,6\n"
- "0: cg %1,0(%0,%4)\n"
- " jne 1f\n"
- " la %0,8(%0)\n"
- " brct %2,0b\n"
- " lgr %0,%3\n"
- " j 5f\n"
- "1: lg %2,0(%0,%4)\n"
- " sllg %0,%0,3\n"
- " clr %2,%1\n"
- " jne 2f\n"
- " aghi %0,32\n"
- " srlg %2,%2,32\n"
- "2: lghi %1,0xff\n"
- " tmll %2,0xffff\n"
- " jno 3f\n"
- " aghi %0,16\n"
- " srl %2,16\n"
- "3: tmll %2,0x00ff\n"
- " jno 4f\n"
- " aghi %0,8\n"
- " srl %2,8\n"
- "4: ngr %2,%1\n"
- " ic %2,0(%2,%5)\n"
- " algr %0,%2\n"
- "5:"
- : "=&a" (res), "=&d" (cmp), "=&a" (count)
- : "a" (size), "a" (addr), "a" (&_zb_findmap),
- "m" (*(addrtype *) addr) : "cc");
- return (res < size) ? res : size;
-}
-
-static inline unsigned long
-find_first_bit(const unsigned long * addr, unsigned long size)
+ bytes = __ffz_word_loop(addr, size);
+ bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
+ return (bits < size) ? bits : size;
+}
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit-number of the first set bit, not the number of the byte
+ * containing a bit.
+ */
+static inline unsigned long find_first_bit(const unsigned long * addr,
+ unsigned long size)
{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long res, cmp, count;
+ unsigned long bytes, bits;
if (!size)
return 0;
- asm volatile(
- " slgr %1,%1\n"
- " lgr %2,%3\n"
- " slgr %0,%0\n"
- " aghi %2,63\n"
- " srlg %2,%2,6\n"
- "0: cg %1,0(%0,%4)\n"
- " jne 1f\n"
- " aghi %0,8\n"
- " brct %2,0b\n"
- " lgr %0,%3\n"
- " j 5f\n"
- "1: lg %2,0(%0,%4)\n"
- " sllg %0,%0,3\n"
- " clr %2,%1\n"
- " jne 2f\n"
- " aghi %0,32\n"
- " srlg %2,%2,32\n"
- "2: lghi %1,0xff\n"
- " tmll %2,0xffff\n"
- " jnz 3f\n"
- " aghi %0,16\n"
- " srl %2,16\n"
- "3: tmll %2,0x00ff\n"
- " jnz 4f\n"
- " aghi %0,8\n"
- " srl %2,8\n"
- "4: ngr %2,%1\n"
- " ic %2,0(%2,%5)\n"
- " algr %0,%2\n"
- "5:"
- : "=&a" (res), "=&d" (cmp), "=&a" (count)
- : "a" (size), "a" (addr), "a" (&_sb_findmap),
- "m" (*(addrtype *) addr) : "cc");
- return (res < size) ? res : size;
+ bytes = __ffs_word_loop(addr, size);
+ bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
+ return (bits < size) ? bits : size;
}
-#endif /* __s390x__ */
-
-static inline int
-find_next_zero_bit (const unsigned long * addr, unsigned long size,
- unsigned long offset)
+/**
+ * find_next_zero_bit - find the first zero bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+static inline int find_next_zero_bit (const unsigned long * addr,
+ unsigned long size,
+ unsigned long offset)
{
const unsigned long *p;
unsigned long bit, set;
@@ -688,10 +702,10 @@ find_next_zero_bit (const unsigned long * addr, unsigned long size,
p = addr + offset / __BITOPS_WORDSIZE;
if (bit) {
/*
- * s390 version of ffz returns __BITOPS_WORDSIZE
+ * __ffz_word returns __BITOPS_WORDSIZE
* if no zero bit is present in the word.
*/
- set = ffz(*p >> bit) + bit;
+ set = __ffz_word(0, *p >> bit) + bit;
if (set >= size)
return size + offset;
if (set < __BITOPS_WORDSIZE)
@@ -703,9 +717,15 @@ find_next_zero_bit (const unsigned long * addr, unsigned long size,
return offset + find_first_zero_bit(p, size);
}
-static inline int
-find_next_bit (const unsigned long * addr, unsigned long size,
- unsigned long offset)
+/**
+ * find_next_bit - find the first set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The maximum size to search
+ */
+static inline int find_next_bit (const unsigned long * addr,
+ unsigned long size,
+ unsigned long offset)
{
const unsigned long *p;
unsigned long bit, set;
@@ -718,10 +738,10 @@ find_next_bit (const unsigned long * addr, unsigned long size,
p = addr + offset / __BITOPS_WORDSIZE;
if (bit) {
/*
- * s390 version of __ffs returns __BITOPS_WORDSIZE
+ * __ffs_word returns __BITOPS_WORDSIZE
* if no one bit is present in the word.
*/
- set = __ffs(*p & (~0UL << bit));
+ set = __ffs_word(0, *p & (~0UL << bit));
if (set >= size)
return size + offset;
if (set < __BITOPS_WORDSIZE)
@@ -744,8 +764,6 @@ static inline int sched_find_first_bit(unsigned long *b)
return find_first_bit(b, 140);
}
-#include <asm-generic/bitops/ffs.h>
-
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
@@ -772,108 +790,23 @@ static inline int sched_find_first_bit(unsigned long *b)
test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_test_bit(nr, addr) \
test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
-#define ext2_find_next_bit(addr, size, off) \
- generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
-#ifndef __s390x__
-
-static inline int
-ext2_find_first_zero_bit(void *vaddr, unsigned int size)
+static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size)
{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long cmp, count;
- unsigned int res;
+ unsigned long bytes, bits;
if (!size)
return 0;
- asm volatile(
- " lhi %1,-1\n"
- " lr %2,%3\n"
- " ahi %2,31\n"
- " srl %2,5\n"
- " slr %0,%0\n"
- "0: cl %1,0(%0,%4)\n"
- " jne 1f\n"
- " ahi %0,4\n"
- " brct %2,0b\n"
- " lr %0,%3\n"
- " j 4f\n"
- "1: l %2,0(%0,%4)\n"
- " sll %0,3\n"
- " ahi %0,24\n"
- " lhi %1,0xff\n"
- " tmh %2,0xffff\n"
- " jo 2f\n"
- " ahi %0,-16\n"
- " srl %2,16\n"
- "2: tml %2,0xff00\n"
- " jo 3f\n"
- " ahi %0,-8\n"
- " srl %2,8\n"
- "3: nr %2,%1\n"
- " ic %2,0(%2,%5)\n"
- " alr %0,%2\n"
- "4:"
- : "=&a" (res), "=&d" (cmp), "=&a" (count)
- : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
- "m" (*(addrtype *) vaddr) : "cc");
- return (res < size) ? res : size;
+ bytes = __ffz_word_loop(vaddr, size);
+ bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
+ return (bits < size) ? bits : size;
}
-#else /* __s390x__ */
-
-static inline unsigned long
-ext2_find_first_zero_bit(void *vaddr, unsigned long size)
-{
- typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
- unsigned long res, cmp, count;
-
- if (!size)
- return 0;
- asm volatile(
- " lghi %1,-1\n"
- " lgr %2,%3\n"
- " aghi %2,63\n"
- " srlg %2,%2,6\n"
- " slgr %0,%0\n"
- "0: clg %1,0(%0,%4)\n"
- " jne 1f\n"
- " aghi %0,8\n"
- " brct %2,0b\n"
- " lgr %0,%3\n"
- " j 5f\n"
- "1: cl %1,0(%0,%4)\n"
- " jne 2f\n"
- " aghi %0,4\n"
- "2: l %2,0(%0,%4)\n"
- " sllg %0,%0,3\n"
- " aghi %0,24\n"
- " lghi %1,0xff\n"
- " tmlh %2,0xffff\n"
- " jo 3f\n"
- " aghi %0,-16\n"
- " srl %2,16\n"
- "3: tmll %2,0xff00\n"
- " jo 4f\n"
- " aghi %0,-8\n"
- " srl %2,8\n"
- "4: ngr %2,%1\n"
- " ic %2,0(%2,%5)\n"
- " algr %0,%2\n"
- "5:"
- : "=&a" (res), "=&d" (cmp), "=&a" (count)
- : "a" (size), "a" (vaddr), "a" (&_zb_findmap),
- "m" (*(addrtype *) vaddr) : "cc");
- return (res < size) ? res : size;
-}
-
-#endif /* __s390x__ */
-
-static inline int
-ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
+static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size,
+ unsigned long offset)
{
unsigned long *addr = vaddr, *p;
- unsigned long word, bit, set;
+ unsigned long bit, set;
if (offset >= size)
return size;
@@ -882,23 +815,11 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
size -= offset;
p = addr + offset / __BITOPS_WORDSIZE;
if (bit) {
-#ifndef __s390x__
- asm volatile(
- " ic %0,0(%1)\n"
- " icm %0,2,1(%1)\n"
- " icm %0,4,2(%1)\n"
- " icm %0,8,3(%1)"
- : "=&a" (word) : "a" (p), "m" (*p) : "cc");
-#else
- asm volatile(
- " lrvg %0,%1"
- : "=a" (word) : "m" (*p) );
-#endif
/*
* s390 version of ffz returns __BITOPS_WORDSIZE
* if no zero bit is present in the word.
*/
- set = ffz(word >> bit) + bit;
+ set = ffz(__load_ulong_le(p, 0) >> bit) + bit;
if (set >= size)
return size + offset;
if (set < __BITOPS_WORDSIZE)
@@ -910,6 +831,47 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
return offset + ext2_find_first_zero_bit(p, size);
}
+static inline unsigned long ext2_find_first_bit(void *vaddr,
+ unsigned long size)
+{
+ unsigned long bytes, bits;
+
+ if (!size)
+ return 0;
+ bytes = __ffs_word_loop(vaddr, size);
+ bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
+ return (bits < size) ? bits : size;
+}
+
+static inline int ext2_find_next_bit(void *vaddr, unsigned long size,
+ unsigned long offset)
+{
+ unsigned long *addr = vaddr, *p;
+ unsigned long bit, set;
+
+ if (offset >= size)
+ return size;
+ bit = offset & (__BITOPS_WORDSIZE - 1);
+ offset -= bit;
+ size -= offset;
+ p = addr + offset / __BITOPS_WORDSIZE;
+ if (bit) {
+ /*
+ * ffs is used here to search for a set bit; it returns
+ * zero if no set bit is present in the word.
+ */
+ set = ffs(__load_ulong_le(p, 0) >> bit) + bit;
+ if (set >= size)
+ return size + offset;
+ if (set < __BITOPS_WORDSIZE)
+ return set + offset;
+ offset += __BITOPS_WORDSIZE;
+ size -= __BITOPS_WORDSIZE;
+ p++;
+ }
+ return offset + ext2_find_first_bit(p, size);
+}
+
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
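For illustration (not part of the patch): the rewritten s390 helpers split the search in two. __ffz_word_loop()/__ffs_word_loop() skip whole words that are all ones (or all zeroes), and __ffz_word()/__ffs_word() then narrow the remaining word down half by half before a byte lookup in _zb_findmap[]/_sb_findmap[]. A portable model of the zero-bit narrowing, with a small loop standing in for the lookup table and a 64-bit word assumed:

#include <stdio.h>

static unsigned long ffz_model(unsigned long nr, unsigned long long word)
{
	if ((word & 0xffffffffULL) == 0xffffffffULL) {	/* low 32 bits all set */
		word >>= 32;
		nr += 32;
	}
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	while (word & 1) {	/* the kernel uses _zb_findmap[] for this step */
		word >>= 1;
		nr++;
	}
	return nr;
}

int main(void)
{
	printf("%lu\n", ffz_model(0, 0x0f));		/* bits 0..3 set -> 4 */
	printf("%lu\n", ffz_model(0, 0xffffffffULL));	/* low word full -> 32 */
	return 0;
}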
diff --git a/include/asm-s390/cacheflush.h b/include/asm-s390/cacheflush.h
index f7cade8083f3..49d5af916d01 100644
--- a/include/asm-s390/cacheflush.h
+++ b/include/asm-s390/cacheflush.h
@@ -24,4 +24,8 @@
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void kernel_map_pages(struct page *page, int numpages, int enable);
+#endif
+
#endif /* _S390_CACHEFLUSH_H */
diff --git a/include/asm-s390/ccwgroup.h b/include/asm-s390/ccwgroup.h
index 7109c7cab87e..289053ef5e60 100644
--- a/include/asm-s390/ccwgroup.h
+++ b/include/asm-s390/ccwgroup.h
@@ -37,6 +37,7 @@ struct ccwgroup_device {
* @remove: function called on remove
* @set_online: function called when device is set online
* @set_offline: function called when device is set offline
+ * @shutdown: function called when device is shut down
* @driver: embedded driver structure
*/
struct ccwgroup_driver {
@@ -49,6 +50,7 @@ struct ccwgroup_driver {
void (*remove) (struct ccwgroup_device *);
int (*set_online) (struct ccwgroup_device *);
int (*set_offline) (struct ccwgroup_device *);
+ void (*shutdown)(struct ccwgroup_device *);
struct device_driver driver;
};
diff --git a/include/asm-s390/cputime.h b/include/asm-s390/cputime.h
index 4b3ef7cad115..133ce054fc89 100644
--- a/include/asm-s390/cputime.h
+++ b/include/asm-s390/cputime.h
@@ -54,6 +54,7 @@ __div(unsigned long long n, unsigned int base)
#define cputime_lt(__a, __b) ((__a) < (__b))
#define cputime_le(__a, __b) ((__a) <= (__b))
#define cputime_to_jiffies(__ct) (__div((__ct), 1000000 / HZ))
+#define cputime_to_scaled(__ct) (__ct)
#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (1000000 / HZ))
#define cputime64_zero (0ULL)
diff --git a/include/asm-s390/elf.h b/include/asm-s390/elf.h
index 91d06325cc79..b73a424d0f97 100644
--- a/include/asm-s390/elf.h
+++ b/include/asm-s390/elf.h
@@ -113,7 +113,6 @@
typedef s390_fp_regs elf_fpregset_t;
typedef s390_regs elf_gregset_t;
-#ifdef __KERNEL__
#include <linux/sched.h> /* for task_struct */
#include <asm/system.h> /* for save_access_regs */
@@ -214,6 +213,5 @@ do { \
clear_thread_flag(TIF_31BIT); \
} while (0)
#endif /* __s390x__ */
-#endif
#endif
diff --git a/include/asm-s390/kexec.h b/include/asm-s390/kexec.h
index 7592af708b41..f219c6411e0b 100644
--- a/include/asm-s390/kexec.h
+++ b/include/asm-s390/kexec.h
@@ -10,7 +10,9 @@
#ifndef _S390_KEXEC_H
#define _S390_KEXEC_H
+#ifdef __KERNEL__
#include <asm/page.h>
+#endif
#include <asm/processor.h>
/*
* KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 584d0ee3c7f6..a55f9d979dfb 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -19,7 +19,6 @@
#define PAGE_DEFAULT_ACC 0
#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
-#ifdef __KERNEL__
#include <asm/setup.h>
#ifndef __ASSEMBLY__
@@ -172,6 +171,4 @@ static inline int pfn_valid(unsigned long pfn)
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
-
#endif /* _S390_PAGE_H */
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 709dd1740956..6f6619ba8980 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -57,10 +57,10 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
}
#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
-#define pud_free(x) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pgd, pud) BUG()
#define pgd_populate_kernel(mm, pgd, pud) BUG()
@@ -76,7 +76,7 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
}
#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
-#define pud_free(x) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
@@ -85,7 +85,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
return (pmd_t *) crst;
}
-#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)
+#define pmd_free(mm, pmd) crst_table_free((unsigned long *)pmd)
#define pgd_populate(mm, pgd, pud) BUG()
#define pgd_populate_kernel(mm, pgd, pud) BUG()
@@ -115,7 +115,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
crst_table_init(crst, pgd_entry_type(mm));
return (pgd_t *) crst;
}
-#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
+#define pgd_free(mm, pgd) crst_table_free((unsigned long *) pgd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
@@ -151,9 +151,9 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
#define pte_alloc_one(mm, vmaddr) \
virt_to_page(page_table_alloc(s390_noexec))
-#define pte_free_kernel(pte) \
+#define pte_free_kernel(mm, pte) \
page_table_free((unsigned long *) pte)
-#define pte_free(pte) \
+#define pte_free(mm, pte) \
page_table_free((unsigned long *) page_to_phys((struct page *) pte))
#endif /* _S390_PGALLOC_H */
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 79b9eab1a0c7..3f520754e71c 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -115,15 +115,21 @@ extern char empty_zero_page[PAGE_SIZE];
#ifndef __s390x__
#define VMALLOC_START 0x78000000UL
#define VMALLOC_END 0x7e000000UL
-#define VMEM_MAP_MAX 0x80000000UL
+#define VMEM_MAP_END 0x80000000UL
#else /* __s390x__ */
#define VMALLOC_START 0x3e000000000UL
#define VMALLOC_END 0x3e040000000UL
-#define VMEM_MAP_MAX 0x40000000000UL
+#define VMEM_MAP_END 0x40000000000UL
#endif /* __s390x__ */
+/*
+ * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
+ * mapping. This needs to be calculated at compile time since the size of the
+ * VMEM_MAP is static but the size of struct page can change.
+ */
+#define VMEM_MAX_PHYS min(VMALLOC_START, ((VMEM_MAP_END - VMALLOC_END) / \
+ sizeof(struct page) * PAGE_SIZE) & ~((16 << 20) - 1))
#define VMEM_MAP ((struct page *) VMALLOC_END)
-#define VMEM_MAP_SIZE ((VMALLOC_START / PAGE_SIZE) * sizeof(struct page))
/*
* A 31 bit pagetable entry of S390 has following format:
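A worked example (not part of the patch) of the VMEM_MAX_PHYS formula above: the window between VMALLOC_END and VMEM_MAP_END can hold only so many struct page entries, so the coverable physical memory is that window divided by sizeof(struct page), scaled by PAGE_SIZE, aligned down to 16 MB and capped at VMALLOC_START. Using the 31-bit constants from this hunk and an assumed sizeof(struct page) of 32 bytes:

#include <stdio.h>

int main(void)
{
	unsigned long long vmalloc_start = 0x78000000ULL;	/* 31-bit layout */
	unsigned long long vmalloc_end   = 0x7e000000ULL;
	unsigned long long vmem_map_end  = 0x80000000ULL;
	unsigned long long page_size     = 4096;
	unsigned long long sizeof_page   = 32;	/* assumed, config dependent */

	/* physical memory that the VMEM_MAP window can describe */
	unsigned long long covered = (vmem_map_end - vmalloc_end) /
				     sizeof_page * page_size;
	covered &= ~((16ULL << 20) - 1);	/* align down to 16 MB */

	unsigned long long vmem_max_phys =
		covered < vmalloc_start ? covered : vmalloc_start;

	/* 0x2000000 / 32 * 4096 = 0x100000000, so VMALLOC_START wins here */
	printf("VMEM_MAX_PHYS = %#llx\n", vmem_max_phys);
	return 0;
}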
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index c86b982aef5a..4f744609cd11 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -70,8 +70,9 @@ extern int get_cpu_capability(unsigned int *);
#else /* __s390x__ */
-# define TASK_SIZE (test_thread_flag(TIF_31BIT) ? \
+# define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_31BIT) ? \
(0x80000000UL) : (0x40000000000UL))
+# define TASK_SIZE TASK_SIZE_OF(current)
# define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
# define DEFAULT_TASK_SIZE (0x40000000000UL)
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index 44bda786eef7..15aba30601a3 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -201,9 +201,9 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
#define __HAVE_ARCH_CMPXCHG 1
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
extern void __cmpxchg_called_with_bad_pointer(void);
@@ -355,6 +355,44 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
#include <linux/irqflags.h>
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 1:
+ case 2:
+ case 4:
+#ifdef __s390x__
+ case 8:
+#endif
+ return __cmpxchg(ptr, old, new, size);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#ifdef __s390x__
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+#else
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
/*
* Use to set psw mask except for the first byte which
* won't be changed by this function.
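For illustration (not part of the patch): cmpxchg_local() is meant for data that is only ever modified by the owning CPU (per-CPU counters and the like), where cross-CPU atomicity is unnecessary but the compare-and-swap calling convention is still convenient. The usual usage pattern is a re-read, recompute, retry loop, sketched here in userspace with the GCC __sync builtin standing in for the kernel macro:

#include <stdio.h>

/* Add 'delta' with a compare-and-swap retry loop: recompute and retry
 * whenever the value changed between the read and the swap. */
static void counter_add(unsigned long *ctr, unsigned long delta)
{
	unsigned long old, new;

	do {
		old = *ctr;
		new = old + delta;
	} while (__sync_val_compare_and_swap(ctr, old, new) != old);
}

int main(void)
{
	unsigned long ctr = 0;

	counter_add(&ctr, 3);
	counter_add(&ctr, 4);
	printf("%lu\n", ctr);	/* prints 7 */
	return 0;
}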
diff --git a/include/asm-s390/tlb.h b/include/asm-s390/tlb.h
index 618693cfc10f..985de2b88279 100644
--- a/include/asm-s390/tlb.h
+++ b/include/asm-s390/tlb.h
@@ -65,9 +65,9 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb,
if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pmds < TLB_NR_PTRS))
__tlb_flush_mm(tlb->mm);
while (tlb->nr_ptes > 0)
- pte_free(tlb->array[--tlb->nr_ptes]);
+ pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]);
while (tlb->nr_pmds < TLB_NR_PTRS)
- pmd_free((pmd_t *) tlb->array[tlb->nr_pmds++]);
+ pmd_free(tlb->mm, (pmd_t *) tlb->array[tlb->nr_pmds++]);
}
static inline void tlb_finish_mmu(struct mmu_gather *tlb,
@@ -102,7 +102,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, struct page *page)
if (tlb->nr_ptes >= tlb->nr_pmds)
tlb_flush_mmu(tlb, 0, 0);
} else
- pte_free(page);
+ pte_free(tlb->mm, page);
}
/*
@@ -117,7 +117,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
if (tlb->nr_ptes >= tlb->nr_pmds)
tlb_flush_mmu(tlb, 0, 0);
} else
- pmd_free(pmd);
+ pmd_free(tlb->mm, pmd);
#endif
}
diff --git a/include/asm-s390/user.h b/include/asm-s390/user.h
index 1dc74baf03c4..1b050e35fdc6 100644
--- a/include/asm-s390/user.h
+++ b/include/asm-s390/user.h
@@ -63,8 +63,7 @@ struct user {
the top of the stack is always found in the
esp register. */
long int signal; /* Signal that caused the core dump. */
- struct user_regs_struct *u_ar0;
- /* Used by gdb to help find the values for */
+ unsigned long u_ar0; /* Used by gdb to help find the values for */
/* the registers. */
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
diff --git a/include/asm-sh/delay.h b/include/asm-sh/delay.h
index 031db84f2aa1..d5d464041003 100644
--- a/include/asm-sh/delay.h
+++ b/include/asm-sh/delay.h
@@ -12,7 +12,7 @@ extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
#ifdef CONFIG_SUPERH32
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 002e64a4f049..e0fe02950f52 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -7,8 +7,6 @@
#include <linux/const.h>
-#ifdef __KERNEL__
-
/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT 12
@@ -178,5 +176,4 @@ typedef struct { unsigned long pgd; } pgd_t;
#define ARCH_SLAB_MINALIGN 8
#endif
-#endif /* __KERNEL__ */
#endif /* __ASM_SH_PAGE_H */
diff --git a/include/asm-sh/pgalloc.h b/include/asm-sh/pgalloc.h
index 18b613c57cf5..59ca16d77a1d 100644
--- a/include/asm-sh/pgalloc.h
+++ b/include/asm-sh/pgalloc.h
@@ -36,7 +36,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
quicklist_free(QUICK_PGD, NULL, pgd);
}
@@ -54,12 +54,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return pg ? virt_to_page(pg) : NULL;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
quicklist_free(QUICK_PT, NULL, pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
quicklist_free_page(QUICK_PT, NULL, pte);
}
@@ -71,7 +71,7 @@ static inline void pte_free(struct page *pte)
* inside the pgd, so has no extra memory associated with it.
*/
-#define pmd_free(x) do { } while (0)
+#define pmd_free(mm, x) do { } while (0)
#define __pmd_free_tlb(tlb,x) do { } while (0)
static inline void check_pgt_cache(void)
diff --git a/include/asm-sh/unistd_32.h b/include/asm-sh/unistd_32.h
index b182b1cb05fd..433fd1b48fa2 100644
--- a/include/asm-sh/unistd_32.h
+++ b/include/asm-sh/unistd_32.h
@@ -330,7 +330,7 @@
#define __NR_epoll_pwait 319
#define __NR_utimensat 320
#define __NR_signalfd 321
-#define __NR_timerfd 322
+/* #define __NR_timerfd 322 removed */
#define __NR_eventfd 323
#define __NR_fallocate 324
diff --git a/include/asm-sh/unistd_64.h b/include/asm-sh/unistd_64.h
index 944511882cac..108d2ba897fe 100644
--- a/include/asm-sh/unistd_64.h
+++ b/include/asm-sh/unistd_64.h
@@ -370,7 +370,7 @@
#define __NR_epoll_pwait 347
#define __NR_utimensat 348
#define __NR_signalfd 349
-#define __NR_timerfd 350
+/* #define __NR_timerfd 350 removed */
#define __NR_eventfd 351
#define __NR_fallocate 352
diff --git a/include/asm-sh/user.h b/include/asm-sh/user.h
index 1a4f43c75126..8fd3cf6c58d4 100644
--- a/include/asm-sh/user.h
+++ b/include/asm-sh/user.h
@@ -52,7 +52,7 @@ struct user {
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
- struct regs * u_ar0; /* help gdb find registers */
+ unsigned long u_ar0; /* help gdb find registers */
struct user_fpu_struct* u_fpstate; /* Math Co-processor pointer */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 3328950dbfe6..5c944b5a8040 100644
--- a/include/asm-sparc/atomic.h
+++ b/include/asm-sparc/atomic.h
@@ -17,42 +17,6 @@ typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
-/* Emulate cmpxchg() the same way we emulate atomics,
- * by hashing the object address and indexing into an array
- * of spinlocks to get a bit of performance...
- *
- * See arch/sparc/lib/atomic32.c for implementation.
- *
- * Cribbed from <asm-parisc/atomic.h>
- */
-#define __HAVE_ARCH_CMPXCHG 1
-
-/* bug catcher for when unsupported size is used - won't link */
-extern void __cmpxchg_called_with_bad_pointer(void);
-/* we only need to support cmpxchg of a u32 on sparc */
-extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
-
-/* don't worry...optimizer will get rid of most of this */
-static inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
-{
- switch(size) {
- case 4:
- return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
- default:
- __cmpxchg_called_with_bad_pointer();
- break;
- }
- return old;
-}
-
-#define cmpxchg(ptr,o,n) ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
-})
-
#define ATOMIC_INIT(i) { (i) }
extern int __atomic_add_return(int, atomic_t *);
diff --git a/include/asm-sparc/elf.h b/include/asm-sparc/elf.h
index aaf6ef40ee2f..d2516eed3a38 100644
--- a/include/asm-sparc/elf.h
+++ b/include/asm-sparc/elf.h
@@ -65,8 +65,14 @@
#define HWCAP_SPARC_V9 16
#define HWCAP_SPARC_ULTRA3 32
-/* For the most part we present code dumps in the format
- * Solaris does.
+#define CORE_DUMP_USE_REGSET
+
+/* Format is:
+ * G0 --> G7
+ * O0 --> O7
+ * L0 --> L7
+ * I0 --> I7
+ * PSR, PC, nPC, Y, WIM, TBR
*/
typedef unsigned long elf_greg_t;
#define ELF_NGREG 38
@@ -85,36 +91,7 @@ typedef struct {
unsigned int pr_q[64];
} elf_fpregset_t;
-#ifdef __KERNEL__
#include <asm/mbus.h>
-#include <asm/uaccess.h>
-
-/* Format is:
- * G0 --> G7
- * O0 --> O7
- * L0 --> L7
- * I0 --> I7
- * PSR, PC, nPC, Y, WIM, TBR
- */
-#define ELF_CORE_COPY_REGS(__elf_regs, __pt_regs) \
-do { unsigned long *dest = &(__elf_regs[0]); \
- struct pt_regs *src = (__pt_regs); \
- unsigned long __user *sp; \
- memcpy(&dest[0], &src->u_regs[0], \
- sizeof(unsigned long) * 16); \
- /* Don't try this at home kids... */ \
- sp = (unsigned long __user *) src->u_regs[14]; \
- copy_from_user(&dest[16], sp, \
- sizeof(unsigned long) * 16); \
- dest[32] = src->psr; \
- dest[33] = src->pc; \
- dest[34] = src->npc; \
- dest[35] = src->y; \
- dest[36] = dest[37] = 0; /* XXX */ \
-} while(0); /* Janitors: Don't touch this semicolon. */
-
-#define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs) \
- ({ ELF_CORE_COPY_REGS((*(__elf_regs)), (__tsk)->thread.kregs); 1; })
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -166,6 +143,4 @@ do { unsigned long *dest = &(__elf_regs[0]); \
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
-#endif /* __KERNEL__ */
-
#endif /* !(__ASMSPARC_ELF_H) */
diff --git a/include/asm-sparc/page.h b/include/asm-sparc/page.h
index ff57648eb8f8..cbc48c0c4e15 100644
--- a/include/asm-sparc/page.h
+++ b/include/asm-sparc/page.h
@@ -8,8 +8,6 @@
#ifndef _SPARC_PAGE_H
#define _SPARC_PAGE_H
-#ifdef __KERNEL__
-
#ifdef CONFIG_SUN4
#define PAGE_SHIFT 13
#else
@@ -163,6 +161,4 @@ extern unsigned long pfn_base;
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
-
#endif /* _SPARC_PAGE_H */
diff --git a/include/asm-sparc/pgalloc.h b/include/asm-sparc/pgalloc.h
index a449cd4912d1..b5fbdd36447f 100644
--- a/include/asm-sparc/pgalloc.h
+++ b/include/asm-sparc/pgalloc.h
@@ -32,7 +32,7 @@ BTFIXUPDEF_CALL(pgd_t *, get_pgd_fast, void)
BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
#define free_pgd_fast(pgd) BTFIXUP_CALL(free_pgd_fast)(pgd)
-#define pgd_free(pgd) free_pgd_fast(pgd)
+#define pgd_free(mm, pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
@@ -45,8 +45,8 @@ BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
BTFIXUPDEF_CALL(void, free_pmd_fast, pmd_t *)
#define free_pmd_fast(pmd) BTFIXUP_CALL(free_pmd_fast)(pmd)
-#define pmd_free(pmd) free_pmd_fast(pmd)
-#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
+#define pmd_free(mm, pmd) free_pmd_fast(pmd)
+#define __pmd_free_tlb(tlb, pmd) pmd_free((tlb)->mm, pmd)
BTFIXUPDEF_CALL(void, pmd_populate, pmd_t *, struct page *)
#define pmd_populate(MM, PMD, PTE) BTFIXUP_CALL(pmd_populate)(PMD, PTE)
@@ -59,10 +59,10 @@ BTFIXUPDEF_CALL(pte_t *, pte_alloc_one_kernel, struct mm_struct *, unsigned long
#define pte_alloc_one_kernel(mm, addr) BTFIXUP_CALL(pte_alloc_one_kernel)(mm, addr)
BTFIXUPDEF_CALL(void, free_pte_fast, pte_t *)
-#define pte_free_kernel(pte) BTFIXUP_CALL(free_pte_fast)(pte)
+#define pte_free_kernel(mm, pte) BTFIXUP_CALL(free_pte_fast)(pte)
BTFIXUPDEF_CALL(void, pte_free, struct page *)
-#define pte_free(pte) BTFIXUP_CALL(pte_free)(pte)
-#define __pte_free_tlb(tlb, pte) pte_free(pte)
+#define pte_free(mm, pte) BTFIXUP_CALL(pte_free)(pte)
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#endif /* _SPARC_PGALLOC_H */
diff --git a/include/asm-sparc/ptrace.h b/include/asm-sparc/ptrace.h
index 714497099a42..8201a7b29d49 100644
--- a/include/asm-sparc/ptrace.h
+++ b/include/asm-sparc/ptrace.h
@@ -61,8 +61,6 @@ struct sparc_stackf {
#ifdef __KERNEL__
-#define __ARCH_SYS_PTRACE 1
-
#define user_mode(regs) (!((regs)->psr & PSR_PS))
#define instruction_pointer(regs) ((regs)->pc)
unsigned long profile_pc(struct pt_regs *);
@@ -151,8 +149,6 @@ extern void show_regs(struct pt_regs *);
#define SF_XXARG 0x5c
/* Stuff for the ptrace system call */
-#define PTRACE_SUNATTACH 10
-#define PTRACE_SUNDETACH 11
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14
@@ -164,7 +160,4 @@ extern void show_regs(struct pt_regs *);
#define PTRACE_GETFPAREGS 20
#define PTRACE_SETFPAREGS 21
-#define PTRACE_GETUCODE 29 /* stupid bsd-ism */
-
-
#endif /* !(_SPARC_PTRACE_H) */
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 2655d142b22d..45e47c159a6e 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -225,6 +225,54 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
return x;
}
+/* Emulate cmpxchg() the same way we emulate atomics,
+ * by hashing the object address and indexing into an array
+ * of spinlocks to get a bit of performance...
+ *
+ * See arch/sparc/lib/atomic32.c for implementation.
+ *
+ * Cribbed from <asm-parisc/atomic.h>
+ */
+#define __HAVE_ARCH_CMPXCHG 1
+
+/* bug catcher for when unsupported size is used - won't link */
+extern void __cmpxchg_called_with_bad_pointer(void);
+/* we only need to support cmpxchg of a u32 on sparc */
+extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+
+/* don't worry...optimizer will get rid of most of this */
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
+ default:
+ __cmpxchg_called_with_bad_pointer();
+ break;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr))); \
+})
+
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
#endif /* __KERNEL__ */
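For illustration (not part of the patch): the comment above describes how sparc32, lacking a native compare-and-swap, emulates cmpxchg() by hashing the object address into an array of spinlocks (see arch/sparc/lib/atomic32.c). A userspace sketch of the same idea, with pthread mutexes standing in for the spinlocks:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_LOCKS 16

static pthread_mutex_t locks[NR_LOCKS] = {
	[0 ... NR_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER	/* GCC range init */
};

/* Hash the address to pick a lock, then do compare+store under it. */
static uint32_t emulated_cmpxchg_u32(volatile uint32_t *m,
				     uint32_t old, uint32_t new)
{
	pthread_mutex_t *lock = &locks[((uintptr_t)m >> 2) % NR_LOCKS];
	uint32_t prev;

	pthread_mutex_lock(lock);
	prev = *m;
	if (prev == old)
		*m = new;
	pthread_mutex_unlock(lock);
	return prev;
}

int main(void)
{
	uint32_t v = 1;

	emulated_cmpxchg_u32(&v, 1, 2);
	printf("%u\n", (unsigned)v);	/* prints 2 */
	return 0;
}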
diff --git a/include/asm-sparc/unistd.h b/include/asm-sparc/unistd.h
index 0decdf763716..2338a0276377 100644
--- a/include/asm-sparc/unistd.h
+++ b/include/asm-sparc/unistd.h
@@ -327,11 +327,13 @@
#define __NR_epoll_pwait 309
#define __NR_utimensat 310
#define __NR_signalfd 311
-#define __NR_timerfd 312
+#define __NR_timerfd_create 312
#define __NR_eventfd 313
#define __NR_fallocate 314
+#define __NR_timerfd_settime 315
+#define __NR_timerfd_gettime 316
-#define NR_SYSCALLS 315
+#define NR_SYSCALLS 317
/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
* it never had the plain ones and there is no value to adding those
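For illustration (not part of the patch): the interim __NR_timerfd number is reused for timerfd_create, timerfd_settime and timerfd_gettime are added, and NR_SYSCALLS grows to match. A generic userspace example of the interface behind those numbers, using the glibc wrappers rather than raw syscall numbers:

#include <sys/timerfd.h>
#include <time.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);
	struct itimerspec its = { .it_value.tv_sec = 1 };	/* one-shot, 1 s */
	uint64_t expirations;

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0)
		return 1;
	/* read() blocks until the timer fires, then reports the count */
	if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
		printf("timer expired %llu time(s)\n",
		       (unsigned long long)expirations);
	close(fd);
	return 0;
}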
diff --git a/include/asm-sparc64/elf.h b/include/asm-sparc64/elf.h
index 8653e8665009..272a65873f2e 100644
--- a/include/asm-sparc64/elf.h
+++ b/include/asm-sparc64/elf.h
@@ -7,11 +7,9 @@
*/
#include <asm/ptrace.h>
-#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/spitfire.h>
-#endif
/*
* Sparc section types
@@ -72,6 +70,8 @@
#define HWCAP_SPARC_BLKINIT 64
#define HWCAP_SPARC_N2 128
+#define CORE_DUMP_USE_REGSET
+
/*
* These are used to set parameters in the core dumps.
*/
@@ -80,10 +80,6 @@
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2MSB
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG 36
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
/* Format of 64-bit elf_gregset_t is:
* G0 --> G7
* O0 --> O7
@@ -94,24 +90,9 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
* TNPC
* Y
*/
-#define ELF_CORE_COPY_REGS(__elf_regs, __pt_regs) \
-do { unsigned long *dest = &(__elf_regs[0]); \
- struct pt_regs *src = (__pt_regs); \
- unsigned long __user *sp; \
- int i; \
- for(i = 0; i < 16; i++) \
- dest[i] = src->u_regs[i]; \
- /* Don't try this at home kids... */ \
- sp = (unsigned long __user *) \
- ((src->u_regs[14] + STACK_BIAS) \
- & 0xfffffffffffffff8UL); \
- for(i = 0; i < 16; i++) \
- __get_user(dest[i+16], &sp[i]); \
- dest[32] = src->tstate; \
- dest[33] = src->tpc; \
- dest[34] = src->tnpc; \
- dest[35] = src->y; \
-} while (0);
+typedef unsigned long elf_greg_t;
+#define ELF_NGREG 36
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct {
unsigned long pr_regs[32];
@@ -121,9 +102,6 @@ typedef struct {
} elf_fpregset_t;
#endif
-#define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs) \
- ({ ELF_CORE_COPY_REGS((*(__elf_regs)), task_pt_regs(__tsk)); 1; })
-
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
@@ -175,7 +153,6 @@ static inline unsigned int sparc64_elf_hwcap(void)
#define ELF_PLATFORM (NULL)
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) \
do { unsigned long new_flags = current_thread_info()->flags; \
new_flags &= _TIF_32BIT; \
@@ -194,6 +171,5 @@ do { unsigned long new_flags = current_thread_info()->flags; \
else if (current->personality != PER_LINUX32) \
set_personality(PER_LINUX); \
} while (0)
-#endif
#endif /* !(__ASM_SPARC64_ELF_H) */
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index c299b853b5ba..b6ece223562d 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -16,7 +16,7 @@
/* BIO layer definitions. */
extern unsigned long kern_base, kern_size;
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-#define BIO_VMERGE_BOUNDARY 8192
+#define BIO_VMERGE_BOUNDARY 0
static inline u8 _inb(unsigned long addr)
{
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index 7af1077451ff..cdf950e017ee 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -3,8 +3,6 @@
#ifndef _SPARC64_PAGE_H
#define _SPARC64_PAGE_H
-#ifdef __KERNEL__
-
#include <linux/const.h>
#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
@@ -143,5 +141,4 @@ typedef unsigned long pgprot_t;
#include <asm-generic/page.h>
-#endif /* __KERNEL__ */
#endif /* _SPARC64_PAGE_H */
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index 5d66b858a965..b48f73c2274e 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -20,7 +20,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
quicklist_free(0, NULL, pgd);
}
@@ -32,7 +32,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
quicklist_free(0, NULL, pmd);
}
@@ -50,12 +50,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return pg ? virt_to_page(pg) : NULL;
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
quicklist_free(0, NULL, pte);
}
-static inline void pte_free(struct page *ptepage)
+static inline void pte_free(struct mm_struct *mm, struct page *ptepage)
{
quicklist_free_page(0, NULL, ptepage);
}
diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h
index 7eba90c6c753..734a767f0a4e 100644
--- a/include/asm-sparc64/ptrace.h
+++ b/include/asm-sparc64/ptrace.h
@@ -95,8 +95,6 @@ struct sparc_trapf {
#ifdef __KERNEL__
-#define __ARCH_SYS_PTRACE 1
-
#define force_successful_syscall_return() \
do { current_thread_info()->syscall_noerror = 1; \
} while (0)
@@ -261,8 +259,6 @@ extern void show_regs(struct pt_regs *);
#define SF_XXARG 0x5c
/* Stuff for the ptrace system call */
-#define PTRACE_SUNATTACH 10
-#define PTRACE_SUNDETACH 11
#define PTRACE_GETREGS 12
#define PTRACE_SETREGS 13
#define PTRACE_GETFPREGS 14
@@ -284,18 +280,4 @@ extern void show_regs(struct pt_regs *);
#define PTRACE_GETFPREGS64 25
#define PTRACE_SETFPREGS64 26
-#define PTRACE_GETUCODE 29 /* stupid bsd-ism */
-
-/* These are for 32-bit processes debugging 64-bit ones.
- * Here addr and addr2 are passed in %g2 and %g3 respectively.
- */
-#define PTRACE_PEEKTEXT64 (30 + PTRACE_PEEKTEXT)
-#define PTRACE_POKETEXT64 (30 + PTRACE_POKETEXT)
-#define PTRACE_PEEKDATA64 (30 + PTRACE_PEEKDATA)
-#define PTRACE_POKEDATA64 (30 + PTRACE_POKEDATA)
-#define PTRACE_READDATA64 (30 + PTRACE_READDATA)
-#define PTRACE_WRITEDATA64 (30 + PTRACE_WRITEDATA)
-#define PTRACE_READTEXT64 (30 + PTRACE_READTEXT)
-#define PTRACE_WRITETEXT64 (30 + PTRACE_WRITETEXT)
-
#endif /* !(_SPARC64_PTRACE_H) */
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 99a669c190c7..1faefa6d3708 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -8,6 +8,7 @@
#ifndef __ASSEMBLY__
#include <linux/irqflags.h>
+#include <asm-generic/cmpxchg-local.h>
/*
* Sparc (general) CPU types
@@ -315,6 +316,34 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
(unsigned long)_n_, sizeof(*(ptr))); \
})
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ case 8: return __cmpxchg(ptr, old, new, size);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+
+ return old;
+}
+
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
+
#endif /* !(__ASSEMBLY__) */
#define arch_align_stack(x) (x)
diff --git a/include/asm-sparc64/timex.h b/include/asm-sparc64/timex.h
index 2a5e4ebaad80..c622535c4560 100644
--- a/include/asm-sparc64/timex.h
+++ b/include/asm-sparc64/timex.h
@@ -14,10 +14,6 @@
typedef unsigned long cycles_t;
#define get_cycles() tick_ops->get_tick()
-#define ARCH_HAS_READ_CURRENT_TIMER 1
-#define read_current_timer(timer_val_p) \
-({ *timer_val_p = tick_ops->get_tick(); \
- 0; \
-})
+#define ARCH_HAS_READ_CURRENT_TIMER
#endif
diff --git a/include/asm-sparc64/tlb.h b/include/asm-sparc64/tlb.h
index 349d1d3e9c27..ec81cdedef2c 100644
--- a/include/asm-sparc64/tlb.h
+++ b/include/asm-sparc64/tlb.h
@@ -100,8 +100,8 @@ static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
}
#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
-#define pte_free_tlb(mp,ptepage) pte_free(ptepage)
-#define pmd_free_tlb(mp,pmdp) pmd_free(pmdp)
+#define pte_free_tlb(mp, ptepage) pte_free((mp)->mm, ptepage)
+#define pmd_free_tlb(mp, pmdp) pmd_free((mp)->mm, pmdp)
#define pud_free_tlb(tlb,pudp) __pud_free_tlb(tlb,pudp)
#define tlb_migrate_finish(mm) do { } while (0)
diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h
index cb751b4d0f56..77559da0ea3f 100644
--- a/include/asm-sparc64/unistd.h
+++ b/include/asm-sparc64/unistd.h
@@ -329,11 +329,13 @@
#define __NR_epoll_pwait 309
#define __NR_utimensat 310
#define __NR_signalfd 311
-#define __NR_timerfd 312
+#define __NR_timerfd_create 312
#define __NR_eventfd 313
#define __NR_fallocate 314
+#define __NR_timerfd_settime 315
+#define __NR_timerfd_gettime 316
-#define NR_SYSCALLS 315
+#define NR_SYSCALLS 317
#ifdef __KERNEL__
/* sysconf options, for SunOS compatibility */
diff --git a/include/asm-um/a.out.h b/include/asm-um/a.out.h
index 9281dd8eb334..f42ff14577fa 100644
--- a/include/asm-um/a.out.h
+++ b/include/asm-um/a.out.h
@@ -13,11 +13,9 @@
extern unsigned long stacksizelim;
-extern unsigned long host_task_size;
-
#define STACK_ROOM (stacksizelim)
-#define STACK_TOP task_size
+#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
#define STACK_TOP_MAX STACK_TOP
diff --git a/include/asm-um/current.h b/include/asm-um/current.h
index 8fd72f69ce65..c2191d9aa03d 100644
--- a/include/asm-um/current.h
+++ b/include/asm-um/current.h
@@ -1,32 +1,13 @@
-/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
+/*
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __UM_CURRENT_H
#define __UM_CURRENT_H
-#ifndef __ASSEMBLY__
-
-#include "asm/page.h"
#include "linux/thread_info.h"
#define current (current_thread_info()->task)
-/*Backward compatibility - it's used inside arch/um.*/
-#define current_thread current_thread_info()
-
-#endif /* __ASSEMBLY__ */
-
#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/elf-i386.h b/include/asm-um/elf-i386.h
index ca94a136dfe8..23d6893e8617 100644
--- a/include/asm-um/elf-i386.h
+++ b/include/asm-um/elf-i386.h
@@ -1,11 +1,11 @@
/*
- * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#ifndef __UM_ELF_I386_H
#define __UM_ELF_I386_H
-#include <linux/sched.h>
+#include <asm/user.h>
#include "skas.h"
#define R_386_NONE 0
@@ -46,7 +46,7 @@ typedef struct user_i387_struct elf_fpregset_t;
PT_REGS_EDI(regs) = 0; \
PT_REGS_EBP(regs) = 0; \
PT_REGS_EAX(regs) = 0; \
-} while(0)
+} while (0)
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
@@ -74,14 +74,9 @@ typedef struct user_i387_struct elf_fpregset_t;
pr_reg[14] = PT_REGS_EFLAGS(regs); \
pr_reg[15] = PT_REGS_SP(regs); \
pr_reg[16] = PT_REGS_SS(regs); \
-} while(0);
+} while (0);
-static inline int elf_core_copy_fpregs(struct task_struct *t,
- elf_fpregset_t *fpu)
-{
- int cpu = ((struct thread_info *) t->stack)->cpu;
- return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
-}
+extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
@@ -91,7 +86,7 @@ extern long elf_aux_hwcap;
extern char * elf_aux_platform;
#define ELF_PLATFORM (elf_aux_platform)
-#define SET_PERSONALITY(ex, ibcs2) do ; while(0)
+#define SET_PERSONALITY(ex, ibcs2) do { } while (0)
extern unsigned long vsyscall_ehdr;
extern unsigned long vsyscall_end;
@@ -166,14 +161,3 @@ if ( vsyscall_ehdr ) { \
}
#endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/elf-x86_64.h b/include/asm-um/elf-x86_64.h
index 3c9d543eb61e..3b2d5224a7e1 100644
--- a/include/asm-um/elf-x86_64.h
+++ b/include/asm-um/elf-x86_64.h
@@ -7,7 +7,6 @@
#ifndef __UM_ELF_X86_64_H
#define __UM_ELF_X86_64_H
-#include <linux/sched.h>
#include <asm/user.h>
#include "skas.h"
@@ -96,12 +95,7 @@ typedef struct user_i387_struct elf_fpregset_t;
(pr_reg)[25] = 0; \
(pr_reg)[26] = 0;
-static inline int elf_core_copy_fpregs(struct task_struct *t,
- elf_fpregset_t *fpu)
-{
- int cpu = current_thread->cpu;
- return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
-}
+extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
#define ELF_CORE_COPY_FPREGS(t, fpu) elf_core_copy_fpregs(t, fpu)
diff --git a/include/asm-um/fixmap.h b/include/asm-um/fixmap.h
index d352a35cfafb..89a87c18b927 100644
--- a/include/asm-um/fixmap.h
+++ b/include/asm-um/fixmap.h
@@ -1,9 +1,10 @@
#ifndef __UM_FIXMAP_H
#define __UM_FIXMAP_H
+#include <asm/system.h>
#include <asm/kmap_types.h>
#include <asm/archparam.h>
-#include <asm/elf.h>
+#include <asm/page.h>
/*
* Here we define all the compile-time 'special' virtual
@@ -55,9 +56,8 @@ extern void __set_fixmap (enum fixed_addresses idx,
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
-extern unsigned long get_kmem_end(void);
-#define FIXADDR_TOP (get_kmem_end() - 0x2000)
+#define FIXADDR_TOP (CONFIG_TOP_ADDR - 2 * PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
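For illustration (not part of the patch): FIXADDR_TOP is now derived from CONFIG_TOP_ADDR minus two pages rather than from get_kmem_end(), and FIXADDR_START follows from the number of fixmap slots. A small arithmetic sketch with assumed values (CONFIG_TOP_ADDR and __end_of_fixed_addresses depend on the UML configuration):

#include <stdio.h>

int main(void)
{
	unsigned long top_addr   = 0xc0000000UL;	/* assumed CONFIG_TOP_ADDR */
	unsigned long page_size  = 4096;
	unsigned long page_shift = 12;
	unsigned long nr_fixmaps = 4;	/* assumed __end_of_fixed_addresses */

	unsigned long fixaddr_top   = top_addr - 2 * page_size;
	unsigned long fixaddr_size  = nr_fixmaps << page_shift;
	unsigned long fixaddr_start = fixaddr_top - fixaddr_size;

	printf("FIXADDR_TOP   = %#lx\n", fixaddr_top);
	printf("FIXADDR_START = %#lx\n", fixaddr_start);
	return 0;
}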
diff --git a/include/asm-um/ldt.h b/include/asm-um/ldt.h
index b2553f3e87eb..52af512f5e7d 100644
--- a/include/asm-um/ldt.h
+++ b/include/asm-um/ldt.h
@@ -8,7 +8,7 @@
#ifndef __ASM_LDT_H
#define __ASM_LDT_H
-#include "asm/semaphore.h"
+#include <linux/mutex.h>
#include "asm/host_ldt.h"
extern void ldt_host_info(void);
@@ -27,7 +27,7 @@ struct ldt_entry {
typedef struct uml_ldt {
int entry_count;
- struct semaphore semaphore;
+ struct mutex lock;
union {
struct ldt_entry * pages[LDT_PAGES_MAX];
struct ldt_entry entries[LDT_DIRECT_ENTRIES];
diff --git a/include/asm-um/linkage.h b/include/asm-um/linkage.h
index cdb3024a699a..7dfce37adc8b 100644
--- a/include/asm-um/linkage.h
+++ b/include/asm-um/linkage.h
@@ -3,10 +3,4 @@
#include "asm/arch/linkage.h"
-
-/* <linux/linkage.h> will pick sane defaults */
-#ifdef CONFIG_GPROF
-#undef fastcall
-#endif
-
#endif
diff --git a/include/asm-um/mmu_context.h b/include/asm-um/mmu_context.h
index 5f3b863aef9a..6686fc524ca1 100644
--- a/include/asm-um/mmu_context.h
+++ b/include/asm-um/mmu_context.h
@@ -6,11 +6,12 @@
#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H
-#include <asm-generic/mm_hooks.h>
-
#include "linux/sched.h"
#include "um_mmu.h"
+extern void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
+extern void arch_exit_mmap(struct mm_struct *mm);
+
#define get_mmu_context(task) do ; while(0)
#define activate_context(tsk) do ; while(0)
@@ -30,6 +31,8 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
*/
if (old != new && (current->flags & PF_BORROWED_MM))
__switch_mm(&new->context.id);
+
+ arch_dup_mmap(old, new);
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/include/asm-um/page.h b/include/asm-um/page.h
index 4b424c75fca5..fe2374d705d1 100644
--- a/include/asm-um/page.h
+++ b/include/asm-um/page.h
@@ -30,7 +30,7 @@ struct page;
#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
typedef struct { unsigned long pte_low, pte_high; } pte_t;
-typedef struct { unsigned long long pmd; } pmd_t;
+typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
@@ -106,8 +106,8 @@ extern unsigned long uml_physmem;
#define __pa(virt) to_phys((void *) (unsigned long) (virt))
#define __va(phys) to_virt((unsigned long) (phys))
-#define phys_to_pfn(p) ((p) >> PAGE_SHIFT)
-#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
+#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
diff --git a/include/asm-um/param.h b/include/asm-um/param.h
index f914e7d67b01..4cd4a226f8c1 100644
--- a/include/asm-um/param.h
+++ b/include/asm-um/param.h
@@ -10,7 +10,7 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#ifdef __KERNEL__
-#define HZ 100
+#define HZ CONFIG_HZ
#define USER_HZ 100 /* .. some user interfaces are in "ticks" */
#define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
#endif
diff --git a/include/asm-um/pgalloc.h b/include/asm-um/pgalloc.h
index 14904876e8fb..4f3e62b02861 100644
--- a/include/asm-um/pgalloc.h
+++ b/include/asm-um/pgalloc.h
@@ -23,17 +23,17 @@
* Allocate and free page tables.
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long) pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
@@ -42,7 +42,7 @@ static inline void pte_free(struct page *pte)
#ifdef CONFIG_3_LEVEL_PGTABLES
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
free_page((unsigned long)pmd);
}
diff --git a/include/asm-um/pgtable-2level.h b/include/asm-um/pgtable-2level.h
index 172a75fde512..f534b73e753e 100644
--- a/include/asm-um/pgtable-2level.h
+++ b/include/asm-um/pgtable-2level.h
@@ -41,9 +41,6 @@ static inline void pgd_mkuptodate(pgd_t pgd) { }
#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
-#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
/*
* Bits 0 through 4 are taken
*/
diff --git a/include/asm-um/pgtable-3level.h b/include/asm-um/pgtable-3level.h
index 3ebafbaacb24..0446f456b428 100644
--- a/include/asm-um/pgtable-3level.h
+++ b/include/asm-um/pgtable-3level.h
@@ -11,7 +11,11 @@
/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#ifdef CONFIG_64BIT
#define PGDIR_SHIFT 30
+#else
+#define PGDIR_SHIFT 31
+#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -28,9 +32,15 @@
*/
#define PTRS_PER_PTE 512
+#ifdef CONFIG_64BIT
#define PTRS_PER_PMD 512
-#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define PTRS_PER_PGD 512
+#else
+#define PTRS_PER_PMD 1024
+#define PTRS_PER_PGD 1024
+#endif
+
+#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
#define pte_ERROR(e) \
@@ -49,7 +59,12 @@
#define pud_populate(mm, pud, pmd) \
set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
+#ifdef CONFIG_64BIT
#define set_pud(pudptr, pudval) set_64bit((phys_t *) (pudptr), pud_val(pudval))
+#else
+#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
+#endif
+
static inline int pgd_newpage(pgd_t pgd)
{
return(pgd_val(pgd) & _PAGE_NEWPAGE);
@@ -57,17 +72,14 @@ static inline int pgd_newpage(pgd_t pgd)
static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
+#ifdef CONFIG_64BIT
#define set_pmd(pmdptr, pmdval) set_64bit((phys_t *) (pmdptr), pmd_val(pmdval))
+#else
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#endif
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
- pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
-
- if(pmd)
- memset(pmd, 0, PAGE_SIZE);
-
- return pmd;
-}
+struct mm_struct;
+extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
static inline void pud_clear (pud_t *pud)
{
@@ -75,8 +87,7 @@ static inline void pud_clear (pud_t *pud)
}
#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
-#define pud_page_vaddr(pud) \
- ((struct page *) __va(pud_val(pud) & PAGE_MASK))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
/* Find an entry in the second-level page table.. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
diff --git a/include/asm-um/pgtable.h b/include/asm-um/pgtable.h
index 830fc6e5d49d..4102b443e925 100644
--- a/include/asm-um/pgtable.h
+++ b/include/asm-um/pgtable.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Copyright 2003 PathScale, Inc.
* Derived from include/asm-i386/pgtable.h
* Licensed under the GPL
@@ -8,11 +8,7 @@
#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H
-#include "linux/sched.h"
-#include "linux/linkage.h"
-#include "asm/processor.h"
-#include "asm/page.h"
-#include "asm/fixmap.h"
+#include <asm/fixmap.h>
#define _PAGE_PRESENT 0x001
#define _PAGE_NEWPAGE 0x002
@@ -34,22 +30,11 @@
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern void *um_virt_to_phys(struct task_struct *task, unsigned long virt,
- pte_t *pte_out);
-
/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;
#define pgtable_cache_init() do ; while (0)
-/*
- * pgd entries used up by user/kernel:
- */
-
-#define USER_PGD_PTRS (TASK_SIZE >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#ifndef __ASSEMBLY__
/* Just any arbitrary offset to the start of the vmalloc VM area: the
* current 8MB value just means that there will be a 8MB "hole" after the
* physical memory until the kernel virtual memory starts. That means that
@@ -62,16 +47,12 @@ extern unsigned long end_iomem;
#define VMALLOC_OFFSET (__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
-#define REGION_SHIFT (sizeof(pte_t) * 8 - 4)
-#define REGION_MASK (((unsigned long) 0xf) << REGION_SHIFT)
-
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
@@ -81,11 +62,12 @@ extern unsigned long end_iomem;
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
/*
- * The i386 can't do page protection for execute, and considers that the same are read.
- * Also, write permissions imply read permissions. This is the closest we can get..
+ * The i386 can't do page protection for execute, and considers that the same
+ * are read.
+ * Also, write permissions imply read permissions. This is the closest we can
+ * get..
*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
@@ -106,40 +88,16 @@ extern unsigned long end_iomem;
#define __S111 PAGE_SHARED
/*
- * Define this if things work differently on an i386 and an i486:
- * it will (on an i486) warn about kernel memory accesses that are
- * done without a 'access_ok(VERIFY_WRITE,..)'
- */
-#undef TEST_VERIFY_AREA
-
-/* page table for 0-4MB for everybody */
-extern unsigned long pg0[1024];
-
-/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
-/* number of bits that fit into a memory pointer */
-#define BITS_PER_PTR (8*sizeof(unsigned long))
-
-/* to align the pointer to a pointer address */
-#define PTR_MASK (~(sizeof(void*)-1))
-
-/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
-/* 64-bit machines, beware! SRB. */
-#define SIZEOF_PTR_LOG2 3
-
-/* to find an entry in a page-table */
-#define PAGE_PTR(address) \
-((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
-
#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+
#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
@@ -149,14 +107,9 @@ extern unsigned long pg0[1024];
#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pte_address(x) (__va(pte_val(x) & PAGE_MASK))
-#define mk_phys(a, r) ((a) + (((unsigned long) r) << REGION_SHIFT))
-#define phys_addr(p) ((p) & ~REGION_MASK)
#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
@@ -309,7 +262,8 @@ static inline void set_pte(pte_t *pteptr, pte_t pteval)
#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
-#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
+#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
#define mk_pte(page, pgprot) \
({ pte_t pte; \
@@ -325,8 +279,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
return pte;
}
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
/*
* the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
*
@@ -335,8 +287,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_index_k(addr) pgd_index(addr)
-
/*
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
@@ -355,8 +305,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* this macro returns the index of the entry in the pmd page which would
* control the given virtual address
*/
+#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+#define pmd_page_vaddr(pmd) \
+ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
/*
* the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
*
@@ -372,6 +326,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
+struct mm_struct;
+extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
+
#define update_mmu_cache(vma,address,pte) do ; while (0)
/* Encode and de-code a swap entry */
@@ -388,29 +345,4 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#include <asm-generic/pgtable.h>
-#include <asm-generic/pgtable-nopud.h>
-
-#ifdef CONFIG_HIGHMEM
-/* Clear a kernel PTE and flush it from the TLB */
-#define kpte_clear_flush(ptep, vaddr) \
-do { \
- pte_clear(&init_mm, vaddr, ptep); \
- __flush_tlb_one(vaddr); \
-} while (0)
#endif
-
-#endif
-#endif
-
-#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only. This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h
index 78c0599cc80c..b7d9a16a7451 100644
--- a/include/asm-um/processor-generic.h
+++ b/include/asm-um/processor-generic.h
@@ -11,6 +11,7 @@ struct pt_regs;
struct task_struct;
#include "asm/ptrace.h"
+#include "asm/pgtable.h"
#include "registers.h"
#include "sysdep/archsetjmp.h"
@@ -26,7 +27,6 @@ struct thread_struct {
* as of 2.6.11).
*/
int forking;
- int nsyscalls;
struct pt_regs regs;
int singlestep_syscall;
void *fault_addr;
@@ -58,7 +58,6 @@ struct thread_struct {
#define INIT_THREAD \
{ \
.forking = 0, \
- .nsyscalls = 0, \
.regs = EMPTY_REGS, \
.fault_addr = NULL, \
.prev_sched = NULL, \
@@ -68,10 +67,6 @@ struct thread_struct {
.request = { 0 } \
}
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
extern struct task_struct *alloc_task_struct(void);
static inline void release_thread(struct task_struct *task)
@@ -97,9 +92,7 @@ static inline void mm_copy_segments(struct mm_struct *from_mm,
/*
* User space process size: 3GB (default).
*/
-extern unsigned long task_size;
-
-#define TASK_SIZE (task_size)
+#define TASK_SIZE (CONFIG_TOP_ADDR & PGDIR_MASK)
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
@@ -128,6 +121,6 @@ extern struct cpuinfo_um cpu_data[];
#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
-#define get_wchan(p) (0)
+extern unsigned long get_wchan(struct task_struct *p);
#endif
diff --git a/include/asm-um/processor-i386.h b/include/asm-um/processor-i386.h
index 595f1c3e1e40..a2b7fe13fe1e 100644
--- a/include/asm-um/processor-i386.h
+++ b/include/asm-um/processor-i386.h
@@ -10,7 +10,6 @@
#include "asm/host_ldt.h"
#include "asm/segment.h"
-extern int host_has_xmm;
extern int host_has_cmov;
/* include faultinfo structure */
diff --git a/include/asm-um/thread_info.h b/include/asm-um/thread_info.h
index 6e5fd5c892d0..356b83e2c22e 100644
--- a/include/asm-um/thread_info.h
+++ b/include/asm-um/thread_info.h
@@ -1,5 +1,5 @@
-/*
- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+/*
+ * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
@@ -8,8 +8,9 @@
#ifndef __ASSEMBLY__
-#include <asm/processor.h>
#include <asm/types.h>
+#include <asm/page.h>
+#include <asm/uaccess.h>
struct thread_info {
struct task_struct *task; /* main task structure */
@@ -75,8 +76,8 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
- * TIF_NEED_RESCHED
+#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
+ * TIF_NEED_RESCHED
*/
#define TIF_RESTART_BLOCK 4
#define TIF_MEMDIE 5
diff --git a/include/asm-um/tlb.h b/include/asm-um/tlb.h
index c640033bc1fd..39fc475df6c9 100644
--- a/include/asm-um/tlb.h
+++ b/include/asm-um/tlb.h
@@ -1,6 +1,126 @@
#ifndef __UM_TLB_H
#define __UM_TLB_H
-#include <asm/arch/tlb.h>
+#include <linux/swap.h>
+#include <asm/percpu.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
+ */
+struct mmu_gather {
+ struct mm_struct *mm;
+ unsigned int need_flush; /* Really unmapped some ptes? */
+ unsigned long start;
+ unsigned long end;
+ unsigned int fullmm; /* non-zero means full mm flush */
+};
+
+/* Users of the generic TLB shootdown code must declare this storage space. */
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
+ unsigned long address)
+{
+ if (tlb->start > address)
+ tlb->start = address;
+ if (tlb->end < address + PAGE_SIZE)
+ tlb->end = address + PAGE_SIZE;
+}
+
+static inline void init_tlb_gather(struct mmu_gather *tlb)
+{
+ tlb->need_flush = 0;
+
+ tlb->start = TASK_SIZE;
+ tlb->end = 0;
+
+ if (tlb->fullmm) {
+ tlb->start = 0;
+ tlb->end = TASK_SIZE;
+ }
+}
+
+/* tlb_gather_mmu
+ * Return a pointer to an initialized struct mmu_gather.
+ */
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+ tlb->mm = mm;
+ tlb->fullmm = full_mm_flush;
+
+ init_tlb_gather(tlb);
+
+ return tlb;
+}
+
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end);
+
+static inline void
+tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ if (!tlb->need_flush)
+ return;
+
+ flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
+ init_tlb_gather(tlb);
+}
+
+/* tlb_finish_mmu
+ * Called at the end of the shootdown operation to free up any resources
+ * that were required.
+ */
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ tlb_flush_mmu(tlb, start, end);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+
+ put_cpu_var(mmu_gathers);
+}
+
+/* tlb_remove_page
+ * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
+ * while handling the additional races in SMP caused by other CPUs
+ * caching valid mappings in their TLBs.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ tlb->need_flush = 1;
+ free_page_and_swap_cache(page);
+ return;
+}
+
+/**
+ * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+ *
+ * Record the fact that ptes were really unmapped in ->need_flush, so we can
+ * later optimise away the tlb invalidate. This helps when userspace is
+ * unmapping already-unmapped pages, which happens quite a lot.
+ */
+#define tlb_remove_tlb_entry(tlb, ptep, address) \
+ do { \
+ tlb->need_flush = 1; \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
+ } while (0)
+
+#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
+
+#define pud_free_tlb(tlb, pudp) __pud_free_tlb(tlb, pudp)
+
+#define pmd_free_tlb(tlb, pmdp) __pmd_free_tlb(tlb, pmdp)
+
+#define tlb_migrate_finish(mm) do {} while (0)
#endif
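The rewritten asm-um/tlb.h above open-codes the generic mmu_gather shootdown interface instead of pulling in the host architecture's tlb.h. A rough, hedged sketch of the caller side follows (kernel context only, not compilable on its own; example_unmap_range() and its pte walk are hypothetical placeholders, while tlb_gather_mmu(), tlb_remove_tlb_entry(), tlb_remove_page() and tlb_finish_mmu() are the helpers defined in the hunk above):

/*
 * Illustrative only: roughly how core mm code drives the mmu_gather
 * helpers defined above when tearing down part of an address space.
 */
static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	/* 0 = not a full-mm teardown, so only the touched range is flushed */
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

	/*
	 * For each present pte in [start, end):
	 *   tlb_remove_tlb_entry(tlb, ptep, addr);  // widens tlb->start/end
	 *   tlb_remove_page(tlb, page);             // frees page, need_flush=1
	 * (pte walking code omitted here.)
	 */

	tlb_finish_mmu(tlb, start, end);	/* flushes only if need_flush */
}

Because init_tlb_gather() primes start/end to an empty range in the partial case, tlb_flush_mmu() ends up calling flush_tlb_mm_range() on just the span that __tlb_remove_tlb_entry() accumulated.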
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h
index 077032d4fc47..b9a895d6fa1d 100644
--- a/include/asm-um/uaccess.h
+++ b/include/asm-um/uaccess.h
@@ -6,7 +6,15 @@
#ifndef __UM_UACCESS_H
#define __UM_UACCESS_H
-#include "linux/sched.h"
+#include <asm/errno.h>
+#include <asm/processor.h>
+
+/* thread_info has a mm_segment_t in it, so put the definition up here */
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+#include "linux/thread_info.h"
#define VERIFY_READ 0
#define VERIFY_WRITE 1
diff --git a/include/asm-v850/elf.h b/include/asm-v850/elf.h
index 7db8edffb1c6..28f5b176ff1a 100644
--- a/include/asm-v850/elf.h
+++ b/include/asm-v850/elf.h
@@ -94,8 +94,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
0; \
} while (0)
-#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
-#endif
#endif /* __V850_ELF_H__ */
diff --git a/include/asm-v850/io.h b/include/asm-v850/io.h
index cc364fcbec10..cdad251fba9f 100644
--- a/include/asm-v850/io.h
+++ b/include/asm-v850/io.h
@@ -122,8 +122,6 @@ outsl (unsigned long port, const void *src, unsigned long count)
#endif
/* Conversion between virtual and physical mappings. */
-#define mm_ptov(addr) ((void *)__phys_to_virt (addr))
-#define mm_vtop(addr) ((unsigned long)__virt_to_phys (addr))
#define phys_to_virt(addr) ((void *)__phys_to_virt (addr))
#define virt_to_phys(addr) ((unsigned long)__virt_to_phys (addr))
diff --git a/include/asm-v850/page.h b/include/asm-v850/page.h
index d693ffb1364d..661d8cd08839 100644
--- a/include/asm-v850/page.h
+++ b/include/asm-v850/page.h
@@ -14,8 +14,6 @@
#ifndef __V850_PAGE_H__
#define __V850_PAGE_H__
-#ifdef __KERNEL__
-
#include <asm/machdep.h>
@@ -126,6 +124,4 @@ typedef unsigned long pgprot_t;
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
-#endif /* KERNEL */
-
#endif /* __V850_PAGE_H__ */
diff --git a/include/asm-v850/system.h b/include/asm-v850/system.h
index a34ddfafd561..7daf1fdee119 100644
--- a/include/asm-v850/system.h
+++ b/include/asm-v850/system.h
@@ -103,6 +103,21 @@ static inline unsigned long __xchg (unsigned long with,
return tmp;
}
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#ifndef CONFIG_SMP
+#include <asm-generic/cmpxchg.h>
+#endif
+
#define arch_align_stack(x) (x)
#endif /* __V850_SYSTEM_H__ */
diff --git a/include/asm-v850/user.h b/include/asm-v850/user.h
index ccf4cea6dc9c..63cdc567d272 100644
--- a/include/asm-v850/user.h
+++ b/include/asm-v850/user.h
@@ -3,8 +3,6 @@
/* Adapted from <asm-ppc/user.h>. */
-#ifdef __KERNEL__
-
#include <linux/ptrace.h>
#include <asm/page.h>
@@ -40,7 +38,7 @@ struct user {
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
- struct regs * u_ar0; /* help gdb find registers */
+ unsigned long u_ar0; /* help gdb find registers */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
@@ -51,6 +49,4 @@ struct user {
#define HOST_DATA_START_ADDR (u.start_data)
#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
-#endif /* __KERNEL__ */
-
#endif /* __V850_USER_H__ */
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild
index 3c6f0f80e827..b04a7ff46df1 100644
--- a/include/asm-x86/Kbuild
+++ b/include/asm-x86/Kbuild
@@ -22,7 +22,5 @@ unifdef-y += posix_types_64.h
unifdef-y += ptrace.h
unifdef-y += unistd_32.h
unifdef-y += unistd_64.h
-unifdef-y += user_32.h
-unifdef-y += user_64.h
unifdef-y += vm86.h
unifdef-y += vsyscall.h
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
index 48adbf56ca60..aaf15194d536 100644
--- a/include/asm-x86/bitops_64.h
+++ b/include/asm-x86/bitops_64.h
@@ -37,12 +37,6 @@ static inline long __scanbit(unsigned long val, unsigned long max)
((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
find_next_zero_bit(addr,size,off)))
-/*
- * Find string of zero bits in a bitmap. -1 when not found.
- */
-extern unsigned long
-find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);
-
static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
int len)
{
@@ -53,16 +47,6 @@ static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
}
}
-static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
- int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- __clear_bit(i, bitmap);
- i++;
- }
-}
-
/**
* ffz - find first zero in word.
* @word: The word to search
diff --git a/include/asm-x86/cmpxchg_64.h b/include/asm-x86/cmpxchg_64.h
index 5e182062e6ec..56f5b41e071c 100644
--- a/include/asm-x86/cmpxchg_64.h
+++ b/include/asm-x86/cmpxchg_64.h
@@ -124,11 +124,21 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return old;
}
-#define cmpxchg(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
-#define cmpxchg_local(ptr,o,n)\
- ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
- (unsigned long)(n),sizeof(*(ptr))))
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+ })
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) \
+ ({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+ })
#endif
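The new cmpxchg64()/cmpxchg64_local() wrappers above only add a compile-time BUILD_BUG_ON() size check on top of cmpxchg()/cmpxchg_local(); the return convention is unchanged: the macro yields the value that was in *ptr before the operation, so the swap took effect only if that value equals the expected old one. A small userspace illustration of that convention (GCC's __sync_val_compare_and_swap is used here as a stand-in, since the kernel macros are not usable outside the kernel):

#include <stdio.h>

static long counter = 5;

int main(void)
{
	/* succeeds: current value matches the expected old value (5) */
	long old = __sync_val_compare_and_swap(&counter, 5, 9);
	printf("old=%ld counter=%ld (%s)\n", old, counter,
	       old == 5 ? "swapped" : "left alone");

	/* fails: counter is now 9, not 5, so nothing is written */
	old = __sync_val_compare_and_swap(&counter, 5, 1);
	printf("old=%ld counter=%ld (%s)\n", old, counter,
	       old == 5 ? "swapped" : "left alone");
	return 0;
}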
diff --git a/include/asm-x86/delay.h b/include/asm-x86/delay.h
index d11d47fc1a0e..409a649204aa 100644
--- a/include/asm-x86/delay.h
+++ b/include/asm-x86/delay.h
@@ -13,7 +13,7 @@ extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
-extern void __const_udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
/* 0x10c7 is 2**32 / 1000000 (rounded up) */
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index d9c94e707289..fb62f9941e38 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -72,7 +72,6 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
#endif
-#ifdef __KERNEL__
#include <asm/vdso.h>
extern unsigned int vdso_enabled;
@@ -321,6 +320,4 @@ extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
-#endif /* __KERNEL__ */
-
#endif
diff --git a/include/asm-x86/highmem.h b/include/asm-x86/highmem.h
index c25cfcaab589..479767c9195f 100644
--- a/include/asm-x86/highmem.h
+++ b/include/asm-x86/highmem.h
@@ -38,11 +38,6 @@ extern pte_t *pkmap_page_table;
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
/*
* Ordering is:
*
@@ -58,7 +53,6 @@ extern pte_t *pkmap_page_table;
* VMALLOC_START
* high_memory
*/
-#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index 5d6f4ce6e6d6..274a59566c45 100644
--- a/include/asm-x86/mmzone_32.h
+++ b/include/asm-x86/mmzone_32.h
@@ -107,8 +107,8 @@ static inline int pfn_valid(int pfn)
/*
* Following are macros that are specific to this numa platform.
*/
-#define reserve_bootmem(addr, size) \
- reserve_bootmem_node(NODE_DATA(0), (addr), (size))
+#define reserve_bootmem(addr, size, flags) \
+ reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
#define alloc_bootmem(x) \
__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
diff --git a/include/asm-x86/pgalloc_32.h b/include/asm-x86/pgalloc_32.h
index 6c21ef951dab..bab12718a913 100644
--- a/include/asm-x86/pgalloc_32.h
+++ b/include/asm-x86/pgalloc_32.h
@@ -36,17 +36,17 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
* Allocate and free page tables.
*/
extern pgd_t *pgd_alloc(struct mm_struct *);
-extern void pgd_free(pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
@@ -63,7 +63,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
free_page((unsigned long)pmd);
diff --git a/include/asm-x86/pgalloc_64.h b/include/asm-x86/pgalloc_64.h
index 8bb564687860..4f6220db22b1 100644
--- a/include/asm-x86/pgalloc_64.h
+++ b/include/asm-x86/pgalloc_64.h
@@ -17,7 +17,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
}
-static inline void pmd_free(pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
free_page((unsigned long)pmd);
@@ -33,7 +33,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
-static inline void pud_free (pud_t *pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
free_page((unsigned long)pud);
@@ -42,19 +42,21 @@ static inline void pud_free (pud_t *pud)
static inline void pgd_list_add(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);
+ unsigned long flags;
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
list_add(&page->lru, &pgd_list);
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
}
static inline void pgd_list_del(pgd_t *pgd)
{
struct page *page = virt_to_page(pgd);
+ unsigned long flags;
- spin_lock(&pgd_lock);
+ spin_lock_irqsave(&pgd_lock, flags);
list_del(&page->lru);
- spin_unlock(&pgd_lock);
+ spin_unlock_irqrestore(&pgd_lock, flags);
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
@@ -77,7 +79,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
return pgd;
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
pgd_list_del(pgd);
@@ -100,13 +102,13 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
/* Should really implement gc for free page table pages. This could be
done with a reference count in struct page. */
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
free_page((unsigned long)pte);
}
-static inline void pte_free(struct page *pte)
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
__free_page(pte);
}
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 935630d17304..80dd438642f6 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -66,6 +66,14 @@ void paging_init(void);
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long) high_memory + \
2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+
+#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
diff --git a/include/asm-x86/termios.h b/include/asm-x86/termios.h
index d501748700d2..f72956331c49 100644
--- a/include/asm-x86/termios.h
+++ b/include/asm-x86/termios.h
@@ -41,6 +41,8 @@ struct termio {
#ifdef __KERNEL__
+#include <asm/uaccess.h>
+
/* intr=^C quit=^\ erase=del kill=^U
eof=^D vtime=\0 vmin=\1 sxtc=\0
start=^Q stop=^S susp=^Z eol=\0
@@ -58,39 +60,53 @@ struct termio {
*(unsigned short *) &(termios)->x = __tmp; \
}
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
- SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
- SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
- copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
+static inline int user_termio_to_kernel_termios(struct ktermios *termios,
+ struct termio __user *termio)
+{
+ SET_LOW_TERMIOS_BITS(termios, termio, c_iflag);
+ SET_LOW_TERMIOS_BITS(termios, termio, c_oflag);
+ SET_LOW_TERMIOS_BITS(termios, termio, c_cflag);
+ SET_LOW_TERMIOS_BITS(termios, termio, c_lflag);
+ return copy_from_user(termios->c_cc, termio->c_cc, NCC);
+}
/*
* Translate a "termios" structure into a "termio". Ugh.
*/
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
- put_user((termios)->c_iflag, &(termio)->c_iflag); \
- put_user((termios)->c_oflag, &(termio)->c_oflag); \
- put_user((termios)->c_cflag, &(termio)->c_cflag); \
- put_user((termios)->c_lflag, &(termio)->c_lflag); \
- put_user((termios)->c_line, &(termio)->c_line); \
- copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) \
- copy_from_user(k, u, sizeof(struct termios2))
-
-#define kernel_termios_to_user_termios(u, k) \
- copy_to_user(u, k, sizeof(struct termios2))
-
-#define user_termios_to_kernel_termios_1(k, u) \
- copy_from_user(k, u, sizeof(struct termios))
-
-#define kernel_termios_to_user_termios_1(u, k) \
- copy_to_user(u, k, sizeof(struct termios))
+static inline int kernel_termios_to_user_termio(struct termio __user *termio,
+ struct ktermios *termios)
+{
+ put_user((termios)->c_iflag, &(termio)->c_iflag);
+ put_user((termios)->c_oflag, &(termio)->c_oflag);
+ put_user((termios)->c_cflag, &(termio)->c_cflag);
+ put_user((termios)->c_lflag, &(termio)->c_lflag);
+ put_user((termios)->c_line, &(termio)->c_line);
+ return copy_to_user((termio)->c_cc, (termios)->c_cc, NCC);
+}
+
+static inline int user_termios_to_kernel_termios(struct ktermios *k,
+ struct termios2 __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios2));
+}
+
+static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios2));
+}
+
+static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
+ struct termios __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios));
+}
+
+static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios));
+}
#endif /* __KERNEL__ */
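The termios.h hunk above converts the termio/termios copy helpers from statement-expression macros into typed inline functions that return the copy_{from,to}_user() result. From userspace, the legacy struct termio path these helpers service is reached through the TCGETA/TCSETA ioctls; a minimal, runnable probe (run it on a terminal):

#include <stdio.h>
#include <sys/ioctl.h>
#include <termio.h>		/* struct termio, TCGETA, VMIN */
#include <unistd.h>

int main(void)
{
	struct termio tio;

	if (!isatty(STDIN_FILENO))
		return 1;
	/* TCGETA asks the tty layer to fill a struct termio, i.e. the
	 * kernel_termios_to_user_termio() direction shown above. */
	if (ioctl(STDIN_FILENO, TCGETA, &tio) < 0) {
		perror("TCGETA");
		return 1;
	}
	printf("c_lflag=%#o c_cc[VMIN]=%u\n",
	       (unsigned)tio.c_lflag, (unsigned)tio.c_cc[VMIN]);
	return 0;
}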
diff --git a/include/asm-x86/timex.h b/include/asm-x86/timex.h
index 27cfd6c599ba..43e5a78500c5 100644
--- a/include/asm-x86/timex.h
+++ b/include/asm-x86/timex.h
@@ -14,7 +14,6 @@
#endif
#define CLOCK_TICK_RATE PIT_TICK_RATE
-extern int read_current_timer(unsigned long *timer_value);
-#define ARCH_HAS_READ_CURRENT_TIMER 1
+#define ARCH_HAS_READ_CURRENT_TIMER
#endif
diff --git a/include/asm-x86/unistd_32.h b/include/asm-x86/unistd_32.h
index 8d8f9b5adbb9..984123a68f7c 100644
--- a/include/asm-x86/unistd_32.h
+++ b/include/asm-x86/unistd_32.h
@@ -327,9 +327,11 @@
#define __NR_epoll_pwait 319
#define __NR_utimensat 320
#define __NR_signalfd 321
-#define __NR_timerfd 322
+#define __NR_timerfd_create 322
#define __NR_eventfd 323
#define __NR_fallocate 324
+#define __NR_timerfd_settime 325
+#define __NR_timerfd_gettime 326
#ifdef __KERNEL__
diff --git a/include/asm-x86/unistd_64.h b/include/asm-x86/unistd_64.h
index 5ff4d3e24c34..3883ceb54ef5 100644
--- a/include/asm-x86/unistd_64.h
+++ b/include/asm-x86/unistd_64.h
@@ -629,12 +629,17 @@ __SYSCALL(__NR_utimensat, sys_utimensat)
__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
#define __NR_signalfd 282
__SYSCALL(__NR_signalfd, sys_signalfd)
-#define __NR_timerfd 283
-__SYSCALL(__NR_timerfd, sys_timerfd)
+#define __NR_timerfd_create 283
+__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
#define __NR_eventfd 284
__SYSCALL(__NR_eventfd, sys_eventfd)
#define __NR_fallocate 285
__SYSCALL(__NR_fallocate, sys_fallocate)
+#define __NR_timerfd_settime 286
+__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
+#define __NR_timerfd_gettime 287
+__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
+
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
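Both unistd headers above retire the old __NR_timerfd slot in favour of timerfd_create plus separate timerfd_settime/timerfd_gettime calls. A minimal, runnable userspace use of the split interface, assuming a libc recent enough to ship <sys/timerfd.h> wrappers for these syscall numbers:

#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <sys/timerfd.h>

int main(void)
{
	struct itimerspec its = {
		.it_value    = { .tv_sec = 1, .tv_nsec = 0 },	/* first expiry */
		.it_interval = { .tv_sec = 0, .tv_nsec = 0 },	/* one-shot */
	};
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0) {
		perror("timerfd");
		return 1;
	}
	if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
		printf("expired %llu time(s)\n",
		       (unsigned long long)expirations);
	close(fd);
	return 0;
}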
diff --git a/include/asm-x86/user.h b/include/asm-x86/user.h
index 484715abe74a..999873b22e7f 100644
--- a/include/asm-x86/user.h
+++ b/include/asm-x86/user.h
@@ -1,13 +1,5 @@
-#ifdef __KERNEL__
-# ifdef CONFIG_X86_32
-# include "user_32.h"
-# else
-# include "user_64.h"
-# endif
+#ifdef CONFIG_X86_32
+# include "user_32.h"
#else
-# ifdef __i386__
-# include "user_32.h"
-# else
-# include "user_64.h"
-# endif
+# include "user_64.h"
#endif
diff --git a/include/asm-x86/user_32.h b/include/asm-x86/user_32.h
index ed8b8fc6906c..6157da6f882c 100644
--- a/include/asm-x86/user_32.h
+++ b/include/asm-x86/user_32.h
@@ -116,7 +116,7 @@ struct user{
esp register. */
long int signal; /* Signal that caused the core dump. */
int reserved; /* No longer used */
- struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */
+ unsigned long u_ar0; /* Used by gdb to help find the values for */
/* the registers. */
struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */
unsigned long magic; /* To uniquely identify a core file */
diff --git a/include/asm-x86/user_64.h b/include/asm-x86/user_64.h
index a5449d456cc0..963616455609 100644
--- a/include/asm-x86/user_64.h
+++ b/include/asm-x86/user_64.h
@@ -118,7 +118,7 @@ struct user{
long int signal; /* Signal that caused the core dump. */
int reserved; /* No longer used */
int pad1;
- struct user_pt_regs * u_ar0; /* Used by gdb to help find the values for */
+ unsigned long u_ar0; /* Used by gdb to help find the values for */
/* the registers. */
struct user_i387_struct* u_fpstate; /* Math Co-processor pointer. */
unsigned long magic; /* To uniquely identify a core file */
diff --git a/include/asm-xtensa/elf.h b/include/asm-xtensa/elf.h
index 7083d46766a8..467384542502 100644
--- a/include/asm-xtensa/elf.h
+++ b/include/asm-xtensa/elf.h
@@ -257,8 +257,6 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
_r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
} while (0)
-#ifdef __KERNEL__
-
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
struct task_struct;
@@ -272,5 +270,4 @@ extern void do_save_fpregs (elf_fpregset_t*, struct pt_regs*,
extern int do_restore_fpregs (elf_fpregset_t*, struct pt_regs*,
struct task_struct*);
-#endif /* __KERNEL__ */
#endif /* _XTENSA_ELF_H */
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h
index 55ce2c9749a3..1adedbf41d01 100644
--- a/include/asm-xtensa/page.h
+++ b/include/asm-xtensa/page.h
@@ -11,8 +11,6 @@
#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H
-#ifdef __KERNEL__
-
#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>
@@ -174,5 +172,4 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
-#endif /* __KERNEL__ */
#endif /* _XTENSA_PAGE_H */
diff --git a/include/asm-xtensa/pgalloc.h b/include/asm-xtensa/pgalloc.h
index 3e5b56525102..1d51ba5463f9 100644
--- a/include/asm-xtensa/pgalloc.h
+++ b/include/asm-xtensa/pgalloc.h
@@ -31,7 +31,7 @@ pgd_alloc(struct mm_struct *mm)
return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
}
-static inline void pgd_free(pgd_t *pgd)
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
@@ -52,12 +52,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return virt_to_page(pte_alloc_one_kernel(mm, addr));
}
-static inline void pte_free_kernel(pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
kmem_cache_free(pgtable_cache, pte);
}
-static inline void pte_free(struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
{
kmem_cache_free(pgtable_cache, page_address(page));
}
diff --git a/include/asm-xtensa/system.h b/include/asm-xtensa/system.h
index ddc970847ae9..e0cb9116d8ab 100644
--- a/include/asm-xtensa/system.h
+++ b/include/asm-xtensa/system.h
@@ -156,8 +156,30 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
(unsigned long)_n_, sizeof (*(ptr))); \
})
+#include <asm-generic/cmpxchg-local.h>
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ switch (size) {
+ case 4:
+ return __cmpxchg_u32(ptr, old, new);
+ default:
+ return __cmpxchg_local_generic(ptr, old, new, size);
+ }
+ return old;
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
/*
* xchg_u32
diff --git a/include/asm-xtensa/tlb.h b/include/asm-xtensa/tlb.h
index 4830232017af..31c220faca02 100644
--- a/include/asm-xtensa/tlb.h
+++ b/include/asm-xtensa/tlb.h
@@ -42,6 +42,6 @@
#include <asm-generic/tlb.h>
-#define __pte_free_tlb(tlb,pte) pte_free(pte)
+#define __pte_free_tlb(tlb, pte) pte_free((tlb)->mm, pte)
#endif /* _XTENSA_TLB_H */
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index c0f9bb78727d..2ebf068ba504 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -18,7 +18,6 @@ header-y += usb/
header-y += affs_hardblocks.h
header-y += aio_abi.h
-header-y += a.out.h
header-y += arcfb.h
header-y += atmapi.h
header-y += atmbr2684.h
@@ -60,7 +59,6 @@ header-y += dqblk_v2.h
header-y += dqblk_xfs.h
header-y += efs_fs_sb.h
header-y += elf-fdpic.h
-header-y += elf.h
header-y += elf-em.h
header-y += fadvise.h
header-y += fd.h
@@ -190,6 +188,7 @@ unifdef-y += dccp.h
unifdef-y += dirent.h
unifdef-y += dlm.h
unifdef-y += edd.h
+unifdef-y += elf.h
unifdef-y += elfcore.h
unifdef-y += errno.h
unifdef-y += errqueue.h
@@ -219,6 +218,7 @@ unifdef-y += i2c-dev.h
unifdef-y += icmp.h
unifdef-y += icmpv6.h
unifdef-y += if_addr.h
+unifdef-y += if_addrlabel.h
unifdef-y += if_arp.h
unifdef-y += if_bridge.h
unifdef-y += if_ec.h
@@ -343,7 +343,6 @@ unifdef-y += uinput.h
unifdef-y += uio.h
unifdef-y += unistd.h
unifdef-y += usbdevice_fs.h
-unifdef-y += user.h
unifdef-y += utsname.h
unifdef-y += videodev2.h
unifdef-y += videodev.h
diff --git a/include/linux/a.out.h b/include/linux/a.out.h
index f913cc3e1b0d..82cd918f2ab7 100644
--- a/include/linux/a.out.h
+++ b/include/linux/a.out.h
@@ -128,12 +128,20 @@ enum machine_type {
#endif
#ifdef linux
+#ifdef __KERNEL__
#include <asm/page.h>
+#else
+#include <unistd.h>
+#endif
#if defined(__i386__) || defined(__mc68000__)
#define SEGMENT_SIZE 1024
#else
#ifndef SEGMENT_SIZE
+#ifdef __KERNEL__
#define SEGMENT_SIZE PAGE_SIZE
+#else
+#define SEGMENT_SIZE getpagesize()
+#endif
#endif
#endif
#endif
diff --git a/include/linux/ac97_codec.h b/include/linux/ac97_codec.h
index 22eb9367235a..0260c3e79fdd 100644
--- a/include/linux/ac97_codec.h
+++ b/include/linux/ac97_codec.h
@@ -326,11 +326,7 @@ struct ac97_ops
#define AC97_DEFAULT_POWER_OFF 4 /* Needs warm reset to power up */
};
-extern int ac97_read_proc (char *page_out, char **start, off_t off,
- int count, int *eof, void *data);
extern int ac97_probe_codec(struct ac97_codec *);
-extern unsigned int ac97_set_adc_rate(struct ac97_codec *codec, unsigned int rate);
-extern unsigned int ac97_set_dac_rate(struct ac97_codec *codec, unsigned int rate);
extern struct ac97_codec *ac97_alloc_codec(void);
extern void ac97_release_codec(struct ac97_codec *codec);
@@ -363,7 +359,4 @@ struct ac97_quirk {
int type; /* quirk type above */
};
-struct pci_dev;
-extern int ac97_tune_hardware(struct pci_dev *pdev, struct ac97_quirk *quirk, int override);
-
#endif /* _AC97_CODEC_H_ */
diff --git a/include/linux/acct.h b/include/linux/acct.h
index 302eb727ecb8..e8cae54e8d88 100644
--- a/include/linux/acct.h
+++ b/include/linux/acct.h
@@ -173,7 +173,11 @@ typedef struct acct acct_t;
static inline u32 jiffies_to_AHZ(unsigned long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / AHZ)) == 0
+# if HZ < AHZ
+ return x * (AHZ / HZ);
+# else
return x / (HZ / AHZ);
+# endif
#else
u64 tmp = (u64)x * TICK_NSEC;
do_div(tmp, (NSEC_PER_SEC / AHZ));
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 63f2e6ed698f..ddbe7efe590e 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -25,6 +25,7 @@
#ifndef _LINUX_ACPI_H
#define _LINUX_ACPI_H
+#include <linux/ioport.h> /* for struct resource */
#ifdef CONFIG_ACPI
@@ -43,8 +44,6 @@
#include <linux/dmi.h>
-#ifdef CONFIG_ACPI
-
enum acpi_irq_model_id {
ACPI_IRQ_MODEL_PIC = 0,
ACPI_IRQ_MODEL_IOAPIC,
@@ -80,7 +79,6 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
-unsigned long acpi_find_rsdp (void);
int acpi_boot_init (void);
int acpi_boot_table_init (void);
int acpi_numa_init (void);
@@ -115,8 +113,8 @@ int acpi_unmap_lsapic(int cpu);
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
-
-extern int acpi_mp_config;
+void acpi_irq_stats_init(void);
+extern u32 acpi_irq_handled;
extern struct acpi_mcfg_allocation *pci_mmcfg_config;
extern int pci_mmcfg_config_num;
@@ -124,12 +122,6 @@ extern int pci_mmcfg_config_num;
extern int sbf_port;
extern unsigned long acpi_realmode_flags;
-#else /* !CONFIG_ACPI */
-
-#define acpi_mp_config 0
-
-#endif /* !CONFIG_ACPI */
-
int acpi_register_gsi (u32 gsi, int triggering, int polarity);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
@@ -145,8 +137,6 @@ extern int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity);
*/
void acpi_unregister_gsi (u32 gsi);
-#ifdef CONFIG_ACPI
-
struct acpi_prt_entry {
struct list_head node;
struct acpi_pci_id id;
@@ -179,8 +169,6 @@ struct acpi_pci_driver {
int acpi_pci_register_driver(struct acpi_pci_driver *driver);
void acpi_pci_unregister_driver(struct acpi_pci_driver *driver);
-#endif /* CONFIG_ACPI */
-
#ifdef CONFIG_ACPI_EC
extern int ec_read(u8 addr, u8 *val);
@@ -192,6 +180,26 @@ extern int ec_transaction(u8 command,
#endif /*CONFIG_ACPI_EC*/
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+
+typedef void (*wmi_notify_handler) (u32 value, void *context);
+
+extern acpi_status wmi_evaluate_method(const char *guid, u8 instance,
+ u32 method_id,
+ const struct acpi_buffer *in,
+ struct acpi_buffer *out);
+extern acpi_status wmi_query_block(const char *guid, u8 instance,
+ struct acpi_buffer *out);
+extern acpi_status wmi_set_block(const char *guid, u8 instance,
+ const struct acpi_buffer *in);
+extern acpi_status wmi_install_notify_handler(const char *guid,
+ wmi_notify_handler handler, void *data);
+extern acpi_status wmi_remove_notify_handler(const char *guid);
+extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out);
+extern bool wmi_has_guid(const char *guid);
+
+#endif /* CONFIG_ACPI_WMI */
+
extern int acpi_blacklisted(void);
#ifdef CONFIG_DMI
extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
@@ -217,6 +225,13 @@ extern int pnpacpi_disabled;
#define PXM_INVAL (-1)
#define NID_INVAL (-1)
+int acpi_check_resource_conflict(struct resource *res);
+
+int acpi_check_region(resource_size_t start, resource_size_t n,
+ const char *name);
+int acpi_check_mem_region(resource_size_t start, resource_size_t n,
+ const char *name);
+
#else /* CONFIG_ACPI */
static inline int acpi_boot_init(void)
@@ -229,5 +244,22 @@ static inline int acpi_boot_table_init(void)
return 0;
}
+static inline int acpi_check_resource_conflict(struct resource *res)
+{
+ return 0;
+}
+
+static inline int acpi_check_region(resource_size_t start, resource_size_t n,
+ const char *name)
+{
+ return 0;
+}
+
+static inline int acpi_check_mem_region(resource_size_t start,
+ resource_size_t n, const char *name)
+{
+ return 0;
+}
+
#endif /* !CONFIG_ACPI */
#endif /*_LINUX_ACPI_H*/
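The acpi.h hunks above export acpi_check_region()/acpi_check_mem_region()/acpi_check_resource_conflict(), with !CONFIG_ACPI stubs that always return 0. A hedged kernel-context sketch of the intended call pattern — a driver asking whether an I/O range collides with an ACPI-owned resource before claiming it; the base value, length and the "example" name are hypothetical:

/* Illustrative driver-side use; not taken verbatim from this patch. */
static int example_claim_ports(unsigned short base)
{
	/* non-zero means the firmware already owns (part of) the range */
	if (acpi_check_region(base, 8, "example"))
		return -EBUSY;

	if (!request_region(base, 8, "example"))
		return -EBUSY;

	/* ... probe the device ... */
	return 0;
}

On kernels built without ACPI the stubbed check compiles away to "no conflict", so callers need no #ifdef of their own.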
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index abc521cfb084..03e34547d489 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -109,6 +109,7 @@ extern int agp_unbind_memory(struct agp_memory *);
extern void agp_enable(struct agp_bridge_data *, u32);
extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
extern void agp_backend_release(struct agp_bridge_data *);
+extern void agp_flush_chipset(struct agp_bridge_data *);
#endif /* __KERNEL__ */
#endif /* _AGP_BACKEND_H */
diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h
index 09fbf7e5a6cb..62aef589eb94 100644
--- a/include/linux/agpgart.h
+++ b/include/linux/agpgart.h
@@ -38,6 +38,7 @@
#define AGPIOC_DEALLOCATE _IOW (AGPIOC_BASE, 7, int)
#define AGPIOC_BIND _IOW (AGPIOC_BASE, 8, struct agp_bind*)
#define AGPIOC_UNBIND _IOW (AGPIOC_BASE, 9, struct agp_unbind*)
+#define AGPIOC_CHIPSET_FLUSH _IO (AGPIOC_BASE, 10)
#define AGP_DEVICE "/dev/agpgart"
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index bdca3f1b3213..eb640f0acfac 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -47,7 +47,6 @@ struct dma_chan_ref {
* address is an implied source, whereas the asynchronous case it must be listed
* as a source. The destination address must be the first address in the source
* array.
- * @ASYNC_TX_ASSUME_COHERENT: skip cache maintenance operations
* @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
* dependency chain
* @ASYNC_TX_DEP_ACK: ack the dependency descriptor. Useful for chaining.
@@ -55,7 +54,6 @@ struct dma_chan_ref {
enum async_tx_flags {
ASYNC_TX_XOR_ZERO_DST = (1 << 0),
ASYNC_TX_XOR_DROP_DST = (1 << 1),
- ASYNC_TX_ASSUME_COHERENT = (1 << 2),
ASYNC_TX_ACK = (1 << 3),
ASYNC_TX_DEP_ACK = (1 << 4),
};
@@ -64,9 +62,15 @@ enum async_tx_flags {
void async_tx_issue_pending_all(void);
enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx);
void async_tx_run_dependencies(struct dma_async_tx_descriptor *tx);
+#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+#include <asm/async_tx.h>
+#else
+#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
+ __async_tx_find_channel(dep, type)
struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
enum dma_transaction_type tx_type);
+#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
#else
static inline void async_tx_issue_pending_all(void)
{
@@ -88,7 +92,8 @@ async_tx_run_dependencies(struct dma_async_tx_descriptor *tx,
static inline struct dma_chan *
async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
- enum dma_transaction_type tx_type)
+ enum dma_transaction_type tx_type, struct page **dst, int dst_count,
+ struct page **src, int src_count, size_t len)
{
return NULL;
}
diff --git a/include/linux/pata_platform.h b/include/linux/ata_platform.h
index 6a7a92db294c..b856a2a590d9 100644
--- a/include/linux/pata_platform.h
+++ b/include/linux/ata_platform.h
@@ -1,5 +1,5 @@
-#ifndef __LINUX_PATA_PLATFORM_H
-#define __LINUX_PATA_PLATFORM_H
+#ifndef __LINUX_ATA_PLATFORM_H
+#define __LINUX_ATA_PLATFORM_H
struct pata_platform_info {
/*
@@ -24,4 +24,11 @@ extern int __devinit __pata_platform_probe(struct device *dev,
extern int __devexit __pata_platform_remove(struct device *dev);
-#endif /* __LINUX_PATA_PLATFORM_H */
+/*
+ * Marvell SATA private data
+ */
+struct mv_sata_platform_data {
+ int n_ports; /* number of sata ports */
+};
+
+#endif /* __LINUX_ATA_PLATFORM_H */
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 0365ec9fc0c9..4e4e340592fb 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -60,8 +60,20 @@ extern void *__alloc_bootmem_core(struct bootmem_data *bdata,
unsigned long goal,
unsigned long limit);
+/*
+ * flags for reserve_bootmem (also if CONFIG_HAVE_ARCH_BOOTMEM_NODE,
+ * the architecture-specific code should honor this)
+ */
+#define BOOTMEM_DEFAULT 0
+#define BOOTMEM_EXCLUSIVE (1<<0)
+
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern void reserve_bootmem(unsigned long addr, unsigned long size);
+/*
+ * If flags is 0, then the return value is always 0 (success). If
+ * flags contains BOOTMEM_EXCLUSIVE, then -EBUSY is returned if the
+ * memory already was reserved.
+ */
+extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
#define alloc_bootmem(x) \
__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
@@ -84,7 +96,8 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
unsigned long endpfn);
extern void reserve_bootmem_node(pg_data_t *pgdat,
unsigned long physaddr,
- unsigned long size);
+ unsigned long size,
+ int flags);
extern void free_bootmem_node(pg_data_t *pgdat,
unsigned long addr,
unsigned long size);
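reserve_bootmem() and reserve_bootmem_node() above grow a flags argument, and plain reserve_bootmem() now reports failure. A hedged kernel-context sketch of the new calling convention (the base/size values are hypothetical; only the semantics documented in the comment above are assumed):

/* Illustrative only: boot-time reservation with the new flags argument. */
static void __init example_reserve(unsigned long base, unsigned long size)
{
	/* BOOTMEM_EXCLUSIVE: fail with -EBUSY if any page in the range
	 * has already been reserved by someone else. */
	if (reserve_bootmem(base, size, BOOTMEM_EXCLUSIVE) < 0) {
		printk(KERN_WARNING "0x%lx-0x%lx already reserved\n",
		       base, base + size - 1);
		return;
	}

	/* Callers that want the old unconditional behaviour simply pass
	 * BOOTMEM_DEFAULT and may ignore the return value. */
}

The mmzone_32.h hunk earlier in this patch updates the NUMA wrapper macro to forward the same flags to reserve_bootmem_node().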
diff --git a/include/linux/capability.h b/include/linux/capability.h
index bb017edffd56..7d50ff6d269f 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -14,7 +14,6 @@
#define _LINUX_CAPABILITY_H
#include <linux/types.h>
-#include <linux/compiler.h>
struct task_struct;
@@ -23,13 +22,20 @@ struct task_struct;
kernel might be somewhat backwards compatible, but don't bet on
it. */
-/* XXX - Note, cap_t, is defined by POSIX to be an "opaque" pointer to
+/* Note, cap_t, is defined by POSIX (draft) to be an "opaque" pointer to
a set of three capability sets. The transposition of 3*the
following structure to such a composite is better handled in a user
library since the draft standard requires the use of malloc/free
etc.. */
-#define _LINUX_CAPABILITY_VERSION 0x19980330
+#define _LINUX_CAPABILITY_VERSION_1 0x19980330
+#define _LINUX_CAPABILITY_U32S_1 1
+
+#define _LINUX_CAPABILITY_VERSION_2 0x20071026
+#define _LINUX_CAPABILITY_U32S_2 2
+
+#define _LINUX_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_2
+#define _LINUX_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_2
typedef struct __user_cap_header_struct {
__u32 version;
@@ -42,41 +48,42 @@ typedef struct __user_cap_data_struct {
__u32 inheritable;
} __user *cap_user_data_t;
+
#define XATTR_CAPS_SUFFIX "capability"
#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
-#define XATTR_CAPS_SZ (3*sizeof(__le32))
#define VFS_CAP_REVISION_MASK 0xFF000000
+#define VFS_CAP_FLAGS_MASK ~VFS_CAP_REVISION_MASK
+#define VFS_CAP_FLAGS_EFFECTIVE 0x000001
+
#define VFS_CAP_REVISION_1 0x01000000
+#define VFS_CAP_U32_1 1
+#define XATTR_CAPS_SZ_1 (sizeof(__le32)*(1 + 2*VFS_CAP_U32_1))
-#define VFS_CAP_REVISION VFS_CAP_REVISION_1
+#define VFS_CAP_REVISION_2 0x02000000
+#define VFS_CAP_U32_2 2
+#define XATTR_CAPS_SZ_2 (sizeof(__le32)*(1 + 2*VFS_CAP_U32_2))
+
+#define XATTR_CAPS_SZ XATTR_CAPS_SZ_2
+#define VFS_CAP_U32 VFS_CAP_U32_2
+#define VFS_CAP_REVISION VFS_CAP_REVISION_2
-#define VFS_CAP_FLAGS_MASK ~VFS_CAP_REVISION_MASK
-#define VFS_CAP_FLAGS_EFFECTIVE 0x000001
struct vfs_cap_data {
- __u32 magic_etc; /* Little endian */
- __u32 permitted; /* Little endian */
- __u32 inheritable; /* Little endian */
+ __le32 magic_etc; /* Little endian */
+ struct {
+ __le32 permitted; /* Little endian */
+ __le32 inheritable; /* Little endian */
+ } data[VFS_CAP_U32];
};
#ifdef __KERNEL__
-/* #define STRICT_CAP_T_TYPECHECKS */
-
-#ifdef STRICT_CAP_T_TYPECHECKS
-
typedef struct kernel_cap_struct {
- __u32 cap;
+ __u32 cap[_LINUX_CAPABILITY_U32S];
} kernel_cap_t;
-#else
-
-typedef __u32 kernel_cap_t;
-
-#endif
-
-#define _USER_CAP_HEADER_SIZE (2*sizeof(__u32))
+#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct))
#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
#endif
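With the hunk above, the user-visible capability ABI becomes a versioned array: a _LINUX_CAPABILITY_VERSION_2 header tells capget()/capset() to transfer _LINUX_CAPABILITY_U32S_2 (= 2) __user_cap_data_struct elements, carrying the low and high 32 capability bits. A small, runnable userspace probe of that layout, using the raw syscall so no libcap is needed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

int main(void)
{
	struct __user_cap_header_struct hdr;
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_2];

	memset(&hdr, 0, sizeof(hdr));
	hdr.version = _LINUX_CAPABILITY_VERSION_2;
	hdr.pid = 0;				/* the calling process */

	if (syscall(SYS_capget, &hdr, data) < 0) {
		perror("capget");
		return 1;
	}
	/* data[1] holds capability bits 32..63, data[0] bits 0..31 */
	printf("effective = %08x%08x\n", data[1].effective, data[0].effective);
	return 0;
}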
@@ -119,10 +126,6 @@ typedef __u32 kernel_cap_t;
#define CAP_FSETID 4
-/* Used to decide between falling back on the old suser() or fsuser(). */
-
-#define CAP_FS_MASK 0x1f
-
/* Overrides the restriction that the real or effective user ID of a
process sending a signal must match the real or effective user ID
of the process receiving the signal. */
@@ -145,8 +148,14 @@ typedef __u32 kernel_cap_t;
** Linux-specific capabilities
**/
-/* Transfer any capability in your permitted set to any pid,
- remove any capability in your permitted set from any pid */
+/* Without VFS support for capabilities:
+ * Transfer any capability in your permitted set to any pid,
+ * remove any capability in your permitted set from any pid
+ * With VFS support for capabilities (neither of above, but)
+ * Add any capability from current's capability bounding set
+ * to the current process' inheritable set
+ * Allow taking bits out of capability bounding set
+ */
#define CAP_SETPCAP 8
@@ -195,7 +204,6 @@ typedef __u32 kernel_cap_t;
#define CAP_IPC_OWNER 15
/* Insert and remove kernel modules - modify kernel without limit */
-/* Modify cap_bset */
#define CAP_SYS_MODULE 16
/* Allow ioperm/iopl access */
@@ -307,74 +315,183 @@ typedef __u32 kernel_cap_t;
#define CAP_SETFCAP 31
+/* Override MAC access.
+ The base kernel enforces no MAC policy.
+ An LSM may enforce a MAC policy, and if it does and it chooses
+ to implement capability based overrides of that policy, this is
+ the capability it should use to do so. */
+
+#define CAP_MAC_OVERRIDE 32
+
+/* Allow MAC configuration or state changes.
+ The base kernel requires no MAC configuration.
+ An LSM may enforce a MAC policy, and if it does and it chooses
+ to implement capability based checks on modifications to that
+ policy or the data required to maintain it, this is the
+ capability it should use to do so. */
+
+#define CAP_MAC_ADMIN 33
+
+#define CAP_LAST_CAP CAP_MAC_ADMIN
+
+#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
+
+/*
+ * Bit location of each capability (used by user-space library and kernel)
+ */
+
+#define CAP_TO_INDEX(x) ((x) >> 5) /* 1 << 5 == bits in __u32 */
+#define CAP_TO_MASK(x) (1 << ((x) & 31)) /* mask for indexed __u32 */
+
#ifdef __KERNEL__
/*
* Internal kernel functions only
*/
-#ifdef STRICT_CAP_T_TYPECHECKS
+#define CAP_FOR_EACH_U32(__capi) \
+ for (__capi = 0; __capi < _LINUX_CAPABILITY_U32S; ++__capi)
+
+# define CAP_FS_MASK_B0 (CAP_TO_MASK(CAP_CHOWN) \
+ | CAP_TO_MASK(CAP_DAC_OVERRIDE) \
+ | CAP_TO_MASK(CAP_DAC_READ_SEARCH) \
+ | CAP_TO_MASK(CAP_FOWNER) \
+ | CAP_TO_MASK(CAP_FSETID))
+
+# define CAP_FS_MASK_B1 (CAP_TO_MASK(CAP_MAC_OVERRIDE))
+
+#if _LINUX_CAPABILITY_U32S != 2
+# error Fix up hand-coded capability macro initializers
+#else /* HAND-CODED capability initializers */
+
+# define CAP_EMPTY_SET {{ 0, 0 }}
+# define CAP_FULL_SET {{ ~0, ~0 }}
+# define CAP_INIT_EFF_SET {{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }}
+# define CAP_FS_SET {{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } }
+# define CAP_NFSD_SET {{ CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE), \
+ CAP_FS_MASK_B1 } }
+
+#endif /* _LINUX_CAPABILITY_U32S != 2 */
+
+#define CAP_INIT_INH_SET CAP_EMPTY_SET
+
+# define cap_clear(c) do { (c) = __cap_empty_set; } while (0)
+# define cap_set_full(c) do { (c) = __cap_full_set; } while (0)
+# define cap_set_init_eff(c) do { (c) = __cap_init_eff_set; } while (0)
+
+#define cap_raise(c, flag) ((c).cap[CAP_TO_INDEX(flag)] |= CAP_TO_MASK(flag))
+#define cap_lower(c, flag) ((c).cap[CAP_TO_INDEX(flag)] &= ~CAP_TO_MASK(flag))
+#define cap_raised(c, flag) ((c).cap[CAP_TO_INDEX(flag)] & CAP_TO_MASK(flag))
+
+#define CAP_BOP_ALL(c, a, b, OP) \
+do { \
+ unsigned __capi; \
+ CAP_FOR_EACH_U32(__capi) { \
+ c.cap[__capi] = a.cap[__capi] OP b.cap[__capi]; \
+ } \
+} while (0)
+
+#define CAP_UOP_ALL(c, a, OP) \
+do { \
+ unsigned __capi; \
+ CAP_FOR_EACH_U32(__capi) { \
+ c.cap[__capi] = OP a.cap[__capi]; \
+ } \
+} while (0)
+
+static inline kernel_cap_t cap_combine(const kernel_cap_t a,
+ const kernel_cap_t b)
+{
+ kernel_cap_t dest;
+ CAP_BOP_ALL(dest, a, b, |);
+ return dest;
+}
-#define to_cap_t(x) { x }
-#define cap_t(x) (x).cap
+static inline kernel_cap_t cap_intersect(const kernel_cap_t a,
+ const kernel_cap_t b)
+{
+ kernel_cap_t dest;
+ CAP_BOP_ALL(dest, a, b, &);
+ return dest;
+}
-#else
+static inline kernel_cap_t cap_drop(const kernel_cap_t a,
+ const kernel_cap_t drop)
+{
+ kernel_cap_t dest;
+ CAP_BOP_ALL(dest, a, drop, &~);
+ return dest;
+}
-#define to_cap_t(x) (x)
-#define cap_t(x) (x)
+static inline kernel_cap_t cap_invert(const kernel_cap_t c)
+{
+ kernel_cap_t dest;
+ CAP_UOP_ALL(dest, c, ~);
+ return dest;
+}
-#endif
+static inline int cap_isclear(const kernel_cap_t a)
+{
+ unsigned __capi;
+ CAP_FOR_EACH_U32(__capi) {
+ if (a.cap[__capi] != 0)
+ return 0;
+ }
+ return 1;
+}
-#define CAP_EMPTY_SET to_cap_t(0)
-#define CAP_FULL_SET to_cap_t(~0)
-#define CAP_INIT_EFF_SET to_cap_t(~0 & ~CAP_TO_MASK(CAP_SETPCAP))
-#define CAP_INIT_INH_SET to_cap_t(0)
+static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
+{
+ kernel_cap_t dest;
+ dest = cap_drop(a, set);
+ return cap_isclear(dest);
+}
-#define CAP_TO_MASK(x) (1 << (x))
-#define cap_raise(c, flag) (cap_t(c) |= CAP_TO_MASK(flag))
-#define cap_lower(c, flag) (cap_t(c) &= ~CAP_TO_MASK(flag))
-#define cap_raised(c, flag) (cap_t(c) & CAP_TO_MASK(flag))
+/* Used to decide between falling back on the old suser() or fsuser(). */
-static inline kernel_cap_t cap_combine(kernel_cap_t a, kernel_cap_t b)
+static inline int cap_is_fs_cap(int cap)
{
- kernel_cap_t dest;
- cap_t(dest) = cap_t(a) | cap_t(b);
- return dest;
+ const kernel_cap_t __cap_fs_set = CAP_FS_SET;
+ return !!(CAP_TO_MASK(cap) & __cap_fs_set.cap[CAP_TO_INDEX(cap)]);
}
-static inline kernel_cap_t cap_intersect(kernel_cap_t a, kernel_cap_t b)
+static inline kernel_cap_t cap_drop_fs_set(const kernel_cap_t a)
{
- kernel_cap_t dest;
- cap_t(dest) = cap_t(a) & cap_t(b);
- return dest;
+ const kernel_cap_t __cap_fs_set = CAP_FS_SET;
+ return cap_drop(a, __cap_fs_set);
}
-static inline kernel_cap_t cap_drop(kernel_cap_t a, kernel_cap_t drop)
+static inline kernel_cap_t cap_raise_fs_set(const kernel_cap_t a,
+ const kernel_cap_t permitted)
{
- kernel_cap_t dest;
- cap_t(dest) = cap_t(a) & ~cap_t(drop);
- return dest;
+ const kernel_cap_t __cap_fs_set = CAP_FS_SET;
+ return cap_combine(a,
+ cap_intersect(permitted, __cap_fs_set));
}
-static inline kernel_cap_t cap_invert(kernel_cap_t c)
+static inline kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a)
{
- kernel_cap_t dest;
- cap_t(dest) = ~cap_t(c);
- return dest;
+ const kernel_cap_t __cap_fs_set = CAP_NFSD_SET;
+ return cap_drop(a, __cap_fs_set);
}
-#define cap_isclear(c) (!cap_t(c))
-#define cap_issubset(a,set) (!(cap_t(a) & ~cap_t(set)))
-
-#define cap_clear(c) do { cap_t(c) = 0; } while(0)
-#define cap_set_full(c) do { cap_t(c) = ~0; } while(0)
-#define cap_mask(c,mask) do { cap_t(c) &= cap_t(mask); } while(0)
+static inline kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a,
+ const kernel_cap_t permitted)
+{
+ const kernel_cap_t __cap_nfsd_set = CAP_NFSD_SET;
+ return cap_combine(a,
+ cap_intersect(permitted, __cap_nfsd_set));
+}
-#define cap_is_fs_cap(c) (CAP_TO_MASK(c) & CAP_FS_MASK)
+extern const kernel_cap_t __cap_empty_set;
+extern const kernel_cap_t __cap_full_set;
+extern const kernel_cap_t __cap_init_eff_set;
int capable(int cap);
int __capable(struct task_struct *t, int cap);
+extern long cap_prctl_drop(unsigned long cap);
+
#endif /* __KERNEL__ */
#endif /* !_LINUX_CAPABILITY_H */
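
With kernel_cap_t now a two-word array, capability bits above 31 (CAP_MAC_OVERRIDE, CAP_MAC_ADMIN) are only reachable through the accessors above. A minimal sketch of the intended usage; demo_caps() and its error codes are illustrative, only the macros, inlines and the exported __cap_full_set come from this header:

#include <linux/capability.h>
#include <linux/errno.h>

static int demo_caps(void)
{
        kernel_cap_t working = CAP_EMPTY_SET;

        cap_raise(working, CAP_CHOWN);          /* bit 0 of cap[0] */
        cap_raise(working, CAP_MAC_ADMIN);      /* bit 1 of cap[1] */

        /* the subset test walks every __u32 word, not just the first */
        if (!cap_issubset(working, __cap_full_set))
                return -EINVAL;

        /* CAP_CHOWN is in CAP_FS_SET, CAP_MAC_ADMIN is not, so one
         * bit survives the drop and the set is not clear */
        working = cap_drop_fs_set(working);
        return cap_isclear(working) ? -EINVAL : 0;
}
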
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 87479328d46d..ff9055fc3d2a 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -14,6 +14,7 @@
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/cgroupstats.h>
+#include <linux/prio_heap.h>
#ifdef CONFIG_CGROUPS
@@ -207,6 +208,14 @@ struct cftype {
int (*release) (struct inode *inode, struct file *file);
};
+struct cgroup_scanner {
+ struct cgroup *cg;
+ int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
+ void (*process_task)(struct task_struct *p,
+ struct cgroup_scanner *scan);
+ struct ptr_heap *heap;
+};
+
/* Add a new file to the given cgroup directory. Should only be
* called by subsystems from within a populate() method */
int cgroup_add_file(struct cgroup *cont, struct cgroup_subsys *subsys,
@@ -233,6 +242,7 @@ int cgroup_is_descendant(const struct cgroup *cont);
struct cgroup_subsys {
struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
struct cgroup *cont);
+ void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cont);
void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cont);
int (*can_attach)(struct cgroup_subsys *ss,
struct cgroup *cont, struct task_struct *tsk);
@@ -298,11 +308,17 @@ struct cgroup_iter {
* returns NULL or until you want to end the iteration
*
* 3) call cgroup_iter_end() to destroy the iterator.
+ *
+ * Or, call cgroup_scan_tasks() to iterate through every task in a cgroup.
+ * - cgroup_scan_tasks() holds the css_set_lock when calling the test_task()
+ * callback, but not while calling the process_task() callback.
*/
void cgroup_iter_start(struct cgroup *cont, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cont,
struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cont, struct cgroup_iter *it);
+int cgroup_scan_tasks(struct cgroup_scanner *scan);
+int cgroup_attach_task(struct cgroup *, struct task_struct *);
#else /* !CONFIG_CGROUPS */
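
A sketch of the new cgroup_scanner/cgroup_scan_tasks() pairing, counting stopped tasks in a cgroup. The callback names and the counter are illustrative; the locking comments restate the rule documented above, and a NULL heap is assumed to make the core fall back to its own temporary heap:

#include <linux/cgroup.h>
#include <linux/sched.h>

static unsigned long demo_stopped;      /* illustrative counter */

static int demo_test_task(struct task_struct *p, struct cgroup_scanner *scan)
{
        /* runs with css_set_lock held, so keep it cheap */
        return p->state == TASK_STOPPED;
}

static void demo_process_task(struct task_struct *p,
                              struct cgroup_scanner *scan)
{
        /* runs without css_set_lock */
        demo_stopped++;
}

static int demo_count_stopped(struct cgroup *cg)
{
        struct cgroup_scanner scan = {
                .cg           = cg,
                .test_task    = demo_test_task,
                .process_task = demo_process_task,
                .heap         = NULL,   /* assumed: core supplies a temporary heap */
        };

        demo_stopped = 0;
        return cgroup_scan_tasks(&scan);
}
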
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 9ec43186ba80..228235c5ae53 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -37,3 +37,8 @@ SUBSYS(cpuacct)
/* */
+#ifdef CONFIG_CGROUP_MEM_CONT
+SUBSYS(mem_cgroup)
+#endif
+
+/* */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index d38655f2be70..a671dbff7a1f 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -257,16 +257,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
/*
* epoll (fs/eventpoll.c) compat bits follow ...
*/
-#ifndef CONFIG_HAS_COMPAT_EPOLL_EVENT
struct epoll_event;
#define compat_epoll_event epoll_event
-#else
-asmlinkage long compat_sys_epoll_ctl(int epfd, int op, int fd,
- struct compat_epoll_event __user *event);
-asmlinkage long compat_sys_epoll_wait(int epfd,
- struct compat_epoll_event __user *events,
- int maxevents, int timeout);
-#endif
asmlinkage long compat_sys_epoll_pwait(int epfd,
struct compat_epoll_event __user *events,
int maxevents, int timeout,
@@ -279,8 +271,11 @@ asmlinkage long compat_sys_utimensat(unsigned int dfd, char __user *filename,
asmlinkage long compat_sys_signalfd(int ufd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
-asmlinkage long compat_sys_timerfd(int ufd, int clockid, int flags,
- const struct compat_itimerspec __user *utmr);
+asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
+ const struct compat_itimerspec __user *utmr,
+ struct compat_itimerspec __user *otmr);
+asmlinkage long compat_sys_timerfd_gettime(int ufd,
+ struct compat_itimerspec __user *otmr);
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 23932d7741a9..ddd8652fc3f3 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -167,6 +167,10 @@ struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int (*governor) (struct cpufreq_policy *policy,
unsigned int event);
+ ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
+ char *buf);
+ int (*store_setspeed) (struct cpufreq_policy *policy,
+ unsigned int freq);
unsigned int max_transition_latency; /* HW must be able to switch to
next freq faster than this value in nano secs or we
will fallback to performance governor */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index c4e00161a247..385d45b616db 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -46,9 +46,10 @@ struct cpuidle_state {
/* Idle State Flags */
#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
#define CPUIDLE_FLAG_CHECK_BM (0x02) /* BM activity will exit state */
-#define CPUIDLE_FLAG_SHALLOW (0x10) /* low latency, minimal savings */
-#define CPUIDLE_FLAG_BALANCED (0x20) /* medium latency, moderate savings */
-#define CPUIDLE_FLAG_DEEP (0x40) /* high latency, large savings */
+#define CPUIDLE_FLAG_POLL (0x10) /* no latency, no savings */
+#define CPUIDLE_FLAG_SHALLOW (0x20) /* low latency, minimal savings */
+#define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */
+#define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
@@ -79,7 +80,7 @@ struct cpuidle_state_kobj {
};
struct cpuidle_device {
- int enabled:1;
+ unsigned int enabled:1;
unsigned int cpu;
int last_residency;
@@ -178,4 +179,10 @@ static inline void cpuidle_unregister_governor(struct cpuidle_governor *gov) { }
#endif
+#ifdef CONFIG_ARCH_HAS_CPU_RELAX
+#define CPUIDLE_DRIVER_STATE_START 1
+#else
+#define CPUIDLE_DRIVER_STATE_START 0
+#endif
+
#endif /* _LINUX_CPUIDLE_H */
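
CPUIDLE_DRIVER_STATE_START lets generic code skip the poll state that the core registers at index 0 when ARCH_HAS_CPU_RELAX is set. A small, illustrative sketch (field names as in this header's struct cpuidle_device / cpuidle_state):

#include <linux/cpuidle.h>
#include <linux/kernel.h>

static void demo_dump_states(struct cpuidle_device *dev)
{
        int i;

        /* index 0 may be the injected poll loop; start after it */
        for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
                printk(KERN_DEBUG "state %d: exit latency %u us\n",
                       i, dev->states[i].exit_latency);
}
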
diff --git a/include/linux/device.h b/include/linux/device.h
index 479c0b31593c..2258d89bf523 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -410,6 +410,15 @@ extern int devres_release_group(struct device *dev, void *id);
extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
extern void devm_kfree(struct device *dev, void *p);
+struct device_dma_parameters {
+ /*
+ * a low level driver may set these to teach IOMMU code about
+ * sg limitations.
+ */
+ unsigned int max_segment_size;
+ unsigned long segment_boundary_mask;
+};
+
struct device {
struct klist klist_children;
struct klist_node knode_parent; /* node in sibling list */
@@ -445,6 +454,8 @@ struct device {
64 bit addresses for consistent
allocations such descriptors. */
+ struct device_dma_parameters *dma_parms;
+
struct list_head dma_pools; /* dma pools (if dma'ble) */
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4470950892be..332030709623 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -60,6 +60,36 @@ static inline int is_device_dma_capable(struct device *dev)
extern u64 dma_get_required_mask(struct device *dev);
+static inline unsigned int dma_get_max_seg_size(struct device *dev)
+{
+ return dev->dma_parms ? dev->dma_parms->max_segment_size : 65536;
+}
+
+static inline unsigned int dma_set_max_seg_size(struct device *dev,
+ unsigned int size)
+{
+ if (dev->dma_parms) {
+ dev->dma_parms->max_segment_size = size;
+ return 0;
+ } else
+ return -EIO;
+}
+
+static inline unsigned long dma_get_seg_boundary(struct device *dev)
+{
+ return dev->dma_parms ?
+ dev->dma_parms->segment_boundary_mask : 0xffffffff;
+}
+
+static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
+{
+ if (dev->dma_parms) {
+ dev->dma_parms->segment_boundary_mask = mask;
+ return 0;
+ } else
+ return -EIO;
+}
+
/* flags for the coherent memory api */
#define DMA_MEMORY_MAP 0x01
#define DMA_MEMORY_IO 0x02
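
Without dev->dma_parms the new helpers fall back to 65536-byte segments and a 0xffffffff boundary mask, so a low-level driver with real limits must supply the storage itself. A sketch; embedding struct device_dma_parameters in a hypothetical driver-private structure is just one way to provide it:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct demo_hw {                        /* illustrative driver private data */
        struct device_dma_parameters dma_parms;
        /* ... */
};

static int demo_setup_dma(struct device *dev, struct demo_hw *hw)
{
        dev->dma_parms = &hw->dma_parms;

        /* hardware limit: 32 KB segments, none crossing a 64 KB boundary */
        if (dma_set_max_seg_size(dev, 0x8000))
                return -EIO;
        return dma_set_seg_boundary(dev, 0xffff);
}
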
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 5c84bf897593..acbb364674ff 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -95,6 +95,15 @@ enum dma_transaction_type {
#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)
/**
+ * enum dma_prep_flags - DMA flags to augment operation preparation
+ * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
+ * this transaction
+ */
+enum dma_prep_flags {
+ DMA_PREP_INTERRUPT = (1 << 0),
+};
+
+/**
* dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
* See linux/cpumask.h
*/
@@ -209,8 +218,6 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param);
* descriptors
* @chan: target channel for this operation
* @tx_submit: set the prepared descriptor(s) to be executed by the engine
- * @tx_set_dest: set a destination address in a hardware descriptor
- * @tx_set_src: set a source address in a hardware descriptor
* @callback: routine to call after this operation is complete
* @callback_param: general parameter to pass to the callback routine
* ---async_tx api specific fields---
@@ -227,10 +234,6 @@ struct dma_async_tx_descriptor {
struct list_head tx_list;
struct dma_chan *chan;
dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
- void (*tx_set_dest)(dma_addr_t addr,
- struct dma_async_tx_descriptor *tx, int index);
- void (*tx_set_src)(dma_addr_t addr,
- struct dma_async_tx_descriptor *tx, int index);
dma_async_tx_callback callback;
void *callback_param;
struct list_head depend_list;
@@ -279,15 +282,17 @@ struct dma_device {
void (*device_free_chan_resources)(struct dma_chan *chan);
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
- struct dma_chan *chan, size_t len, int int_en);
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
- struct dma_chan *chan, unsigned int src_cnt, size_t len,
- int int_en);
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
+ unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
- struct dma_chan *chan, unsigned int src_cnt, size_t len,
- u32 *result, int int_en);
+ struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
+ size_t len, u32 *result, unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
- struct dma_chan *chan, int value, size_t len, int int_en);
+ struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
struct dma_chan *chan);
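
The prep callbacks now carry the DMA addresses and a flags word, so the removed tx_set_src()/tx_set_dest() hooks need no replacement call. A sketch of a client issuing an interrupt-on-completion copy; demo_issue_copy() and its error handling are illustrative, and chan/dst/src are assumed to be set up elsewhere:

#include <linux/dmaengine.h>
#include <linux/errno.h>

static dma_cookie_t demo_issue_copy(struct dma_chan *chan,
                                    dma_addr_t dst, dma_addr_t src,
                                    size_t len)
{
        struct dma_async_tx_descriptor *tx;

        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                  DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;         /* negative cookie signals failure */

        return tx->tx_submit(tx);
}
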
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 5b42a659a308..b1251b2af568 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -79,7 +79,6 @@ extern void dmi_scan_machine(void);
extern int dmi_get_year(int field);
extern int dmi_name_in_vendors(const char *str);
extern int dmi_available;
-extern char *dmi_get_slot(int slot);
#else
@@ -90,7 +89,6 @@ static inline const struct dmi_device * dmi_find_device(int type, const char *na
static inline int dmi_get_year(int year) { return 0; }
static inline int dmi_name_in_vendors(const char *s) { return 0; }
#define dmi_available 0
-static inline char *dmi_get_slot(int slot) { return NULL; }
#endif
diff --git a/include/linux/ds1wm.h b/include/linux/ds1wm.h
index 31f6e3c427fb..d3c65e48a2e7 100644
--- a/include/linux/ds1wm.h
+++ b/include/linux/ds1wm.h
@@ -6,6 +6,7 @@ struct ds1wm_platform_data {
* e.g. on h5xxx and h2200 this is 2
* (registers aligned to 4-byte boundaries),
* while on hx4700 this is 1 */
+ int active_high;
void (*enable)(struct platform_device *pdev);
void (*disable)(struct platform_device *pdev);
};
diff --git a/include/linux/efs_fs.h b/include/linux/efs_fs.h
index dd57fe523e97..a695d63a07af 100644
--- a/include/linux/efs_fs.h
+++ b/include/linux/efs_fs.h
@@ -41,7 +41,7 @@ extern const struct inode_operations efs_dir_inode_operations;
extern const struct file_operations efs_dir_operations;
extern const struct address_space_operations efs_symlink_aops;
-extern void efs_read_inode(struct inode *);
+extern struct inode *efs_iget(struct super_block *, unsigned long);
extern efs_block_t efs_map_block(struct inode *, efs_block_t);
extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int);
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 7ceb24d87c1a..bad1b16ec49a 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -3,7 +3,9 @@
#include <linux/types.h>
#include <linux/elf-em.h>
+#ifdef __KERNEL__
#include <asm/elf.h>
+#endif
struct file;
@@ -355,6 +357,7 @@ typedef struct elf64_shdr {
#define NT_AUXV 6
#define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */
#define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */
+#define NT_PPC_SPE 0x101 /* PowerPC SPE/EVR registers */
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 9631dddae348..5ca54d77079f 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -4,7 +4,9 @@
#include <linux/types.h>
#include <linux/signal.h>
#include <linux/time.h>
+#ifdef __KERNEL__
#include <linux/user.h>
+#endif
#include <linux/ptrace.h>
struct elf_siginfo
@@ -14,7 +16,9 @@ struct elf_siginfo
int si_errno; /* errno */
};
+#ifdef __KERNEL__
#include <asm/elf.h>
+#endif
#ifndef __KERNEL__
typedef elf_greg_t greg_t;
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
new file mode 100644
index 000000000000..a5978f18ca40
--- /dev/null
+++ b/include/linux/enclosure.h
@@ -0,0 +1,129 @@
+/*
+ * Enclosure Services
+ *
+ * Copyright (C) 2008 James Bottomley <James.Bottomley@HansenPartnership.com>
+ *
+**-----------------------------------------------------------------------------
+**
+** This program is free software; you can redistribute it and/or
+** modify it under the terms of the GNU General Public License
+** version 2 as published by the Free Software Foundation.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+** You should have received a copy of the GNU General Public License
+** along with this program; if not, write to the Free Software
+** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+**
+**-----------------------------------------------------------------------------
+*/
+#ifndef _LINUX_ENCLOSURE_H_
+#define _LINUX_ENCLOSURE_H_
+
+#include <linux/device.h>
+#include <linux/list.h>
+
+/* A few generic types ... taken from ses-2 */
+enum enclosure_component_type {
+ ENCLOSURE_COMPONENT_DEVICE = 0x01,
+ ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17,
+};
+
+/* ses-2 common element status */
+enum enclosure_status {
+ ENCLOSURE_STATUS_UNSUPPORTED = 0,
+ ENCLOSURE_STATUS_OK,
+ ENCLOSURE_STATUS_CRITICAL,
+ ENCLOSURE_STATUS_NON_CRITICAL,
+ ENCLOSURE_STATUS_UNRECOVERABLE,
+ ENCLOSURE_STATUS_NOT_INSTALLED,
+ ENCLOSURE_STATUS_UNKNOWN,
+ ENCLOSURE_STATUS_UNAVAILABLE,
+};
+
+/* SFF-8485 activity light settings */
+enum enclosure_component_setting {
+ ENCLOSURE_SETTING_DISABLED = 0,
+ ENCLOSURE_SETTING_ENABLED = 1,
+ ENCLOSURE_SETTING_BLINK_A_ON_OFF = 2,
+ ENCLOSURE_SETTING_BLINK_A_OFF_ON = 3,
+ ENCLOSURE_SETTING_BLINK_B_ON_OFF = 6,
+ ENCLOSURE_SETTING_BLINK_B_OFF_ON = 7,
+};
+
+struct enclosure_device;
+struct enclosure_component;
+struct enclosure_component_callbacks {
+ void (*get_status)(struct enclosure_device *,
+ struct enclosure_component *);
+ int (*set_status)(struct enclosure_device *,
+ struct enclosure_component *,
+ enum enclosure_status);
+ void (*get_fault)(struct enclosure_device *,
+ struct enclosure_component *);
+ int (*set_fault)(struct enclosure_device *,
+ struct enclosure_component *,
+ enum enclosure_component_setting);
+ void (*get_active)(struct enclosure_device *,
+ struct enclosure_component *);
+ int (*set_active)(struct enclosure_device *,
+ struct enclosure_component *,
+ enum enclosure_component_setting);
+ void (*get_locate)(struct enclosure_device *,
+ struct enclosure_component *);
+ int (*set_locate)(struct enclosure_device *,
+ struct enclosure_component *,
+ enum enclosure_component_setting);
+};
+
+
+struct enclosure_component {
+ void *scratch;
+ struct class_device cdev;
+ enum enclosure_component_type type;
+ int number;
+ int fault;
+ int active;
+ int locate;
+ enum enclosure_status status;
+};
+
+struct enclosure_device {
+ void *scratch;
+ struct list_head node;
+ struct class_device cdev;
+ struct enclosure_component_callbacks *cb;
+ int components;
+ struct enclosure_component component[0];
+};
+
+static inline struct enclosure_device *
+to_enclosure_device(struct class_device *dev)
+{
+ return container_of(dev, struct enclosure_device, cdev);
+}
+
+static inline struct enclosure_component *
+to_enclosure_component(struct class_device *dev)
+{
+ return container_of(dev, struct enclosure_component, cdev);
+}
+
+struct enclosure_device *
+enclosure_register(struct device *, const char *, int,
+ struct enclosure_component_callbacks *);
+void enclosure_unregister(struct enclosure_device *);
+struct enclosure_component *
+enclosure_component_register(struct enclosure_device *, unsigned int,
+ enum enclosure_component_type, const char *);
+int enclosure_add_device(struct enclosure_device *enclosure, int component,
+ struct device *dev);
+int enclosure_remove_device(struct enclosure_device *enclosure, int component);
+struct enclosure_device *enclosure_find(struct device *dev);
+int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
+ void *data);
+
+#endif /* _LINUX_ENCLOSURE_H_ */
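
A registration sketch for the new enclosure API: one enclosure, one slot, a single get_status callback. Error unwinding is omitted, the names are illustrative, and the ERR_PTR return convention on failure is assumed rather than documented here:

#include <linux/enclosure.h>
#include <linux/err.h>

static void demo_get_status(struct enclosure_device *edev,
                            struct enclosure_component *comp)
{
        comp->status = ENCLOSURE_STATUS_OK;     /* would query hardware */
}

static struct enclosure_component_callbacks demo_cb = {
        .get_status = demo_get_status,
};

static int demo_attach(struct device *parent, struct device *disk)
{
        struct enclosure_device *edev;
        struct enclosure_component *comp;

        edev = enclosure_register(parent, "demo-enclosure", 1, &demo_cb);
        if (IS_ERR(edev))
                return PTR_ERR(edev);

        comp = enclosure_component_register(edev, 0,
                                            ENCLOSURE_COMPONENT_DEVICE,
                                            "slot0");
        if (IS_ERR(comp))
                return PTR_ERR(comp);

        /* tie the real device (e.g. a disk) to slot 0 */
        return enclosure_add_device(edev, 0, disk);
}
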
diff --git a/include/linux/err.h b/include/linux/err.h
index 1ab1d44f8d3b..ec87f3142bf3 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -34,6 +34,19 @@ static inline long IS_ERR(const void *ptr)
return IS_ERR_VALUE((unsigned long)ptr);
}
+/**
+ * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+ * @ptr: The pointer to cast.
+ *
+ * Explicitly cast an error-valued pointer to another pointer type in such a
+ * way as to make it clear that's what's going on.
+ */
+static inline void *ERR_CAST(const void *ptr)
+{
+ /* cast away the const */
+ return (void *) ptr;
+}
+
#endif
#endif /* _LINUX_ERR_H */
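
ERR_CAST() replaces the bare casts used when an error pointer has to be handed on as a different pointer type. A sketch; demo_lookup() and the two struct types are purely illustrative:

#include <linux/err.h>

struct demo_raw;
struct demo_cooked;

extern struct demo_raw *demo_lookup(const char *name);

static struct demo_cooked *demo_open(const char *name)
{
        struct demo_raw *raw = demo_lookup(name);

        if (IS_ERR(raw))
                return ERR_CAST(raw);   /* was: (struct demo_cooked *)raw */

        /* ... build and return the real object ... */
        return NULL;                    /* placeholder */
}
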
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 241c01cb92b2..36c540396377 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -823,7 +823,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
int create, int extend_disksize);
-extern void ext3_read_inode (struct inode *);
+extern struct inode *ext3_iget(struct super_block *, unsigned long);
extern int ext3_write_inode (struct inode *, int);
extern int ext3_setattr (struct dentry *, struct iattr *);
extern void ext3_delete_inode (struct inode *);
diff --git a/include/linux/ext4_fs.h b/include/linux/ext4_fs.h
index 1852313fc7c7..c4f635a4dd25 100644
--- a/include/linux/ext4_fs.h
+++ b/include/linux/ext4_fs.h
@@ -1024,7 +1024,7 @@ int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
struct buffer_head *bh_result,
int create, int extend_disksize);
-extern void ext4_read_inode (struct inode *);
+extern struct inode *ext4_iget(struct super_block *, unsigned long);
extern int ext4_write_inode (struct inode *, int);
extern int ext4_setattr (struct dentry *, struct iattr *);
extern void ext4_delete_inode (struct inode *);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 56bd421c1208..36b7abefacbe 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -21,7 +21,7 @@
/* Fixed constants first: */
#undef NR_OPEN
-#define NR_OPEN (1024*1024) /* Absolute upper limit on fd num */
+extern int sysctl_nr_open;
#define INR_OPEN 1024 /* Initial setting for nfile rlimits */
#define BLOCK_SIZE_BITS 10
@@ -977,7 +977,6 @@ extern int send_sigurg(struct fown_struct *fown);
extern struct list_head super_blocks;
extern spinlock_t sb_lock;
-#define sb_entry(list) list_entry((list), struct super_block, s_list)
#define S_BIAS (1<<30)
struct super_block {
struct list_head s_list; /* Keep this first */
@@ -1242,8 +1241,6 @@ struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
- void (*read_inode) (struct inode *);
-
void (*dirty_inode) (struct inode *);
int (*write_inode) (struct inode *, int);
void (*put_inode) (struct inode *);
@@ -1279,8 +1276,10 @@ struct super_operations {
*
* Two bits are used for locking and completion notification, I_LOCK and I_SYNC.
*
- * I_DIRTY_SYNC Inode itself is dirty.
- * I_DIRTY_DATASYNC Data-related inode changes pending
+ * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
+ * fdatasync(). i_atime is the usual cause.
+ * I_DIRTY_DATASYNC Inode is dirty and must be written on fdatasync(), f.e.
+ * because i_size changed.
* I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
* I_NEW get_new_inode() sets i_state to I_LOCK|I_NEW. Both
* are cleared by unlock_new_inode(), called from iget().
@@ -1312,8 +1311,6 @@ struct super_operations {
* purpose reduces latency and prevents some filesystem-
* specific deadlocks.
*
- * Q: Why does I_DIRTY_DATASYNC exist? It appears as if it could be replaced
- * by (I_DIRTY_SYNC|I_DIRTY_PAGES).
* Q: What is the difference between I_WILL_FREE and I_FREEING?
* Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on
* I_CLEAR? If not, why?
@@ -1768,19 +1765,8 @@ extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*te
extern struct inode * iget_locked(struct super_block *, unsigned long);
extern void unlock_new_inode(struct inode *);
-static inline struct inode *iget(struct super_block *sb, unsigned long ino)
-{
- struct inode *inode = iget_locked(sb, ino);
-
- if (inode && (inode->i_state & I_NEW)) {
- sb->s_op->read_inode(inode);
- unlock_new_inode(inode);
- }
-
- return inode;
-}
-
extern void __iget(struct inode * inode);
+extern void iget_failed(struct inode *);
extern void clear_inode(struct inode *);
extern void destroy_inode(struct inode *);
extern struct inode *new_inode(struct super_block *);
@@ -1942,7 +1928,9 @@ extern int vfs_stat_fd(int dfd, char __user *, struct kstat *);
extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
-extern int vfs_ioctl(struct file *, unsigned int, unsigned int, unsigned long);
+extern long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
+ unsigned long arg);
extern void get_filesystem(struct file_system_type *fs);
extern void put_filesystem(struct file_system_type *fs);
@@ -2113,6 +2101,7 @@ struct ctl_table;
int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos);
+int get_filesystem_list(char * buf);
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
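
With ->read_inode() and the generic iget() helper gone, each filesystem provides its own foo_iget() built on iget_locked(), as the ext3/ext4/efs prototypes above show. A sketch of that pattern; demo_iget() and demo_read_disk_inode() are illustrative and the error values are only the usual choices:

#include <linux/fs.h>
#include <linux/err.h>

extern int demo_read_disk_inode(struct inode *inode);   /* fs-specific */

struct inode *demo_iget(struct super_block *sb, unsigned long ino)
{
        struct inode *inode = iget_locked(sb, ino);

        if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;                   /* already in the icache */

        if (demo_read_disk_inode(inode)) {
                iget_failed(inode);             /* marks it bad and drops it */
                return ERR_PTR(-EIO);
        }

        unlock_new_inode(inode);
        return inode;
}
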
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 2bd31fa623b6..d4b7c4ac72e6 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -92,6 +92,14 @@ static inline void fsnotify_inoderemove(struct inode *inode)
}
/*
+ * fsnotify_link_count - inode's link count changed
+ */
+static inline void fsnotify_link_count(struct inode *inode)
+{
+ inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
+}
+
+/*
* fsnotify_create - 'name' was linked in
*/
static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
@@ -103,6 +111,20 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
}
/*
+ * fsnotify_link - new hardlink in 'inode' directory
+ * Note: we also have to pass the linked inode pointer, as some filesystems
+ * leave new_dentry->d_inode NULL and only instantiate it later
+ */
+static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
+{
+ inode_dir_notify(dir, DN_CREATE);
+ inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name,
+ inode);
+ fsnotify_link_count(inode);
+ audit_inode_child(new_dentry->d_name.name, new_dentry, dir);
+}
+
+/*
* fsnotify_mkdir - directory 'name' was created
*/
static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 7e93a9ae7064..0c6ce515185d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -228,5 +228,7 @@ extern void FASTCALL(free_cold_page(struct page *page));
void page_alloc_init(void);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
+void drain_all_pages(void);
+void drain_local_pages(void *dummy);
#endif /* __LINUX_GFP_H */
diff --git a/include/linux/hash.h b/include/linux/hash.h
index acf17bb8e7f9..06d25c189cc5 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -1,6 +1,6 @@
#ifndef _LINUX_HASH_H
#define _LINUX_HASH_H
-/* Fast hashing routine for a long.
+/* Fast hashing routine for ints, longs and pointers.
(C) 2002 William Lee Irwin III, IBM */
/*
@@ -13,23 +13,30 @@
* them can use shifts and additions instead of multiplications for
* machines where multiplications are slow.
*/
-#if BITS_PER_LONG == 32
+
+#include <asm/types.h>
+
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e370001UL
-#elif BITS_PER_LONG == 64
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
+
+#if BITS_PER_LONG == 32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
+#define hash_long(val, bits) hash_32(val, bits)
+#elif BITS_PER_LONG == 64
+#define hash_long(val, bits) hash_64(val, bits)
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
#else
-#error Define GOLDEN_RATIO_PRIME for your wordsize.
+#error Wordsize not 32 or 64
#endif
-static inline unsigned long hash_long(unsigned long val, unsigned int bits)
+static inline u64 hash_64(u64 val, unsigned int bits)
{
- unsigned long hash = val;
+ u64 hash = val;
-#if BITS_PER_LONG == 64
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
- unsigned long n = hash;
+ u64 n = hash;
n <<= 18;
hash -= n;
n <<= 33;
@@ -42,15 +49,20 @@ static inline unsigned long hash_long(unsigned long val, unsigned int bits)
hash += n;
n <<= 2;
hash += n;
-#else
+
+ /* High bits are more random, so use them. */
+ return hash >> (64 - bits);
+}
+
+static inline u32 hash_32(u32 val, unsigned int bits)
+{
/* On some cpus multiply is faster, on others gcc will do shifts */
- hash *= GOLDEN_RATIO_PRIME;
-#endif
+ u32 hash = val * GOLDEN_RATIO_PRIME_32;
/* High bits are more random, so use them. */
- return hash >> (BITS_PER_LONG - bits);
+ return hash >> (32 - bits);
}
-
+
static inline unsigned long hash_ptr(void *ptr, unsigned int bits)
{
return hash_long((unsigned long)ptr, bits);
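
hash_32()/hash_64() make the hash width explicit instead of depending on BITS_PER_LONG. A short sketch of bucketing keys into a 2^10-entry table; the DEMO_* names are illustrative:

#include <linux/hash.h>

#define DEMO_HASH_BITS 10                       /* 1024 buckets */

static inline unsigned int demo_bucket_u32(u32 key)
{
        return hash_32(key, DEMO_HASH_BITS);    /* 0 .. 1023 */
}

static inline unsigned int demo_bucket_u64(u64 key)
{
        /* same result on 32- and 64-bit kernels, unlike hash_long() */
        return hash_64(key, DEMO_HASH_BITS);
}
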
diff --git a/include/linux/hayesesp.h b/include/linux/hayesesp.h
index b436be7a7fff..2177ee5b2fe2 100644
--- a/include/linux/hayesesp.h
+++ b/include/linux/hayesesp.h
@@ -71,7 +71,6 @@ struct hayes_esp_config {
#define ESP_STAT_NEVER_DMA 0x08
#define ESP_STAT_USE_PIO 0x10
-#define ESP_EVENT_WRITE_WAKEUP 0
#define ESP_MAGIC 0x53ee
#define ESP_XMIT_SIZE 4096
@@ -92,7 +91,6 @@ struct esp_struct {
unsigned short closing_wait2;
int IER; /* Interrupt Enable Register */
int MCR; /* Modem control register */
- unsigned long event;
unsigned long last_active;
int line;
int count; /* # of fd on device */
@@ -101,8 +99,6 @@ struct esp_struct {
int xmit_head;
int xmit_tail;
int xmit_cnt;
- struct work_struct tqueue;
- struct work_struct tqueue_hangup;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
wait_queue_head_t delta_msr_wait;
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index db390c511ada..6115545a5b9c 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -26,13 +26,6 @@
#include <linux/netdevice.h>
#include <linux/hdlc/ioctl.h>
-
-/* Used by all network devices here, pointed to by netdev_priv(dev) */
-struct hdlc_device_desc {
- int (*netif_rx)(struct sk_buff *skb);
- struct net_device_stats stats;
-};
-
/* This structure is a private property of HDLC protocols.
Hardware drivers have no interest here */
@@ -44,12 +37,15 @@ struct hdlc_proto {
void (*detach)(struct net_device *dev);
int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
__be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
+ int (*netif_rx)(struct sk_buff *skb);
struct module *module;
struct hdlc_proto *next; /* next protocol in the list */
};
+/* Pointed to by dev->priv */
typedef struct hdlc_device {
+ struct net_device_stats stats;
/* used by HDLC layer to take control over HDLC device from hw driver*/
int (*attach)(struct net_device *dev,
unsigned short encoding, unsigned short parity);
@@ -83,18 +79,11 @@ void unregister_hdlc_protocol(struct hdlc_proto *proto);
struct net_device *alloc_hdlcdev(void *priv);
-
-static __inline__ struct hdlc_device_desc* dev_to_desc(struct net_device *dev)
-{
- return netdev_priv(dev);
-}
-
-static __inline__ hdlc_device* dev_to_hdlc(struct net_device *dev)
+static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev)
{
- return netdev_priv(dev) + sizeof(struct hdlc_device_desc);
+ return dev->priv;
}
-
static __inline__ void debug_frame(const struct sk_buff *skb)
{
int i;
@@ -116,13 +105,13 @@ int hdlc_open(struct net_device *dev);
void hdlc_close(struct net_device *dev);
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
- int (*rx)(struct sk_buff *skb), size_t size);
+ size_t size);
/* May be used by hardware driver to gain control over HDLC device */
void detach_hdlc_protocol(struct net_device *dev);
static __inline__ struct net_device_stats *hdlc_stats(struct net_device *dev)
{
- return &dev_to_desc(dev)->stats;
+ return &dev_to_hdlc(dev)->stats;
}
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 1fcb0033179e..7dcbc82f3b7b 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -68,8 +68,6 @@ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
void *addr = kmap_atomic(page, KM_USER0);
clear_user_page(addr, vaddr, page);
kunmap_atomic(addr, KM_USER0);
- /* Make sure this page is cleared on other CPU's too before using it */
- smp_wmb();
}
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
@@ -124,28 +122,40 @@ static inline void clear_highpage(struct page *page)
kunmap_atomic(kaddr, KM_USER0);
}
-/*
- * Same but also flushes aliased cache contents to RAM.
- *
- * This must be a macro because KM_USER0 and friends aren't defined if
- * !CONFIG_HIGHMEM
- */
-#define zero_user_page(page, offset, size, km_type) \
- do { \
- void *kaddr; \
- \
- BUG_ON((offset) + (size) > PAGE_SIZE); \
- \
- kaddr = kmap_atomic(page, km_type); \
- memset((char *)kaddr + (offset), 0, (size)); \
- flush_dcache_page(page); \
- kunmap_atomic(kaddr, (km_type)); \
- } while (0)
+static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+ unsigned start2, unsigned end2)
+{
+ void *kaddr = kmap_atomic(page, KM_USER0);
+
+ BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
+
+ if (end1 > start1)
+ memset(kaddr + start1, 0, end1 - start1);
+
+ if (end2 > start2)
+ memset(kaddr + start2, 0, end2 - start2);
+
+ kunmap_atomic(kaddr, KM_USER0);
+ flush_dcache_page(page);
+}
+
+static inline void zero_user_segment(struct page *page,
+ unsigned start, unsigned end)
+{
+ zero_user_segments(page, start, end, 0, 0);
+}
+
+static inline void zero_user(struct page *page,
+ unsigned start, unsigned size)
+{
+ zero_user_segments(page, start, start + size, 0, 0);
+}
static inline void __deprecated memclear_highpage_flush(struct page *page,
unsigned int offset, unsigned int size)
{
- zero_user_page(page, offset, size, KM_USER0);
+ zero_user(page, offset, size);
}
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
@@ -160,8 +170,6 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
copy_user_page(vto, vfrom, vaddr, to);
kunmap_atomic(vfrom, KM_USER0);
kunmap_atomic(vto, KM_USER1);
- /* Make sure this page is cleared on other CPU's too before using it */
- smp_wmb();
}
#endif
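
zero_user(), zero_user_segment() and zero_user_segments() replace the zero_user_page() macro; the two-range form zeroes both edges of a hole under a single kmap_atomic(). A sketch with illustrative helper names:

#include <linux/highmem.h>
#include <linux/mm.h>

static void demo_zero_tail(struct page *page, unsigned int valid)
{
        /* clear everything after the 'valid' bytes of a short write */
        if (valid < PAGE_SIZE)
                zero_user_segment(page, valid, PAGE_SIZE);
}

static void demo_zero_edges(struct page *page, unsigned int from,
                            unsigned int to)
{
        /* zero [0, from) and [to, PAGE_SIZE) around a hole in one mapping */
        zero_user_segments(page, 0, from, to, PAGE_SIZE);
}
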
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index f79dcba4b2c1..203591e23210 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -225,11 +225,14 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
* idea of the (in)accuracy of timers. Timer values are rounded up to
* this resolution values.
*/
-# define KTIME_HIGH_RES (ktime_t) { .tv64 = 1 }
+# define HIGH_RES_NSEC 1
+# define KTIME_HIGH_RES (ktime_t) { .tv64 = HIGH_RES_NSEC }
+# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
#else
+# define MONOTONIC_RES_NSEC LOW_RES_NSEC
# define KTIME_MONOTONIC_RES KTIME_LOW_RES
/*
@@ -301,9 +304,16 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
}
/* Forward a hrtimer so it expires after now: */
-extern unsigned long
+extern u64
hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
+/* Forward a hrtimer so it expires after the hrtimer's current now */
+static inline u64 hrtimer_forward_now(struct hrtimer *timer,
+ ktime_t interval)
+{
+ return hrtimer_forward(timer, timer->base->get_time(), interval);
+}
+
/* Precise sleep: */
extern long hrtimer_nanosleep(struct timespec *rqtp,
struct timespec *rmtp,
@@ -322,9 +332,9 @@ extern void hrtimer_run_pending(void);
extern void __init hrtimers_init(void);
#if BITS_PER_LONG < 64
-extern unsigned long ktime_divns(const ktime_t kt, s64 div);
+extern u64 ktime_divns(const ktime_t kt, s64 div);
#else /* BITS_PER_LONG < 64 */
-# define ktime_divns(kt, div) (unsigned long)((kt).tv64 / (div))
+# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
#endif
/* Show pending timers: */
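
hrtimer_forward_now() plus the u64 overrun count shorten periodic callbacks a little. A sketch of a 100 ms tick; the period, the callback name and the debug message are illustrative:

#include <linux/hrtimer.h>
#include <linux/kernel.h>

static enum hrtimer_restart demo_tick(struct hrtimer *timer)
{
        u64 overruns;

        /* advance past 'now' on the timer's own clock base */
        overruns = hrtimer_forward_now(timer,
                                       ktime_set(0, 100 * NSEC_PER_MSEC));
        if (overruns > 1)
                pr_debug("demo: missed %llu periods\n",
                         (unsigned long long)(overruns - 1));

        return HRTIMER_RESTART;
}
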
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 85d11916e9ea..42131820bb89 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -44,7 +44,15 @@ struct hwrng {
/** Register a new Hardware Random Number Generator driver. */
extern int hwrng_register(struct hwrng *rng);
/** Unregister a Hardware Random Number Generator driver. */
-extern void hwrng_unregister(struct hwrng *rng);
+extern void __hwrng_unregister(struct hwrng *rng, bool suspended);
+static inline void hwrng_unregister(struct hwrng *rng)
+{
+ __hwrng_unregister(rng, false);
+}
+static inline void hwrng_unregister_suspended(struct hwrng *rng)
+{
+ __hwrng_unregister(rng, true);
+}
#endif /* __KERNEL__ */
#endif /* LINUX_HWRANDOM_H_ */
diff --git a/include/linux/i2c/pca953x.h b/include/linux/i2c/pca953x.h
new file mode 100644
index 000000000000..3c7361217df8
--- /dev/null
+++ b/include/linux/i2c/pca953x.h
@@ -0,0 +1,18 @@
+/* platform data for the PCA9539 16-bit I/O expander driver */
+
+struct pca953x_platform_data {
+ /* number of the first GPIO */
+ unsigned gpio_base;
+
+ /* initial polarity inversion setting */
+ uint16_t invert;
+
+ void *context; /* param to setup/teardown */
+
+ int (*setup)(struct i2c_client *client,
+ unsigned gpio, unsigned ngpio,
+ void *context);
+ int (*teardown)(struct i2c_client *client,
+ unsigned gpio, unsigned ngpio,
+ void *context);
+};
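
Board code fills this structure in and hands it to the driver as platform_data. A sketch with an optional setup() hook; the GPIO base and the callback body are illustrative:

#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>

static int demo_pca_setup(struct i2c_client *client,
                          unsigned gpio, unsigned ngpio, void *context)
{
        /* runs once GPIOs gpio .. gpio + ngpio - 1 are usable */
        return 0;
}

static struct pca953x_platform_data demo_pca_pdata = {
        .gpio_base = 64,                /* expander pins become GPIO 64.. */
        .invert    = 0,                 /* no polarity inversion */
        .setup     = demo_pca_setup,
        .context   = NULL,
};
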
diff --git a/include/linux/i2c/pcf857x.h b/include/linux/i2c/pcf857x.h
new file mode 100644
index 000000000000..ba8ea6e16476
--- /dev/null
+++ b/include/linux/i2c/pcf857x.h
@@ -0,0 +1,45 @@
+#ifndef __LINUX_PCF857X_H
+#define __LINUX_PCF857X_H
+
+/**
+ * struct pcf857x_platform_data - data to set up pcf857x driver
+ * @gpio_base: number of the chip's first GPIO
+ * @n_latch: optional bit-inverse of initial register value; if
+ * you leave this initialized to zero the driver will act
+ * like the chip was just reset
+ * @setup: optional callback issued once the GPIOs are valid
+ * @teardown: optional callback issued before the GPIOs are invalidated
+ * @context: optional parameter passed to setup() and teardown()
+ *
+ * In addition to the I2C_BOARD_INFO() state appropriate to each chip,
+ * the i2c_board_info used with the pcf857x driver must provide the
+ * chip "type" ("pcf8574", "pcf8574a", "pcf8575", "pcf8575c") and its
+ * platform_data (pointer to one of these structures) with at least
+ * the gpio_base value initialized.
+ *
+ * The @setup callback may be used with the kind of board-specific glue
+ * which hands the (now-valid) GPIOs to other drivers, or which puts
+ * devices in their initial states using these GPIOs.
+ *
+ * These GPIO chips are only "quasi-bidirectional"; read the chip specs
+ * to understand the behavior. They don't have separate registers to
+ * record which pins are used for input or output, record which output
+ * values are driven, or provide access to input values. That must be
+ * inferred by reading the chip's value and knowing the last value written
+ * to it. If you leave n_latch initialized to zero, that last written
+ * value is presumed to be all ones (as if the chip were just reset).
+ */
+struct pcf857x_platform_data {
+ unsigned gpio_base;
+ unsigned n_latch;
+
+ int (*setup)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ int (*teardown)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ void *context;
+};
+
+#endif /* __LINUX_PCF857X_H */
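
Following the kernel-doc above, board code supplies the chip "type", the I2C address and this platform_data in its i2c_board_info. A sketch; the 0x20 address, the GPIO base and the array name are assumptions for illustration:

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/pcf857x.h>

static struct pcf857x_platform_data demo_pcf_pdata = {
        .gpio_base = 96,        /* this pcf8574 owns GPIO 96..103 */
        .n_latch   = 0,         /* treat the chip as freshly reset */
};

static struct i2c_board_info demo_board_i2c[] __initdata = {
        {
                .type          = "pcf8574",
                .addr          = 0x20,
                .platform_data = &demo_pcf_pdata,
        },
};
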
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 367c17084a28..acec99da832d 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -115,10 +115,6 @@ typedef unsigned char byte; /* used everywhere */
#define SATA_ERROR_OFFSET (1)
#define SATA_CONTROL_OFFSET (2)
-#define SATA_MISC_OFFSET (0)
-#define SATA_PHY_OFFSET (1)
-#define SATA_IEN_OFFSET (2)
-
/*
* Our Physical Region Descriptor (PRD) table should be large enough
* to handle the biggest I/O request we are likely to see. Since requests
@@ -173,7 +169,7 @@ enum { ide_unknown, ide_generic, ide_pci,
ide_rz1000, ide_trm290,
ide_cmd646, ide_cy82c693, ide_4drives,
ide_pmac, ide_etrax100, ide_acorn,
- ide_au1xxx, ide_forced
+ ide_au1xxx, ide_palm3710, ide_forced
};
typedef u8 hwif_chipset_t;
@@ -198,17 +194,6 @@ struct ide_drive_s;
int ide_register_hw(hw_regs_t *, void (*)(struct ide_drive_s *),
struct hwif_s **);
-void ide_setup_ports( hw_regs_t *hw,
- unsigned long base,
- int *offsets,
- unsigned long ctrl,
- unsigned long intr,
- ide_ack_intr_t *ack_intr,
-#if 0
- ide_io_ops_t *iops,
-#endif
- int irq);
-
static inline void ide_std_init_ports(hw_regs_t *hw,
unsigned long io_addr,
unsigned long ctl_addr)
@@ -473,7 +458,6 @@ typedef struct hwif_s {
/* task file registers for pata and sata */
unsigned long io_ports[IDE_NR_PORTS];
unsigned long sata_scr[SATA_NR_PORTS];
- unsigned long sata_misc[SATA_NR_PORTS];
ide_drive_t drives[MAX_DRIVES]; /* drive info */
@@ -1014,7 +998,8 @@ extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *o
void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, u8 *);
void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+/* FIXME: palm_bk3710 uses BLK_DEV_IDEDMA_PCI without BLK_DEV_IDEPCI! */
+#if defined(CONFIG_BLK_DEV_IDEPCI) && defined(CONFIG_BLK_DEV_IDEDMA_PCI)
void ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
#else
static inline void ide_hwif_setup_dma(ide_hwif_t *hwif,
@@ -1324,4 +1309,25 @@ static inline void ide_set_irq(ide_drive_t *drive, int on)
drive->hwif->OUTB(drive->ctl | (on ? 0 : 2), IDE_CONTROL_REG);
}
+static inline u8 ide_read_status(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ return hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+}
+
+static inline u8 ide_read_altstatus(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ return hwif->INB(hwif->io_ports[IDE_CONTROL_OFFSET]);
+}
+
+static inline u8 ide_read_error(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]);
+}
+
#endif /* _IDE_H */
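
ide_read_status(), ide_read_altstatus() and ide_read_error() wrap the hwif->INB(io_ports[...]) idiom. An illustrative, deliberately simplified busy-wait; real code would bound the loop with a timeout, and 0x80/0x01 are the ATA BSY/ERR status bits:

#include <linux/ide.h>

static int demo_wait_not_busy(ide_drive_t *drive)
{
        u8 stat;

        do {
                stat = ide_read_status(drive);
        } while (stat & 0x80);                  /* BSY still set */

        if (stat & 0x01)                        /* ERR: fetch the details */
                return ide_read_error(drive);
        return 0;
}
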
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 34f40efc7607..79504b22a932 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -327,7 +327,7 @@ static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, unsigned short t
*
* Returns error if the skb is not of VLAN type
*/
-static inline int __vlan_get_tag(struct sk_buff *skb, unsigned short *tag)
+static inline int __vlan_get_tag(const struct sk_buff *skb, unsigned short *tag)
{
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
@@ -347,7 +347,8 @@ static inline int __vlan_get_tag(struct sk_buff *skb, unsigned short *tag)
*
* Returns error if @skb->cb[] is not set correctly
*/
-static inline int __vlan_hwaccel_get_tag(struct sk_buff *skb, unsigned short *tag)
+static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
+ unsigned short *tag)
{
struct vlan_skb_tx_cookie *cookie;
@@ -370,7 +371,7 @@ static inline int __vlan_hwaccel_get_tag(struct sk_buff *skb, unsigned short *ta
*
* Returns error if the skb is not VLAN tagged
*/
-static inline int vlan_get_tag(struct sk_buff *skb, unsigned short *tag)
+static inline int vlan_get_tag(const struct sk_buff *skb, unsigned short *tag)
{
if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
return __vlan_hwaccel_get_tag(skb, tag);
diff --git a/include/linux/init.h b/include/linux/init.h
index 90cdbbbbe077..a404a0055dd7 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -110,6 +110,7 @@
#define __FINIT .previous
#define __INITDATA .section ".init.data","aw"
+#define __FINITDATA .previous
#define __DEVINIT .section ".devinit.text", "ax"
#define __DEVINITDATA .section ".devinit.data", "aw"
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index f42663eaf655..1f74e1d7415f 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -121,6 +121,18 @@ extern struct group_info init_groups;
#else
#define INIT_IDS
#endif
+
+#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+/*
+ * Because of the reduced scope of CAP_SETPCAP when filesystem
+ * capabilities are in effect, it is safe to allow CAP_SETPCAP to
+ * be available in the default configuration.
+ */
+# define CAP_INIT_BSET CAP_FULL_SET
+#else
+# define CAP_INIT_BSET CAP_INIT_EFF_SET
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -156,6 +168,7 @@ extern struct group_info init_groups;
.cap_effective = CAP_INIT_EFF_SET, \
.cap_inheritable = CAP_INIT_INH_SET, \
.cap_permitted = CAP_FULL_SET, \
+ .cap_bset = CAP_INIT_BSET, \
.keep_capabilities = 0, \
.user = INIT_USER, \
.comm = "swapper", \
diff --git a/include/linux/input.h b/include/linux/input.h
index 056a17a4f34f..1bdc39a8c76c 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1020,7 +1020,6 @@ struct ff_effect {
* @going_away: marks devices that are in a middle of unregistering and
* causes input_open_device*() fail with -ENODEV.
* @dev: driver model's view of this device
- * @cdev: union for struct device pointer
* @h_list: list of input handles associated with the device. When
* accessing the list dev->mutex must be held
* @node: used to place the device onto input_dev_list
@@ -1085,9 +1084,6 @@ struct input_dev {
int going_away;
struct device dev;
- union { /* temporarily so while we switching to struct device */
- struct device *dev;
- } cdev;
struct list_head h_list;
struct list_head node;
@@ -1311,6 +1307,9 @@ static inline void input_set_abs_params(struct input_dev *dev, int axis, int min
dev->absbit[BIT_WORD(axis)] |= BIT_MASK(axis);
}
+int input_get_keycode(struct input_dev *dev, int scancode, int *keycode);
+int input_set_keycode(struct input_dev *dev, int scancode, int keycode);
+
extern struct class input_class;
/**
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c3db4a00f1fa..dea7598aeff4 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -444,4 +444,6 @@ static inline void init_irq_proc(void)
}
#endif
+int show_interrupts(struct seq_file *p, void *v);
+
#endif
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
new file mode 100644
index 000000000000..4dd4c04ff2f4
--- /dev/null
+++ b/include/linux/iommu-helper.h
@@ -0,0 +1,7 @@
+extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr,
+ unsigned long shift,
+ unsigned long boundary_size,
+ unsigned long align_mask);
+extern void iommu_area_free(unsigned long *map, unsigned long start,
+ unsigned int nr);
diff --git a/include/linux/isdn.h b/include/linux/isdn.h
index d0ecc8eebfbf..9cb2855bb170 100644
--- a/include/linux/isdn.h
+++ b/include/linux/isdn.h
@@ -507,7 +507,6 @@ typedef struct modem_info {
struct ktermios normal_termios; /* For saving termios structs */
struct ktermios callout_termios;
wait_queue_head_t open_wait, close_wait;
- struct semaphore write_sem;
spinlock_t readlock;
} modem_info;
diff --git a/include/linux/isicom.h b/include/linux/isicom.h
index 45b3d48f0978..8f4c71759d73 100644
--- a/include/linux/isicom.h
+++ b/include/linux/isicom.h
@@ -37,8 +37,6 @@
#define BOARD_COUNT 4
#define PORT_COUNT (BOARD_COUNT*16)
-#define SERIAL_TYPE_NORMAL 1
-
/* character sizes */
#define ISICOM_CS5 0x0000
diff --git a/include/linux/istallion.h b/include/linux/istallion.h
index 106a5e85e5c4..5a84fe944b74 100644
--- a/include/linux/istallion.h
+++ b/include/linux/istallion.h
@@ -71,7 +71,6 @@ struct stliport {
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
wait_queue_head_t raw_wait;
- struct work_struct tqhangup;
struct asysigs asig;
unsigned long addr;
unsigned long rxoffset;
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index d9ecd13393b0..b18fd3b9b835 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -33,7 +33,6 @@
#include <linux/lockdep.h>
#include <asm/semaphore.h>
-#endif
#define journal_oom_retry 1
@@ -84,7 +83,6 @@ static inline void jbd_free(void *ptr, size_t size)
#define JFS_MIN_JOURNAL_BLOCKS 1024
-#ifdef __KERNEL__
/**
* typedef handle_t - The handle_t type represents a single atomic update being performed by some process.
@@ -924,7 +922,6 @@ extern int journal_recover (journal_t *journal);
extern int journal_wipe (journal_t *, int);
extern int journal_skip_recovery (journal_t *);
extern void journal_update_superblock (journal_t *, int);
-extern void __journal_abort_hard (journal_t *);
extern void journal_abort (journal_t *, int);
extern int journal_errno (journal_t *);
extern void journal_ack_err (journal_t *);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index ff356b2ee478..9e01f376840a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -35,7 +35,7 @@ extern const char linux_proc_banner[];
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
-#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
@@ -243,6 +243,7 @@ extern enum system_states {
#define TAINT_BAD_PAGE (1<<5)
#define TAINT_USER (1<<6)
#define TAINT_DIE (1<<7)
+#define TAINT_OVERRIDDEN_ACPI_TABLE (1<<8)
extern void dump_stack(void) __cold;
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 2d9c448d8c52..3265968cd2cd 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -127,17 +127,21 @@ void vmcoreinfo_append_str(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
unsigned long paddr_vmcoreinfo_note(void);
+#define VMCOREINFO_OSRELEASE(name) \
+ vmcoreinfo_append_str("OSRELEASE=%s\n", #name)
+#define VMCOREINFO_PAGESIZE(value) \
+ vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
#define VMCOREINFO_SYMBOL(name) \
vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
#define VMCOREINFO_SIZE(name) \
vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(struct name))
-#define VMCOREINFO_TYPEDEF_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
(unsigned long)sizeof(name))
+#define VMCOREINFO_STRUCT_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
+ (unsigned long)sizeof(struct name))
#define VMCOREINFO_OFFSET(name, field) \
vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
- (unsigned long)&(((struct name *)0)->field))
+ (unsigned long)offsetof(struct name, field))
#define VMCOREINFO_LENGTH(name, value) \
vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
#define VMCOREINFO_NUMBER(name) \
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 6168c0a44172..4a6ce82ba039 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -152,8 +152,10 @@ static inline int arch_trampoline_kprobe(struct kprobe *p)
struct kretprobe {
struct kprobe kp;
kretprobe_handler_t handler;
+ kretprobe_handler_t entry_handler;
int maxactive;
int nmissed;
+ size_t data_size;
struct hlist_head free_instances;
struct hlist_head used_instances;
};
@@ -164,6 +166,7 @@ struct kretprobe_instance {
struct kretprobe *rp;
kprobe_opcode_t *ret_addr;
struct task_struct *task;
+ char data[0];
};
struct kretprobe_blackpoint {
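
The new entry_handler and data_size/data[] fields let a kretprobe stash per-call state at function entry and read it back on return. A sketch that times calls; the probed symbol, the timing scheme and all demo_* names are assumptions, not part of the API:

#include <linux/kprobes.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>

struct demo_rp_data {
        ktime_t entry;
};

static int demo_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct demo_rp_data *d = (struct demo_rp_data *)ri->data;

        d->entry = ktime_get();
        return 0;                       /* 0: keep tracking this instance */
}

static int demo_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        struct demo_rp_data *d = (struct demo_rp_data *)ri->data;
        s64 ns = ktime_to_ns(ktime_sub(ktime_get(), d->entry));

        pr_debug("demo: call took %lld ns\n", (long long)ns);
        return 0;
}

static struct kretprobe demo_rp = {
        .kp.symbol_name = "do_fork",    /* assumed target */
        .entry_handler  = demo_entry,
        .handler        = demo_ret,
        .data_size      = sizeof(struct demo_rp_data),
        .maxactive      = 16,
};

/* registered/unregistered with register_kretprobe()/unregister_kretprobe() */
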
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index a6ddec141f96..36c542b70c6d 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -316,7 +316,8 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
* idea of the (in)accuracy of timers. Timer values are rounded up to
* this resolution values.
*/
-#define KTIME_LOW_RES (ktime_t){ .tv64 = TICK_NSEC }
+#define LOW_RES_NSEC TICK_NSEC
+#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC }
/* Get the monotonic time in timespec format: */
extern void ktime_get_ts(struct timespec *ts);
diff --git a/include/linux/latency.h b/include/linux/latency.h
deleted file mode 100644
index c08b52bb55b0..000000000000
--- a/include/linux/latency.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * latency.h: Explicit system-wide latency-expectation infrastructure
- *
- * (C) Copyright 2006 Intel Corporation
- * Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- */
-
-#ifndef _INCLUDE_GUARD_LATENCY_H_
-#define _INCLUDE_GUARD_LATENCY_H_
-
-#include <linux/notifier.h>
-
-void set_acceptable_latency(char *identifier, int usecs);
-void modify_acceptable_latency(char *identifier, int usecs);
-void remove_acceptable_latency(char *identifier);
-void synchronize_acceptable_latency(void);
-int system_latency_constraint(void);
-
-int register_latency_notifier(struct notifier_block * nb);
-int unregister_latency_notifier(struct notifier_block * nb);
-
-#define INFINITE_LATENCY 1000000
-
-#endif
diff --git a/include/linux/leds.h b/include/linux/leds.h
index b4130ff58d0c..0201f6f51cea 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -38,6 +38,11 @@ struct led_classdev {
void (*brightness_set)(struct led_classdev *led_cdev,
enum led_brightness brightness);
+ /* Activate hardware accelerated blink */
+ int (*blink_set)(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+
struct device *dev;
struct list_head node; /* LED Device list */
char *default_trigger; /* Trigger to use */
@@ -54,7 +59,15 @@ struct led_classdev {
extern int led_classdev_register(struct device *parent,
struct led_classdev *led_cdev);
-extern void led_classdev_unregister(struct led_classdev *led_cdev);
+extern void __led_classdev_unregister(struct led_classdev *led_cdev, bool sus);
+static inline void led_classdev_unregister(struct led_classdev *lcd)
+{
+ __led_classdev_unregister(lcd, false);
+}
+static inline void led_classdev_unregister_suspended(struct led_classdev *lcd)
+{
+ __led_classdev_unregister(lcd, true);
+}
extern void led_classdev_suspend(struct led_classdev *led_cdev);
extern void led_classdev_resume(struct led_classdev *led_cdev);
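
blink_set() lets an LED driver offer hardware-accelerated blinking: it may round *delay_on/*delay_off to what the hardware supports and should return 0 only when it actually takes over, so that the trigger can fall back to software timers otherwise. A hedged driver-side sketch; the mychip_*() accessors are hypothetical.

        /* Hedged sketch of a driver exposing blink_set(); the mychip_*()
         * helpers are hypothetical hardware accessors, not a real API. */
        #include <linux/leds.h>

        extern void mychip_write_brightness(enum led_brightness b);    /* hypothetical */
        extern void mychip_program_blink(unsigned long period_ms);     /* hypothetical */

        static void mychip_led_set(struct led_classdev *cdev, enum led_brightness b)
        {
                mychip_write_brightness(b);
        }

        static int mychip_led_blink(struct led_classdev *cdev,
                                    unsigned long *delay_on, unsigned long *delay_off)
        {
                /* 0/0 is conventionally "pick a sensible default" */
                if (*delay_on == 0 && *delay_off == 0)
                        *delay_on = *delay_off = 500;

                /* Hardware only blinks symmetrically; refuse anything else so
                 * the trigger falls back to software blinking. */
                if (*delay_on != *delay_off)
                        return -EINVAL;

                mychip_program_blink(*delay_on);
                return 0;
        }

        static struct led_classdev mychip_led = {
                .name           = "mychip:green:status",
                .brightness_set = mychip_led_set,
                .blink_set      = mychip_led_blink,
        };
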
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 4374c4277780..bc5a8d0c7090 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -457,7 +457,6 @@ struct ata_queued_cmd {
unsigned long flags; /* ATA_QCFLAG_xxx */
unsigned int tag;
unsigned int n_elem;
- unsigned int n_iter;
unsigned int mapped_n_elem;
int dma_dir;
@@ -1367,7 +1366,6 @@ static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
qc->nbytes = qc->raw_nbytes = qc->curbytes = 0;
qc->n_elem = 0;
qc->mapped_n_elem = 0;
- qc->n_iter = 0;
qc->err_mask = 0;
qc->pad_len = 0;
qc->last_sg = NULL;
diff --git a/include/linux/log2.h b/include/linux/log2.h
index c8cf5e8ef171..25b808631cd9 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -190,4 +190,20 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
__rounddown_pow_of_two(n) \
)
+/**
+ * order_base_2 - calculate the (rounded up) base 2 order of the argument
+ * @n: parameter
+ *
+ * The first few values calculated by this routine:
+ * ob2(0) = 0
+ * ob2(1) = 0
+ * ob2(2) = 1
+ * ob2(3) = 2
+ * ob2(4) = 2
+ * ob2(5) = 3
+ * ... and so on.
+ */
+
+#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+
#endif /* _LINUX_LOG2_H */
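
A couple of illustrative uses of order_base_2(), following the value table in the comment above:

        #include <linux/log2.h>

        #define MY_QUEUE_ENTRIES        200
        /* Round a queue depth up to a power of two and keep its order: 200 -> 8 */
        #define MY_QUEUE_ORDER          order_base_2(MY_QUEUE_ENTRIES)

        static unsigned int example_slots(unsigned int nents)
        {
                /* 2^order slots are enough to hold nents entries */
                return 1U << order_base_2(nents);
        }
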
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 26a0a103898f..46169a7b559b 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -76,6 +76,7 @@ struct loop_device {
enum {
LO_FLAGS_READ_ONLY = 1,
LO_FLAGS_USE_AOPS = 2,
+ LO_FLAGS_AUTOCLEAR = 4,
};
#include <asm/posix_types.h> /* for __kernel_old_dev_t */
diff --git a/include/linux/lp.h b/include/linux/lp.h
index 7059b6b9878a..0df024bfd6f0 100644
--- a/include/linux/lp.h
+++ b/include/linux/lp.h
@@ -99,7 +99,7 @@
#ifdef __KERNEL__
#include <linux/wait.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
/* Magic numbers for defining port-device mappings */
#define LP_PARPORT_UNSPEC -4
@@ -145,7 +145,7 @@ struct lp_struct {
#endif
wait_queue_head_t waitq;
unsigned int last_error;
- struct semaphore port_mutex;
+ struct mutex port_mutex;
wait_queue_head_t dataq;
long timeout;
unsigned int best_mode;
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
new file mode 100644
index 000000000000..9815951ec995
--- /dev/null
+++ b/include/linux/memcontrol.h
@@ -0,0 +1,190 @@
+/* memcontrol.h - Memory Controller
+ *
+ * Copyright IBM Corporation, 2007
+ * Author Balbir Singh <balbir@linux.vnet.ibm.com>
+ *
+ * Copyright 2007 OpenVZ SWsoft Inc
+ * Author: Pavel Emelianov <xemul@openvz.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MEMCONTROL_H
+#define _LINUX_MEMCONTROL_H
+
+#include <linux/rcupdate.h>
+#include <linux/mm.h>
+
+struct mem_cgroup;
+struct page_cgroup;
+struct page;
+struct mm_struct;
+
+#ifdef CONFIG_CGROUP_MEM_CONT
+
+extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p);
+extern void mm_free_cgroup(struct mm_struct *mm);
+extern void page_assign_page_cgroup(struct page *page,
+ struct page_cgroup *pc);
+extern struct page_cgroup *page_get_page_cgroup(struct page *page);
+extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask);
+extern void mem_cgroup_uncharge(struct page_cgroup *pc);
+extern void mem_cgroup_uncharge_page(struct page *page);
+extern void mem_cgroup_move_lists(struct page_cgroup *pc, bool active);
+extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
+ struct list_head *dst,
+ unsigned long *scanned, int order,
+ int mode, struct zone *z,
+ struct mem_cgroup *mem_cont,
+ int active);
+extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
+extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask);
+int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
+
+static inline struct mem_cgroup *mm_cgroup(const struct mm_struct *mm)
+{
+ return rcu_dereference(mm->mem_cgroup);
+}
+
+extern int mem_cgroup_prepare_migration(struct page *page);
+extern void mem_cgroup_end_migration(struct page *page);
+extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
+
+/*
+ * For memory reclaim.
+ */
+extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
+extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);
+
+extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
+extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
+ int priority);
+extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
+ int priority);
+
+extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
+ struct zone *zone, int priority);
+extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
+ struct zone *zone, int priority);
+
+#else /* CONFIG_CGROUP_MEM_CONT */
+static inline void mm_init_cgroup(struct mm_struct *mm,
+ struct task_struct *p)
+{
+}
+
+static inline void mm_free_cgroup(struct mm_struct *mm)
+{
+}
+
+static inline void page_assign_page_cgroup(struct page *page,
+ struct page_cgroup *pc)
+{
+}
+
+static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
+{
+ return NULL;
+}
+
+static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline void mem_cgroup_uncharge(struct page_cgroup *pc)
+{
+}
+
+static inline void mem_cgroup_uncharge_page(struct page *page)
+{
+}
+
+static inline void mem_cgroup_move_lists(struct page_cgroup *pc,
+ bool active)
+{
+}
+
+static inline int mem_cgroup_cache_charge(struct page *page,
+ struct mm_struct *mm,
+ gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline struct mem_cgroup *mm_cgroup(const struct mm_struct *mm)
+{
+ return NULL;
+}
+
+static inline int task_in_mem_cgroup(struct task_struct *task,
+ const struct mem_cgroup *mem)
+{
+ return 1;
+}
+
+static inline int mem_cgroup_prepare_migration(struct page *page)
+{
+ return 0;
+}
+
+static inline void mem_cgroup_end_migration(struct page *page)
+{
+}
+
+static inline void
+mem_cgroup_page_migration(struct page *page, struct page *newpage)
+{
+}
+
+static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+ return 0;
+}
+
+static inline int mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
+{
+ return 0;
+}
+
+static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
+{
+ return 0;
+}
+
+static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
+ int priority)
+{
+}
+
+static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
+ int priority)
+{
+}
+
+static inline long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
+ struct zone *zone, int priority)
+{
+ return 0;
+}
+
+static inline long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
+ struct zone *zone, int priority)
+{
+ return 0;
+}
+#endif /* CONFIG_CGROUP_MEM_CONT */
+
+#endif /* _LINUX_MEMCONTROL_H */
+
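
These declarations are the hooks the VM calls when CONFIG_CGROUP_MEM_CONT is enabled; the stub versions keep the call sites unconditional. A hedged sketch of the calling pattern only, charging before a page is mapped and uncharging when the mapping goes away (the real callers live in mm/memory.c, mm/filemap.c and friends):

        #include <linux/memcontrol.h>
        #include <linux/mm.h>

        static int example_map_anon_page(struct page *page, struct mm_struct *mm)
        {
                if (mem_cgroup_charge(page, mm, GFP_KERNEL))
                        return -ENOMEM;         /* over the cgroup's limit */

                /* ... install the pte, add the page to the LRU ... */
                return 0;
        }

        static void example_unmap_page(struct page *page)
        {
                /* ... clear the pte ... */
                mem_cgroup_uncharge_page(page);
        }
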
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
new file mode 100644
index 000000000000..4ab2162db13b
--- /dev/null
+++ b/include/linux/mfd/asic3.h
@@ -0,0 +1,497 @@
+/*
+ * include/linux/mfd/asic3.h
+ *
+ * Compaq ASIC3 headers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Copyright 2001 Compaq Computer Corporation.
+ * Copyright 2007 OpendHand.
+ */
+
+#ifndef __ASIC3_H__
+#define __ASIC3_H__
+
+#include <linux/types.h>
+
+struct asic3 {
+ void __iomem *mapping;
+ unsigned int bus_shift;
+ unsigned int irq_nr;
+ unsigned int irq_base;
+ spinlock_t lock;
+ u16 irq_bothedge[4];
+ struct device *dev;
+};
+
+struct asic3_platform_data {
+ struct {
+ u32 dir;
+ u32 init;
+ u32 sleep_mask;
+ u32 sleep_out;
+ u32 batt_fault_out;
+ u32 sleep_conf;
+ u32 alt_function;
+ } gpio_a, gpio_b, gpio_c, gpio_d;
+
+ unsigned int bus_shift;
+
+ unsigned int irq_base;
+
+ struct platform_device **children;
+ unsigned int n_children;
+};
+
+int asic3_gpio_get_value(struct asic3 *asic, unsigned gpio);
+void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val);
+
+#define ASIC3_NUM_GPIO_BANKS 4
+#define ASIC3_GPIOS_PER_BANK 16
+#define ASIC3_NUM_GPIOS 64
+#define ASIC3_NR_IRQS ASIC3_NUM_GPIOS + 6
+
+#define ASIC3_GPIO_BANK_A 0
+#define ASIC3_GPIO_BANK_B 1
+#define ASIC3_GPIO_BANK_C 2
+#define ASIC3_GPIO_BANK_D 3
+
+#define ASIC3_GPIO(bank, gpio) \
+ ((ASIC3_GPIOS_PER_BANK * ASIC3_GPIO_BANK_##bank) + (gpio))
+#define ASIC3_GPIO_bit(gpio) (1 << (gpio & 0xf))
+/* All offsets below are specified with this address bus shift */
+#define ASIC3_DEFAULT_ADDR_SHIFT 2
+
+#define ASIC3_OFFSET(base, reg) (ASIC3_##base##_Base + ASIC3_##base##_##reg)
+#define ASIC3_GPIO_OFFSET(base, reg) \
+ (ASIC3_GPIO_##base##_Base + ASIC3_GPIO_##reg)
+
+#define ASIC3_GPIO_A_Base 0x0000
+#define ASIC3_GPIO_B_Base 0x0100
+#define ASIC3_GPIO_C_Base 0x0200
+#define ASIC3_GPIO_D_Base 0x0300
+
+#define ASIC3_GPIO_Mask 0x00 /* R/W 0:don't mask */
+#define ASIC3_GPIO_Direction 0x04 /* R/W 0:input */
+#define ASIC3_GPIO_Out 0x08 /* R/W 0:output low */
+#define ASIC3_GPIO_TriggerType 0x0c /* R/W 0:level */
+#define ASIC3_GPIO_EdgeTrigger 0x10 /* R/W 0:falling */
+#define ASIC3_GPIO_LevelTrigger 0x14 /* R/W 0:low level detect */
+#define ASIC3_GPIO_SleepMask 0x18 /* R/W 0:don't mask in sleep mode */
+#define ASIC3_GPIO_SleepOut 0x1c /* R/W level 0:low in sleep mode */
+#define ASIC3_GPIO_BattFaultOut 0x20 /* R/W level 0:low in batt_fault */
+#define ASIC3_GPIO_IntStatus 0x24 /* R/W 0:none, 1:detect */
+#define ASIC3_GPIO_AltFunction 0x28 /* R/W 1:LED register control */
+#define ASIC3_GPIO_SleepConf 0x2c /*
+ * R/W bit 1: autosleep
+ * 0: disable gposlpout in normal mode,
+ * enable gposlpout in sleep mode.
+ */
+#define ASIC3_GPIO_Status 0x30 /* R Pin status */
+
+#define ASIC3_SPI_Base 0x0400
+#define ASIC3_SPI_Control 0x0000
+#define ASIC3_SPI_TxData 0x0004
+#define ASIC3_SPI_RxData 0x0008
+#define ASIC3_SPI_Int 0x000c
+#define ASIC3_SPI_Status 0x0010
+
+#define SPI_CONTROL_SPR(clk) ((clk) & 0x0f) /* Clock rate */
+
+#define ASIC3_PWM_0_Base 0x0500
+#define ASIC3_PWM_1_Base 0x0600
+#define ASIC3_PWM_TimeBase 0x0000
+#define ASIC3_PWM_PeriodTime 0x0004
+#define ASIC3_PWM_DutyTime 0x0008
+
+#define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */
+#define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */
+
+#define ASIC3_LED_0_Base 0x0700
+#define ASIC3_LED_1_Base 0x0800
+#define ASIC3_LED_2_Base 0x0900
+#define ASIC3_LED_TimeBase 0x0000 /* R/W 7 bits */
+#define ASIC3_LED_PeriodTime 0x0004 /* R/W 12 bits */
+#define ASIC3_LED_DutyTime 0x0008 /* R/W 12 bits */
+#define ASIC3_LED_AutoStopCount 0x000c /* R/W 16 bits */
+
+/* LED TimeBase bits - match ASIC2 */
+#define LED_TBS 0x0f /* Low 4 bits sets time base, max = 13 */
+ /* Note: max = 5 on hx4700 */
+ /* 0: maximum time base */
+ /* 1: maximum time base / 2 */
+ /* n: maximum time base / 2^n */
+
+#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */
+#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop 0:disable, 1:enable */
+#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */
+
+#define ASIC3_CLOCK_Base 0x0A00
+#define ASIC3_CLOCK_CDEX 0x00
+#define ASIC3_CLOCK_SEL 0x04
+
+#define CLOCK_CDEX_SOURCE (1 << 0) /* 2 bits */
+#define CLOCK_CDEX_SOURCE0 (1 << 0)
+#define CLOCK_CDEX_SOURCE1 (1 << 1)
+#define CLOCK_CDEX_SPI (1 << 2)
+#define CLOCK_CDEX_OWM (1 << 3)
+#define CLOCK_CDEX_PWM0 (1 << 4)
+#define CLOCK_CDEX_PWM1 (1 << 5)
+#define CLOCK_CDEX_LED0 (1 << 6)
+#define CLOCK_CDEX_LED1 (1 << 7)
+#define CLOCK_CDEX_LED2 (1 << 8)
+
+/* Clocks settings: 1 for 24.576 MHz, 0 for 12.288Mhz */
+#define CLOCK_CDEX_SD_HOST (1 << 9) /* R/W: SD host clock source */
+#define CLOCK_CDEX_SD_BUS (1 << 10) /* R/W: SD bus clock source ctrl */
+#define CLOCK_CDEX_SMBUS (1 << 11)
+#define CLOCK_CDEX_CONTROL_CX (1 << 12)
+
+#define CLOCK_CDEX_EX0 (1 << 13) /* R/W: 32.768 kHz crystal */
+#define CLOCK_CDEX_EX1 (1 << 14) /* R/W: 24.576 MHz crystal */
+
+#define CLOCK_SEL_SD_HCLK_SEL (1 << 0) /* R/W: SDIO host clock select */
+#define CLOCK_SEL_SD_BCLK_SEL (1 << 1) /* R/W: SDIO bus clock select */
+
+/* R/W: INT clock source control (32.768 kHz) */
+#define CLOCK_SEL_CX (1 << 2)
+
+
+#define ASIC3_INTR_Base 0x0B00
+
+#define ASIC3_INTR_IntMask 0x00 /* Interrupt mask control */
+#define ASIC3_INTR_PIntStat 0x04 /* Peripheral interrupt status */
+#define ASIC3_INTR_IntCPS 0x08 /* Interrupt timer clock pre-scale */
+#define ASIC3_INTR_IntTBS 0x0c /* Interrupt timer set */
+
+#define ASIC3_INTMASK_GINTMASK (1 << 0) /* Global INTs mask 1:enable */
+#define ASIC3_INTMASK_GINTEL (1 << 1) /* 1: rising edge, 0: hi level */
+#define ASIC3_INTMASK_MASK0 (1 << 2)
+#define ASIC3_INTMASK_MASK1 (1 << 3)
+#define ASIC3_INTMASK_MASK2 (1 << 4)
+#define ASIC3_INTMASK_MASK3 (1 << 5)
+#define ASIC3_INTMASK_MASK4 (1 << 6)
+#define ASIC3_INTMASK_MASK5 (1 << 7)
+
+#define ASIC3_INTR_PERIPHERAL_A (1 << 0)
+#define ASIC3_INTR_PERIPHERAL_B (1 << 1)
+#define ASIC3_INTR_PERIPHERAL_C (1 << 2)
+#define ASIC3_INTR_PERIPHERAL_D (1 << 3)
+#define ASIC3_INTR_LED0 (1 << 4)
+#define ASIC3_INTR_LED1 (1 << 5)
+#define ASIC3_INTR_LED2 (1 << 6)
+#define ASIC3_INTR_SPI (1 << 7)
+#define ASIC3_INTR_SMBUS (1 << 8)
+#define ASIC3_INTR_OWM (1 << 9)
+
+#define ASIC3_INTR_CPS(x) ((x)&0x0f) /* 4 bits, max 14 */
+#define ASIC3_INTR_CPS_SET (1 << 4) /* Time base enable */
+
+
+/* Basic control of the SD ASIC */
+#define ASIC3_SDHWCTRL_Base 0x0E00
+#define ASIC3_SDHWCTRL_SDConf 0x00
+
+#define ASIC3_SDHWCTRL_SUSPEND (1 << 0) /* 1=suspend all SD operations */
+#define ASIC3_SDHWCTRL_CLKSEL (1 << 1) /* 1=SDICK, 0=HCLK */
+#define ASIC3_SDHWCTRL_PCLR (1 << 2) /* All registers of SDIO cleared */
+#define ASIC3_SDHWCTRL_LEVCD (1 << 3) /* SD card detection: 0:low */
+
+/* SD card write protection: 0=high */
+#define ASIC3_SDHWCTRL_LEVWP (1 << 4)
+#define ASIC3_SDHWCTRL_SDLED (1 << 5) /* SD card LED signal 0=disable */
+
+/* SD card power supply ctrl 1=enable */
+#define ASIC3_SDHWCTRL_SDPWR (1 << 6)
+
+#define ASIC3_EXTCF_Base 0x1100
+
+#define ASIC3_EXTCF_Select 0x00
+#define ASIC3_EXTCF_Reset 0x04
+
+#define ASIC3_EXTCF_SMOD0 (1 << 0) /* slot number of mode 0 */
+#define ASIC3_EXTCF_SMOD1 (1 << 1) /* slot number of mode 1 */
+#define ASIC3_EXTCF_SMOD2 (1 << 2) /* slot number of mode 2 */
+#define ASIC3_EXTCF_OWM_EN (1 << 4) /* enable onewire module */
+#define ASIC3_EXTCF_OWM_SMB (1 << 5) /* OWM bus selection */
+#define ASIC3_EXTCF_OWM_RESET (1 << 6) /* ?? used by OWM and CF */
+#define ASIC3_EXTCF_CF0_SLEEP_MODE (1 << 7) /* CF0 sleep state */
+#define ASIC3_EXTCF_CF1_SLEEP_MODE (1 << 8) /* CF1 sleep state */
+#define ASIC3_EXTCF_CF0_PWAIT_EN (1 << 10) /* CF0 PWAIT_n control */
+#define ASIC3_EXTCF_CF1_PWAIT_EN (1 << 11) /* CF1 PWAIT_n control */
+#define ASIC3_EXTCF_CF0_BUF_EN (1 << 12) /* CF0 buffer control */
+#define ASIC3_EXTCF_CF1_BUF_EN (1 << 13) /* CF1 buffer control */
+#define ASIC3_EXTCF_SD_MEM_ENABLE (1 << 14)
+#define ASIC3_EXTCF_CF_SLEEP (1 << 15) /* CF sleep mode control */
+
+/*********************************************
+ * The Onewire interface registers
+ *
+ * OWM_CMD
+ * OWM_DAT
+ * OWM_INTR
+ * OWM_INTEN
+ * OWM_CLKDIV
+ *
+ *********************************************/
+
+#define ASIC3_OWM_Base 0xC00
+
+#define ASIC3_OWM_CMD 0x00
+#define ASIC3_OWM_DAT 0x04
+#define ASIC3_OWM_INTR 0x08
+#define ASIC3_OWM_INTEN 0x0C
+#define ASIC3_OWM_CLKDIV 0x10
+
+#define ASIC3_OWM_CMD_ONEWR (1 << 0)
+#define ASIC3_OWM_CMD_SRA (1 << 1)
+#define ASIC3_OWM_CMD_DQO (1 << 2)
+#define ASIC3_OWM_CMD_DQI (1 << 3)
+
+#define ASIC3_OWM_INTR_PD (1 << 0)
+#define ASIC3_OWM_INTR_PDR (1 << 1)
+#define ASIC3_OWM_INTR_TBE (1 << 2)
+#define ASIC3_OWM_INTR_TEMP (1 << 3)
+#define ASIC3_OWM_INTR_RBF (1 << 4)
+
+#define ASIC3_OWM_INTEN_EPD (1 << 0)
+#define ASIC3_OWM_INTEN_IAS (1 << 1)
+#define ASIC3_OWM_INTEN_ETBE (1 << 2)
+#define ASIC3_OWM_INTEN_ETMT (1 << 3)
+#define ASIC3_OWM_INTEN_ERBF (1 << 4)
+
+#define ASIC3_OWM_CLKDIV_PRE (3 << 0) /* two bits wide at bit 0 */
+#define ASIC3_OWM_CLKDIV_DIV (7 << 2) /* 3 bits wide at bit 2 */
+
+
+/*****************************************************************************
+ * The SD configuration registers are at a completely different location
+ * in memory. They are divided into three sets of registers:
+ *
+ * SD_CONFIG Core configuration register
+ * SD_CTRL Control registers for SD operations
+ * SDIO_CTRL Control registers for SDIO operations
+ *
+ *****************************************************************************/
+#define ASIC3_SD_CONFIG_Base 0x0400 /* Assumes 32 bit addressing */
+
+#define ASIC3_SD_CONFIG_Command 0x08 /* R/W: Command */
+
+/* [0:8] SD Control Register Base Address */
+#define ASIC3_SD_CONFIG_Addr0 0x20
+
+/* [9:31] SD Control Register Base Address */
+#define ASIC3_SD_CONFIG_Addr1 0x24
+
+/* R/O: interrupt assigned to pin */
+#define ASIC3_SD_CONFIG_IntPin 0x78
+
+/*
+ * Set to 0x1f to clock SD controller, 0 otherwise.
+ * At 0x82 - Gated Clock Ctrl
+ */
+#define ASIC3_SD_CONFIG_ClkStop 0x80
+
+/* Control clock of SD controller */
+#define ASIC3_SD_CONFIG_ClockMode 0x84
+#define ASIC3_SD_CONFIG_SDHC_PinStatus 0x88 /* R/0: SD pins status */
+#define ASIC3_SD_CONFIG_SDHC_Power1 0x90 /* Power1 - manual pwr ctrl */
+
+/* auto power up after card inserted */
+#define ASIC3_SD_CONFIG_SDHC_Power2 0x92
+
+/* auto power down when card removed */
+#define ASIC3_SD_CONFIG_SDHC_Power3 0x94
+#define ASIC3_SD_CONFIG_SDHC_CardDetect 0x98
+#define ASIC3_SD_CONFIG_SDHC_Slot 0xA0 /* R/O: support slot number */
+#define ASIC3_SD_CONFIG_SDHC_ExtGateClk1 0x1E0 /* Not used */
+#define ASIC3_SD_CONFIG_SDHC_ExtGateClk2 0x1E2 /* Not used*/
+
+/* GPIO Output Reg. , at 0x1EA - GPIO Output Enable Reg. */
+#define ASIC3_SD_CONFIG_SDHC_GPIO_OutAndEnable 0x1E8
+#define ASIC3_SD_CONFIG_SDHC_GPIO_Status 0x1EC /* GPIO Status Reg. */
+
+/* Bit 1: double buffer/single buffer */
+#define ASIC3_SD_CONFIG_SDHC_ExtGateClk3 0x1F0
+
+/* Memory access enable (set to 1 to access SD Controller) */
+#define SD_CONFIG_COMMAND_MAE (1<<1)
+
+#define SD_CONFIG_CLK_ENABLE_ALL 0x1f
+
+#define SD_CONFIG_POWER1_PC_33V 0x0200 /* Set for 3.3 volts */
+#define SD_CONFIG_POWER1_PC_OFF 0x0000 /* Turn off power */
+
+ /* two bits - number of cycles for card detection */
+#define SD_CONFIG_CARDDETECTMODE_CLK ((x) & 0x3)
+
+
+#define ASIC3_SD_CTRL_Base 0x1000
+
+#define ASIC3_SD_CTRL_Cmd 0x00
+#define ASIC3_SD_CTRL_Arg0 0x08
+#define ASIC3_SD_CTRL_Arg1 0x0C
+#define ASIC3_SD_CTRL_StopInternal 0x10
+#define ASIC3_SD_CTRL_TransferSectorCount 0x14
+#define ASIC3_SD_CTRL_Response0 0x18
+#define ASIC3_SD_CTRL_Response1 0x1C
+#define ASIC3_SD_CTRL_Response2 0x20
+#define ASIC3_SD_CTRL_Response3 0x24
+#define ASIC3_SD_CTRL_Response4 0x28
+#define ASIC3_SD_CTRL_Response5 0x2C
+#define ASIC3_SD_CTRL_Response6 0x30
+#define ASIC3_SD_CTRL_Response7 0x34
+#define ASIC3_SD_CTRL_CardStatus 0x38
+#define ASIC3_SD_CTRL_BufferCtrl 0x3C
+#define ASIC3_SD_CTRL_IntMaskCard 0x40
+#define ASIC3_SD_CTRL_IntMaskBuffer 0x44
+#define ASIC3_SD_CTRL_CardClockCtrl 0x48
+#define ASIC3_SD_CTRL_MemCardXferDataLen 0x4C
+#define ASIC3_SD_CTRL_MemCardOptionSetup 0x50
+#define ASIC3_SD_CTRL_ErrorStatus0 0x58
+#define ASIC3_SD_CTRL_ErrorStatus1 0x5C
+#define ASIC3_SD_CTRL_DataPort 0x60
+#define ASIC3_SD_CTRL_TransactionCtrl 0x68
+#define ASIC3_SD_CTRL_SoftwareReset 0x1C0
+
+#define SD_CTRL_SOFTWARE_RESET_CLEAR (1<<0)
+
+#define SD_CTRL_TRANSACTIONCONTROL_SET (1<<8)
+
+#define SD_CTRL_CARDCLOCKCONTROL_FOR_SD_CARD (1<<15)
+#define SD_CTRL_CARDCLOCKCONTROL_ENABLE_CLOCK (1<<8)
+#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_512 (1<<7)
+#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_256 (1<<6)
+#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_128 (1<<5)
+#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_64 (1<<4)
+#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_32 (1<<3)
+#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_16 (1<<2)
+#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_8 (1<<1)
+#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_4 (1<<0)
+#define SD_CTRL_CARDCLOCKCONTROL_CLK_DIV_2 (0<<0)
+
+#define MEM_CARD_OPTION_REQUIRED 0x000e
+#define MEM_CARD_OPTION_DATA_RESPONSE_TIMEOUT(x) (((x) & 0x0f) << 4)
+#define MEM_CARD_OPTION_C2_MODULE_NOT_PRESENT (1<<14)
+#define MEM_CARD_OPTION_DATA_XFR_WIDTH_1 (1<<15)
+#define MEM_CARD_OPTION_DATA_XFR_WIDTH_4 0
+
+#define SD_CTRL_COMMAND_INDEX(x) ((x) & 0x3f)
+#define SD_CTRL_COMMAND_TYPE_CMD (0 << 6)
+#define SD_CTRL_COMMAND_TYPE_ACMD (1 << 6)
+#define SD_CTRL_COMMAND_TYPE_AUTHENTICATION (2 << 6)
+#define SD_CTRL_COMMAND_RESPONSE_TYPE_NORMAL (0 << 8)
+#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R1 (4 << 8)
+#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R1B (5 << 8)
+#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R2 (6 << 8)
+#define SD_CTRL_COMMAND_RESPONSE_TYPE_EXT_R3 (7 << 8)
+#define SD_CTRL_COMMAND_DATA_PRESENT (1 << 11)
+#define SD_CTRL_COMMAND_TRANSFER_READ (1 << 12)
+#define SD_CTRL_COMMAND_TRANSFER_WRITE (0 << 12)
+#define SD_CTRL_COMMAND_MULTI_BLOCK (1 << 13)
+#define SD_CTRL_COMMAND_SECURITY_CMD (1 << 14)
+
+#define SD_CTRL_STOP_INTERNAL_ISSSUE_CMD12 (1 << 0)
+#define SD_CTRL_STOP_INTERNAL_AUTO_ISSUE_CMD12 (1 << 8)
+
+#define SD_CTRL_CARDSTATUS_RESPONSE_END (1 << 0)
+#define SD_CTRL_CARDSTATUS_RW_END (1 << 2)
+#define SD_CTRL_CARDSTATUS_CARD_REMOVED_0 (1 << 3)
+#define SD_CTRL_CARDSTATUS_CARD_INSERTED_0 (1 << 4)
+#define SD_CTRL_CARDSTATUS_SIGNAL_STATE_PRESENT_0 (1 << 5)
+#define SD_CTRL_CARDSTATUS_WRITE_PROTECT (1 << 7)
+#define SD_CTRL_CARDSTATUS_CARD_REMOVED_3 (1 << 8)
+#define SD_CTRL_CARDSTATUS_CARD_INSERTED_3 (1 << 9)
+#define SD_CTRL_CARDSTATUS_SIGNAL_STATE_PRESENT_3 (1 << 10)
+
+#define SD_CTRL_BUFFERSTATUS_CMD_INDEX_ERROR (1 << 0)
+#define SD_CTRL_BUFFERSTATUS_CRC_ERROR (1 << 1)
+#define SD_CTRL_BUFFERSTATUS_STOP_BIT_END_ERROR (1 << 2)
+#define SD_CTRL_BUFFERSTATUS_DATA_TIMEOUT (1 << 3)
+#define SD_CTRL_BUFFERSTATUS_BUFFER_OVERFLOW (1 << 4)
+#define SD_CTRL_BUFFERSTATUS_BUFFER_UNDERFLOW (1 << 5)
+#define SD_CTRL_BUFFERSTATUS_CMD_TIMEOUT (1 << 6)
+#define SD_CTRL_BUFFERSTATUS_UNK7 (1 << 7)
+#define SD_CTRL_BUFFERSTATUS_BUFFER_READ_ENABLE (1 << 8)
+#define SD_CTRL_BUFFERSTATUS_BUFFER_WRITE_ENABLE (1 << 9)
+#define SD_CTRL_BUFFERSTATUS_ILLEGAL_FUNCTION (1 << 13)
+#define SD_CTRL_BUFFERSTATUS_CMD_BUSY (1 << 14)
+#define SD_CTRL_BUFFERSTATUS_ILLEGAL_ACCESS (1 << 15)
+
+#define SD_CTRL_INTMASKCARD_RESPONSE_END (1 << 0)
+#define SD_CTRL_INTMASKCARD_RW_END (1 << 2)
+#define SD_CTRL_INTMASKCARD_CARD_REMOVED_0 (1 << 3)
+#define SD_CTRL_INTMASKCARD_CARD_INSERTED_0 (1 << 4)
+#define SD_CTRL_INTMASKCARD_SIGNAL_STATE_PRESENT_0 (1 << 5)
+#define SD_CTRL_INTMASKCARD_UNK6 (1 << 6)
+#define SD_CTRL_INTMASKCARD_WRITE_PROTECT (1 << 7)
+#define SD_CTRL_INTMASKCARD_CARD_REMOVED_3 (1 << 8)
+#define SD_CTRL_INTMASKCARD_CARD_INSERTED_3 (1 << 9)
+#define SD_CTRL_INTMASKCARD_SIGNAL_STATE_PRESENT_3 (1 << 10)
+
+#define SD_CTRL_INTMASKBUFFER_CMD_INDEX_ERROR (1 << 0)
+#define SD_CTRL_INTMASKBUFFER_CRC_ERROR (1 << 1)
+#define SD_CTRL_INTMASKBUFFER_STOP_BIT_END_ERROR (1 << 2)
+#define SD_CTRL_INTMASKBUFFER_DATA_TIMEOUT (1 << 3)
+#define SD_CTRL_INTMASKBUFFER_BUFFER_OVERFLOW (1 << 4)
+#define SD_CTRL_INTMASKBUFFER_BUFFER_UNDERFLOW (1 << 5)
+#define SD_CTRL_INTMASKBUFFER_CMD_TIMEOUT (1 << 6)
+#define SD_CTRL_INTMASKBUFFER_UNK7 (1 << 7)
+#define SD_CTRL_INTMASKBUFFER_BUFFER_READ_ENABLE (1 << 8)
+#define SD_CTRL_INTMASKBUFFER_BUFFER_WRITE_ENABLE (1 << 9)
+#define SD_CTRL_INTMASKBUFFER_ILLEGAL_FUNCTION (1 << 13)
+#define SD_CTRL_INTMASKBUFFER_CMD_BUSY (1 << 14)
+#define SD_CTRL_INTMASKBUFFER_ILLEGAL_ACCESS (1 << 15)
+
+#define SD_CTRL_DETAIL0_RESPONSE_CMD_ERROR (1 << 0)
+#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_RESPONSE_NON_CMD12 (1 << 2)
+#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_RESPONSE_CMD12 (1 << 3)
+#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_READ_DATA (1 << 4)
+#define SD_CTRL_DETAIL0_END_BIT_ERROR_FOR_WRITE_CRC_STATUS (1 << 5)
+#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_RESPONSE_NON_CMD12 (1 << 8)
+#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_RESPONSE_CMD12 (1 << 9)
+#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_READ_DATA (1 << 10)
+#define SD_CTRL_DETAIL0_CRC_ERROR_FOR_WRITE_CMD (1 << 11)
+
+#define SD_CTRL_DETAIL1_NO_CMD_RESPONSE (1 << 0)
+#define SD_CTRL_DETAIL1_TIMEOUT_READ_DATA (1 << 4)
+#define SD_CTRL_DETAIL1_TIMEOUT_CRS_STATUS (1 << 5)
+#define SD_CTRL_DETAIL1_TIMEOUT_CRC_BUSY (1 << 6)
+
+#define ASIC3_SDIO_CTRL_Base 0x1200
+
+#define ASIC3_SDIO_CTRL_Cmd 0x00
+#define ASIC3_SDIO_CTRL_CardPortSel 0x04
+#define ASIC3_SDIO_CTRL_Arg0 0x08
+#define ASIC3_SDIO_CTRL_Arg1 0x0C
+#define ASIC3_SDIO_CTRL_TransferBlockCount 0x14
+#define ASIC3_SDIO_CTRL_Response0 0x18
+#define ASIC3_SDIO_CTRL_Response1 0x1C
+#define ASIC3_SDIO_CTRL_Response2 0x20
+#define ASIC3_SDIO_CTRL_Response3 0x24
+#define ASIC3_SDIO_CTRL_Response4 0x28
+#define ASIC3_SDIO_CTRL_Response5 0x2C
+#define ASIC3_SDIO_CTRL_Response6 0x30
+#define ASIC3_SDIO_CTRL_Response7 0x34
+#define ASIC3_SDIO_CTRL_CardStatus 0x38
+#define ASIC3_SDIO_CTRL_BufferCtrl 0x3C
+#define ASIC3_SDIO_CTRL_IntMaskCard 0x40
+#define ASIC3_SDIO_CTRL_IntMaskBuffer 0x44
+#define ASIC3_SDIO_CTRL_CardXferDataLen 0x4C
+#define ASIC3_SDIO_CTRL_CardOptionSetup 0x50
+#define ASIC3_SDIO_CTRL_ErrorStatus0 0x54
+#define ASIC3_SDIO_CTRL_ErrorStatus1 0x58
+#define ASIC3_SDIO_CTRL_DataPort 0x60
+#define ASIC3_SDIO_CTRL_TransactionCtrl 0x68
+#define ASIC3_SDIO_CTRL_CardIntCtrl 0x6C
+#define ASIC3_SDIO_CTRL_ClocknWaitCtrl 0x70
+#define ASIC3_SDIO_CTRL_HostInformation 0x74
+#define ASIC3_SDIO_CTRL_ErrorCtrl 0x78
+#define ASIC3_SDIO_CTRL_LEDCtrl 0x7C
+#define ASIC3_SDIO_CTRL_SoftwareReset 0x1C0
+
+#define ASIC3_MAP_SIZE 0x2000
+
+#endif /* __ASIC3_H__ */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index dff9ea32606a..24b30b9b4f8a 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -43,7 +43,15 @@ struct miscdevice {
};
extern int misc_register(struct miscdevice * misc);
-extern int misc_deregister(struct miscdevice * misc);
+extern int __misc_deregister(struct miscdevice *misc, bool suspended);
+static inline int misc_deregister(struct miscdevice *misc)
+{
+ return __misc_deregister(misc, false);
+}
+static inline int misc_deregister_suspended(struct miscdevice *misc)
+{
+ return __misc_deregister(misc, true);
+}
#define MODULE_ALIAS_MISCDEV(minor) \
MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1bba6789a50a..89d7c691b93a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -227,10 +227,22 @@ static inline int put_page_testzero(struct page *page)
*/
static inline int get_page_unless_zero(struct page *page)
{
- VM_BUG_ON(PageCompound(page));
+ VM_BUG_ON(PageTail(page));
return atomic_inc_not_zero(&page->_count);
}
+/* Support for virtually mapped pages */
+struct page *vmalloc_to_page(const void *addr);
+unsigned long vmalloc_to_pfn(const void *addr);
+
+/* Determine if an address is within the vmalloc range */
+static inline int is_vmalloc_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+
+ return addr >= VMALLOC_START && addr < VMALLOC_END;
+}
+
static inline struct page *compound_head(struct page *page)
{
if (unlikely(PageTail(page)))
@@ -706,6 +718,28 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
struct vm_area_struct *start_vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted,
struct zap_details *);
+
+/**
+ * mm_walk - callbacks for walk_page_range
+ * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
+ * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
+ * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
+ * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
+ * @pte_hole: if set, called for each hole at all levels
+ *
+ * (see walk_page_range for more details)
+ */
+struct mm_walk {
+ int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, void *);
+ int (*pud_entry)(pud_t *, unsigned long, unsigned long, void *);
+ int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, void *);
+ int (*pte_entry)(pte_t *, unsigned long, unsigned long, void *);
+ int (*pte_hole)(unsigned long, unsigned long, void *);
+};
+
+int walk_page_range(const struct mm_struct *, unsigned long addr,
+ unsigned long end, const struct mm_walk *walk,
+ void *private);
void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
unsigned long end, unsigned long floor, unsigned long ceiling);
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
@@ -1089,8 +1123,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
pgprot_t vm_get_page_prot(unsigned long vm_flags);
struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
-struct page *vmalloc_to_page(void *addr);
-unsigned long vmalloc_to_pfn(void *addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
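
Besides publishing is_vmalloc_addr() and constifying vmalloc_to_page()/vmalloc_to_pfn(), the mm.h hunk adds the mm_walk callback table and walk_page_range(). A minimal sketch of a pte-level walker that counts present entries in a range; locking (mmap_sem, pte locks) is deliberately omitted here.

        /* Hedged sketch: count present ptes in [start, end) of an mm. */
        #include <linux/mm.h>

        static int count_pte(pte_t *pte, unsigned long addr, unsigned long end,
                             void *private)
        {
                unsigned long *count = private;

                if (pte_present(*pte))
                        (*count)++;
                return 0;
        }

        static const struct mm_walk count_walk = {
                .pte_entry = count_pte,
        };

        static unsigned long example_count_present(struct mm_struct *mm,
                                                   unsigned long start,
                                                   unsigned long end)
        {
                unsigned long count = 0;

                walk_page_range(mm, start, end, &count_walk, &count);
                return count;
        }
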
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index f4c03e0b355e..bfee0bd1d435 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -64,7 +64,10 @@ struct page {
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
spinlock_t ptl;
#endif
- struct kmem_cache *slab; /* SLUB: Pointer to slab */
+ struct {
+ struct kmem_cache *slab; /* SLUB: Pointer to slab */
+ void *end; /* SLUB: end marker */
+ };
struct page *first_page; /* Compound tail pages */
};
union {
@@ -88,6 +91,9 @@ struct page {
void *virtual; /* Kernel virtual address (NULL if
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
+#ifdef CONFIG_CGROUP_MEM_CONT
+ unsigned long page_cgroup;
+#endif
};
/*
@@ -219,6 +225,9 @@ struct mm_struct {
/* aio bits */
rwlock_t ioctx_list_lock;
struct kioctx *ioctx_list;
+#ifdef CONFIG_CGROUP_MEM_CONT
+ struct mem_cgroup *mem_cgroup;
+#endif
};
#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4c4522a51a3b..8d8d1977736e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -113,7 +113,7 @@ struct per_cpu_pages {
};
struct per_cpu_pageset {
- struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */
+ struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
s8 expire;
#endif
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index e9fddb42f26c..139d49d2f078 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -343,7 +343,8 @@ struct sdio_device_id {
__u8 class; /* Standard interface or SDIO_ANY_ID */
__u16 vendor; /* Vendor or SDIO_ANY_ID */
__u16 device; /* Device ID or SDIO_ANY_ID */
- kernel_ulong_t driver_data; /* Data private to the driver */
+ kernel_ulong_t driver_data /* Data private to the driver */
+ __attribute__((aligned(sizeof(kernel_ulong_t))));
};
/* SSB core, see drivers/ssb/ */
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index e17c5343cf51..b0ddf4b25862 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -98,6 +98,18 @@ static inline int cfi_interleave_supported(int i)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)
+
+/* Device Interface Code Assignments from the "Common Flash Memory Interface
+ * Publication 100" dated December 1, 2001.
+ */
+#define CFI_INTERFACE_X8_ASYNC 0x0000
+#define CFI_INTERFACE_X16_ASYNC 0x0001
+#define CFI_INTERFACE_X8_BY_X16_ASYNC 0x0002
+#define CFI_INTERFACE_X32_ASYNC 0x0003
+#define CFI_INTERFACE_X16_BY_X32_ASYNC 0x0005
+#define CFI_INTERFACE_NOT_ALLOWED 0xffff
+
+
/* NB: We keep these structures in memory in HOST byteorder, except
* where individually noted.
*/
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 783fc983417c..0a13bb35f044 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -152,6 +152,15 @@ struct mtd_info {
int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
+ /* In blackbox flight recorder like scenarios we want to make successful
+ writes in interrupt context. panic_write() is only intended to be
called when it's known the kernel is about to panic and we need the
+ write to succeed. Since the kernel is not going to be running for much
+ longer, this function can break locks and delay to ensure the write
+ succeeds (but not sleep). */
+
+ int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
+
int (*read_oob) (struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops);
int (*write_oob) (struct mtd_info *mtd, loff_t to,
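
A hedged sketch of an mtdoops-style caller that prefers panic_write() when the driver provides it and otherwise uses the normal, possibly sleeping, write path; the buffer arguments are placeholders.

        #include <linux/mtd/mtd.h>

        static int example_write_oops(struct mtd_info *mtd, loff_t off,
                                      const u_char *oops_buf, size_t oops_len)
        {
                size_t retlen;

                if (mtd->panic_write)
                        /* Crash path: may busy-wait and break locks, never sleeps */
                        return mtd->panic_write(mtd, off, oops_len, &retlen, oops_buf);

                return mtd->write(mtd, off, oops_len, &retlen, oops_buf);
        }
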
diff --git a/include/linux/mtd/mtdram.h b/include/linux/mtd/mtdram.h
new file mode 100644
index 000000000000..04fdc07b7353
--- /dev/null
+++ b/include/linux/mtd/mtdram.h
@@ -0,0 +1,8 @@
+#ifndef __MTD_MTDRAM_H__
+#define __MTD_MTDRAM_H__
+
+#include <linux/mtd/mtd.h>
+int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
+ unsigned long size, char *name);
+
+#endif /* __MTD_MTDRAM_H__ */
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index c46161f4eee3..d1b310c92eb4 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -67,6 +67,7 @@
/*
* Device ID Register F001h (R)
*/
+#define ONENAND_DEVICE_DENSITY_MASK (0xf)
#define ONENAND_DEVICE_DENSITY_SHIFT (4)
#define ONENAND_DEVICE_IS_DDP (1 << 3)
#define ONENAND_DEVICE_IS_DEMUX (1 << 2)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index da6b3d6f12a7..7c37d7e55abc 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -71,5 +71,12 @@ extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
#define put_partition_parser(p) do { module_put((p)->owner); } while(0)
-#endif
+struct device;
+struct device_node;
+
+int __devinit of_mtd_parse_partitions(struct device *dev,
+ struct mtd_info *mtd,
+ struct device_node *node,
+ struct mtd_partition **pparts);
+#endif
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 3d967b6b120a..f71201d0f3e7 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -26,23 +26,6 @@
#include <mtd/ubi-user.h>
/*
- * UBI data type hint constants.
- *
- * UBI_LONGTERM: long-term data
- * UBI_SHORTTERM: short-term data
- * UBI_UNKNOWN: data persistence is unknown
- *
- * These constants are used when data is written to UBI volumes in order to
- * help the UBI wear-leveling unit to find more appropriate physical
- * eraseblocks.
- */
-enum {
- UBI_LONGTERM = 1,
- UBI_SHORTTERM,
- UBI_UNKNOWN
-};
-
-/*
* enum ubi_open_mode - UBI volume open mode constants.
*
* UBI_READONLY: read-only mode
@@ -167,6 +150,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
int len, int dtype);
int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum);
int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype);
int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
/*
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index cdb3e9b8db54..c4355076d1a5 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -132,10 +132,12 @@ enum nubus_drhw {
NUBUS_DRHW_RDIUS_DCGX = 0x027C, /* Radius DirectColor/GX */
NUBUS_DRHW_RDIUS_PC8 = 0x0291, /* Radius PrecisionColor 8 */
NUBUS_DRHW_LAPIS_PCS8 = 0x0292, /* Lapis ProColorServer 8 */
- NUBUS_DRHW_RASTER_24LXI = 0x02A0, /* RasterOps 8/24 XLi */
+ NUBUS_DRHW_RASTER_24XLI = 0x02A0, /* RasterOps 8/24 XLi */
NUBUS_DRHW_RASTER_PBPGT = 0x02A5, /* RasterOps PaintBoard Prism GT */
NUBUS_DRHW_EMACH_FSX = 0x02AE, /* E-Machines Futura SX */
+ NUBUS_DRHW_RASTER_24XLTV = 0x02B7, /* RasterOps 24XLTV */
NUBUS_DRHW_SMAC_THUND24 = 0x02CB, /* SuperMac Thunder/24 */
+ NUBUS_DRHW_SMAC_THUNDLGHT = 0x03D9, /* SuperMac ThunderLight */
NUBUS_DRHW_RDIUS_PC24XP = 0x0406, /* Radius PrecisionColor 24Xp */
NUBUS_DRHW_RDIUS_PC24X = 0x040A, /* Radius PrecisionColor 24X */
NUBUS_DRHW_RDIUS_PC8XJ = 0x040B, /* Radius PrecisionColor 8XJ */
diff --git a/include/linux/of.h b/include/linux/of.h
index b5f33efcb8e2..6981016dcc25 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -50,6 +50,7 @@ extern struct device_node *of_find_matching_node(struct device_node *from,
extern struct device_node *of_find_node_by_path(const char *path);
extern struct device_node *of_find_node_by_phandle(phandle handle);
extern struct device_node *of_get_parent(const struct device_node *node);
+extern struct device_node *of_get_next_parent(struct device_node *node);
extern struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev);
#define for_each_child_of_node(parent, child) \
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 209d3a47f50f..bbad43fb8181 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -131,16 +131,52 @@
#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
-#define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags)
+static inline int PageUptodate(struct page *page)
+{
+ int ret = test_bit(PG_uptodate, &(page)->flags);
+
+ /*
+ * Must ensure that the data we read out of the page is loaded
+ * _after_ we've loaded page->flags to check for PageUptodate.
+ * We can skip the barrier if the page is not uptodate, because
+ * we wouldn't be reading anything from it.
+ *
+ * See SetPageUptodate() for the other side of the story.
+ */
+ if (ret)
+ smp_rmb();
+
+ return ret;
+}
+
+static inline void __SetPageUptodate(struct page *page)
+{
+ smp_wmb();
+ __set_bit(PG_uptodate, &(page)->flags);
#ifdef CONFIG_S390
+ page_clear_dirty(page);
+#endif
+}
+
static inline void SetPageUptodate(struct page *page)
{
+#ifdef CONFIG_S390
if (!test_and_set_bit(PG_uptodate, &page->flags))
page_clear_dirty(page);
-}
#else
-#define SetPageUptodate(page) set_bit(PG_uptodate, &(page)->flags)
+ /*
+ * Memory barrier must be issued before setting the PG_uptodate bit,
+ * so that all previous stores issued in order to bring the page
+ * uptodate are actually visible before PageUptodate becomes true.
+ *
+ * s390 doesn't need an explicit smp_wmb here because the test and
+ * set bit already provides full barriers.
+ */
+ smp_wmb();
+ set_bit(PG_uptodate, &(page)->flags);
#endif
+}
+
#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
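
The new inline PageUptodate()/SetPageUptodate() pair encode an ordering contract: the stores that fill the page must be visible before PG_uptodate is, and a reader must observe PG_uptodate before loading the data. A hedged sketch of both sides; fill_page() and consume() stand in for real I/O completion and read paths.

        #include <linux/highmem.h>
        #include <linux/mm.h>
        #include <linux/page-flags.h>

        extern void fill_page(void *kaddr);             /* hypothetical */
        extern void consume(const void *kaddr);         /* hypothetical */

        static void writer_side(struct page *page)
        {
                void *kaddr = kmap_atomic(page, KM_USER0);

                fill_page(kaddr);                       /* stores the page contents */
                kunmap_atomic(kaddr, KM_USER0);

                /* smp_wmb() inside SetPageUptodate() orders the stores above
                 * before PG_uptodate becomes visible to other CPUs. */
                SetPageUptodate(page);
        }

        static int reader_side(struct page *page)
        {
                if (!PageUptodate(page))                /* smp_rmb() taken when set */
                        return -EIO;

                consume(page_address(page));            /* safe to read the data now */
                return 0;
        }
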
diff --git a/include/linux/pci.h b/include/linux/pci.h
index cee75c0ff6e7..7215d3b1f4af 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -159,6 +159,8 @@ struct pci_dev {
this if your device has broken DMA
or supports 64-bit transfers. */
+ struct device_dma_parameters dma_parms;
+
pci_power_t current_state; /* Current operating state. In ACPI-speak,
this is D0-D3, D0 being fully functional,
and D3 being off. */
@@ -580,6 +582,8 @@ void pci_intx(struct pci_dev *dev, int enable);
void pci_msi_off(struct pci_dev *dev);
int pci_set_dma_mask(struct pci_dev *dev, u64 mask);
int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask);
+int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
+int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
int pcix_get_max_mmrbc(struct pci_dev *dev);
int pcix_get_mmrbc(struct pci_dev *dev);
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
@@ -822,6 +826,18 @@ static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO;
}
+static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
+ unsigned int size)
+{
+ return -EIO;
+}
+
+static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
+ unsigned long mask)
+{
+ return -EIO;
+}
+
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{
return -EBUSY;
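
pci_set_dma_max_seg_size() and pci_set_dma_seg_boundary() let a driver describe its scatter-gather limits through the new dma_parms, so upper layers do not build segments the hardware cannot handle; the !CONFIG_PCI stubs above simply return -EIO. A hedged probe-time sketch with arbitrary example limits.

        #include <linux/pci.h>
        #include <linux/dma-mapping.h>

        static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        {
                int err;

                err = pci_enable_device(pdev);
                if (err)
                        return err;

                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err)
                        goto disable;

                /* Example hardware: segments of at most 64 KiB that must not
                 * cross a 4 GiB boundary. */
                err = pci_set_dma_max_seg_size(pdev, 65536);
                if (err)
                        goto disable;
                err = pci_set_dma_seg_boundary(pdev, 0xffffffffUL);
                if (err)
                        goto disable;

                return 0;

        disable:
                pci_disable_device(pdev);
                return err;
        }
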
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 41f6f28690f6..df6dd79a0d3b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1765,6 +1765,7 @@
#define PCI_DEVICE_ID_QUATECH_DSC100 0x0020
#define PCI_DEVICE_ID_QUATECH_ESC100D 0x0050
#define PCI_DEVICE_ID_QUATECH_ESC100M 0x0060
+#define PCI_DEVICE_ID_QUATECH_SPPXP_100 0x0278
#define PCI_VENDOR_ID_SEALEVEL 0x135e
#define PCI_DEVICE_ID_SEALEVEL_U530 0x7101
@@ -2043,6 +2044,23 @@
#define PCI_VENDOR_ID_QUICKNET 0x15e2
#define PCI_DEVICE_ID_QUICKNET_XJ 0x0500
+/*
+ * ADDI-DATA GmbH communication cards <info@addi-data.com>
+ */
+#define PCI_VENDOR_ID_ADDIDATA_OLD 0x10E8
+#define PCI_VENDOR_ID_ADDIDATA 0x15B8
+#define PCI_DEVICE_ID_ADDIDATA_APCI7500 0x7000
+#define PCI_DEVICE_ID_ADDIDATA_APCI7420 0x7001
+#define PCI_DEVICE_ID_ADDIDATA_APCI7300 0x7002
+#define PCI_DEVICE_ID_ADDIDATA_APCI7800 0x818E
+#define PCI_DEVICE_ID_ADDIDATA_APCI7500_2 0x7009
+#define PCI_DEVICE_ID_ADDIDATA_APCI7420_2 0x700A
+#define PCI_DEVICE_ID_ADDIDATA_APCI7300_2 0x700B
+#define PCI_DEVICE_ID_ADDIDATA_APCI7500_3 0x700C
+#define PCI_DEVICE_ID_ADDIDATA_APCI7420_3 0x700D
+#define PCI_DEVICE_ID_ADDIDATA_APCI7300_3 0x700E
+#define PCI_DEVICE_ID_ADDIDATA_APCI7800_3 0x700F
+
#define PCI_VENDOR_ID_PDC 0x15e9
#define PCI_VENDOR_ID_FARSITE 0x1619
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 50faa0ea28e4..1ac969724bb2 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -54,7 +54,7 @@
#ifdef CONFIG_SMP
struct percpu_data {
- void *ptrs[NR_CPUS];
+ void *ptrs[1];
};
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index 1c1dba9ea5fb..28dfc61cf79e 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -348,6 +348,7 @@ enum
FLOW_KEY_RTCLASSID,
FLOW_KEY_SKUID,
FLOW_KEY_SKGID,
+ FLOW_KEY_VLAN_TAG,
__FLOW_KEY_MAX,
};
@@ -459,7 +460,8 @@ enum
#define TCF_EM_U32 3
#define TCF_EM_META 4
#define TCF_EM_TEXT 5
-#define TCF_EM_MAX 5
+#define TCF_EM_VLAN 6
+#define TCF_EM_MAX 6
enum
{
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
new file mode 100644
index 000000000000..2e4e97bd19f7
--- /dev/null
+++ b/include/linux/pm_qos_params.h
@@ -0,0 +1,25 @@
+/* interface for the pm_qos_power infrastructure of the linux kernel.
+ *
+ * Mark Gross
+ */
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/miscdevice.h>
+
+#define PM_QOS_RESERVED 0
+#define PM_QOS_CPU_DMA_LATENCY 1
+#define PM_QOS_NETWORK_LATENCY 2
+#define PM_QOS_NETWORK_THROUGHPUT 3
+
+#define PM_QOS_NUM_CLASSES 4
+#define PM_QOS_DEFAULT_VALUE -1
+
+int pm_qos_add_requirement(int qos, char *name, s32 value);
+int pm_qos_update_requirement(int qos, char *name, s32 new_value);
+void pm_qos_remove_requirement(int qos, char *name);
+
+int pm_qos_requirement(int qos);
+
+int pm_qos_add_notifier(int qos, struct notifier_block *notifier);
+int pm_qos_remove_notifier(int qos, struct notifier_block *notifier);
+
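
This interface replaces the removed include/linux/latency.h: requirements are keyed by QoS class plus a caller-chosen name, and the aggregate target can also be watched through notifiers. A minimal sketch of a driver holding a CPU DMA latency requirement while it is active; the 50/20 microsecond values are arbitrary.

        #include <linux/pm_qos_params.h>

        static int example_start_streaming(void)
        {
                /* Keep CPU DMA latency at or below 50 usec while we run */
                return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv", 50);
        }

        static void example_adjust(int heavier_load)
        {
                pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv",
                                          heavier_load ? 20 : 50);
        }

        static void example_stop_streaming(void)
        {
                pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv");
        }
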
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index b9339d8b95bc..cd6332b88829 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -258,6 +258,7 @@ extern struct pnp_protocol isapnp_protocol;
#else
#define pnp_device_is_isapnp(dev) 0
#endif
+extern struct mutex pnp_res_mutex;
#ifdef CONFIG_PNPBIOS
extern struct pnp_protocol pnpbios_protocol;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 5cbf3e371012..68ed19ccf1f7 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -94,6 +94,7 @@ enum power_supply_property {
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_SERIAL_NUMBER,
};
enum power_supply_type {
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index e2eff9079fe9..3800639775ae 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -63,4 +63,8 @@
#define PR_GET_SECCOMP 21
#define PR_SET_SECCOMP 22
+/* Get/set the capability bounding set */
+#define PR_CAPBSET_READ 23
+#define PR_CAPBSET_DROP 24
+
#endif /* _LINUX_PRCTL_H */
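
PR_CAPBSET_READ and PR_CAPBSET_DROP expose the per-task capability bounding set (the cap_bset field added to task_struct further down) through prctl(2). A hedged userspace sketch that checks for and then drops CAP_SYS_ADMIN; the fallback defines only cover older headers.

        #include <stdio.h>
        #include <sys/prctl.h>
        #include <linux/capability.h>

        #ifndef PR_CAPBSET_READ
        #define PR_CAPBSET_READ 23
        #define PR_CAPBSET_DROP 24
        #endif

        int main(void)
        {
                /* READ returns 1 if the capability is still in the bounding set */
                if (prctl(PR_CAPBSET_READ, CAP_SYS_ADMIN, 0, 0, 0) == 1)
                        if (prctl(PR_CAPBSET_DROP, CAP_SYS_ADMIN, 0, 0, 0) != 0)
                                perror("PR_CAPBSET_DROP");

                printf("CAP_SYS_ADMIN in bounding set: %d\n",
                       (int)prctl(PR_CAPBSET_READ, CAP_SYS_ADMIN, 0, 0, 0));
                return 0;
        }
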
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 8f92546b403d..e43551516831 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -19,6 +19,8 @@ struct completion;
*/
#define FIRST_PROCESS_ENTRY 256
+/* Worst case buffer size needed for holding an integer. */
+#define PROC_NUMBUF 13
/*
* We always define these enumerators
@@ -117,7 +119,6 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
unsigned long task_vsize(struct mm_struct *);
int task_statm(struct mm_struct *, int *, int *, int *, int *);
char *task_mem(struct mm_struct *, char *);
-void clear_refs_smap(struct mm_struct *mm);
struct proc_dir_entry *de_get(struct proc_dir_entry *de);
void de_put(struct proc_dir_entry *de);
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 515bff053de8..6ab80714a916 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -204,6 +204,41 @@ static inline void user_enable_block_step(struct task_struct *task)
}
#endif /* arch_has_block_step */
+#ifndef arch_ptrace_stop_needed
+/**
+ * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
+ * @code: current->exit_code value ptrace will stop with
+ * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
+ *
+ * This is called with the siglock held, to decide whether or not it's
+ * necessary to release the siglock and call arch_ptrace_stop() with the
+ * same @code and @info arguments. It can be defined to a constant if
+ * arch_ptrace_stop() is never required, or always is. On machines where
+ * this makes sense, it should be defined to a quick test to optimize out
+ * calling arch_ptrace_stop() when it would be superfluous. For example,
+ * if the thread has not been back to user mode since the last stop, the
+ * thread state might indicate that nothing needs to be done.
+ */
+#define arch_ptrace_stop_needed(code, info) (0)
+#endif
+
+#ifndef arch_ptrace_stop
+/**
+ * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
+ * @code: current->exit_code value ptrace will stop with
+ * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
+ *
+ * This is called with no locks held when arch_ptrace_stop_needed() has
+ * just returned nonzero. It is allowed to block, e.g. for user memory
+ * access. The arch can have machine-specific work to be done before
+ * ptrace stops. On ia64, register backing store gets written back to user
+ * memory here. Since this can be costly (requires dropping the siglock),
+ * we only do it when the arch requires it for this particular stop, as
+ * indicated by arch_ptrace_stop_needed().
+ */
+#define arch_ptrace_stop(code, info) do { } while (0)
+#endif
+
#endif
#endif
diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h
index 19bc9b8b6191..34a196ee7941 100644
--- a/include/linux/qnx4_fs.h
+++ b/include/linux/qnx4_fs.h
@@ -110,6 +110,7 @@ struct qnx4_inode_info {
struct inode vfs_inode;
};
+extern struct inode *qnx4_iget(struct super_block *, unsigned long);
extern struct dentry *qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd);
extern unsigned long qnx4_count_free_blocks(struct super_block *sb);
extern unsigned long qnx4_block_map(struct inode *inode, long iblock);
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
index 306a1d1a5af0..e51b531cd0b2 100644
--- a/include/linux/raid/bitmap.h
+++ b/include/linux/raid/bitmap.h
@@ -244,6 +244,8 @@ struct bitmap {
*/
unsigned long daemon_lastrun; /* jiffies of last run */
unsigned long daemon_sleep; /* how many seconds between updates? */
+ unsigned long last_end_sync; /* when we lasted called end_sync to
+ * update bitmap with resync progress */
atomic_t pending_writes; /* pending writes to the bitmap file */
wait_queue_head_t write_wait;
@@ -275,6 +277,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded);
void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted);
void bitmap_close_sync(struct bitmap *bitmap);
+void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
void bitmap_unplug(struct bitmap *bitmap);
void bitmap_daemon_work(struct bitmap *bitmap);
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index dcb729244f47..85a068bab625 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -81,6 +81,8 @@ struct mdk_rdev_s
#define In_sync 2 /* device is in_sync with rest of array */
#define WriteMostly 4 /* Avoid reading if at all possible */
#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */
+#define AllReserved 6 /* If whole device is reserved for
+ * one array */
int desc_nr; /* descriptor index in the superblock */
int raid_disk; /* role of device in array */
@@ -130,6 +132,9 @@ struct mddev_s
minor_version,
patch_version;
int persistent;
+ int external; /* metadata is
+ * managed externally */
+ char metadata_type[17]; /* externally set*/
int chunk_size;
time_t ctime, utime;
int level, layout;
@@ -216,6 +221,8 @@ struct mddev_s
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
sector_t recovery_cp;
+ sector_t resync_max; /* resync should pause
+ * when it gets here */
spinlock_t write_lock;
wait_queue_head_t sb_wait; /* for waiting on superblock updates */
@@ -306,23 +313,17 @@ static inline char * mdname (mddev_t * mddev)
* iterates through some rdev ringlist. It's safe to remove the
* current 'rdev'. Dont touch 'tmp' though.
*/
-#define ITERATE_RDEV_GENERIC(head,rdev,tmp) \
+#define rdev_for_each_list(rdev, tmp, list) \
\
- for ((tmp) = (head).next; \
+ for ((tmp) = (list).next; \
(rdev) = (list_entry((tmp), mdk_rdev_t, same_set)), \
- (tmp) = (tmp)->next, (tmp)->prev != &(head) \
+ (tmp) = (tmp)->next, (tmp)->prev != &(list) \
; )
/*
* iterates through the 'same array disks' ringlist
*/
-#define ITERATE_RDEV(mddev,rdev,tmp) \
- ITERATE_RDEV_GENERIC((mddev)->disks,rdev,tmp)
-
-/*
- * Iterates through 'pending RAID disks'
- */
-#define ITERATE_RDEV_PENDING(rdev,tmp) \
- ITERATE_RDEV_GENERIC(pending_raid_disks,rdev,tmp)
+#define rdev_for_each(rdev, tmp, mddev) \
+ rdev_for_each_list(rdev, tmp, (mddev)->disks)
typedef struct mdk_thread_s {
void (*run) (mddev_t *mddev);
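
The ITERATE_RDEV* macros become list_for_each-style rdev_for_each()/rdev_for_each_list(); it is still safe to remove the current rdev inside the loop. A small illustrative sketch:

        #include <linux/raid/md.h>

        static int example_count_insync(mddev_t *mddev)
        {
                mdk_rdev_t *rdev;
                struct list_head *tmp;
                int cnt = 0;

                rdev_for_each(rdev, tmp, mddev)
                        if (test_bit(In_sync, &rdev->flags))
                                cnt++;
                return cnt;
        }
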
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d32c14de270e..37a642c54871 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -174,10 +174,13 @@ struct rcu_head {
* code.
*/
-#define rcu_assign_pointer(p, v) ({ \
- smp_wmb(); \
- (p) = (v); \
- })
+#define rcu_assign_pointer(p, v) \
+ ({ \
+ if (!__builtin_constant_p(v) || \
+ ((v) != NULL)) \
+ smp_wmb(); \
+ (p) = (v); \
+ })
/**
* synchronize_sched - block until all CPUs have exited any non-preemptive
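
rcu_assign_pointer() now emits the smp_wmb() only when something is actually being published: assigning a compile-time constant NULL skips the barrier, since readers have no newly initialised structure to observe. A hedged sketch of both cases; struct foo and gp are illustrative.

        #include <linux/rcupdate.h>
        #include <linux/slab.h>

        struct foo {
                int a;
        };

        static struct foo *gp;

        static void publish(int a)
        {
                struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

                if (!p)
                        return;
                p->a = a;
                /* Initialisation above must be visible first: smp_wmb() is emitted */
                rcu_assign_pointer(gp, p);
        }

        static void retire(void)
        {
                struct foo *p = gp;

                /* Constant NULL: nothing new is published, the barrier is skipped */
                rcu_assign_pointer(gp, NULL);
                synchronize_rcu();
                kfree(p);
        }
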
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 85ea63f462af..b93b541cf111 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -59,8 +59,6 @@ extern void machine_crash_shutdown(struct pt_regs *);
* Architecture independent implemenations of sys_reboot commands.
*/
-extern void kernel_shutdown_prepare(enum system_states state);
-
extern void kernel_restart(char *cmd);
extern void kernel_halt(void);
extern void kernel_power_off(void);
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
new file mode 100644
index 000000000000..61363ce896d5
--- /dev/null
+++ b/include/linux/res_counter.h
@@ -0,0 +1,127 @@
+#ifndef __RES_COUNTER_H__
+#define __RES_COUNTER_H__
+
+/*
+ * Resource Counters
+ * Contain common data types and routines for resource accounting
+ *
+ * Copyright 2007 OpenVZ SWsoft Inc
+ *
+ * Author: Pavel Emelianov <xemul@openvz.org>
+ *
+ */
+
+#include <linux/cgroup.h>
+
+/*
+ * The core object. the cgroup that wishes to account for some
+ * resource may include this counter into its structures and use
+ * the helpers described beyond
+ */
+
+struct res_counter {
+ /*
+ * the current resource consumption level
+ */
+ unsigned long long usage;
+ /*
+ * the limit that usage cannot exceed
+ */
+ unsigned long long limit;
+ /*
+ * the number of unsuccessful attempts to consume the resource
+ */
+ unsigned long long failcnt;
+ /*
+ * the lock to protect all of the above.
+ * the routines below consider this to be IRQ-safe
+ */
+ spinlock_t lock;
+};
+
+/*
+ * Helpers to interact with userspace
+ * res_counter_read/_write - put/get the specified fields from the
+ * res_counter struct to/from the user
+ *
+ * @counter: the counter in question
+ * @member: the field to work with (see RES_xxx below)
+ * @buf: the buffer to operate on,...
+ * @nbytes: its size...
+ * @pos: and the offset.
+ */
+
+ssize_t res_counter_read(struct res_counter *counter, int member,
+ const char __user *buf, size_t nbytes, loff_t *pos,
+ int (*read_strategy)(unsigned long long val, char *s));
+ssize_t res_counter_write(struct res_counter *counter, int member,
+ const char __user *buf, size_t nbytes, loff_t *pos,
+ int (*write_strategy)(char *buf, unsigned long long *val));
+
+/*
+ * the field descriptors. one for each member of res_counter
+ */
+
+enum {
+ RES_USAGE,
+ RES_LIMIT,
+ RES_FAILCNT,
+};
+
+/*
+ * helpers for accounting
+ */
+
+void res_counter_init(struct res_counter *counter);
+
+/*
+ * charge - try to consume more resource.
+ *
+ * @counter: the counter
+ * @val: the amount of the resource. each controller defines its own
+ * units, e.g. numbers, bytes, Kbytes, etc
+ *
+ * returns 0 on success and <0 if the counter->usage will exceed the
+ * counter->limit _locked call expects the counter->lock to be taken
+ */
+
+int res_counter_charge_locked(struct res_counter *counter, unsigned long val);
+int res_counter_charge(struct res_counter *counter, unsigned long val);
+
+/*
+ * uncharge - tell that some portion of the resource is released
+ *
+ * @counter: the counter
+ * @val: the amount of the resource
+ *
+ * these calls check for usage underflow and show a warning on the console
+ * _locked call expects the counter->lock to be taken
+ */
+
+void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
+void res_counter_uncharge(struct res_counter *counter, unsigned long val);
+
+static inline bool res_counter_limit_check_locked(struct res_counter *cnt)
+{
+ if (cnt->usage < cnt->limit)
+ return true;
+
+ return false;
+}
+
+/*
+ * Helper function to detect if the cgroup is within its limit or
+ * not. It's currently called from cgroup_rss_prepare()
+ */
+static inline bool res_counter_check_under_limit(struct res_counter *cnt)
+{
+ bool ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cnt->lock, flags);
+ ret = res_counter_limit_check_locked(cnt);
+ spin_unlock_irqrestore(&cnt->lock, flags);
+ return ret;
+}
+
+#endif
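
res_counter gives a controller a spinlock-protected usage/limit/failcnt triple plus charge/uncharge helpers; the memory controller above is its first user. A hedged sketch of a controller accounting in page-size units; struct my_cgroup is illustrative.

        #include <linux/res_counter.h>
        #include <linux/mm.h>

        struct my_cgroup {
                struct res_counter res;
        };

        static void my_cgroup_init(struct my_cgroup *cg)
        {
                res_counter_init(&cg->res);     /* zero usage; default limit */
        }

        static int my_cgroup_charge_page(struct my_cgroup *cg)
        {
                /* Fails with <0 (and bumps failcnt) once usage would pass limit */
                return res_counter_charge(&cg->res, PAGE_SIZE);
        }

        static void my_cgroup_uncharge_page(struct my_cgroup *cg)
        {
                res_counter_uncharge(&cg->res, PAGE_SIZE);
        }
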
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 97347f22fc20..1383692ac5bd 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -8,6 +8,7 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
+#include <linux/memcontrol.h>
/*
* The anon_vma heads a list of private "related" vmas, to scan if
@@ -86,7 +87,7 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
/*
* Called from mm/vmscan.c to handle paging out
*/
-int page_referenced(struct page *, int is_locked);
+int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
int try_to_unmap(struct page *, int ignore_refs);
/*
@@ -114,7 +115,7 @@ int page_mkclean(struct page *);
#define anon_vma_prepare(vma) (0)
#define anon_vma_link(vma) do {} while (0)
-#define page_referenced(page,l) TestClearPageReferenced(page)
+#define page_referenced(page,l,cnt) TestClearPageReferenced(page)
#define try_to_unmap(page, refs) SWAP_FAIL
static inline int page_mkclean(struct page *page)
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index b014f6b7fe29..b9e174079002 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -602,24 +602,12 @@ struct tcamsg
#include <linux/mutex.h>
-extern size_t rtattr_strlcpy(char *dest, const struct rtattr *rta, size_t size);
static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str)
{
int len = strlen(str) + 1;
return len > rta->rta_len || memcmp(RTA_DATA(rta), str, len);
}
-extern int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len);
-extern int __rtattr_parse_nested_compat(struct rtattr *tb[], int maxattr,
- struct rtattr *rta, int len);
-
-#define rtattr_parse_nested(tb, max, rta) \
- rtattr_parse((tb), (max), RTA_DATA((rta)), RTA_PAYLOAD((rta)))
-
-#define rtattr_parse_nested_compat(tb, max, rta, data, len) \
-({ data = RTA_PAYLOAD(rta) >= len ? RTA_DATA(rta) : NULL; \
- __rtattr_parse_nested_compat(tb, max, rta, len); })
-
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern int rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index af6947e69b40..8a4812c1c038 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -92,6 +92,7 @@ struct sched_param {
#include <asm/processor.h>
+struct mem_cgroup;
struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
@@ -555,6 +556,13 @@ struct signal_struct {
#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */
+/* If true, all threads except ->group_exit_task have pending SIGKILL */
+static inline int signal_group_exit(const struct signal_struct *sig)
+{
+ return (sig->flags & SIGNAL_GROUP_EXIT) ||
+ (sig->group_exit_task != NULL);
+}
+
/*
* Some day this will be a full-fledged user tracking system..
*/
@@ -803,7 +811,7 @@ static inline int above_background_load(void)
struct io_context; /* See blkdev.h */
#define NGROUPS_SMALL 32
-#define NGROUPS_PER_BLOCK ((int)(PAGE_SIZE / sizeof(gid_t)))
+#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
struct group_info {
int ngroups;
atomic_t usage;
@@ -1091,7 +1099,7 @@ struct task_struct {
uid_t uid,euid,suid,fsuid;
gid_t gid,egid,sgid,fsgid;
struct group_info *group_info;
- kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
+ kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
unsigned keep_capabilities:1;
struct user_struct *user;
#ifdef CONFIG_KEYS
@@ -1770,7 +1778,7 @@ extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned lon
struct task_struct *fork_idle(int);
extern void set_task_comm(struct task_struct *tsk, char *from);
-extern void get_task_comm(char *to, struct task_struct *tsk);
+extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void wait_task_inactive(struct task_struct * p);
@@ -2080,6 +2088,10 @@ static inline void migration_init(void)
}
#endif
+#ifndef TASK_SIZE_OF
+#define TASK_SIZE_OF(tsk) TASK_SIZE
+#endif
+
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/security.h b/include/linux/security.h
index d24974262dc6..fe52cdeab0a6 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -40,11 +40,6 @@
#define ROOTCONTEXT_MNT 0x04
#define DEFCONTEXT_MNT 0x08
-/*
- * Bounding set
- */
-extern kernel_cap_t cap_bset;
-
extern unsigned securebits;
struct ctl_table;
@@ -423,15 +418,12 @@ struct request_sock;
* identified by @name for @dentry.
* Return 0 if permission is granted.
* @inode_getsecurity:
- * Copy the extended attribute representation of the security label
- * associated with @name for @inode into @buffer. @buffer may be
- * NULL to request the size of the buffer required. @size indicates
- * the size of @buffer in bytes. Note that @name is the remainder
- * of the attribute name after the security. prefix has been removed.
- * @err is the return value from the preceding fs getxattr call,
- * and can be used by the security module to determine whether it
- * should try and canonicalize the attribute value.
- * Return number of bytes used/required on success.
+ * Retrieve a copy of the extended attribute representation of the
+ * security label associated with @name for @inode via @buffer. Note that
+ * @name is the remainder of the attribute name after the security prefix
+ * has been removed. @alloc is used to specify if the call should return a
+ * value via the buffer or just the value length. Return size of buffer on
+ * success.
* @inode_setsecurity:
* Set the security label associated with @name for @inode from the
* extended attribute value @value. @size indicates the size of the
@@ -1304,7 +1296,7 @@ struct security_operations {
int (*inode_removexattr) (struct dentry *dentry, char *name);
int (*inode_need_killpriv) (struct dentry *dentry);
int (*inode_killpriv) (struct dentry *dentry);
- int (*inode_getsecurity)(const struct inode *inode, const char *name, void *buffer, size_t size, int err);
+ int (*inode_getsecurity)(const struct inode *inode, const char *name, void **buffer, bool alloc);
int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size);
@@ -1565,7 +1557,7 @@ int security_inode_listxattr(struct dentry *dentry);
int security_inode_removexattr(struct dentry *dentry, char *name);
int security_inode_need_killpriv(struct dentry *dentry);
int security_inode_killpriv(struct dentry *dentry);
-int security_inode_getsecurity(const struct inode *inode, const char *name, void *buffer, size_t size, int err);
+int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc);
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
int security_file_permission(struct file *file, int mask);
@@ -1967,7 +1959,7 @@ static inline int security_inode_killpriv(struct dentry *dentry)
return cap_inode_killpriv(dentry);
}
-static inline int security_inode_getsecurity(const struct inode *inode, const char *name, void *buffer, size_t size, int err)
+static inline int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/serial167.h b/include/linux/serial167.h
index 71b6df2516a6..59c81b708562 100644
--- a/include/linux/serial167.h
+++ b/include/linux/serial167.h
@@ -37,7 +37,6 @@ struct cyclades_port {
int ignore_status_mask;
int close_delay;
int IER; /* Interrupt Enable Register */
- unsigned long event;
unsigned long last_active;
int count; /* # of fd on device */
int x_char; /* to be pushed out ASAP */
@@ -49,7 +48,6 @@ struct cyclades_port {
int xmit_cnt;
int default_threshold;
int default_timeout;
- struct work_struct tqueue;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
struct cyclades_monitor mon;
@@ -67,18 +65,6 @@ struct cyclades_port {
#define CYGETDEFTIMEOUT 0x435908
#define CYSETDEFTIMEOUT 0x435909
-/*
- * Events are used to schedule things to happen at timer-interrupt
- * time, instead of at cy interrupt time.
- */
-#define Cy_EVENT_READ_PROCESS 0
-#define Cy_EVENT_WRITE_WAKEUP 1
-#define Cy_EVENT_HANGUP 2
-#define Cy_EVENT_BREAK 3
-#define Cy_EVENT_OPEN_WAKEUP 4
-
-
-
#define CyMaxChipsPerCard 1
/**** cd2401 registers ****/
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index afe0f6d9b9bc..00b65c0a82ca 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -23,6 +23,7 @@ struct plat_serial8250_port {
resource_size_t mapbase; /* resource base */
unsigned int irq; /* interrupt number */
unsigned int uartclk; /* UART clock rate */
+ void *private_data;
unsigned char regshift; /* register shift */
unsigned char iotype; /* UPIO_* */
unsigned char hub6;
diff --git a/include/linux/shm.h b/include/linux/shm.h
index eeaed921a1dc..eca6235a46c0 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -3,7 +3,11 @@
#include <linux/ipc.h>
#include <linux/errno.h>
+#ifdef __KERNEL__
#include <asm/page.h>
+#else
+#include <unistd.h>
+#endif
/*
* SHMMAX, SHMMNI and SHMALL are upper limits are defaults which can
@@ -13,7 +17,11 @@
#define SHMMAX 0x2000000 /* max shared seg size (bytes) */
#define SHMMIN 1 /* min shared seg size (bytes) */
#define SHMMNI 4096 /* max num of segs system wide */
+#ifdef __KERNEL__
#define SHMALL (SHMMAX/PAGE_SIZE*(SHMMNI/16)) /* max shm system wide (pages) */
+#else
+#define SHMALL (SHMMAX/getpagesize()*(SHMMNI/16))
+#endif
#define SHMSEG SHMMNI /* max shared segs per process */
#ifdef __KERNEL__
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 0ae338866240..7e095147656c 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -371,6 +371,8 @@ int unhandled_signal(struct task_struct *tsk, int sig);
(!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
(t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
+void signals_init(void);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SIGNAL_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index ddb1a706b144..5e6d3d634d5b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -11,12 +11,35 @@
#include <linux/workqueue.h>
#include <linux/kobject.h>
+enum stat_item {
+ ALLOC_FASTPATH, /* Allocation from cpu slab */
+ ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
+ FREE_FASTPATH, /* Free to cpu slub */
+ FREE_SLOWPATH, /* Freeing not to cpu slab */
+ FREE_FROZEN, /* Freeing to frozen slab */
+ FREE_ADD_PARTIAL, /* Freeing moves slab to partial list */
+ FREE_REMOVE_PARTIAL, /* Freeing removes last object */
+ ALLOC_FROM_PARTIAL, /* Cpu slab acquired from partial list */
+ ALLOC_SLAB, /* Cpu slab acquired from page allocator */
+ ALLOC_REFILL, /* Refill cpu slab from slab freelist */
+ FREE_SLAB, /* Slab freed to the page allocator */
+ CPUSLAB_FLUSH, /* Abandoning of the cpu slab */
+ DEACTIVATE_FULL, /* Cpu slab was full when deactivated */
+ DEACTIVATE_EMPTY, /* Cpu slab was empty when deactivated */
+ DEACTIVATE_TO_HEAD, /* Cpu slab was moved to the head of partials */
+ DEACTIVATE_TO_TAIL, /* Cpu slab was moved to the tail of partials */
+ DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+ NR_SLUB_STAT_ITEMS };
+
struct kmem_cache_cpu {
void **freelist; /* Pointer to first free per cpu object */
struct page *page; /* The slab from which we are allocating */
int node; /* The node of the page (or -1 for debug) */
unsigned int offset; /* Freepointer offset (in word units) */
unsigned int objsize; /* Size of an object (from kmem_cache) */
+#ifdef CONFIG_SLUB_STATS
+ unsigned stat[NR_SLUB_STAT_ITEMS];
+#endif
};
struct kmem_cache_node {
diff --git a/include/linux/sm501.h b/include/linux/sm501.h
index 9e3aaad6fe4d..932a9efee8a5 100644
--- a/include/linux/sm501.h
+++ b/include/linux/sm501.h
@@ -70,6 +70,8 @@ extern unsigned long sm501_gpio_get(struct device *dev,
#define SM501FB_FLAG_DISABLE_AT_EXIT (1<<1)
#define SM501FB_FLAG_USE_HWCURSOR (1<<2)
#define SM501FB_FLAG_USE_HWACCEL (1<<3)
+#define SM501FB_FLAG_PANEL_USE_FPEN (1<<4)
+#define SM501FB_FLAG_PANEL_USE_VBIASEN (1<<5)
struct sm501_platdata_fbsub {
struct fb_videomode *def_mode;
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
index 40c7b5d993b9..f41ffd7c2dd9 100644
--- a/include/linux/sonypi.h
+++ b/include/linux/sonypi.h
@@ -101,6 +101,8 @@
#define SONYPI_EVENT_FNKEY_RELEASED 59
#define SONYPI_EVENT_WIRELESS_ON 60
#define SONYPI_EVENT_WIRELESS_OFF 61
+#define SONYPI_EVENT_ZOOM_IN_PRESSED 62
+#define SONYPI_EVENT_ZOOM_OUT_PRESSED 63
/* get/set brightness */
#define SONYPI_IOCGBRT _IOR('v', 0, __u8)
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
new file mode 100644
index 000000000000..835ddf47d45c
--- /dev/null
+++ b/include/linux/spi/mcp23s08.h
@@ -0,0 +1,24 @@
+
+/* FIXME driver should be able to handle all four slaves that
+ * can be hooked up to each chipselect, as well as IRQs...
+ */
+
+struct mcp23s08_platform_data {
+ /* four slaves can share one SPI chipselect */
+ u8 slave;
+
+ /* number assigned to the first GPIO */
+ unsigned base;
+
+ /* pins with pullups */
+ u8 pullups;
+
+ void *context; /* param to setup/teardown */
+
+ int (*setup)(struct spi_device *spi,
+ int gpio, unsigned ngpio,
+ void *context);
+ int (*teardown)(struct spi_device *spi,
+ int gpio, unsigned ngpio,
+ void *context);
+};
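
This platform data is meant to be supplied by board code alongside the SPI device declaration. A rough sketch (not part of the patch), assuming the driver matches the "mcp23s08" modalias and using made-up bus, chip-select and GPIO numbers:

	static struct mcp23s08_platform_data expander_pdata = {
		.slave		= 3,		/* which of the four addresses on this chipselect */
		.base		= 160,		/* first GPIO number to hand out, made up */
		.pullups	= 0x0f,		/* enable pullups on the low four pins */
	};

	static struct spi_board_info board_spi_devices[] __initdata = {
		{
			.modalias	= "mcp23s08",	/* assumed driver name */
			.platform_data	= &expander_pdata,
			.max_speed_hz	= 1000000,
			.bus_num	= 1,
			.chip_select	= 2,
		},
	};
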
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index e18f5c23b930..9d5da8b2ccf9 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -373,6 +373,15 @@ void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state)
if (sdev->bus->bustype == SSB_BUSTYPE_PCI)
pci_set_power_state(sdev->bus->host_pci, state);
}
+#else
+static inline void ssb_pcihost_unregister(struct pci_driver *driver)
+{
+}
+
+static inline
+void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state)
+{
+}
#endif /* CONFIG_SSB_PCIHOST */
diff --git a/include/linux/stallion.h b/include/linux/stallion.h
index 94b4a10b912f..0424d75a5aaa 100644
--- a/include/linux/stallion.h
+++ b/include/linux/stallion.h
@@ -95,7 +95,6 @@ struct stlport {
struct tty_struct *tty;
wait_queue_head_t open_wait;
wait_queue_head_t close_wait;
- struct work_struct tqueue;
comstats_t stats;
struct stlrq tx;
};
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 646ce2d068d4..1d7d4c5797ee 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -130,7 +130,6 @@ struct pbe {
};
/* mm/page_alloc.c */
-extern void drain_local_pages(void);
extern void mark_free_pages(struct zone *zone);
/**
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4f3838adbb30..3ca5c4bd6d3f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -5,6 +5,7 @@
#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <linux/list.h>
+#include <linux/memcontrol.h>
#include <linux/sched.h>
#include <asm/atomic.h>
@@ -158,9 +159,6 @@ struct swap_list_t {
/* Swap 50% full? Release swapcache more aggressively.. */
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
-/* linux/mm/memory.c */
-extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
-
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
@@ -185,6 +183,9 @@ extern void swap_setup(void);
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zone **zones, int order,
gfp_t gfp_mask);
+extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
+ gfp_t gfp_mask);
+extern int __isolate_lru_page(struct page *page, int mode);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
@@ -223,16 +224,17 @@ extern struct address_space swapper_space;
#define total_swapcache_pages swapper_space.nrpages
extern void show_swap_cache_info(void);
extern int add_to_swap(struct page *, gfp_t);
+extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
extern void __delete_from_swap_cache(struct page *);
extern void delete_from_swap_cache(struct page *);
-extern int move_to_swap_cache(struct page *, swp_entry_t);
-extern int move_from_swap_cache(struct page *, unsigned long,
- struct address_space *);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
-extern struct page * lookup_swap_cache(swp_entry_t);
-extern struct page * read_swap_cache_async(swp_entry_t, struct vm_area_struct *vma,
- unsigned long addr);
+extern struct page *lookup_swap_cache(swp_entry_t);
+extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
+ struct vm_area_struct *vma, unsigned long addr);
+extern struct page *swapin_readahead(swp_entry_t, gfp_t,
+ struct vm_area_struct *vma, unsigned long addr);
+
/* linux/mm/swapfile.c */
extern long total_swap_pages;
extern unsigned int nr_swapfiles;
@@ -306,7 +308,7 @@ static inline void swap_free(swp_entry_t swp)
{
}
-static inline struct page *read_swap_cache_async(swp_entry_t swp,
+static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr)
{
return NULL;
@@ -317,22 +319,12 @@ static inline struct page *lookup_swap_cache(swp_entry_t swp)
return NULL;
}
-static inline int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
-{
- return 0;
-}
-
#define can_share_swap_page(p) (page_mapcount(p) == 1)
-static inline int move_to_swap_cache(struct page *page, swp_entry_t entry)
-{
- return 1;
-}
-
-static inline int move_from_swap_cache(struct page *page, unsigned long index,
- struct address_space *mapping)
+static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
+ gfp_t gfp_mask)
{
- return 1;
+ return -1;
}
static inline void __delete_from_swap_cache(struct page *page)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index ceb6cc5ceebb..7bf2d149d209 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -42,6 +42,12 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
return entry.val & SWP_OFFSET_MASK(entry);
}
+/* check whether a pte points to a swap entry */
+static inline int is_swap_pte(pte_t pte)
+{
+ return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
+}
+
/*
* Convert the arch-dependent pte representation of a swp_entry_t into an
* arch-independent swp_entry_t.
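
is_swap_pte() simply rules out the none, present and file cases, so page-table walkers can test a pte before converting it. A minimal sketch (not part of the patch) using only the swapops helpers:

	static int example_swap_type(pte_t pte)
	{
		swp_entry_t entry;

		if (!is_swap_pte(pte))
			return -1;	/* present, none or file pte: no swap entry here */

		entry = pte_to_swp_entry(pte);
		return swp_type(entry);	/* index of the swap area holding the entry */
	}
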
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 61def7c8fbb3..4c2577bd1c85 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -607,8 +607,11 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
size_t len);
asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
asmlinkage long sys_signalfd(int ufd, sigset_t __user *user_mask, size_t sizemask);
-asmlinkage long sys_timerfd(int ufd, int clockid, int flags,
- const struct itimerspec __user *utmr);
+asmlinkage long sys_timerfd_create(int clockid, int flags);
+asmlinkage long sys_timerfd_settime(int ufd, int flags,
+ const struct itimerspec __user *utmr,
+ struct itimerspec __user *otmr);
+asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
asmlinkage long sys_eventfd(unsigned int count);
asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index bf4ae4e138f7..571f01d20a86 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -102,7 +102,6 @@ enum
KERN_NODENAME=7,
KERN_DOMAINNAME=8,
- KERN_CAP_BSET=14, /* int: capability bounding set */
KERN_PANIC=15, /* int: panic timeout */
KERN_REALROOTDEV=16, /* real root device to mount after initrd */
@@ -965,8 +964,6 @@ extern int proc_dostring(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
extern int proc_dointvec(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
-extern int proc_dointvec_bset(struct ctl_table *, int, struct file *,
- void __user *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_jiffies(struct ctl_table *, int, struct file *,
diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h
index e21937cf91d0..c50d2ba5caf0 100644
--- a/include/linux/tc_ematch/tc_em_meta.h
+++ b/include/linux/tc_ematch/tc_em_meta.h
@@ -81,6 +81,7 @@ enum
TCF_META_ID_SK_SNDTIMEO,
TCF_META_ID_SK_SENDMSG_OFF,
TCF_META_ID_SK_WRITE_PENDING,
+ TCF_META_ID_VLAN_TAG,
__TCF_META_ID_MAX
};
#define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1)
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
new file mode 100644
index 000000000000..bba7712cadc7
--- /dev/null
+++ b/include/linux/thermal.h
@@ -0,0 +1,94 @@
+/*
+ * thermal.h ($Revision: 0 $)
+ *
+ * Copyright (C) 2008 Intel Corp
+ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
+ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#ifndef __THERMAL_H__
+#define __THERMAL_H__
+
+#include <linux/idr.h>
+#include <linux/device.h>
+
+struct thermal_zone_device;
+struct thermal_cooling_device;
+
+struct thermal_zone_device_ops {
+ int (*bind) (struct thermal_zone_device *,
+ struct thermal_cooling_device *);
+ int (*unbind) (struct thermal_zone_device *,
+ struct thermal_cooling_device *);
+ int (*get_temp) (struct thermal_zone_device *, char *);
+ int (*get_mode) (struct thermal_zone_device *, char *);
+ int (*set_mode) (struct thermal_zone_device *, const char *);
+ int (*get_trip_type) (struct thermal_zone_device *, int, char *);
+ int (*get_trip_temp) (struct thermal_zone_device *, int, char *);
+};
+
+struct thermal_cooling_device_ops {
+ int (*get_max_state) (struct thermal_cooling_device *, char *);
+ int (*get_cur_state) (struct thermal_cooling_device *, char *);
+ int (*set_cur_state) (struct thermal_cooling_device *, unsigned int);
+};
+
+#define THERMAL_TRIPS_NONE -1
+#define THERMAL_MAX_TRIPS 10
+#define THERMAL_NAME_LENGTH 20
+struct thermal_cooling_device {
+ int id;
+ char type[THERMAL_NAME_LENGTH];
+ struct device device;
+ void *devdata;
+ struct thermal_cooling_device_ops *ops;
+ struct list_head node;
+};
+
+#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
+ ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
+#define CELSIUS_TO_KELVIN(t) ((t)*10+2732)
+
+struct thermal_zone_device {
+ int id;
+ char type[THERMAL_NAME_LENGTH];
+ struct device device;
+ void *devdata;
+ int trips;
+ struct thermal_zone_device_ops *ops;
+ struct list_head cooling_devices;
+ struct idr idr;
+ struct mutex lock; /* protect cooling devices list */
+ struct list_head node;
+};
+
+struct thermal_zone_device *thermal_zone_device_register(char *, int, void *,
+ struct thermal_zone_device_ops *);
+void thermal_zone_device_unregister(struct thermal_zone_device *);
+
+int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
+ struct thermal_cooling_device *);
+int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
+ struct thermal_cooling_device *);
+
+struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
+ struct thermal_cooling_device_ops *);
+void thermal_cooling_device_unregister(struct thermal_cooling_device *);
+
+#endif /* __THERMAL_H__ */
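
Taken together, the structures above form a small registration API: a sensor driver registers a thermal zone with its ops, a fan or similar driver registers a cooling device, and the two are tied together with thermal_zone_bind_cooling_device(). A minimal sketch of the zone side (not part of the patch; the my_* names are hypothetical and ERR_PTR-style error returns are assumed):

	static int my_get_temp(struct thermal_zone_device *tz, char *buf)
	{
		struct my_sensor *s = tz->devdata;	/* hypothetical driver state */

		return sprintf(buf, "%d\n", my_sensor_read(s));	/* hypothetical read */
	}

	static struct thermal_zone_device_ops my_tz_ops = {
		.get_temp	= my_get_temp,
	};

	/* in the driver's probe path: one zone, no trip points */
	tz = thermal_zone_device_register("my-zone", 0, sensor, &my_tz_ops);
	if (IS_ERR(tz))
		return PTR_ERR(tz);

	/* ... and on removal */
	thermal_zone_device_unregister(tz);
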
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 24c6a2b59511..8ea3e71ba7fa 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -244,6 +244,8 @@ extern int do_adjtimex(struct timex *);
/* Don't use! Compatibility define for existing users. */
#define tickadj (500/HZ ? : 1)
+int read_current_timer(unsigned long *timer_val);
+
#endif /* KERNEL */
#endif /* LINUX_TIMEX_H */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 402de892b3ed..dd8e08fe8855 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -53,13 +53,6 @@
*/
#define __DISABLED_CHAR '\0'
-/*
- * This is the flip buffer used for the tty driver. The buffer is
- * located in the tty structure, and is used as a high speed interface
- * between the tty driver and the tty line discipline.
- */
-#define TTY_FLIPBUF_SIZE 512
-
struct tty_buffer {
struct tty_buffer *next;
char *char_buf_ptr;
@@ -74,7 +67,6 @@ struct tty_buffer {
struct tty_bufhead {
struct delayed_work work;
- struct semaphore pty_sem;
spinlock_t lock;
struct tty_buffer *head; /* Queue head */
struct tty_buffer *tail; /* Active buffer */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 89338b468d0d..ce8e7da05807 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -45,11 +45,11 @@ extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot);
-extern void vfree(void *addr);
+extern void vfree(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
-extern void vunmap(void *addr);
+extern void vunmap(const void *addr);
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
@@ -71,7 +71,7 @@ extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
extern struct vm_struct *get_vm_area_node(unsigned long size,
unsigned long flags, int node,
gfp_t gfp_mask);
-extern struct vm_struct *remove_vm_area(void *addr);
+extern struct vm_struct *remove_vm_area(const void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
struct page ***pages);
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index feb5e99a1079..9448ffbdcbf6 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -77,6 +77,7 @@ void change_console(struct vc_data *new_vc);
void reset_vc(struct vc_data *vc);
extern int unbind_con_driver(const struct consw *csw, int first, int last,
int deflt);
+int vty_init(void);
/*
* vc_screen.c shares this temporary buffer with the console write code so that
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h
new file mode 100644
index 000000000000..9797fec7748a
--- /dev/null
+++ b/include/linux/w1-gpio.h
@@ -0,0 +1,23 @@
+/*
+ * w1-gpio interface to platform code
+ *
+ * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_W1_GPIO_H
+#define _LINUX_W1_GPIO_H
+
+/**
+ * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio
+ * @pin: GPIO pin to use
+ * @is_open_drain: GPIO pin is configured as open drain
+ */
+struct w1_gpio_platform_data {
+ unsigned int pin;
+ unsigned int is_open_drain:1;
+};
+
+#endif /* _LINUX_W1_GPIO_H */
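
Board code hands this structure to the w1-gpio bus master driver through an ordinary platform device. A sketch (not part of the patch), assuming the driver binds to the name "w1-gpio" and using a made-up GPIO number:

	static struct w1_gpio_platform_data foo_w1_gpio_pdata = {
		.pin		= 126,			/* board-specific GPIO, made up */
		.is_open_drain	= 0,
	};

	static struct platform_device foo_w1_device = {
		.name			= "w1-gpio",	/* assumed driver name */
		.id			= -1,
		.dev.platform_data	= &foo_w1_gpio_pdata,
	};
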
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 1f4fb0a81ecd..33a2aa9e02f2 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -162,6 +162,22 @@ wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * macro to avoid include hell
+ */
+#define wake_up_nested(x, s) \
+do { \
+ unsigned long flags; \
+ \
+ spin_lock_irqsave_nested(&(x)->lock, flags, (s)); \
+ wake_up_locked(x); \
+ spin_unlock_irqrestore(&(x)->lock, flags); \
+} while (0)
+#else
+#define wake_up_nested(x, s) wake_up(x)
+#endif
+
#define __wait_event(wq, condition) \
do { \
DEFINE_WAIT(__wait); \
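
The wake_up_nested() wrapper added above exists purely for lockdep: when one waitqueue is woken while the lock of another waitqueue of the same lock class is already held, the wake-up must carry a nesting annotation, which the macro forwards to spin_lock_irqsave_nested(). A usage sketch with hypothetical surrounding structures:

	/* parent->wait is already locked; waking the child queue needs a subclass */
	wake_up_nested(&child->wait, SINGLE_DEPTH_NESTING);
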
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index c6148bbf1250..b7b3362f7717 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -62,6 +62,7 @@ struct writeback_control {
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned for_writepages:1; /* This is a writepages() call */
unsigned range_cyclic:1; /* range_start is cyclic */
+ unsigned more_io:1; /* more io to be dispatched */
};
/*
@@ -100,6 +101,7 @@ extern int dirty_background_ratio;
extern int vm_dirty_ratio;
extern int dirty_writeback_interval;
extern int dirty_expire_interval;
+extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index def131a5ac70..df6b95d2218e 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -46,6 +46,7 @@ struct xattr_handler {
size_t size, int flags);
};
+ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
ssize_t vfs_getxattr(struct dentry *, char *, void *, size_t);
ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
int vfs_setxattr(struct dentry *, char *, void *, size_t, int);
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index f71dac420394..615072c4da04 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -29,7 +29,7 @@ struct mtd_oob_buf {
#define MTD_WRITEABLE 0x400 /* Device is writeable */
#define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */
#define MTD_NO_ERASE 0x1000 /* No erase necessary */
-#define MTD_STUPID_LOCK 0x2000 /* Always locked after reset */
+#define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */
// Some common devices / combinations of capabilities
#define MTD_CAP_ROM 0
diff --git a/include/mtd/ubi-header.h b/include/mtd/ubi-header.h
index 74efa7763479..292f916ea564 100644
--- a/include/mtd/ubi-header.h
+++ b/include/mtd/ubi-header.h
@@ -58,6 +58,43 @@ enum {
};
/*
+ * Volume flags used in the volume table record.
+ *
+ * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume
+ *
+ * The %UBI_VTBL_AUTORESIZE_FLG flag can be set for only one volume in the
+ * volume table. UBI automatically re-sizes the volume which has this flag and
+ * makes the volume as large as possible. This means that if after the
+ * initialization UBI finds out that there are available physical eraseblocks
+ * present on the device, it automatically appends all of them to the volume
+ * (the physical eraseblocks reserved for bad eraseblocks handling and other
+ * reserved physical eraseblocks are not taken). So, if there is a volume with
+ * the %UBI_VTBL_AUTORESIZE_FLG flag set, the amount of available logical
+ * eraseblocks will be zero after UBI is loaded, because all of them will be
+ * reserved for this volume. Note, the %UBI_VTBL_AUTORESIZE_FLG bit is cleared
+ * after the volume has been initialized.
+ *
+ * The auto-resize feature is useful for device production purposes. For
+ * example, different NAND flash chips may have different numbers of initial
+ * bad eraseblocks, depending on the particular chip instance. Manufacturers
+ * of NAND chips usually guarantee that the number of initial bad eraseblocks
+ * does not exceed a certain percentage, e.g. 2%. When one creates an UBI
+ * image which will be flashed to the end devices in production, one does not
+ * know the exact number of good physical eraseblocks the NAND chip on the
+ * device will have, but this number is required to calculate the volume sizes
+ * and put them into the volume table of the UBI image. In this case, one of
+ * the volumes (e.g., the one
+ * which will store the root file system) is marked as "auto-resizable", and
+ * UBI will adjust its size on the first boot if needed.
+ *
+ * Note, first UBI reserves some amount of physical eraseblocks for bad
+ * eraseblock handling, and then re-sizes the volume, not vice-versa. This
+ * means that the pool of reserved physical eraseblocks will always be present.
+ */
+enum {
+ UBI_VTBL_AUTORESIZE_FLG = 0x01,
+};
+
+/*
* Compatibility constants used by internal volumes.
*
* @UBI_COMPAT_DELETE: delete this internal volume before anything is written
@@ -262,7 +299,9 @@ struct ubi_vid_hdr {
/* The layout volume contains the volume table */
-#define UBI_LAYOUT_VOL_ID UBI_INTERNAL_VOL_START
+#define UBI_LAYOUT_VOLUME_ID UBI_INTERNAL_VOL_START
+#define UBI_LAYOUT_VOLUME_TYPE UBI_VID_DYNAMIC
+#define UBI_LAYOUT_VOLUME_ALIGN 1
#define UBI_LAYOUT_VOLUME_EBS 2
#define UBI_LAYOUT_VOLUME_NAME "layout volume"
#define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT
@@ -289,7 +328,8 @@ struct ubi_vid_hdr {
* @upd_marker: if volume update was started but not finished
* @name_len: volume name length
* @name: the volume name
- * @padding2: reserved, zeroes
+ * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG)
+ * @padding: reserved, zeroes
* @crc: a CRC32 checksum of the record
*
* The volume table records are stored in the volume table, which is stored in
@@ -324,7 +364,8 @@ struct ubi_vtbl_record {
__u8 upd_marker;
__be16 name_len;
__u8 name[UBI_VOL_NAME_MAX+1];
- __u8 padding2[24];
+ __u8 flags;
+ __u8 padding[23];
__be32 crc;
} __attribute__ ((packed));
diff --git a/include/mtd/ubi-user.h b/include/mtd/ubi-user.h
index fe06ded0e6b8..a7421f130cc0 100644
--- a/include/mtd/ubi-user.h
+++ b/include/mtd/ubi-user.h
@@ -22,6 +22,21 @@
#define __UBI_USER_H__
/*
+ * UBI device creation (the same as MTD device attachment)
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * MTD devices may be attached using the %UBI_IOCATT ioctl command of the UBI
+ * control device. The caller has to properly fill and pass a
+ * &struct ubi_attach_req object - UBI will attach the MTD device specified in
+ * the request and return the newly created UBI device number as the ioctl
+ * return value.
+ *
+ * UBI device deletion (the same as MTD device detachment)
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * An UBI device may be deleted with the %UBI_IOCDET ioctl command of the UBI
+ * control device.
+ *
* UBI volume creation
* ~~~~~~~~~~~~~~~~~~~
*
@@ -48,7 +63,7 @@
*
* Volume update should be done via the %UBI_IOCVOLUP IOCTL command of the
* corresponding UBI volume character device. A pointer to a 64-bit update
- * size should be passed to the IOCTL. After then, UBI expects user to write
+ * size should be passed to the IOCTL. After this, UBI expects user to write
* this number of bytes to the volume character device. The update is finished
* when the claimed number of bytes is passed. So, the volume update sequence
* is something like:
@@ -57,14 +72,24 @@
* ioctl(fd, UBI_IOCVOLUP, &image_size);
* write(fd, buf, image_size);
* close(fd);
+ *
+ * Atomic eraseblock change
+ * ~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Atomic eraseblock change operation is done via the %UBI_IOCEBCH IOCTL
+ * command of the corresponding UBI volume character device. A pointer to
+ * &struct ubi_leb_change_req has to be passed to the IOCTL. Then the user is
+ * expected to write the requested amount of bytes. This is similar to the
+ * "volume update" IOCTL.
*/
/*
- * When a new volume is created, users may either specify the volume number they
- * want to create or to let UBI automatically assign a volume number using this
- * constant.
+ * When a new UBI volume or UBI device is created, users may either specify the
+ * volume/device number they want to create or to let UBI automatically assign
+ * the number using these constants.
*/
#define UBI_VOL_NUM_AUTO (-1)
+#define UBI_DEV_NUM_AUTO (-1)
/* Maximum volume name length */
#define UBI_MAX_VOLUME_NAME 127
@@ -80,6 +105,15 @@
/* Re-size an UBI volume */
#define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req)
+/* IOCTL commands of the UBI control character device */
+
+#define UBI_CTRL_IOC_MAGIC 'o'
+
+/* Attach an MTD device */
+#define UBI_IOCATT _IOW(UBI_CTRL_IOC_MAGIC, 64, struct ubi_attach_req)
+/* Detach an MTD device */
+#define UBI_IOCDET _IOW(UBI_CTRL_IOC_MAGIC, 65, int32_t)
+
/* IOCTL commands of UBI volume character devices */
#define UBI_VOL_IOC_MAGIC 'O'
@@ -88,6 +122,28 @@
#define UBI_IOCVOLUP _IOW(UBI_VOL_IOC_MAGIC, 0, int64_t)
/* An eraseblock erasure command, used for debugging, disabled by default */
#define UBI_IOCEBER _IOW(UBI_VOL_IOC_MAGIC, 1, int32_t)
+/* An atomic eraseblock change command */
+#define UBI_IOCEBCH _IOW(UBI_VOL_IOC_MAGIC, 2, int32_t)
+
+/* Maximum MTD device name length supported by UBI */
+#define MAX_UBI_MTD_NAME_LEN 127
+
+/*
+ * UBI data type hint constants.
+ *
+ * UBI_LONGTERM: long-term data
+ * UBI_SHORTTERM: short-term data
+ * UBI_UNKNOWN: data persistence is unknown
+ *
+ * These constants are used when data is written to UBI volumes in order to
+ * help the UBI wear-leveling unit to find more appropriate physical
+ * eraseblocks.
+ */
+enum {
+ UBI_LONGTERM = 1,
+ UBI_SHORTTERM = 2,
+ UBI_UNKNOWN = 3,
+};
/*
* UBI volume type constants.
@@ -97,22 +153,58 @@
*/
enum {
UBI_DYNAMIC_VOLUME = 3,
- UBI_STATIC_VOLUME = 4
+ UBI_STATIC_VOLUME = 4,
+};
+
+/**
+ * struct ubi_attach_req - attach MTD device request.
+ * @ubi_num: UBI device number to create
+ * @mtd_num: MTD device number to attach
+ * @vid_hdr_offset: VID header offset (use defaults if %0)
+ * @padding: reserved for future, not used, has to be zeroed
+ *
+ * This data structure is used to specify the MTD device UBI has to attach and the
+ * parameters it has to use. The number which should be assigned to the new UBI
+ * device is passed in @ubi_num. UBI may automatically assign the number if
+ * @UBI_DEV_NUM_AUTO is passed. In this case, the device number is returned in
+ * @ubi_num.
+ *
+ * Most applications should pass %0 in @vid_hdr_offset to make UBI use default
+ * offset of the VID header within physical eraseblocks. The default offset is
+ * the next min. I/O unit after the EC header. For example, it will be offset
+ * 512 in case of a 512-byte page NAND flash with no sub-page support. Or
+ * it will be 512 in case of a 2KiB page NAND flash with 4 512-byte sub-pages.
+ *
+ * But in rare cases, if this optimizes things, the VID header may be placed at
+ * a different offset. For example, the boot-loader might do things faster if
+ * the VID header sits at the end of the first 2KiB NAND page with 4 sub-pages.
+ * As the boot-loader would not normally need to read EC headers (unless it
+ * needs UBI in RW mode), it might be faster to calculate ECC. This is a weird
+ * example, but a real-life one. So, in this example, @vid_hdr_offset would be
+ * 2KiB - 64 bytes = 1984. Note that this position is not even 512-byte
+ * aligned, which is OK, as UBI is clever enough to realize this is the 4th
+ * sub-page of the first page and add the needed padding.
+ */
+struct ubi_attach_req {
+ int32_t ubi_num;
+ int32_t mtd_num;
+ int32_t vid_hdr_offset;
+ uint8_t padding[12];
};
/**
* struct ubi_mkvol_req - volume description data structure used in
- * volume creation requests.
+ * volume creation requests.
* @vol_id: volume number
* @alignment: volume alignment
* @bytes: volume size in bytes
* @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
- * @padding1: reserved for future, not used
+ * @padding1: reserved for future, not used, has to be zeroed
* @name_len: volume name length
- * @padding2: reserved for future, not used
+ * @padding2: reserved for future, not used, has to be zeroed
* @name: volume name
*
- * This structure is used by userspace programs when creating new volumes. The
+ * This structure is used by user-space programs when creating new volumes. The
* @used_bytes field is only necessary when creating static volumes.
*
* The @alignment field specifies the required alignment of the volume logical
@@ -139,7 +231,7 @@ struct ubi_mkvol_req {
int8_t padding1;
int16_t name_len;
int8_t padding2[4];
- char name[UBI_MAX_VOLUME_NAME+1];
+ char name[UBI_MAX_VOLUME_NAME + 1];
} __attribute__ ((packed));
/**
@@ -158,4 +250,19 @@ struct ubi_rsvol_req {
int32_t vol_id;
} __attribute__ ((packed));
+/**
+ * struct ubi_leb_change_req - a data structure used in atomic logical
+ * eraseblock change requests.
+ * @lnum: logical eraseblock number to change
+ * @bytes: how many bytes will be written to the logical eraseblock
+ * @dtype: data type (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN)
+ * @padding: reserved for future, not used, has to be zeroed
+ */
+struct ubi_leb_change_req {
+ int32_t lnum;
+ int32_t bytes;
+ uint8_t dtype;
+ uint8_t padding[7];
+} __attribute__ ((packed));
+
#endif /* __UBI_USER_H__ */
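
Putting the new control-device ioctls together, attaching an MTD device from user space looks roughly like the sketch below (not part of the patch); the /dev/ubi_ctrl node name is an assumption here and error handling is minimal:

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/ubi-user.h>

	static int attach_mtd(int mtd_num)
	{
		struct ubi_attach_req req;
		int fd, ubi_num;

		memset(&req, 0, sizeof(req));		/* padding has to be zeroed */
		req.ubi_num = UBI_DEV_NUM_AUTO;		/* let UBI pick the device number */
		req.mtd_num = mtd_num;
		req.vid_hdr_offset = 0;			/* use the default VID header offset */

		fd = open("/dev/ubi_ctrl", O_RDWR);	/* assumed control node path */
		if (fd < 0)
			return -1;

		ubi_num = ioctl(fd, UBI_IOCATT, &req);	/* new UBI device number, or -1 */
		close(fd);
		return ubi_num;
	}

Detaching is the mirror image: a %UBI_IOCDET ioctl on the same control device, with the UBI device number as the argument.
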
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 625346c47ee2..585eb4496990 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -124,6 +124,7 @@ enum {
P9_DMSOCKET = 0x00100000,
P9_DMSETUID = 0x00080000,
P9_DMSETGID = 0x00040000,
+ P9_DMSETVTX = 0x00010000,
};
/* qid.types */
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 9b9221a21392..e52f93d9ac5f 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -3,6 +3,7 @@
*
* 9P Client Definitions
*
+ * Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
*
* This program is free software; you can redistribute it and/or modify
@@ -29,6 +30,7 @@ struct p9_client {
spinlock_t lock; /* protect client structure */
int msize;
unsigned char dotu;
+ struct p9_trans_module *trans_mod;
struct p9_trans *trans;
struct p9_conn *conn;
@@ -52,8 +54,7 @@ struct p9_fid {
struct list_head dlist; /* list of all fids attached to a dentry */
};
-struct p9_client *p9_client_create(struct p9_trans *trans, int msize,
- int dotu);
+struct p9_client *p9_client_create(const char *dev_name, char *options);
void p9_client_destroy(struct p9_client *clnt);
void p9_client_disconnect(struct p9_client *clnt);
struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
diff --git a/include/net/9p/conn.h b/include/net/9p/conn.h
deleted file mode 100644
index 756d8784f953..000000000000
--- a/include/net/9p/conn.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * include/net/9p/conn.h
- *
- * Connection Definitions
- *
- * Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
- * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to:
- * Free Software Foundation
- * 51 Franklin Street, Fifth Floor
- * Boston, MA 02111-1301 USA
- *
- */
-
-#ifndef NET_9P_CONN_H
-#define NET_9P_CONN_H
-
-#undef P9_NONBLOCK
-
-struct p9_conn;
-struct p9_req;
-
-/**
- * p9_mux_req_callback - callback function that is called when the
- * response of a request is received. The callback is called from
- * a workqueue and shouldn't block.
- *
- * @req - request
- * @a - the pointer that was specified when the request was send to be
- * passed to the callback
- */
-typedef void (*p9_conn_req_callback)(struct p9_req *req, void *a);
-
-struct p9_conn *p9_conn_create(struct p9_trans *trans, int msize,
- unsigned char *dotu);
-void p9_conn_destroy(struct p9_conn *);
-int p9_conn_rpc(struct p9_conn *m, struct p9_fcall *tc, struct p9_fcall **rc);
-
-#ifdef P9_NONBLOCK
-int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
- p9_conn_req_callback cb, void *a);
-#endif /* P9_NONBLOCK */
-
-void p9_conn_cancel(struct p9_conn *m, int err);
-
-#endif /* NET_9P_CONN_H */
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 9dd4a05619a8..d2209ae9d18b 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -4,7 +4,7 @@
* Transport Definition
*
* Copyright (C) 2005 by Latchesar Ionkov <lucho@ionkov.net>
- * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
@@ -34,11 +34,12 @@ enum p9_trans_status {
struct p9_trans {
enum p9_trans_status status;
+ int msize;
+ unsigned char extended;
void *priv;
- int (*write) (struct p9_trans *, void *, int);
- int (*read) (struct p9_trans *, void *, int);
void (*close) (struct p9_trans *);
- unsigned int (*poll)(struct p9_trans *, struct poll_table_struct *);
+ int (*rpc) (struct p9_trans *t, struct p9_fcall *tc,
+ struct p9_fcall **rc);
};
struct p9_trans_module {
@@ -46,7 +47,7 @@ struct p9_trans_module {
char *name; /* name of transport */
int maxsize; /* max message size of transport */
int def; /* this transport should be default */
- struct p9_trans * (*create)(const char *devname, char *options);
+ struct p9_trans * (*create)(const char *, char *, int, unsigned char);
};
void v9fs_register_trans(struct p9_trans_module *m);
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 48ac620cb846..97dc35ad09be 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -389,7 +389,7 @@ static inline struct sock *inet_lookup(struct net *net,
}
extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
- struct sock *sk,
+ struct sock *sk, u32 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16, struct inet_timewait_sock **),
void (*hash)(struct sock *sk));
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 90d1175f63de..8b12667f7a2b 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -266,6 +266,14 @@ static inline void fib_res_put(struct fib_result *res)
#ifdef CONFIG_PROC_FS
extern int __net_init fib_proc_init(struct net *net);
extern void __net_exit fib_proc_exit(struct net *net);
+#else
+static inline int fib_proc_init(struct net *net)
+{
+ return 0;
+}
+static inline void fib_proc_exit(struct net *net)
+{
+}
#endif
#endif /* _NET_FIB_H */
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index fa80ea48639d..c0c019f72ba9 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -110,7 +110,6 @@ struct frag_hdr {
/* sysctls */
extern int sysctl_mld_max_msf;
-
extern struct ctl_path net_ipv6_ctl_path[];
#define _DEVINC(statname, modifier, idev, field) \
@@ -586,9 +585,6 @@ extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
int __user *optlen);
#ifdef CONFIG_PROC_FS
-extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
-extern struct ctl_table *ipv6_route_sysctl_init(struct net *net);
-
extern int ac6_proc_init(void);
extern void ac6_proc_exit(void);
extern int raw6_proc_init(void);
@@ -621,6 +617,8 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev)
extern ctl_table ipv6_route_table_template[];
extern ctl_table ipv6_icmp_table_template[];
+extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
+extern struct ctl_table *ipv6_route_sysctl_init(struct net *net);
extern int ipv6_sysctl_register(void);
extern void ipv6_sysctl_unregister(void);
#endif
diff --git a/include/net/netlabel.h b/include/net/netlabel.h
index b3213c7c5309..0ca67d73c7ad 100644
--- a/include/net/netlabel.h
+++ b/include/net/netlabel.h
@@ -36,6 +36,8 @@
#include <net/netlink.h>
#include <asm/atomic.h>
+struct cipso_v4_doi;
+
/*
* NetLabel - A management interface for maintaining network packet label
* mapping tables for explicit packet labling protocols.
@@ -103,12 +105,6 @@ struct netlbl_audit {
uid_t loginuid;
};
-/* Domain mapping definition struct */
-struct netlbl_dom_map;
-
-/* Domain mapping operations */
-int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
-
/*
* LSM security attributes
*/
@@ -344,6 +340,19 @@ static inline void netlbl_secattr_free(struct netlbl_lsm_secattr *secattr)
#ifdef CONFIG_NETLABEL
/*
+ * LSM configuration operations
+ */
+int netlbl_cfg_map_del(const char *domain, struct netlbl_audit *audit_info);
+int netlbl_cfg_unlbl_add_map(const char *domain,
+ struct netlbl_audit *audit_info);
+int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def,
+ struct netlbl_audit *audit_info);
+int netlbl_cfg_cipsov4_add_map(struct cipso_v4_doi *doi_def,
+ const char *domain,
+ struct netlbl_audit *audit_info);
+int netlbl_cfg_cipsov4_del(u32 doi, struct netlbl_audit *audit_info);
+
+/*
* LSM security attribute operations
*/
int netlbl_secattr_catmap_walk(struct netlbl_lsm_secattr_catmap *catmap,
@@ -378,6 +387,32 @@ void netlbl_cache_invalidate(void);
int netlbl_cache_add(const struct sk_buff *skb,
const struct netlbl_lsm_secattr *secattr);
#else
+static inline int netlbl_cfg_map_del(const char *domain,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_cfg_unlbl_add_map(const char *domain,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_cfg_cipsov4_add_map(struct cipso_v4_doi *doi_def,
+ const char *domain,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
+static inline int netlbl_cfg_cipsov4_del(u32 doi,
+ struct netlbl_audit *audit_info)
+{
+ return -ENOSYS;
+}
static inline int netlbl_secattr_catmap_walk(
struct netlbl_lsm_secattr_catmap *catmap,
u32 offset)
diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h
index d5838c30d20f..87a260e3699e 100644
--- a/include/pcmcia/cs.h
+++ b/include/pcmcia/cs.h
@@ -147,11 +147,11 @@ typedef struct config_req_t {
/* For RequestIO and ReleaseIO */
typedef struct io_req_t {
- ioaddr_t BasePort1;
- ioaddr_t NumPorts1;
+ u_int BasePort1;
+ u_int NumPorts1;
u_int Attributes1;
- ioaddr_t BasePort2;
- ioaddr_t NumPorts2;
+ u_int BasePort2;
+ u_int NumPorts2;
u_int Attributes2;
u_int IOAddrLines;
} io_req_t;
diff --git a/include/pcmcia/cs_types.h b/include/pcmcia/cs_types.h
index 5f388035687d..9a6bcc4952f0 100644
--- a/include/pcmcia/cs_types.h
+++ b/include/pcmcia/cs_types.h
@@ -27,7 +27,6 @@ typedef u_int ioaddr_t;
#else
typedef u_short ioaddr_t;
#endif
-typedef unsigned long kio_addr_t;
typedef u_short socket_t;
typedef u_int event_t;
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 6e84258b94de..f95dca077c1c 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -92,7 +92,7 @@ typedef struct pccard_io_map {
u_char map;
u_char flags;
u_short speed;
- kio_addr_t start, stop;
+ u_int start, stop;
} pccard_io_map;
typedef struct pccard_mem_map {
@@ -155,7 +155,7 @@ extern struct pccard_resource_ops pccard_iodyn_ops;
struct pcmcia_socket;
typedef struct io_window_t {
- kio_addr_t InUse, Config;
+ u_int InUse, Config;
struct resource *res;
} io_window_t;
@@ -208,7 +208,7 @@ struct pcmcia_socket {
u_int features;
u_int irq_mask;
u_int map_size;
- kio_addr_t io_offset;
+ u_int io_offset;
u_char pci_irq;
struct pci_dev * cb_dev;
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index 318a909e7ae1..5ffec8ad6964 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -45,8 +45,8 @@
/* initiator tags; opaque for target */
typedef uint32_t __bitwise__ itt_t;
/* below makes sense only for initiator that created this tag */
-#define build_itt(itt, id, age) ((__force itt_t)\
- ((itt) | ((id) << ISCSI_CID_SHIFT) | ((age) << ISCSI_AGE_SHIFT)))
+#define build_itt(itt, age) ((__force itt_t)\
+ ((itt) | ((age) << ISCSI_AGE_SHIFT)))
#define get_itt(itt) ((__force uint32_t)(itt_t)(itt) & ISCSI_ITT_MASK)
#define RESERVED_ITT ((__force itt_t)0xffffffff)
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 889f51fabab9..7b90b63fb5c7 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -70,8 +70,6 @@ enum {
#define ISCSI_SUSPEND_BIT 1
#define ISCSI_ITT_MASK (0xfff)
-#define ISCSI_CID_SHIFT 12
-#define ISCSI_CID_MASK (0xffff << ISCSI_CID_SHIFT)
#define ISCSI_AGE_SHIFT 28
#define ISCSI_AGE_MASK (0xf << ISCSI_AGE_SHIFT)
@@ -135,6 +133,14 @@ static inline void* iscsi_next_hdr(struct iscsi_cmd_task *ctask)
return (void*)ctask->hdr + ctask->hdr_len;
}
+/* Connection's states */
+enum {
+ ISCSI_CONN_INITIAL_STAGE,
+ ISCSI_CONN_STARTED,
+ ISCSI_CONN_STOPPED,
+ ISCSI_CONN_CLEANUP_WAIT,
+};
+
struct iscsi_conn {
struct iscsi_cls_conn *cls_conn; /* ptr to class connection */
void *dd_data; /* iscsi_transport data */
@@ -227,6 +233,17 @@ struct iscsi_pool {
int max; /* Max number of elements */
};
+/* Session's states */
+enum {
+ ISCSI_STATE_FREE = 1,
+ ISCSI_STATE_LOGGED_IN,
+ ISCSI_STATE_FAILED,
+ ISCSI_STATE_TERMINATE,
+ ISCSI_STATE_IN_RECOVERY,
+ ISCSI_STATE_RECOVERY_FAILED,
+ ISCSI_STATE_LOGGING_OUT,
+};
+
struct iscsi_session {
/*
* Syncs up the scsi eh thread with the iscsi eh thread when sending
@@ -325,6 +342,10 @@ extern int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
#define session_to_cls(_sess) \
hostdata_session(_sess->host->hostdata)
+#define iscsi_session_printk(prefix, _sess, fmt, a...) \
+ iscsi_cls_session_printk(prefix, \
+ (struct iscsi_cls_session *)session_to_cls(_sess), fmt, ##a)
+
/*
* connection management
*/
@@ -339,6 +360,9 @@ extern void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err);
extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf);
+#define iscsi_conn_printk(prefix, _c, fmt, a...) \
+ iscsi_cls_conn_printk(prefix, _c->cls_conn, fmt, ##a)
+
/*
* pdu and task processing
*/
@@ -349,8 +373,6 @@ extern int iscsi_conn_send_pdu(struct iscsi_cls_conn *, struct iscsi_hdr *,
char *, uint32_t);
extern int iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
char *, int);
-extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
- char *, int);
extern int iscsi_verify_itt(struct iscsi_conn *, struct iscsi_hdr *,
uint32_t *);
extern void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask);
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 82251575a9b4..1f74bcd603fe 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -235,6 +235,20 @@ static inline int scsi_status_is_good(int status)
#define TYPE_RBC 0x0e
#define TYPE_NO_LUN 0x7f
+/* SCSI protocols; these are taken from SPC-3 section 7.5 */
+enum scsi_protocol {
+ SCSI_PROTOCOL_FCP = 0, /* Fibre Channel */
+ SCSI_PROTOCOL_SPI = 1, /* parallel SCSI */
+ SCSI_PROTOCOL_SSA = 2, /* Serial Storage Architecture - Obsolete */
+ SCSI_PROTOCOL_SBP = 3, /* firewire */
+ SCSI_PROTOCOL_SRP = 4, /* Infiniband RDMA */
+ SCSI_PROTOCOL_ISCSI = 5,
+ SCSI_PROTOCOL_SAS = 6,
+ SCSI_PROTOCOL_ADT = 7, /* Media Changers */
+ SCSI_PROTOCOL_ATA = 8,
+ SCSI_PROTOCOL_UNSPEC = 0xf, /* No specific protocol */
+};
+
/* Returns a human-readable name for the device */
extern const char * scsi_device_type(unsigned type);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 5c58d594126a..d1299e999723 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -280,39 +280,45 @@ struct scsi_host_template {
* If the host wants to be called before the scan starts, but
* after the midlayer has set up ready for the scan, it can fill
* in this function.
+ *
+ * Status: OPTIONAL
*/
void (* scan_start)(struct Scsi_Host *);
/*
- * fill in this function to allow the queue depth of this host
- * to be changeable (on a per device basis). returns either
+ * Fill in this function to allow the queue depth of this host
+ * to be changeable (on a per device basis). Returns either
* the current queue depth setting (may be different from what
* was passed in) or an error. An error should only be
* returned if the requested depth is legal but the driver was
* unable to set it. If the requested depth is illegal, the
* driver should set and return the closest legal queue depth.
*
+ * Status: OPTIONAL
*/
int (* change_queue_depth)(struct scsi_device *, int);
/*
- * fill in this function to allow the changing of tag types
+ * Fill in this function to allow the changing of tag types
* (this also allows the enabling/disabling of tag command
* queueing). An error should only be returned if something
* went wrong in the driver while trying to set the tag type.
* If the driver doesn't support the requested tag type, then
* it should set the closest type it does support without
* returning an error. Returns the actual tag type set.
+ *
+ * Status: OPTIONAL
*/
int (* change_queue_type)(struct scsi_device *, int);
/*
- * This function determines the bios parameters for a given
+ * This function determines the BIOS parameters for a given
* harddisk. These tend to be numbers that are made up by
* the host adapter. Parameters:
* size, device, list (heads, sectors, cylinders)
*
- * Status: OPTIONAL */
+ * Status: OPTIONAL
+ */
int (* bios_param)(struct scsi_device *, struct block_device *,
sector_t, int []);
@@ -351,7 +357,7 @@ struct scsi_host_template {
/*
* This determines if we will use a non-interrupt driven
- * or an interrupt driven scheme, It is set to the maximum number
+ * or an interrupt driven scheme. It is set to the maximum number
* of simultaneous commands a given host adapter will accept.
*/
int can_queue;
@@ -372,12 +378,12 @@ struct scsi_host_template {
unsigned short sg_tablesize;
/*
- * If the host adapter has limitations beside segment count
+ * Set this if the host adapter has limitations beside segment count.
*/
unsigned short max_sectors;
/*
- * dma scatter gather segment boundary limit. a segment crossing this
+ * DMA scatter gather segment boundary limit. A segment crossing this
* boundary will be split in two.
*/
unsigned long dma_boundary;
@@ -386,7 +392,7 @@ struct scsi_host_template {
* This specifies "machine infinity" for host templates which don't
* limit the transfer size. Note this limit represents an absolute
* maximum, and may be over the transfer limits allowed for
- * individual devices (e.g. 256 for SCSI-1)
+ * individual devices (e.g. 256 for SCSI-1).
*/
#define SCSI_DEFAULT_MAX_SECTORS 1024
@@ -413,12 +419,12 @@ struct scsi_host_template {
unsigned supported_mode:2;
/*
- * true if this host adapter uses unchecked DMA onto an ISA bus.
+ * True if this host adapter uses unchecked DMA onto an ISA bus.
*/
unsigned unchecked_isa_dma:1;
/*
- * true if this host adapter can make good use of clustering.
+ * True if this host adapter can make good use of clustering.
* I originally thought that if the tablesize was large that it
* was a waste of CPU cycles to prepare a cluster list, but
* it works out that the Buslogic is faster if you use a smaller
@@ -428,7 +434,7 @@ struct scsi_host_template {
unsigned use_clustering:1;
/*
- * True for emulated SCSI host adapters (e.g. ATAPI)
+ * True for emulated SCSI host adapters (e.g. ATAPI).
*/
unsigned emulated:1;
@@ -438,12 +444,12 @@ struct scsi_host_template {
unsigned skip_settle_delay:1;
/*
- * ordered write support
+ * True if we are using ordered write support.
*/
unsigned ordered_tag:1;
/*
- * Countdown for host blocking with no commands outstanding
+ * Countdown for host blocking with no commands outstanding.
*/
unsigned int max_host_blocked;
@@ -522,8 +528,8 @@ struct Scsi_Host {
struct scsi_transport_template *transportt;
/*
- * area to keep a shared tag map (if needed, will be
- * NULL if not)
+ * Area to keep a shared tag map (if needed, will be
+ * NULL if not).
*/
struct blk_queue_tag *bqt;
@@ -596,16 +602,16 @@ struct Scsi_Host {
/*
* Host uses correct SCSI ordering not PC ordering. The bit is
* set for the minority of drivers whose authors actually read
- * the spec ;)
+ * the spec ;).
*/
unsigned reverse_ordering:1;
/*
- * ordered write support
+ * Ordered write support
*/
unsigned ordered_tag:1;
- /* task mgmt function in progress */
+ /* Task mgmt function in progress */
unsigned tmf_in_progress:1;
/* Asynchronous scan in progress */
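The comments above tag scan_start(), change_queue_depth() and change_queue_type() as OPTIONAL hooks in struct scsi_host_template. A hedged sketch of the change_queue_depth() contract for a hypothetical driver whose hardware limit is MY_MAX_QUEUE_DEPTH (an assumed value, not from this patch):

/* Sketch of the change_queue_depth() contract described above. */
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

#define MY_MAX_QUEUE_DEPTH	64	/* assumed hardware limit */

static int my_change_queue_depth(struct scsi_device *sdev, int depth)
{
	/* Illegal depth: silently clamp to the closest legal value. */
	if (depth < 1)
		depth = 1;
	else if (depth > MY_MAX_QUEUE_DEPTH)
		depth = MY_MAX_QUEUE_DEPTH;

	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
	return sdev->queue_depth;	/* report what was actually set */
}

A real driver would then plug this into its template as .change_queue_depth = my_change_queue_depth.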
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 404f11d331d6..dbc96ef4cc72 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -149,13 +149,6 @@ extern void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error);
extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size);
-
-/* Connection's states */
-#define ISCSI_CONN_INITIAL_STAGE 0
-#define ISCSI_CONN_STARTED 1
-#define ISCSI_CONN_STOPPED 2
-#define ISCSI_CONN_CLEANUP_WAIT 3
-
struct iscsi_cls_conn {
struct list_head conn_list; /* item in connlist */
void *dd_data; /* LLD private data */
@@ -169,27 +162,31 @@ struct iscsi_cls_conn {
#define iscsi_dev_to_conn(_dev) \
container_of(_dev, struct iscsi_cls_conn, dev)
-/* Session's states */
-#define ISCSI_STATE_FREE 1
-#define ISCSI_STATE_LOGGED_IN 2
-#define ISCSI_STATE_FAILED 3
-#define ISCSI_STATE_TERMINATE 4
-#define ISCSI_STATE_IN_RECOVERY 5
-#define ISCSI_STATE_RECOVERY_FAILED 6
-#define ISCSI_STATE_LOGGING_OUT 7
+#define iscsi_conn_to_session(_conn) \
+ iscsi_dev_to_session(_conn->dev.parent)
+
+/* iscsi class session state */
+enum {
+ ISCSI_SESSION_LOGGED_IN,
+ ISCSI_SESSION_FAILED,
+ ISCSI_SESSION_FREE,
+};
struct iscsi_cls_session {
struct list_head sess_list; /* item in session_list */
struct list_head host_list;
struct iscsi_transport *transport;
+ spinlock_t lock;
+ struct work_struct scan_work;
+ struct work_struct unbind_work;
/* recovery fields */
int recovery_tmo;
struct delayed_work recovery_work;
- struct work_struct unbind_work;
int target_id;
+ int state;
int sid; /* session id */
void *dd_data; /* LLD private data */
struct device dev; /* sysfs transport/container device */
@@ -206,14 +203,22 @@ struct iscsi_cls_session {
struct iscsi_host {
struct list_head sessions;
+ atomic_t nr_scans;
struct mutex mutex;
- struct workqueue_struct *unbind_workq;
- char unbind_workq_name[KOBJ_NAME_LEN];
+ struct workqueue_struct *scan_workq;
+ char scan_workq_name[KOBJ_NAME_LEN];
};
/*
* session and connection functions that can be used by HW iSCSI LLDs
*/
+#define iscsi_cls_session_printk(prefix, _cls_session, fmt, a...) \
+ dev_printk(prefix, &(_cls_session)->dev, fmt, ##a)
+
+#define iscsi_cls_conn_printk(prefix, _cls_conn, fmt, a...) \
+ dev_printk(prefix, &(_cls_conn)->dev, fmt, ##a)
+
+extern int iscsi_session_chkready(struct iscsi_cls_session *session);
extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
struct iscsi_transport *transport);
extern int iscsi_add_session(struct iscsi_cls_session *session,
@@ -231,6 +236,6 @@ extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess,
extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn);
extern void iscsi_unblock_session(struct iscsi_cls_session *session);
extern void iscsi_block_session(struct iscsi_cls_session *session);
-
+extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time);
#endif
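iscsi_scan_finished() and the new per-session scan_work plumbing exist to support asynchronous target scanning. A hedged sketch of how a driver's host template might be wired up, with my_scan_start() standing in for the driver-specific discovery kick-off:

/* Sketch: wiring the async-scan hooks to the new transport helper. */
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_iscsi.h>

static void my_scan_start(struct Scsi_Host *shost)
{
	/* hypothetical: the driver starts its session discovery here */
}

static struct scsi_host_template my_iscsi_sht = {
	.name		= "my_iscsi",
	.scan_start	= my_scan_start,
	/* let the iSCSI class report when all sessions have been scanned */
	.scan_finished	= iscsi_scan_finished,
};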
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h
index 4eea63761a3f..336c20db87f8 100644
--- a/include/video/atmel_lcdc.h
+++ b/include/video/atmel_lcdc.h
@@ -22,7 +22,7 @@
#ifndef __ATMEL_LCDC_H__
#define __ATMEL_LCDC_H__
- /* LCD Controller info data structure */
+ /* LCD Controller info data structure, stored in device platform_data */
struct atmel_lcdfb_info {
spinlock_t lock;
struct fb_info *info;
@@ -33,7 +33,14 @@ struct atmel_lcdfb_info {
struct platform_device *pdev;
struct clk *bus_clk;
struct clk *lcdc_clk;
- unsigned int default_bpp;
+
+#ifdef CONFIG_BACKLIGHT_ATMEL_LCDC
+ struct backlight_device *backlight;
+ u8 bl_power;
+#endif
+ bool lcdcon_is_backlight;
+
+ u8 default_bpp;
unsigned int default_lcdcon2;
unsigned int default_dmacon;
void (*atmel_lcdfb_power_control)(int on);
@@ -115,20 +122,20 @@ struct atmel_lcdfb_info {
#define ATMEL_LCDC_MEMOR_LITTLE (1 << 31)
#define ATMEL_LCDC_TIM1 0x0808
-#define ATMEL_LCDC_VFP (0xff << 0)
+#define ATMEL_LCDC_VFP (0xffU << 0)
#define ATMEL_LCDC_VBP_OFFSET 8
-#define ATMEL_LCDC_VBP (0xff << ATMEL_LCDC_VBP_OFFSET)
+#define ATMEL_LCDC_VBP (0xffU << ATMEL_LCDC_VBP_OFFSET)
#define ATMEL_LCDC_VPW_OFFSET 16
-#define ATMEL_LCDC_VPW (0x3f << ATMEL_LCDC_VPW_OFFSET)
+#define ATMEL_LCDC_VPW (0x3fU << ATMEL_LCDC_VPW_OFFSET)
#define ATMEL_LCDC_VHDLY_OFFSET 24
-#define ATMEL_LCDC_VHDLY (0xf << ATMEL_LCDC_VHDLY_OFFSET)
+#define ATMEL_LCDC_VHDLY (0xfU << ATMEL_LCDC_VHDLY_OFFSET)
#define ATMEL_LCDC_TIM2 0x080c
-#define ATMEL_LCDC_HBP (0xff << 0)
+#define ATMEL_LCDC_HBP (0xffU << 0)
#define ATMEL_LCDC_HPW_OFFSET 8
-#define ATMEL_LCDC_HPW (0x3f << ATMEL_LCDC_HPW_OFFSET)
+#define ATMEL_LCDC_HPW (0x3fU << ATMEL_LCDC_HPW_OFFSET)
#define ATMEL_LCDC_HFP_OFFSET 21
-#define ATMEL_LCDC_HFP (0x7ff << ATMEL_LCDC_HFP_OFFSET)
+#define ATMEL_LCDC_HFP (0x7ffU << ATMEL_LCDC_HFP_OFFSET)
#define ATMEL_LCDC_LCDFRMCFG 0x0810
#define ATMEL_LCDC_LINEVAL (0x7ff << 0)
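The U suffix added to the timing masks matters whenever a shift reaches bit 31: an int constant shifted into the sign bit goes negative (formally undefined behaviour), and then sign-extends when mixed with wider unsigned types. A small userspace demonstration of the difference:

/* Why (0x7ff << 21) and friends gained a U suffix. */
#include <stdio.h>

int main(void)
{
	unsigned long bad  = (0x7ff  << 21);	/* int shifted into bit 31 */
	unsigned long good = (0x7ffU << 21);

	printf("without U: %#lx\n", bad);	/* 0xffffffffffe00000 on a 64-bit machine */
	printf("with U:    %#lx\n", good);	/* 0xffe00000 */
	return 0;
}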
diff --git a/init/Kconfig b/init/Kconfig
index b2acdeb2d312..95ac2657b0f4 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -369,6 +369,13 @@ config CGROUP_CPUACCT
Provides a simple Resource Controller for monitoring the
total CPU consumed by the tasks in a cgroup
+config RESOURCE_COUNTERS
+ bool "Resource counters"
+ help
+ This option enables controller independent resource accounting
+ infrastructure that works with cgroups
+ depends on CGROUPS
+
config SYSFS_DEPRECATED
bool "Create deprecated sysfs files"
depends on SYSFS
@@ -390,6 +397,13 @@ config SYSFS_DEPRECATED
If you are using a distro that was released in 2006 or later,
it should be safe to say N here.
+config CGROUP_MEM_CONT
+ bool "Memory controller for cgroups"
+ depends on CGROUPS && RESOURCE_COUNTERS
+ help
+ Provides a memory controller that manages both page cache and
+ RSS memory.
+
config PROC_PID_CPUSET
bool "Include legacy /proc/<pid>/cpuset file"
depends on CPUSETS
@@ -541,6 +555,18 @@ config ELF_CORE
help
Enable support for generating core dumps. Disabling saves about 4k.
+config COMPAT_BRK
+ bool "Disable heap randomization"
+ default y
+ help
+ Randomizing heap placement makes heap exploits harder, but it
+ also breaks ancient binaries (including anything libc5 based).
+ This option changes the bootup default to heap randomization
+ disabled, and can be overridden at runtime by setting
+ /proc/sys/kernel/randomize_va_space to 2.
+
+ On non-ancient distros (post-2000 ones) Y is usually a safe choice.
+
config BASE_FULL
default y
bool "Enable full-sized data structures for core" if EMBEDDED
@@ -582,7 +608,6 @@ config SIGNALFD
config TIMERFD
bool "Enable timerfd() system call" if EMBEDDED
select ANON_INODES
- depends on BROKEN
default y
help
Enable the timerfd() system call that allows to receive timer
@@ -657,11 +682,9 @@ config SLOB
depends on EMBEDDED
bool "SLOB (Simple Allocator)"
help
- SLOB replaces the SLAB allocator with a drastically simpler
- allocator. SLOB is more space efficient than SLAB but does not
- scale well (single lock for all operations) and is also highly
- susceptible to fragmentation. SLUB can accomplish a higher object
- density. It is usually better to use SLUB instead of SLOB.
+ SLOB replaces the stock allocator with a drastically simpler
+ allocator. SLOB is generally more space efficient but
+ does not perform as well on large systems.
endchoice
@@ -679,6 +702,16 @@ config MARKERS
source "arch/Kconfig"
+config PROC_PAGE_MONITOR
+ default y
+ depends on PROC_FS && MMU
+ bool "Enable /proc page monitoring" if EMBEDDED
+ help
+ Various /proc files exist to monitor process memory utilization:
+ /proc/pid/smaps, /proc/pid/clear_refs, /proc/pid/pagemap,
+ /proc/kpagecount, and /proc/kpageflags. Disabling these
+ interfaces will reduce the size of the kernel by approximately 4kb.
+
endmenu # General setup
config SLABINFO
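COMPAT_BRK only changes the boot-time default; as the help text says, the policy remains adjustable through /proc/sys/kernel/randomize_va_space. A hedged userspace sketch (run as root) that switches full randomization back on:

/* Re-enable brk randomization at runtime; value meanings per the help text above. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/randomize_va_space", "w");

	if (!f) {
		perror("randomize_va_space");
		return 1;
	}
	fputs("2\n", f);	/* 2 = randomize mmap base, stack and heap (brk) */
	return fclose(f) ? 1 : 0;
}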
diff --git a/init/calibrate.c b/init/calibrate.c
index 2d3d73bd4ce1..ecb3822d4f70 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -7,8 +7,7 @@
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/init.h>
-
-#include <asm/timex.h>
+#include <linux/timex.h>
unsigned long preset_lpj;
static int __init lpj_setup(char *str)
@@ -29,7 +28,7 @@ __setup("lpj=", lpj_setup);
#define DELAY_CALIBRATION_TICKS ((HZ < 100) ? 1 : (HZ/100))
#define MAX_DIRECT_CALIBRATION_RETRIES 5
-static unsigned long __devinit calibrate_delay_direct(void)
+static unsigned long __cpuinit calibrate_delay_direct(void)
{
unsigned long pre_start, start, post_start;
unsigned long pre_end, end, post_end;
@@ -102,7 +101,7 @@ static unsigned long __devinit calibrate_delay_direct(void)
return 0;
}
#else
-static unsigned long __devinit calibrate_delay_direct(void) {return 0;}
+static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;}
#endif
/*
@@ -112,7 +111,7 @@ static unsigned long __devinit calibrate_delay_direct(void) {return 0;}
*/
#define LPS_PREC 8
-void __devinit calibrate_delay(void)
+void __cpuinit calibrate_delay(void)
{
unsigned long ticks, loopbit;
int lps_precision = LPS_PREC;
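calibrate_delay() moves from __devinit to __cpuinit because it is reached from the CPU-hotplug bring-up path, which can run long after device-init sections have been discarded. For context, a hedged sketch of a typical __cpuinit consumer of this era, a hypothetical hotplug notifier:

/* Hypothetical CPU hotplug notifier annotated with __cpuinit (2.6.24-era API). */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int __cpuinit my_cpu_callback(struct notifier_block *nb,
				     unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		printk(KERN_INFO "CPU %u is now online\n", cpu);
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata my_cpu_nb = {
	.notifier_call = my_cpu_callback,
};

static int __init my_init(void)
{
	register_hotcpu_notifier(&my_cpu_nb);
	return 0;
}
device_initcall(my_init);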
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 1161dfd7b0d3..f86573126f83 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -11,6 +11,7 @@
#include <linux/mount.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
@@ -18,8 +19,6 @@
#include "do_mounts.h"
-extern int get_filesystem_list(char * buf);
-
int __initdata rd_doload; /* 1 = load RAM disk, 0 = don't load */
int root_mountflags = MS_RDONLY | MS_SILENT;
diff --git a/init/initramfs.c b/init/initramfs.c
index 1db02a0025db..c0b1e0533d80 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -503,7 +503,6 @@ static int __init retain_initrd_param(char *str)
__setup("retain_initrd", retain_initrd_param);
extern char __initramfs_start[], __initramfs_end[];
-#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/initrd.h>
#include <linux/kexec.h>
@@ -539,15 +538,12 @@ skip:
initrd_end = 0;
}
-#endif
-
-static int __init populate_rootfs(void)
+int __init populate_rootfs(void)
{
char *err = unpack_to_rootfs(__initramfs_start,
__initramfs_end - __initramfs_start, 0);
if (err)
panic(err);
-#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start) {
#ifdef CONFIG_BLK_DEV_RAM
int fd;
@@ -579,7 +575,12 @@ static int __init populate_rootfs(void)
free_initrd();
#endif
}
-#endif
return 0;
}
+#ifndef CONFIG_ACPI_CUSTOM_DSDT_INITRD
+/*
+ * If this option is enabled, populate_rootfs() is called _earlier_ in the
+ * boot sequence. This ensures that the ACPI initialisation can find the file.
+ */
rootfs_initcall(populate_rootfs);
+#endif
diff --git a/init/main.c b/init/main.c
index cb81ed116f62..2a78932f6c07 100644
--- a/init/main.c
+++ b/init/main.c
@@ -57,6 +57,7 @@
#include <linux/device.h>
#include <linux/kthread.h>
#include <linux/sched.h>
+#include <linux/signal.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -83,7 +84,6 @@ extern void init_IRQ(void);
extern void fork_init(unsigned long);
extern void mca_init(void);
extern void sbus_init(void);
-extern void signals_init(void);
extern void pidhash_init(void);
extern void pidmap_init(void);
extern void prio_tree_init(void);
@@ -102,6 +102,12 @@ static inline void mark_rodata_ro(void) { }
extern void tc_init(void);
#endif
+#ifdef CONFIG_ACPI_CUSTOM_DSDT_INITRD
+extern int populate_rootfs(void);
+#else
+static inline void populate_rootfs(void) {}
+#endif
+
enum system_states system_state;
EXPORT_SYMBOL(system_state);
@@ -648,6 +654,7 @@ asmlinkage void __init start_kernel(void)
check_bugs();
+ populate_rootfs(); /* For DSDT override from initramfs */
acpi_early_init(); /* before LAPIC and SMP init */
/* Do the rest non-__init'ed, we're now alive */
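start_kernel() can call populate_rootfs() unconditionally because the block added above provides either a real declaration or an empty inline stub, depending on CONFIG_ACPI_CUSTOM_DSDT_INITRD. A generic, hedged sketch of that idiom (CONFIG_MY_FEATURE and my_feature_setup() are made-up names):

/* The "real function or empty stub" pattern used above, in generic form. */
#ifdef CONFIG_MY_FEATURE
extern int my_feature_setup(void);	/* real version built when the option is on */
#else
static inline int my_feature_setup(void) { return 0; }	/* compiles away otherwise */
#endif

void my_boot_path(void)
{
	my_feature_setup();	/* callers need no #ifdef of their own */
}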
diff --git a/ipc/msg.c b/ipc/msg.c
index fdf3db5731ce..ec0c724054b9 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -105,6 +105,7 @@ int msg_init_ns(struct ipc_namespace *ns)
void msg_exit_ns(struct ipc_namespace *ns)
{
struct msg_queue *msq;
+ struct kern_ipc_perm *perm;
int next_id;
int total, in_use;
@@ -113,10 +114,11 @@ void msg_exit_ns(struct ipc_namespace *ns)
in_use = msg_ids(ns).in_use;
for (total = 0, next_id = 0; total < in_use; next_id++) {
- msq = idr_find(&msg_ids(ns).ipcs_idr, next_id);
- if (msq == NULL)
+ perm = idr_find(&msg_ids(ns).ipcs_idr, next_id);
+ if (perm == NULL)
continue;
- ipc_lock_by_ptr(&msq->q_perm);
+ ipc_lock_by_ptr(perm);
+ msq = container_of(perm, struct msg_queue, q_perm);
freeque(ns, msq);
total++;
}
@@ -144,6 +146,9 @@ static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns,
{
struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id);
+ if (IS_ERR(ipcp))
+ return (struct msg_queue *)ipcp;
+
return container_of(ipcp, struct msg_queue, q_perm);
}
@@ -155,6 +160,9 @@ static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);
+ if (IS_ERR(ipcp))
+ return (struct msg_queue *)ipcp;
+
return container_of(ipcp, struct msg_queue, q_perm);
}
@@ -163,6 +171,9 @@ static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
{
struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);
+ if (IS_ERR(ipcp))
+ return (struct msg_queue *)ipcp;
+
return container_of(ipcp, struct msg_queue, q_perm);
}
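The added IS_ERR() checks matter because ipc_lock() and friends return an ERR_PTR on failure; applying container_of() to it would hand callers a bogus pointer offset from the error value. A hedged sketch of the caller side, as it might look inside ipc/msg.c (my_msg_op() is hypothetical):

/* Caller-side view of why the helpers must forward the ERR_PTR untouched. */
#include <linux/err.h>

static int my_msg_op(struct ipc_namespace *ns, int id)
{
	struct msg_queue *msq = msg_lock(ns, id);

	if (IS_ERR(msq))		/* ipc_lock() failed; no lock is held */
		return PTR_ERR(msq);

	/* ... operate on the locked queue ... */
	msg_unlock(msq);
	return 0;
}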
diff --git a/ipc/sem.c b/ipc/sem.c
index 35952c0bae46..d65e285b7e30 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -143,6 +143,7 @@ int sem_init_ns(struct ipc_namespace *ns)
void sem_exit_ns(struct ipc_namespace *ns)
{
struct sem_array *sma;
+ struct kern_ipc_perm *perm;
int next_id;
int total, in_use;
@@ -151,10 +152,11 @@ void sem_exit_ns(struct ipc_namespace *ns)
in_use = sem_ids(ns).in_use;
for (total = 0, next_id = 0; total < in_use; next_id++) {
- sma = idr_find(&sem_ids(ns).ipcs_idr, next_id);
- if (sma == NULL)
+ perm = idr_find(&sem_ids(ns).ipcs_idr, next_id);
+ if (perm == NULL)
continue;
- ipc_lock_by_ptr(&sma->sem_perm);
+ ipc_lock_by_ptr(perm);
+ sma = container_of(perm, struct sem_array, sem_perm);
freeary(ns, sma);
total++;
}
@@ -181,6 +183,9 @@ static inline struct sem_array *sem_lock_check_down(struct ipc_namespace *ns,
{
struct kern_ipc_perm *ipcp = ipc_lock_check_down(&sem_ids(ns), id);
+ if (IS_ERR(ipcp))
+ return (struct sem_array *)ipcp;
+
return container_of(ipcp, struct sem_array, sem_perm);
}
@@ -192,6 +197,9 @@ static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);
+ if (IS_ERR(ipcp))
+ return (struct sem_array *)ipcp;
+
return container_of(ipcp, struct sem_array, sem_perm);
}
@@ -200,6 +208,9 @@ static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
{
struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
+ if (IS_ERR(ipcp))
+ return (struct sem_array *)ipcp;
+
return container_of(ipcp, struct sem_array, sem_perm);
}
diff --git a/ipc/shm.c b/ipc/shm.c
index 3818fae625c5..65c3a294aba5 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -111,6 +111,7 @@ int shm_init_ns(struct ipc_namespace *ns)
void shm_exit_ns(struct ipc_namespace *ns)
{
struct shmid_kernel *shp;
+ struct kern_ipc_perm *perm;
int next_id;
int total, in_use;
@@ -119,10 +120,11 @@ void shm_exit_ns(struct ipc_namespace *ns)
in_use = shm_ids(ns).in_use;
for (total = 0, next_id = 0; total < in_use; next_id++) {
- shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
- if (shp == NULL)
+ perm = idr_find(&shm_ids(ns).ipcs_idr, next_id);
+ if (perm == NULL)
continue;
- ipc_lock_by_ptr(&shp->shm_perm);
+ ipc_lock_by_ptr(perm);
+ shp = container_of(perm, struct shmid_kernel, shm_perm);
do_shm_rmid(ns, shp);
total++;
}
@@ -149,6 +151,9 @@ static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
{
struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id);
+ if (IS_ERR(ipcp))
+ return (struct shmid_kernel *)ipcp;
+
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
@@ -158,6 +163,9 @@ static inline struct shmid_kernel *shm_lock_check_down(
{
struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id);
+ if (IS_ERR(ipcp))
+ return (struct shmid_kernel *)ipcp;
+
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
@@ -169,6 +177,9 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
+ if (IS_ERR(ipcp))
+ return (struct shmid_kernel *)ipcp;
+
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
@@ -177,6 +188,9 @@ static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
{
struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);
+ if (IS_ERR(ipcp))
+ return (struct shmid_kernel *)ipcp;
+
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
diff --git a/ipc/util.c b/ipc/util.c
index 1aa0ebf71bac..76c1f3461e22 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -802,8 +802,8 @@ struct ipc_proc_iter {
/*
* This routine locks the ipc structure found at least at position pos.
*/
-struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
- loff_t *new_pos)
+static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
+ loff_t *new_pos)
{
struct kern_ipc_perm *ipc;
int total, id;
diff --git a/kernel/Makefile b/kernel/Makefile
index db9af707ff5b..685697c0a181 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -8,8 +8,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
- hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \
- utsname.o notifier.o ksysfs.o
+ hrtimer.o rwsem.o nsproxy.o srcu.o \
+ utsname.o notifier.o ksysfs.o pm_qos_params.o
obj-$(CONFIG_SYSCTL) += sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -43,6 +43,7 @@ obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o
obj-$(CONFIG_CPUSETS) += cpuset.o
obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
obj-$(CONFIG_IKCONFIG) += configs.o
+obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
diff --git a/kernel/capability.c b/kernel/capability.c
index efbd9cdce132..39e8193b41ea 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -22,6 +22,37 @@
static DEFINE_SPINLOCK(task_capability_lock);
/*
+ * Leveraged for setting/resetting capabilities
+ */
+
+const kernel_cap_t __cap_empty_set = CAP_EMPTY_SET;
+const kernel_cap_t __cap_full_set = CAP_FULL_SET;
+const kernel_cap_t __cap_init_eff_set = CAP_INIT_EFF_SET;
+
+EXPORT_SYMBOL(__cap_empty_set);
+EXPORT_SYMBOL(__cap_full_set);
+EXPORT_SYMBOL(__cap_init_eff_set);
+
+/*
+ * More recent versions of libcap are available from:
+ *
+ * http://www.kernel.org/pub/linux/libs/security/linux-privs/
+ */
+
+static void warn_legacy_capability_use(void)
+{
+ static int warned;
+ if (!warned) {
+ char name[sizeof(current->comm)];
+
+ printk(KERN_INFO "warning: `%s' uses 32-bit capabilities"
+ " (legacy support in use)\n",
+ get_task_comm(name, current));
+ warned = 1;
+ }
+}
+
+/*
* For sys_getproccap() and sys_setproccap(), any of the three
* capability set pointers may be NULL -- indicating that that set is
* uninteresting and/or not to be changed.
@@ -42,12 +73,21 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
pid_t pid;
__u32 version;
struct task_struct *target;
- struct __user_cap_data_struct data;
+ unsigned tocopy;
+ kernel_cap_t pE, pI, pP;
if (get_user(version, &header->version))
return -EFAULT;
- if (version != _LINUX_CAPABILITY_VERSION) {
+ switch (version) {
+ case _LINUX_CAPABILITY_VERSION_1:
+ warn_legacy_capability_use();
+ tocopy = _LINUX_CAPABILITY_U32S_1;
+ break;
+ case _LINUX_CAPABILITY_VERSION_2:
+ tocopy = _LINUX_CAPABILITY_U32S_2;
+ break;
+ default:
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
@@ -71,14 +111,47 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
} else
target = current;
- ret = security_capget(target, &data.effective, &data.inheritable, &data.permitted);
+ ret = security_capget(target, &pE, &pI, &pP);
out:
read_unlock(&tasklist_lock);
spin_unlock(&task_capability_lock);
- if (!ret && copy_to_user(dataptr, &data, sizeof data))
- return -EFAULT;
+ if (!ret) {
+ struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+ unsigned i;
+
+ for (i = 0; i < tocopy; i++) {
+ kdata[i].effective = pE.cap[i];
+ kdata[i].permitted = pP.cap[i];
+ kdata[i].inheritable = pI.cap[i];
+ }
+
+ /*
+ * Note that in the case tocopy < _LINUX_CAPABILITY_U32S,
+ * we silently drop the upper capabilities here. This
+ * has the effect of making older libcap
+ * implementations implicitly drop upper capability
+ * bits when they perform a: capget/modify/capset
+ * sequence.
+ *
+ * This behavior is considered fail-safe
+ * behavior. Upgrading the application to a newer
+ * version of libcap will enable access to the newer
+ * capabilities.
+ *
+ * An alternative would be to return an error here
+ * (-ERANGE), but that causes legacy applications to
+ * unexpectidly fail; the capget/modify/capset aborts
+ * before modification is attempted and the application
+ * fails.
+ */
+
+ if (copy_to_user(dataptr, kdata, tocopy
+ * sizeof(struct __user_cap_data_struct))) {
+ return -EFAULT;
+ }
+ }
return ret;
}
@@ -167,6 +240,8 @@ static inline int cap_set_all(kernel_cap_t *effective,
*/
asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
{
+ struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+ unsigned i, tocopy;
kernel_cap_t inheritable, permitted, effective;
__u32 version;
struct task_struct *target;
@@ -176,7 +251,15 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
if (get_user(version, &header->version))
return -EFAULT;
- if (version != _LINUX_CAPABILITY_VERSION) {
+ switch (version) {
+ case _LINUX_CAPABILITY_VERSION_1:
+ warn_legacy_capability_use();
+ tocopy = _LINUX_CAPABILITY_U32S_1;
+ break;
+ case _LINUX_CAPABILITY_VERSION_2:
+ tocopy = _LINUX_CAPABILITY_U32S_2;
+ break;
+ default:
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
@@ -188,10 +271,22 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
if (pid && pid != task_pid_vnr(current) && !capable(CAP_SETPCAP))
return -EPERM;
- if (copy_from_user(&effective, &data->effective, sizeof(effective)) ||
- copy_from_user(&inheritable, &data->inheritable, sizeof(inheritable)) ||
- copy_from_user(&permitted, &data->permitted, sizeof(permitted)))
+ if (copy_from_user(&kdata, data, tocopy
+ * sizeof(struct __user_cap_data_struct))) {
return -EFAULT;
+ }
+
+ for (i = 0; i < tocopy; i++) {
+ effective.cap[i] = kdata[i].effective;
+ permitted.cap[i] = kdata[i].permitted;
+ inheritable.cap[i] = kdata[i].inheritable;
+ }
+ while (i < _LINUX_CAPABILITY_U32S) {
+ effective.cap[i] = 0;
+ permitted.cap[i] = 0;
+ inheritable.cap[i] = 0;
+ i++;
+ }
spin_lock(&task_capability_lock);
read_lock(&tasklist_lock);
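With _LINUX_CAPABILITY_VERSION_2 the capability sets are two 32-bit words wide, so user space passes an array of __user_cap_data_struct rather than a single element. A hedged userspace sketch of a 64-bit capget(), assuming kernel headers new enough to define the VERSION_2 constants:

/* Read the current task's 64-bit capability sets via the raw syscall. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

int main(void)
{
	struct __user_cap_header_struct hdr;
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_2];

	memset(&hdr, 0, sizeof(hdr));
	hdr.version = _LINUX_CAPABILITY_VERSION_2;
	hdr.pid = 0;				/* 0 means the calling task */

	if (syscall(SYS_capget, &hdr, data) < 0) {
		perror("capget");
		return 1;
	}
	/* word 1 holds capability bits 32-63, word 0 holds bits 0-31 */
	printf("effective: %08x%08x\n", data[1].effective, data[0].effective);
	return 0;
}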
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 1a3c23936d43..4766bb65e4d9 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -141,7 +141,7 @@ enum {
ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
};
-inline int cgroup_is_releasable(const struct cgroup *cgrp)
+static int cgroup_is_releasable(const struct cgroup *cgrp)
{
const int bits =
(1 << CGRP_RELEASABLE) |
@@ -149,7 +149,7 @@ inline int cgroup_is_releasable(const struct cgroup *cgrp)
return (cgrp->flags & bits) == bits;
}
-inline int notify_on_release(const struct cgroup *cgrp)
+static int notify_on_release(const struct cgroup *cgrp)
{
return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}
@@ -489,7 +489,7 @@ static struct css_set *find_css_set(
* Any task can increment and decrement the count field without lock.
* So in general, code holding cgroup_mutex can't rely on the count
* field not changing. However, if the count goes to zero, then only
- * attach_task() can increment it again. Because a count of zero
+ * cgroup_attach_task() can increment it again. Because a count of zero
* means that no tasks are currently attached, therefore there is no
* way a task attached to that cgroup can fork (the other way to
* increment the count). So code holding cgroup_mutex can safely
@@ -520,17 +520,17 @@ static struct css_set *find_css_set(
* The task_lock() exception
*
* The need for this exception arises from the action of
- * attach_task(), which overwrites one tasks cgroup pointer with
+ * cgroup_attach_task(), which overwrites one task's cgroup pointer with
* another. It does so using cgroup_mutexe, however there are
* several performance critical places that need to reference
* task->cgroup without the expense of grabbing a system global
* mutex. Therefore except as noted below, when dereferencing or, as
- * in attach_task(), modifying a task'ss cgroup pointer we use
+ * in cgroup_attach_task(), modifying a task's cgroup pointer we use
* task_lock(), which acts on a spinlock (task->alloc_lock) already in
* the task_struct routinely used for such matters.
*
* P.S. One more locking exception. RCU is used to guard the
- * update of a tasks cgroup pointer by attach_task()
+ * update of a task's cgroup pointer by cgroup_attach_task()
*/
/**
@@ -586,11 +586,27 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
return inode;
}
+/*
+ * Call subsys's pre_destroy handler.
+ * This is called before css refcnt check.
+ */
+
+static void cgroup_call_pre_destroy(struct cgroup *cgrp)
+{
+ struct cgroup_subsys *ss;
+ for_each_subsys(cgrp->root, ss)
+ if (ss->pre_destroy && cgrp->subsys[ss->subsys_id])
+ ss->pre_destroy(ss, cgrp);
+ return;
+}
+
+
static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
/* is dentry a directory ? if so, kfree() associated cgroup */
if (S_ISDIR(inode->i_mode)) {
struct cgroup *cgrp = dentry->d_fsdata;
+ struct cgroup_subsys *ss;
BUG_ON(!(cgroup_is_removed(cgrp)));
/* It's possible for external users to be holding css
* reference counts on a cgroup; css_put() needs to
@@ -599,6 +615,23 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
* queue the cgroup to be handled by the release
* agent */
synchronize_rcu();
+
+ mutex_lock(&cgroup_mutex);
+ /*
+ * Release the subsystem state objects.
+ */
+ for_each_subsys(cgrp->root, ss) {
+ if (cgrp->subsys[ss->subsys_id])
+ ss->destroy(ss, cgrp);
+ }
+
+ cgrp->root->number_of_cgroups--;
+ mutex_unlock(&cgroup_mutex);
+
+ /* Drop the active superblock reference that we took when we
+ * created the cgroup */
+ deactivate_super(cgrp->root->sb);
+
kfree(cgrp);
}
iput(inode);
@@ -1161,7 +1194,7 @@ static void get_first_subsys(const struct cgroup *cgrp,
* Call holding cgroup_mutex. May take task_lock of
* the task 'pid' during call.
*/
-static int attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
int retval = 0;
struct cgroup_subsys *ss;
@@ -1181,9 +1214,8 @@ static int attach_task(struct cgroup *cgrp, struct task_struct *tsk)
for_each_subsys(root, ss) {
if (ss->can_attach) {
retval = ss->can_attach(ss, cgrp, tsk);
- if (retval) {
+ if (retval)
return retval;
- }
}
}
@@ -1192,9 +1224,8 @@ static int attach_task(struct cgroup *cgrp, struct task_struct *tsk)
* based on its final set of cgroups
*/
newcg = find_css_set(cg, cgrp);
- if (!newcg) {
+ if (!newcg)
return -ENOMEM;
- }
task_lock(tsk);
if (tsk->flags & PF_EXITING) {
@@ -1214,9 +1245,8 @@ static int attach_task(struct cgroup *cgrp, struct task_struct *tsk)
write_unlock(&css_set_lock);
for_each_subsys(root, ss) {
- if (ss->attach) {
+ if (ss->attach)
ss->attach(ss, cgrp, oldcgrp, tsk);
- }
}
set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
synchronize_rcu();
@@ -1239,7 +1269,7 @@ static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf)
if (pid) {
rcu_read_lock();
- tsk = find_task_by_pid(pid);
+ tsk = find_task_by_vpid(pid);
if (!tsk || tsk->flags & PF_EXITING) {
rcu_read_unlock();
return -ESRCH;
@@ -1257,7 +1287,7 @@ static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf)
get_task_struct(tsk);
}
- ret = attach_task(cgrp, tsk);
+ ret = cgroup_attach_task(cgrp, tsk);
put_task_struct(tsk);
return ret;
}
@@ -1329,9 +1359,14 @@ static ssize_t cgroup_common_file_write(struct cgroup *cgrp,
goto out1;
}
buffer[nbytes] = 0; /* nul-terminate */
+ strstrip(buffer); /* strip -just- trailing whitespace */
mutex_lock(&cgroup_mutex);
+ /*
+ * This was already checked for in cgroup_file_write(), but
+ * check again now we're holding cgroup_mutex.
+ */
if (cgroup_is_removed(cgrp)) {
retval = -ENODEV;
goto out2;
@@ -1349,24 +1384,9 @@ static ssize_t cgroup_common_file_write(struct cgroup *cgrp,
clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
break;
case FILE_RELEASE_AGENT:
- {
- struct cgroupfs_root *root = cgrp->root;
- /* Strip trailing newline */
- if (nbytes && (buffer[nbytes-1] == '\n')) {
- buffer[nbytes-1] = 0;
- }
- if (nbytes < sizeof(root->release_agent_path)) {
- /* We never write anything other than '\0'
- * into the last char of release_agent_path,
- * so it always remains a NUL-terminated
- * string */
- strncpy(root->release_agent_path, buffer, nbytes);
- root->release_agent_path[nbytes] = 0;
- } else {
- retval = -ENOSPC;
- }
+ BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+ strcpy(cgrp->root->release_agent_path, buffer);
break;
- }
default:
retval = -EINVAL;
goto out2;
@@ -1387,7 +1407,7 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
struct cftype *cft = __d_cft(file->f_dentry);
struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
- if (!cft)
+ if (!cft || cgroup_is_removed(cgrp))
return -ENODEV;
if (cft->write)
return cft->write(cgrp, cft, file, buf, nbytes, ppos);
@@ -1457,7 +1477,7 @@ static ssize_t cgroup_file_read(struct file *file, char __user *buf,
struct cftype *cft = __d_cft(file->f_dentry);
struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
- if (!cft)
+ if (!cft || cgroup_is_removed(cgrp))
return -ENODEV;
if (cft->read)
@@ -1675,6 +1695,29 @@ static void cgroup_advance_iter(struct cgroup *cgrp,
it->task = cg->tasks.next;
}
+/*
+ * To reduce the fork() overhead for systems that are not actually
+ * using their cgroups capability, we don't maintain the lists running
+ * through each css_set to its tasks until we see the list actually
+ * used - in other words after the first call to cgroup_iter_start().
+ *
+ * The tasklist_lock is not held here, as do_each_thread() and
+ * while_each_thread() are protected by RCU.
+ */
+void cgroup_enable_task_cg_lists(void)
+{
+ struct task_struct *p, *g;
+ write_lock(&css_set_lock);
+ use_task_css_set_links = 1;
+ do_each_thread(g, p) {
+ task_lock(p);
+ if (list_empty(&p->cg_list))
+ list_add(&p->cg_list, &p->cgroups->tasks);
+ task_unlock(p);
+ } while_each_thread(g, p);
+ write_unlock(&css_set_lock);
+}
+
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
{
/*
@@ -1682,18 +1725,9 @@ void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
* we need to enable the list linking each css_set to its
* tasks, and fix up all existing tasks.
*/
- if (!use_task_css_set_links) {
- struct task_struct *p, *g;
- write_lock(&css_set_lock);
- use_task_css_set_links = 1;
- do_each_thread(g, p) {
- task_lock(p);
- if (list_empty(&p->cg_list))
- list_add(&p->cg_list, &p->cgroups->tasks);
- task_unlock(p);
- } while_each_thread(g, p);
- write_unlock(&css_set_lock);
- }
+ if (!use_task_css_set_links)
+ cgroup_enable_task_cg_lists();
+
read_lock(&css_set_lock);
it->cg_link = &cgrp->css_sets;
cgroup_advance_iter(cgrp, it);
@@ -1726,6 +1760,166 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
read_unlock(&css_set_lock);
}
+static inline int started_after_time(struct task_struct *t1,
+ struct timespec *time,
+ struct task_struct *t2)
+{
+ int start_diff = timespec_compare(&t1->start_time, time);
+ if (start_diff > 0) {
+ return 1;
+ } else if (start_diff < 0) {
+ return 0;
+ } else {
+ /*
+ * Arbitrarily, if two processes started at the same
+ * time, we'll say that the lower pointer value
+ * started first. Note that t2 may have exited by now
+ * so this may not be a valid pointer any longer, but
+ * that's fine - it still serves to distinguish
+ * between two tasks started (effectively) simultaneously.
+ */
+ return t1 > t2;
+ }
+}
+
+/*
+ * This function is a callback from heap_insert() and is used to order
+ * the heap.
+ * In this case we order the heap in descending task start time.
+ */
+static inline int started_after(void *p1, void *p2)
+{
+ struct task_struct *t1 = p1;
+ struct task_struct *t2 = p2;
+ return started_after_time(t1, &t2->start_time, t2);
+}
+
+/**
+ * cgroup_scan_tasks - iterate though all the tasks in a cgroup
+ * @scan: struct cgroup_scanner containing arguments for the scan
+ *
+ * Arguments include pointers to callback functions test_task() and
+ * process_task().
+ * Iterate through all the tasks in a cgroup, calling test_task() for each,
+ * and if it returns true, call process_task() for it also.
+ * The test_task pointer may be NULL, meaning always true (select all tasks).
+ * Effectively duplicates cgroup_iter_{start,next,end}()
+ * but does not lock css_set_lock for the call to process_task().
+ * The struct cgroup_scanner may be embedded in any structure of the caller's
+ * creation.
+ * It is guaranteed that process_task() will act on every task that
+ * is a member of the cgroup for the duration of this call. This
+ * function may or may not call process_task() for tasks that exit
+ * or move to a different cgroup during the call, or are forked or
+ * move into the cgroup during the call.
+ *
+ * Note that test_task() may be called with locks held, and may in some
+ * situations be called multiple times for the same task, so it should
+ * be cheap.
+ * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
+ * pre-allocated and will be used for heap operations (and its "gt" member will
+ * be overwritten), else a temporary heap will be used (allocation of which
+ * may cause this function to fail).
+ */
+int cgroup_scan_tasks(struct cgroup_scanner *scan)
+{
+ int retval, i;
+ struct cgroup_iter it;
+ struct task_struct *p, *dropped;
+ /* Never dereference latest_task, since it's not refcounted */
+ struct task_struct *latest_task = NULL;
+ struct ptr_heap tmp_heap;
+ struct ptr_heap *heap;
+ struct timespec latest_time = { 0, 0 };
+
+ if (scan->heap) {
+ /* The caller supplied our heap and pre-allocated its memory */
+ heap = scan->heap;
+ heap->gt = &started_after;
+ } else {
+ /* We need to allocate our own heap memory */
+ heap = &tmp_heap;
+ retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
+ if (retval)
+ /* cannot allocate the heap */
+ return retval;
+ }
+
+ again:
+ /*
+ * Scan tasks in the cgroup, using the scanner's "test_task" callback
+ * to determine which are of interest, and using the scanner's
+ * "process_task" callback to process any of them that need an update.
+ * Since we don't want to hold any locks during the task updates,
+ * gather tasks to be processed in a heap structure.
+ * The heap is sorted by descending task start time.
+ * If the statically-sized heap fills up, we overflow tasks that
+ * started later, and in future iterations only consider tasks that
+ * started after the latest task in the previous pass. This
+ * guarantees forward progress and that we don't miss any tasks.
+ */
+ heap->size = 0;
+ cgroup_iter_start(scan->cg, &it);
+ while ((p = cgroup_iter_next(scan->cg, &it))) {
+ /*
+ * Only affect tasks that qualify per the caller's callback,
+ * if he provided one
+ */
+ if (scan->test_task && !scan->test_task(p, scan))
+ continue;
+ /*
+ * Only process tasks that started after the last task
+ * we processed
+ */
+ if (!started_after_time(p, &latest_time, latest_task))
+ continue;
+ dropped = heap_insert(heap, p);
+ if (dropped == NULL) {
+ /*
+ * The new task was inserted; the heap wasn't
+ * previously full
+ */
+ get_task_struct(p);
+ } else if (dropped != p) {
+ /*
+ * The new task was inserted, and pushed out a
+ * different task
+ */
+ get_task_struct(p);
+ put_task_struct(dropped);
+ }
+ /*
+ * Else the new task was newer than anything already in
+ * the heap and wasn't inserted
+ */
+ }
+ cgroup_iter_end(scan->cg, &it);
+
+ if (heap->size) {
+ for (i = 0; i < heap->size; i++) {
+ struct task_struct *p = heap->ptrs[i];
+ if (i == 0) {
+ latest_time = p->start_time;
+ latest_task = p;
+ }
+ /* Process the task per the caller's callback */
+ scan->process_task(p, scan);
+ put_task_struct(p);
+ }
+ /*
+ * If we had to process any tasks at all, scan again
+ * in case some of them were in the middle of forking
+ * children that didn't get processed.
+ * Not the most efficient way to do it, but it avoids
+ * having to take callback_mutex in the fork path
+ */
+ goto again;
+ }
+ if (heap == &tmp_heap)
+ heap_free(&tmp_heap);
+ return 0;
+}
+
/*
* Stuff for reading the 'tasks' file.
*
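cgroup_scan_tasks() takes everything through struct cgroup_scanner, so a controller only supplies two callbacks and, optionally, a pre-allocated heap. A hedged sketch of a caller, mirroring the cpuset conversion later in this patch; the callback names and the PF_EXITING filter are illustrative only:

/* Sketch of driving cgroup_scan_tasks(); names are hypothetical. */
static int my_test(struct task_struct *p, struct cgroup_scanner *scan)
{
	return !(p->flags & PF_EXITING);	/* only tasks still alive */
}

static void my_process(struct task_struct *p, struct cgroup_scanner *scan)
{
	/* update p here; called without css_set_lock held */
}

static void my_update_cgroup(struct cgroup *cgrp)
{
	struct cgroup_scanner scan = {
		.cg		= cgrp,
		.test_task	= my_test,
		.process_task	= my_process,
		.heap		= NULL,	/* let the scanner allocate a temporary heap */
	};

	cgroup_scan_tasks(&scan);
}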
@@ -1761,7 +1955,7 @@ static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp)
while ((tsk = cgroup_iter_next(cgrp, &it))) {
if (unlikely(n == npids))
break;
- pidarray[n++] = task_pid_nr(tsk);
+ pidarray[n++] = task_pid_vnr(tsk);
}
cgroup_iter_end(cgrp, &it);
return n;
@@ -2126,9 +2320,8 @@ static inline int cgroup_has_css_refs(struct cgroup *cgrp)
* matter, since it can only happen if the cgroup
* has been deleted and hence no longer needs the
* release agent to be called anyway. */
- if (css && atomic_read(&css->refcnt)) {
+ if (css && atomic_read(&css->refcnt))
return 1;
- }
}
return 0;
}
@@ -2138,7 +2331,6 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
struct cgroup *cgrp = dentry->d_fsdata;
struct dentry *d;
struct cgroup *parent;
- struct cgroup_subsys *ss;
struct super_block *sb;
struct cgroupfs_root *root;
@@ -2157,17 +2349,19 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
parent = cgrp->parent;
root = cgrp->root;
sb = root->sb;
+ /*
+ * Call pre_destroy handlers of subsys
+ */
+ cgroup_call_pre_destroy(cgrp);
+ /*
+ * Notify subsystems that an rmdir() request has arrived.
+ */
if (cgroup_has_css_refs(cgrp)) {
mutex_unlock(&cgroup_mutex);
return -EBUSY;
}
- for_each_subsys(root, ss) {
- if (cgrp->subsys[ss->subsys_id])
- ss->destroy(ss, cgrp);
- }
-
spin_lock(&release_list_lock);
set_bit(CGRP_REMOVED, &cgrp->flags);
if (!list_empty(&cgrp->release_list))
@@ -2182,15 +2376,11 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
cgroup_d_remove_dir(d);
dput(d);
- root->number_of_cgroups--;
set_bit(CGRP_RELEASABLE, &parent->flags);
check_for_release(parent);
mutex_unlock(&cgroup_mutex);
- /* Drop the active superblock reference that we took when we
- * created the cgroup */
- deactivate_super(sb);
return 0;
}
@@ -2324,7 +2514,7 @@ out:
* - Used for /proc/<pid>/cgroup.
* - No need to task_lock(tsk) on this tsk->cgroup reference, as it
* doesn't really matter if tsk->cgroup changes after we read it,
- * and we take cgroup_mutex, keeping attach_task() from changing it
+ * and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
* anyway. No need to check that tsk->cgroup != NULL, thanks to
* the_top_cgroup_hack in cgroup_exit(), which sets an exiting tasks
* cgroup to top_cgroup.
@@ -2435,7 +2625,7 @@ static struct file_operations proc_cgroupstats_operations = {
* A pointer to the shared css_set was automatically copied in
* fork.c by dup_task_struct(). However, we ignore that copy, since
* it was not made under the protection of RCU or cgroup_mutex, so
- * might no longer be a valid cgroup pointer. attach_task() might
+ * might no longer be a valid cgroup pointer. cgroup_attach_task() might
* have already changed current->cgroups, allowing the previously
* referenced cgroup group to be removed and freed.
*
@@ -2514,8 +2704,8 @@ void cgroup_post_fork(struct task_struct *child)
* attach us to a different cgroup, decrementing the count on
* the first cgroup that we never incremented. But in this case,
* top_cgroup isn't going away, and either task has PF_EXITING set,
- * which wards off any attach_task() attempts, or task is a failed
- * fork, never visible to attach_task.
+ * which wards off any cgroup_attach_task() attempts, or task is a failed
+ * fork, never visible to cgroup_attach_task.
*
*/
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
@@ -2655,7 +2845,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
}
/* All seems fine. Finish by moving the task into the new cgroup */
- ret = attach_task(child, tsk);
+ ret = cgroup_attach_task(child, tsk);
mutex_unlock(&cgroup_mutex);
out_release:
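After this change ->pre_destroy() runs from cgroup_rmdir() before the reference check, while ->destroy() and the superblock release are deferred to cgroup_diput(). A hedged sketch of a minimal subsystem supplying both hooks; my_state, my_subsys_id and the field layout are assumptions based only on how this patch invokes the callbacks:

/* Hypothetical minimal subsystem using the pre_destroy/destroy split. */
#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/slab.h>

#define my_subsys_id 0	/* placeholder: real ids are generated from cgroup_subsys.h */

struct my_state {
	struct cgroup_subsys_state css;
	/* controller-private bookkeeping would live here */
};

static struct cgroup_subsys_state *my_create(struct cgroup_subsys *ss,
					     struct cgroup *cgrp)
{
	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	return st ? &st->css : ERR_PTR(-ENOMEM);
}

static void my_pre_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	/* drop references the controller itself holds so the css refcnt can reach 0 */
}

static void my_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	kfree(container_of(cgrp->subsys[my_subsys_id], struct my_state, css));
}

struct cgroup_subsys my_subsys = {
	.name		= "my",
	.subsys_id	= my_subsys_id,
	.create		= my_create,
	.pre_destroy	= my_pre_destroy,
	.destroy	= my_destroy,
};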
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index cfaf6419d817..67b2bfe27814 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -38,7 +38,6 @@
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
-#include <linux/prio_heap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
@@ -56,6 +55,8 @@
#include <asm/atomic.h>
#include <linux/mutex.h>
#include <linux/kfifo.h>
+#include <linux/workqueue.h>
+#include <linux/cgroup.h>
/*
* Tracks how many cpusets are currently defined in system.
@@ -64,7 +65,7 @@
*/
int number_of_cpusets __read_mostly;
-/* Retrieve the cpuset from a cgroup */
+/* Forward declare cgroup structures */
struct cgroup_subsys cpuset_subsys;
struct cpuset;
@@ -96,6 +97,9 @@ struct cpuset {
/* partition number for rebuild_sched_domains() */
int pn;
+
+ /* used for walking a cpuset hierarchy */
+ struct list_head stack_list;
};
/* Retrieve the cpuset for a cgroup */
@@ -111,7 +115,10 @@ static inline struct cpuset *task_cs(struct task_struct *task)
return container_of(task_subsys_state(task, cpuset_subsys_id),
struct cpuset, css);
}
-
+struct cpuset_hotplug_scanner {
+ struct cgroup_scanner scan;
+ struct cgroup *to;
+};
/* bits in struct cpuset flags field */
typedef enum {
@@ -160,17 +167,17 @@ static inline int is_spread_slab(const struct cpuset *cs)
* number, and avoid having to lock and reload mems_allowed unless
* the cpuset they're using changes generation.
*
- * A single, global generation is needed because attach_task() could
+ * A single, global generation is needed because cpuset_attach_task() could
* reattach a task to a different cpuset, which must not have its
* generation numbers aliased with those of that tasks previous cpuset.
*
* Generations are needed for mems_allowed because one task cannot
- * modify anothers memory placement. So we must enable every task,
+ * modify another's memory placement. So we must enable every task,
* on every visit to __alloc_pages(), to efficiently check whether
* its current->cpuset->mems_allowed has changed, requiring an update
* of its current->mems_allowed.
*
- * Since cpuset_mems_generation is guarded by manage_mutex,
+ * Since writes to cpuset_mems_generation are guarded by the cgroup lock
* there is no need to mark it atomic.
*/
static int cpuset_mems_generation;
@@ -182,17 +189,20 @@ static struct cpuset top_cpuset = {
};
/*
- * We have two global cpuset mutexes below. They can nest.
- * It is ok to first take manage_mutex, then nest callback_mutex. We also
- * require taking task_lock() when dereferencing a tasks cpuset pointer.
- * See "The task_lock() exception", at the end of this comment.
+ * There are two global mutexes guarding cpuset structures. The first
+ * is the main control groups cgroup_mutex, accessed via
+ * cgroup_lock()/cgroup_unlock(). The second is the cpuset-specific
+ * callback_mutex, below. They can nest. It is ok to first take
+ * cgroup_mutex, then nest callback_mutex. We also require taking
+ * task_lock() when dereferencing a task's cpuset pointer. See "The
+ * task_lock() exception", at the end of this comment.
*
* A task must hold both mutexes to modify cpusets. If a task
- * holds manage_mutex, then it blocks others wanting that mutex,
+ * holds cgroup_mutex, then it blocks others wanting that mutex,
* ensuring that it is the only task able to also acquire callback_mutex
* and be able to modify cpusets. It can perform various checks on
* the cpuset structure first, knowing nothing will change. It can
- * also allocate memory while just holding manage_mutex. While it is
+ * also allocate memory while just holding cgroup_mutex. While it is
* performing these checks, various callback routines can briefly
* acquire callback_mutex to query cpusets. Once it is ready to make
* the changes, it takes callback_mutex, blocking everyone else.
@@ -208,60 +218,16 @@ static struct cpuset top_cpuset = {
* The task_struct fields mems_allowed and mems_generation may only
* be accessed in the context of that task, so require no locks.
*
- * Any task can increment and decrement the count field without lock.
- * So in general, code holding manage_mutex or callback_mutex can't rely
- * on the count field not changing. However, if the count goes to
- * zero, then only attach_task(), which holds both mutexes, can
- * increment it again. Because a count of zero means that no tasks
- * are currently attached, therefore there is no way a task attached
- * to that cpuset can fork (the other way to increment the count).
- * So code holding manage_mutex or callback_mutex can safely assume that
- * if the count is zero, it will stay zero. Similarly, if a task
- * holds manage_mutex or callback_mutex on a cpuset with zero count, it
- * knows that the cpuset won't be removed, as cpuset_rmdir() needs
- * both of those mutexes.
- *
* The cpuset_common_file_write handler for operations that modify
- * the cpuset hierarchy holds manage_mutex across the entire operation,
+ * the cpuset hierarchy holds cgroup_mutex across the entire operation,
* single threading all such cpuset modifications across the system.
*
* The cpuset_common_file_read() handlers only hold callback_mutex across
* small pieces of code, such as when reading out possibly multi-word
* cpumasks and nodemasks.
*
- * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't
- * (usually) take either mutex. These are the two most performance
- * critical pieces of code here. The exception occurs on cpuset_exit(),
- * when a task in a notify_on_release cpuset exits. Then manage_mutex
- * is taken, and if the cpuset count is zero, a usermode call made
- * to /sbin/cpuset_release_agent with the name of the cpuset (path
- * relative to the root of cpuset file system) as the argument.
- *
- * A cpuset can only be deleted if both its 'count' of using tasks
- * is zero, and its list of 'children' cpusets is empty. Since all
- * tasks in the system use _some_ cpuset, and since there is always at
- * least one task in the system (init), therefore, top_cpuset
- * always has either children cpusets and/or using tasks. So we don't
- * need a special hack to ensure that top_cpuset cannot be deleted.
- *
- * The above "Tale of Two Semaphores" would be complete, but for:
- *
- * The task_lock() exception
- *
- * The need for this exception arises from the action of attach_task(),
- * which overwrites one tasks cpuset pointer with another. It does
- * so using both mutexes, however there are several performance
- * critical places that need to reference task->cpuset without the
- * expense of grabbing a system global mutex. Therefore except as
- * noted below, when dereferencing or, as in attach_task(), modifying
- * a tasks cpuset pointer we use task_lock(), which acts on a spinlock
- * (task->alloc_lock) already in the task_struct routinely used for
- * such matters.
- *
- * P.S. One more locking exception. RCU is used to guard the
- * update of a tasks cpuset pointer by attach_task() and the
- * access of task->cpuset->mems_generation via that pointer in
- * the routine cpuset_update_task_memory_state().
+ * Accessing a task's cpuset should be done in accordance with the
+ * guidelines for accessing subsystem state in kernel/cgroup.c
*/
static DEFINE_MUTEX(callback_mutex);
@@ -354,15 +320,14 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
* Do not call this routine if in_interrupt().
*
* Call without callback_mutex or task_lock() held. May be
- * called with or without manage_mutex held. Thanks in part to
- * 'the_top_cpuset_hack', the tasks cpuset pointer will never
+ * called with or without cgroup_mutex held. Thanks in part to
+ * 'the_top_cpuset_hack', the task's cpuset pointer will never
* be NULL. This routine also might acquire callback_mutex and
* current->mm->mmap_sem during call.
*
* Reading current->cpuset->mems_generation doesn't need task_lock
* to guard the current->cpuset derefence, because it is guarded
- * from concurrent freeing of current->cpuset by attach_task(),
- * using RCU.
+ * from concurrent freeing of current->cpuset using RCU.
*
* The rcu_dereference() is technically probably not needed,
* as I don't actually mind if I see a new cpuset pointer but
@@ -424,7 +389,7 @@ void cpuset_update_task_memory_state(void)
*
* One cpuset is a subset of another if all its allowed CPUs and
* Memory Nodes are a subset of the other, and its exclusive flags
- * are only set if the other's are set. Call holding manage_mutex.
+ * are only set if the other's are set. Call holding cgroup_mutex.
*/
static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
@@ -442,7 +407,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
* If we replaced the flag and mask values of the current cpuset
* (cur) with those values in the trial cpuset (trial), would
* our various subset and exclusive rules still be valid? Presumes
- * manage_mutex held.
+ * cgroup_mutex held.
*
* 'cur' is the address of an actual, in-use cpuset. Operations
* such as list traversal that depend on the actual address of the
@@ -476,7 +441,10 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
if (!is_cpuset_subset(trial, par))
return -EACCES;
- /* If either I or some sibling (!= me) is exclusive, we can't overlap */
+ /*
+ * If either I or some sibling (!= me) is exclusive, we can't
+ * overlap
+ */
list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
c = cgroup_cs(cont);
if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
@@ -732,22 +700,50 @@ static inline int started_after(void *p1, void *p2)
return started_after_time(t1, &t2->start_time, t2);
}
-/*
- * Call with manage_mutex held. May take callback_mutex during call.
+/**
+ * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
+ * @tsk: task to test
+ * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
+ *
+ * Call with cgroup_mutex held. May take callback_mutex during call.
+ * Called for each task in a cgroup by cgroup_scan_tasks().
+ * Return nonzero if this task's cpus_allowed mask should be changed (in other
+ * words, if its mask is not equal to its cpuset's mask).
+ */
+int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
+{
+ return !cpus_equal(tsk->cpus_allowed,
+ (cgroup_cs(scan->cg))->cpus_allowed);
+}
+
+/**
+ * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
+ * @tsk: task to test
+ * @scan: struct cgroup_scanner containing the cgroup of the task
+ *
+ * Called by cgroup_scan_tasks() for each task in a cgroup whose
+ * cpus_allowed mask needs to be changed.
+ *
+ * We don't need to re-check for the cgroup/cpuset membership, since we're
+ * holding cgroup_lock() at this point.
*/
+void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
+{
+ set_cpus_allowed(tsk, (cgroup_cs(scan->cg))->cpus_allowed);
+}
+/**
+ * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
+ * @cs: the cpuset to consider
+ * @buf: buffer of cpu numbers written to this cpuset
+ */
static int update_cpumask(struct cpuset *cs, char *buf)
{
struct cpuset trialcs;
- int retval, i;
- int is_load_balanced;
- struct cgroup_iter it;
- struct cgroup *cgrp = cs->css.cgroup;
- struct task_struct *p, *dropped;
- /* Never dereference latest_task, since it's not refcounted */
- struct task_struct *latest_task = NULL;
+ struct cgroup_scanner scan;
struct ptr_heap heap;
- struct timespec latest_time = { 0, 0 };
+ int retval;
+ int is_load_balanced;
/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
if (cs == &top_cpuset)
@@ -756,7 +752,7 @@ static int update_cpumask(struct cpuset *cs, char *buf)
trialcs = *cs;
/*
- * An empty cpus_allowed is ok iff there are no tasks in the cpuset.
+ * An empty cpus_allowed is ok only if the cpuset has no tasks.
* Since cpulist_parse() fails on an empty mask, we special case
* that parsing. The validate_change() call ensures that cpusets
* with tasks have cpus.
@@ -777,6 +773,7 @@ static int update_cpumask(struct cpuset *cs, char *buf)
/* Nothing to do if the cpus didn't change */
if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
return 0;
+
retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after);
if (retval)
return retval;
@@ -787,62 +784,19 @@ static int update_cpumask(struct cpuset *cs, char *buf)
cs->cpus_allowed = trialcs.cpus_allowed;
mutex_unlock(&callback_mutex);
- again:
/*
* Scan tasks in the cpuset, and update the cpumasks of any
- * that need an update. Since we can't call set_cpus_allowed()
- * while holding tasklist_lock, gather tasks to be processed
- * in a heap structure. If the statically-sized heap fills up,
- * overflow tasks that started later, and in future iterations
- * only consider tasks that started after the latest task in
- * the previous pass. This guarantees forward progress and
- * that we don't miss any tasks
+ * that need an update.
*/
- heap.size = 0;
- cgroup_iter_start(cgrp, &it);
- while ((p = cgroup_iter_next(cgrp, &it))) {
- /* Only affect tasks that don't have the right cpus_allowed */
- if (cpus_equal(p->cpus_allowed, cs->cpus_allowed))
- continue;
- /*
- * Only process tasks that started after the last task
- * we processed
- */
- if (!started_after_time(p, &latest_time, latest_task))
- continue;
- dropped = heap_insert(&heap, p);
- if (dropped == NULL) {
- get_task_struct(p);
- } else if (dropped != p) {
- get_task_struct(p);
- put_task_struct(dropped);
- }
- }
- cgroup_iter_end(cgrp, &it);
- if (heap.size) {
- for (i = 0; i < heap.size; i++) {
- struct task_struct *p = heap.ptrs[i];
- if (i == 0) {
- latest_time = p->start_time;
- latest_task = p;
- }
- set_cpus_allowed(p, cs->cpus_allowed);
- put_task_struct(p);
- }
- /*
- * If we had to process any tasks at all, scan again
- * in case some of them were in the middle of forking
- * children that didn't notice the new cpumask
- * restriction. Not the most efficient way to do it,
- * but it avoids having to take callback_mutex in the
- * fork path
- */
- goto again;
- }
+ scan.cg = cs->css.cgroup;
+ scan.test_task = cpuset_test_cpumask;
+ scan.process_task = cpuset_change_cpumask;
+ scan.heap = &heap;
+ cgroup_scan_tasks(&scan);
heap_free(&heap);
+
if (is_load_balanced)
rebuild_sched_domains();
-
return 0;
}
@@ -854,11 +808,11 @@ static int update_cpumask(struct cpuset *cs, char *buf)
* Temporarilly set tasks mems_allowed to target nodes of migration,
* so that the migration code can allocate pages on these nodes.
*
- * Call holding manage_mutex, so our current->cpuset won't change
- * during this call, as manage_mutex holds off any attach_task()
+ * Call holding cgroup_mutex, so current's cpuset won't change
+ * during this call, as cgroup_mutex holds off any cpuset_attach()
* calls. Therefore we don't need to take task_lock around the
* call to guarantee_online_mems(), as we know no one is changing
- * our tasks cpuset.
+ * our task's cpuset.
*
* Hold callback_mutex around the two modifications of our tasks
* mems_allowed to synchronize with cpuset_mems_allowed().
@@ -903,7 +857,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
* the cpuset is marked 'memory_migrate', migrate the tasks
* pages to the new memory.
*
- * Call with manage_mutex held. May take callback_mutex during call.
+ * Call with cgroup_mutex held. May take callback_mutex during call.
* Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
* lock each such tasks mm->mmap_sem, scan its vma's and rebind
* their mempolicies to the cpusets new mems_allowed.
@@ -1016,7 +970,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
* tasklist_lock. Forks can happen again now - the mpol_copy()
* cpuset_being_rebound check will catch such forks, and rebind
* their vma mempolicies too. Because we still hold the global
- * cpuset manage_mutex, we know that no other rebind effort will
+ * cgroup_mutex, we know that no other rebind effort will
* be contending for the global variable cpuset_being_rebound.
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
@@ -1031,7 +985,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
mmput(mm);
}
- /* We're done rebinding vma's to this cpusets new mems_allowed. */
+ /* We're done rebinding vmas to this cpuset's new mems_allowed. */
kfree(mmarray);
cpuset_being_rebound = NULL;
retval = 0;
@@ -1045,7 +999,7 @@ int current_cpuset_is_being_rebound(void)
}
/*
- * Call with manage_mutex held.
+ * Call with cgroup_mutex held.
*/
static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
@@ -1066,7 +1020,7 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
* cs: the cpuset to update
* buf: the buffer where we read the 0 or 1
*
- * Call with manage_mutex held.
+ * Call with cgroup_mutex held.
*/
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
@@ -1200,6 +1154,7 @@ static int fmeter_getrate(struct fmeter *fmp)
return val;
}
+/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
static int cpuset_can_attach(struct cgroup_subsys *ss,
struct cgroup *cont, struct task_struct *tsk)
{
@@ -1547,7 +1502,8 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
* If this becomes a problem for some users who wish to
* allow that scenario, then cpuset_post_clone() could be
* changed to grant parent->cpus_allowed-sibling_cpus_exclusive
- * (and likewise for mems) to the new cgroup.
+ * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
+ * held.
*/
static void cpuset_post_clone(struct cgroup_subsys *ss,
struct cgroup *cgroup)
@@ -1571,11 +1527,8 @@ static void cpuset_post_clone(struct cgroup_subsys *ss,
/*
* cpuset_create - create a cpuset
- * parent: cpuset that will be parent of the new cpuset.
- * name: name of the new cpuset. Will be strcpy'ed.
- * mode: mode to set on new inode
- *
- * Must be called with the mutex on the parent inode held
+ * ss: cpuset cgroup subsystem
+ * cont: control group that the new cpuset will be part of
*/
static struct cgroup_subsys_state *cpuset_create(
@@ -1687,53 +1640,140 @@ int __init cpuset_init(void)
return 0;
}
+/**
+ * cpuset_do_move_task - move a given task to another cpuset
+ * @tsk: the task to move
+ * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
+ *
+ * Called by cgroup_scan_tasks() for each task in a cgroup; moves the task
+ * to the cgroup recorded in the enclosing struct cpuset_hotplug_scanner.
+ */
+void cpuset_do_move_task(struct task_struct *tsk, struct cgroup_scanner *scan)
+{
+ struct cpuset_hotplug_scanner *chsp;
+
+ chsp = container_of(scan, struct cpuset_hotplug_scanner, scan);
+ cgroup_attach_task(chsp->to, tsk);
+}
+
+/**
+ * move_member_tasks_to_cpuset - move tasks from one cpuset to another
+ * @from: cpuset in which the tasks currently reside
+ * @to: cpuset to which the tasks will be moved
+ *
+ * Called with cgroup_mutex held
+ * callback_mutex must not be held, as cpuset_attach() will take it.
+ *
+ * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * calling callback functions for each.
+ */
+static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
+{
+ struct cpuset_hotplug_scanner scan;
+
+ scan.scan.cg = from->css.cgroup;
+ scan.scan.test_task = NULL; /* select all tasks in cgroup */
+ scan.scan.process_task = cpuset_do_move_task;
+ scan.scan.heap = NULL;
+ scan.to = to->css.cgroup;
+
+ if (cgroup_scan_tasks((struct cgroup_scanner *)&scan))
+ printk(KERN_ERR "move_member_tasks_to_cpuset: "
+ "cgroup_scan_tasks failed\n");
+}
+
/*
* If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
* or memory nodes, we need to walk over the cpuset hierarchy,
* removing that CPU or node from all cpusets. If this removes the
- * last CPU or node from a cpuset, then the guarantee_online_cpus()
- * or guarantee_online_mems() code will use that emptied cpusets
- * parent online CPUs or nodes. Cpusets that were already empty of
- * CPUs or nodes are left empty.
+ * last CPU or node from a cpuset, then move the tasks in the empty
+ * cpuset to its next-highest non-empty parent.
*
- * This routine is intentionally inefficient in a couple of regards.
- * It will check all cpusets in a subtree even if the top cpuset of
- * the subtree has no offline CPUs or nodes. It checks both CPUs and
- * nodes, even though the caller could have been coded to know that
- * only one of CPUs or nodes needed to be checked on a given call.
- * This was done to minimize text size rather than cpu cycles.
+ * Called with cgroup_mutex held
+ * callback_mutex must not be held, as cpuset_attach() will take it.
+ */
+static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
+{
+ struct cpuset *parent;
+
+ /*
+ * A cgroup's css_sets list is non-empty iff there are tasks
+ * in the cpuset; it is empty if there are none. (The
+ * cs->css.refcnt always appears to be 0, so it can't be used
+ * for this check.)
+ */
+ if (list_empty(&cs->css.cgroup->css_sets))
+ return;
+
+ /*
+ * Find its next-highest non-empty parent (the top cpuset
+ * always has online cpus, so it can't be empty).
+ */
+ parent = cs->parent;
+ while (cpus_empty(parent->cpus_allowed) ||
+ nodes_empty(parent->mems_allowed))
+ parent = parent->parent;
+
+ move_member_tasks_to_cpuset(cs, parent);
+}
+
+/*
+ * Walk the specified cpuset subtree and look for empty cpusets.
+ * The tasks of any such cpuset must be moved to a non-empty ancestor cpuset.
+ *
+ * Called with cgroup_mutex held. We take callback_mutex to modify
+ * cpus_allowed and mems_allowed.
*
- * Call with both manage_mutex and callback_mutex held.
+ * This walk processes the tree from top to bottom, completing one layer
+ * before dropping down to the next. It always processes a node before
+ * any of its children.
*
- * Recursive, on depth of cpuset subtree.
+ * For now, since we lack memory hot unplug, we'll never see a cpuset
+ * that has tasks along with an empty 'mems'. But if we did see such
+ * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
*/
-
-static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
+static void scan_for_empty_cpusets(const struct cpuset *root)
{
+ struct cpuset *cp; /* scans cpusets being updated */
+ struct cpuset *child; /* scans child cpusets of cp */
+ struct list_head queue;
struct cgroup *cont;
- struct cpuset *c;
- /* Each of our child cpusets mems must be online */
- list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
- c = cgroup_cs(cont);
- guarantee_online_cpus_mems_in_subtree(c);
- if (!cpus_empty(c->cpus_allowed))
- guarantee_online_cpus(c, &c->cpus_allowed);
- if (!nodes_empty(c->mems_allowed))
- guarantee_online_mems(c, &c->mems_allowed);
+ INIT_LIST_HEAD(&queue);
+
+ list_add_tail((struct list_head *)&root->stack_list, &queue);
+
+ while (!list_empty(&queue)) {
+ cp = container_of(queue.next, struct cpuset, stack_list);
+ list_del(queue.next);
+ list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
+ child = cgroup_cs(cont);
+ list_add_tail(&child->stack_list, &queue);
+ }
+ cont = cp->css.cgroup;
+
+ /* Continue past cpusets with all cpus, mems online */
+ if (cpus_subset(cp->cpus_allowed, cpu_online_map) &&
+ nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
+ continue;
+
+ /* Remove offline cpus and mems from this cpuset. */
+ mutex_lock(&callback_mutex);
+ cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
+ nodes_and(cp->mems_allowed, cp->mems_allowed,
+ node_states[N_HIGH_MEMORY]);
+ mutex_unlock(&callback_mutex);
+
+ /* Move tasks from the empty cpuset to a parent */
+ if (cpus_empty(cp->cpus_allowed) ||
+ nodes_empty(cp->mems_allowed))
+ remove_tasks_in_empty_cpuset(cp);
}
}
/*
* The cpus_allowed and mems_allowed nodemasks in the top_cpuset track
* cpu_online_map and node_states[N_HIGH_MEMORY]. Force the top cpuset to
- * track what's online after any CPU or memory node hotplug or unplug
- * event.
- *
- * To ensure that we don't remove a CPU or node from the top cpuset
- * that is currently in use by a child cpuset (which would violate
- * the rule that cpusets must be subsets of their parent), we first
- * call the recursive routine guarantee_online_cpus_mems_in_subtree().
+ * track what's online after any CPU or memory node hotplug or unplug event.
*
* Since there are two callers of this routine, one for CPU hotplug
* events and one for memory node hotplug events, we could have coded
@@ -1744,13 +1784,11 @@ static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
static void common_cpu_mem_hotplug_unplug(void)
{
cgroup_lock();
- mutex_lock(&callback_mutex);
- guarantee_online_cpus_mems_in_subtree(&top_cpuset);
top_cpuset.cpus_allowed = cpu_online_map;
top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+ scan_for_empty_cpusets(&top_cpuset);
- mutex_unlock(&callback_mutex);
cgroup_unlock();
}
@@ -1826,7 +1864,7 @@ cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
/**
* cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
- * Must be called with callback_mutex held.
+ * Must be called with callback_mutex held.
**/
cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk)
{
@@ -2163,10 +2201,8 @@ void __cpuset_memory_pressure_bump(void)
* - Used for /proc/<pid>/cpuset.
* - No need to task_lock(tsk) on this tsk->cpuset reference, as it
* doesn't really matter if tsk->cpuset changes after we read it,
- * and we take manage_mutex, keeping attach_task() from changing it
- * anyway. No need to check that tsk->cpuset != NULL, thanks to
- * the_top_cpuset_hack in cpuset_exit(), which sets an exiting tasks
- * cpuset to top_cpuset.
+ * and we take cgroup_mutex, keeping cpuset_attach() from changing it
+ * anyway.
*/
static int proc_cpuset_show(struct seq_file *m, void *unused_v)
{
diff --git a/kernel/exit.c b/kernel/exit.c
index 9e459fefda77..eb9934a82fc1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1083,11 +1083,12 @@ do_group_exit(int exit_code)
struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
- if (sig->flags & SIGNAL_GROUP_EXIT)
+ if (signal_group_exit(sig))
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
sig->group_exit_code = exit_code;
+ sig->flags = SIGNAL_GROUP_EXIT;
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
@@ -1589,8 +1590,6 @@ repeat:
goto repeat;
if (retval != 0) /* He released the lock. */
goto end;
- } else if (p->exit_state == EXIT_DEAD) {
- continue;
} else if (p->exit_state == EXIT_ZOMBIE) {
/*
* Eligible but we cannot release it yet:
@@ -1605,7 +1604,7 @@ repeat:
/* He released the lock. */
if (retval != 0)
goto end;
- } else {
+ } else if (p->exit_state != EXIT_DEAD) {
check_continued:
/*
* It's running now, so it might later
diff --git a/kernel/fork.c b/kernel/fork.c
index 05e0b6f4365b..b2ef8e4fad70 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -40,6 +40,7 @@
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
+#include <linux/memcontrol.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
@@ -325,7 +326,7 @@ static inline int mm_alloc_pgd(struct mm_struct * mm)
static inline void mm_free_pgd(struct mm_struct * mm)
{
- pgd_free(mm->pgd);
+ pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm) (0)
@@ -340,7 +341,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
#include <linux/init_task.h>
-static struct mm_struct * mm_init(struct mm_struct * mm)
+static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
{
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
@@ -357,11 +358,14 @@ static struct mm_struct * mm_init(struct mm_struct * mm)
mm->ioctx_list = NULL;
mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->cached_hole_size = ~0UL;
+ mm_init_cgroup(mm, p);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
return mm;
}
+
+ mm_free_cgroup(mm);
free_mm(mm);
return NULL;
}
@@ -376,7 +380,7 @@ struct mm_struct * mm_alloc(void)
mm = allocate_mm();
if (mm) {
memset(mm, 0, sizeof(*mm));
- mm = mm_init(mm);
+ mm = mm_init(mm, current);
}
return mm;
}
@@ -390,6 +394,7 @@ void fastcall __mmdrop(struct mm_struct *mm)
{
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
+ mm_free_cgroup(mm);
destroy_context(mm);
free_mm(mm);
}
@@ -511,7 +516,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
mm->token_priority = 0;
mm->last_interval = 0;
- if (!mm_init(mm))
+ if (!mm_init(mm, tsk))
goto fail_nomem;
if (init_new_context(tsk, mm))
@@ -1118,6 +1123,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#ifdef CONFIG_SECURITY
p->security = NULL;
#endif
+ p->cap_bset = current->cap_bset;
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
@@ -1398,7 +1404,7 @@ fork_out:
return ERR_PTR(retval);
}
-noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
+noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
return regs;
@@ -1450,6 +1456,23 @@ long do_fork(unsigned long clone_flags,
int trace = 0;
long nr;
+ /*
+ * We hope to recycle these flags after 2.6.26
+ */
+ if (unlikely(clone_flags & CLONE_STOPPED)) {
+ static int __read_mostly count = 100;
+
+ if (count > 0 && printk_ratelimit()) {
+ char comm[TASK_COMM_LEN];
+
+ count--;
+ printk(KERN_INFO "fork(): process `%s' used deprecated "
+ "clone flags 0x%lx\n",
+ get_task_comm(comm, current),
+ clone_flags & CLONE_STOPPED);
+ }
+ }
+
if (unlikely(current->ptrace)) {
trace = fork_traceflag (clone_flags);
if (trace)
@@ -1492,7 +1515,7 @@ long do_fork(unsigned long clone_flags,
if (!(clone_flags & CLONE_STOPPED))
wake_up_new_task(p, clone_flags);
else
- p->state = TASK_STOPPED;
+ __set_task_state(p, TASK_STOPPED);
if (unlikely (trace)) {
current->ptrace_message = nr;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1069998fe25f..668f3967eb39 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -306,7 +306,7 @@ EXPORT_SYMBOL_GPL(ktime_sub_ns);
/*
* Divide a ktime value by a nanosecond value
*/
-unsigned long ktime_divns(const ktime_t kt, s64 div)
+u64 ktime_divns(const ktime_t kt, s64 div)
{
u64 dclc, inc, dns;
int sft = 0;
@@ -321,7 +321,7 @@ unsigned long ktime_divns(const ktime_t kt, s64 div)
dclc >>= sft;
do_div(dclc, (unsigned long) div);
- return (unsigned long) dclc;
+ return dclc;
}
#endif /* BITS_PER_LONG >= 64 */
@@ -656,10 +656,9 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
* Forward the timer expiry so it will expire in the future.
* Returns the number of overruns.
*/
-unsigned long
-hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
+u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
- unsigned long orun = 1;
+ u64 orun = 1;
ktime_t delta;
delta = ktime_sub(now, timer->expires);
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 7dadc71ce516..f091d13def00 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -53,14 +53,6 @@ static inline int is_kernel_inittext(unsigned long addr)
return 0;
}
-static inline int is_kernel_extratext(unsigned long addr)
-{
- if (addr >= (unsigned long)_sextratext
- && addr <= (unsigned long)_eextratext)
- return 1;
- return 0;
-}
-
static inline int is_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext && addr <= (unsigned long)_etext)
@@ -80,8 +72,7 @@ static int is_ksym_addr(unsigned long addr)
if (all_var)
return is_kernel(addr);
- return is_kernel_text(addr) || is_kernel_inittext(addr) ||
- is_kernel_extratext(addr);
+ return is_kernel_text(addr) || is_kernel_inittext(addr);
}
/* expand a compressed symbol data into the resulting uncompressed string,
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 9a26eec9eb04..06a0e2775651 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1361,8 +1361,8 @@ unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
static int __init crash_save_vmcoreinfo_init(void)
{
- vmcoreinfo_append_str("OSRELEASE=%s\n", init_uts_ns.name.release);
- vmcoreinfo_append_str("PAGESIZE=%ld\n", PAGE_SIZE);
+ VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
+ VMCOREINFO_PAGESIZE(PAGE_SIZE);
VMCOREINFO_SYMBOL(init_uts_ns);
VMCOREINFO_SYMBOL(node_online_map);
@@ -1376,15 +1376,15 @@ static int __init crash_save_vmcoreinfo_init(void)
#ifdef CONFIG_SPARSEMEM
VMCOREINFO_SYMBOL(mem_section);
VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
- VMCOREINFO_SIZE(mem_section);
+ VMCOREINFO_STRUCT_SIZE(mem_section);
VMCOREINFO_OFFSET(mem_section, section_mem_map);
#endif
- VMCOREINFO_SIZE(page);
- VMCOREINFO_SIZE(pglist_data);
- VMCOREINFO_SIZE(zone);
- VMCOREINFO_SIZE(free_area);
- VMCOREINFO_SIZE(list_head);
- VMCOREINFO_TYPEDEF_SIZE(nodemask_t);
+ VMCOREINFO_STRUCT_SIZE(page);
+ VMCOREINFO_STRUCT_SIZE(pglist_data);
+ VMCOREINFO_STRUCT_SIZE(zone);
+ VMCOREINFO_STRUCT_SIZE(free_area);
+ VMCOREINFO_STRUCT_SIZE(list_head);
+ VMCOREINFO_SIZE(nodemask_t);
VMCOREINFO_OFFSET(page, flags);
VMCOREINFO_OFFSET(page, _count);
VMCOREINFO_OFFSET(page, mapping);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d0493eafea3e..7a86e6432338 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -699,6 +699,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
struct kretprobe_instance, uflist);
ri->rp = rp;
ri->task = current;
+
+ if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+ return 0;
+ }
+
arch_prepare_kretprobe(ri, regs);
/* XXX(hch): why is there no hlist_move_head? */
@@ -745,7 +751,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
INIT_HLIST_HEAD(&rp->used_instances);
INIT_HLIST_HEAD(&rp->free_instances);
for (i = 0; i < rp->maxactive; i++) {
- inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
+ inst = kmalloc(sizeof(struct kretprobe_instance) +
+ rp->data_size, GFP_KERNEL);
if (inst == NULL) {
free_rp_inst(rp);
return -ENOMEM;
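
The kprobes hunks above add an optional rp->entry_handler callback and a per-instance
data area sized by rp->data_size. A minimal usage sketch, assuming a hypothetical probe
on do_fork and an illustrative timing payload (this example is not part of the patch):

/*
 * Hedged sketch: a module using the new entry_handler and per-instance
 * data.  The probed symbol "do_fork" and the timing payload are
 * illustrative assumptions.
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ktime.h>

struct probe_data {
	ktime_t entry_time;		/* lives in kretprobe_instance->data */
};

/* Runs on function entry; returning nonzero skips this instance. */
static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct probe_data *d = (struct probe_data *)ri->data;

	d->entry_time = ktime_get();
	return 0;
}

/* Runs on function return; reads back the per-instance data. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct probe_data *d = (struct probe_data *)ri->data;
	s64 ns = ktime_to_ns(ktime_sub(ktime_get(), d->entry_time));

	printk(KERN_INFO "probed call took %lld ns\n", (long long)ns);
	return 0;
}

static struct kretprobe my_rp = {
	.handler	= ret_handler,
	.entry_handler	= entry_handler,
	.data_size	= sizeof(struct probe_data),	/* extra bytes per instance */
	.maxactive	= 20,
	.kp.symbol_name	= "do_fork",			/* assumed target */
};

static int __init rp_init(void)
{
	return register_kretprobe(&my_rp);
}

static void __exit rp_exit(void)
{
	unregister_kretprobe(&my_rp);
}

module_init(rp_init);
module_exit(rp_exit);
MODULE_LICENSE("GPL");

Returning nonzero from the entry handler skips arch_prepare_kretprobe() for that
particular call, as the pre_handler_kretprobe() hunk above shows, so no return
handler runs for that instance; this is the intended filtering hook.
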
diff --git a/kernel/latency.c b/kernel/latency.c
deleted file mode 100644
index e63fcacb61a7..000000000000
--- a/kernel/latency.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * latency.c: Explicit system-wide latency-expectation infrastructure
- *
- * The purpose of this infrastructure is to allow device drivers to set
- * latency constraint they have and to collect and summarize these
- * expectations globally. The cummulated result can then be used by
- * power management and similar users to make decisions that have
- * tradoffs with a latency component.
- *
- * An example user of this are the x86 C-states; each higher C state saves
- * more power, but has a higher exit latency. For the idle loop power
- * code to make a good decision which C-state to use, information about
- * acceptable latencies is required.
- *
- * An example announcer of latency is an audio driver that knowns it
- * will get an interrupt when the hardware has 200 usec of samples
- * left in the DMA buffer; in that case the driver can set a latency
- * constraint of, say, 150 usec.
- *
- * Multiple drivers can each announce their maximum accepted latency,
- * to keep these appart, a string based identifier is used.
- *
- *
- * (C) Copyright 2006 Intel Corporation
- * Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/latency.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/jiffies.h>
-#include <asm/atomic.h>
-
-struct latency_info {
- struct list_head list;
- int usecs;
- char *identifier;
-};
-
-/*
- * locking rule: all modifications to current_max_latency and
- * latency_list need to be done while holding the latency_lock.
- * latency_lock needs to be taken _irqsave.
- */
-static atomic_t current_max_latency;
-static DEFINE_SPINLOCK(latency_lock);
-
-static LIST_HEAD(latency_list);
-static BLOCKING_NOTIFIER_HEAD(latency_notifier);
-
-/*
- * This function returns the maximum latency allowed, which
- * happens to be the minimum of all maximum latencies on the
- * list.
- */
-static int __find_max_latency(void)
-{
- int min = INFINITE_LATENCY;
- struct latency_info *info;
-
- list_for_each_entry(info, &latency_list, list) {
- if (info->usecs < min)
- min = info->usecs;
- }
- return min;
-}
-
-/**
- * set_acceptable_latency - sets the maximum latency acceptable
- * @identifier: string that identifies this driver
- * @usecs: maximum acceptable latency for this driver
- *
- * This function informs the kernel that this device(driver)
- * can accept at most usecs latency. This setting is used for
- * power management and similar tradeoffs.
- *
- * This function sleeps and can only be called from process
- * context.
- * Calling this function with an existing identifier is valid
- * and will cause the existing latency setting to be changed.
- */
-void set_acceptable_latency(char *identifier, int usecs)
-{
- struct latency_info *info, *iter;
- unsigned long flags;
- int found_old = 0;
-
- info = kzalloc(sizeof(struct latency_info), GFP_KERNEL);
- if (!info)
- return;
- info->usecs = usecs;
- info->identifier = kstrdup(identifier, GFP_KERNEL);
- if (!info->identifier)
- goto free_info;
-
- spin_lock_irqsave(&latency_lock, flags);
- list_for_each_entry(iter, &latency_list, list) {
- if (strcmp(iter->identifier, identifier)==0) {
- found_old = 1;
- iter->usecs = usecs;
- break;
- }
- }
- if (!found_old)
- list_add(&info->list, &latency_list);
-
- if (usecs < atomic_read(&current_max_latency))
- atomic_set(&current_max_latency, usecs);
-
- spin_unlock_irqrestore(&latency_lock, flags);
-
- blocking_notifier_call_chain(&latency_notifier,
- atomic_read(&current_max_latency), NULL);
-
- /*
- * if we inserted the new one, we're done; otherwise there was
- * an existing one so we need to free the redundant data
- */
- if (!found_old)
- return;
-
- kfree(info->identifier);
-free_info:
- kfree(info);
-}
-EXPORT_SYMBOL_GPL(set_acceptable_latency);
-
-/**
- * modify_acceptable_latency - changes the maximum latency acceptable
- * @identifier: string that identifies this driver
- * @usecs: maximum acceptable latency for this driver
- *
- * This function informs the kernel that this device(driver)
- * can accept at most usecs latency. This setting is used for
- * power management and similar tradeoffs.
- *
- * This function does not sleep and can be called in any context.
- * Trying to use a non-existing identifier silently gets ignored.
- *
- * Due to the atomic nature of this function, the modified latency
- * value will only be used for future decisions; past decisions
- * can still lead to longer latencies in the near future.
- */
-void modify_acceptable_latency(char *identifier, int usecs)
-{
- struct latency_info *iter;
- unsigned long flags;
-
- spin_lock_irqsave(&latency_lock, flags);
- list_for_each_entry(iter, &latency_list, list) {
- if (strcmp(iter->identifier, identifier) == 0) {
- iter->usecs = usecs;
- break;
- }
- }
- if (usecs < atomic_read(&current_max_latency))
- atomic_set(&current_max_latency, usecs);
- spin_unlock_irqrestore(&latency_lock, flags);
-}
-EXPORT_SYMBOL_GPL(modify_acceptable_latency);
-
-/**
- * remove_acceptable_latency - removes the maximum latency acceptable
- * @identifier: string that identifies this driver
- *
- * This function removes a previously set maximum latency setting
- * for the driver and frees up any resources associated with the
- * bookkeeping needed for this.
- *
- * This function does not sleep and can be called in any context.
- * Trying to use a non-existing identifier silently gets ignored.
- */
-void remove_acceptable_latency(char *identifier)
-{
- unsigned long flags;
- int newmax = 0;
- struct latency_info *iter, *temp;
-
- spin_lock_irqsave(&latency_lock, flags);
-
- list_for_each_entry_safe(iter, temp, &latency_list, list) {
- if (strcmp(iter->identifier, identifier) == 0) {
- list_del(&iter->list);
- newmax = iter->usecs;
- kfree(iter->identifier);
- kfree(iter);
- break;
- }
- }
-
- /* If we just deleted the system wide value, we need to
- * recalculate with a full search
- */
- if (newmax == atomic_read(&current_max_latency)) {
- newmax = __find_max_latency();
- atomic_set(&current_max_latency, newmax);
- }
- spin_unlock_irqrestore(&latency_lock, flags);
-}
-EXPORT_SYMBOL_GPL(remove_acceptable_latency);
-
-/**
- * system_latency_constraint - queries the system wide latency maximum
- *
- * This function returns the system wide maximum latency in
- * microseconds.
- *
- * This function does not sleep and can be called in any context.
- */
-int system_latency_constraint(void)
-{
- return atomic_read(&current_max_latency);
-}
-EXPORT_SYMBOL_GPL(system_latency_constraint);
-
-/**
- * synchronize_acceptable_latency - recalculates all latency decisions
- *
- * This function will cause a callback to various kernel pieces that
- * will make those pieces rethink their latency decisions. This implies
- * that if there are overlong latencies in hardware state already, those
- * latencies get taken right now. When this call completes no overlong
- * latency decisions should be active anymore.
- *
- * Typical usecase of this is after a modify_acceptable_latency() call,
- * which in itself is non-blocking and non-synchronizing.
- *
- * This function blocks and should not be called with locks held.
- */
-
-void synchronize_acceptable_latency(void)
-{
- blocking_notifier_call_chain(&latency_notifier,
- atomic_read(&current_max_latency), NULL);
-}
-EXPORT_SYMBOL_GPL(synchronize_acceptable_latency);
-
-/*
- * Latency notifier: this notifier gets called when a non-atomic new
- * latency value gets set. The expectation nof the caller of the
- * non-atomic set is that when the call returns, future latencies
- * are within bounds, so the functions on the notifier list are
- * expected to take the overlong latencies immediately, inside the
- * callback, and not make a overlong latency decision anymore.
- *
- * The callback gets called when the new latency value is made
- * active so system_latency_constraint() returns the new latency.
- */
-int register_latency_notifier(struct notifier_block * nb)
-{
- return blocking_notifier_chain_register(&latency_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(register_latency_notifier);
-
-int unregister_latency_notifier(struct notifier_block * nb)
-{
- return blocking_notifier_chain_unregister(&latency_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_latency_notifier);
-
-static __init int latency_init(void)
-{
- atomic_set(&current_max_latency, INFINITE_LATENCY);
- /*
- * we don't want by default to have longer latencies than 2 ticks,
- * since that would cause lost ticks
- */
- set_acceptable_latency("kernel", 2*1000000/HZ);
- return 0;
-}
-
-module_init(latency_init);
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4253f472f060..643360d1bb14 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -4,6 +4,7 @@
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
+#include <linux/reboot.h>
/*
* Notifier list for kernel code which wants to be called
diff --git a/kernel/panic.c b/kernel/panic.c
index d9e90cfe3298..24af9f8bac99 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -161,7 +161,7 @@ const char *print_tainted(void)
{
static char buf[20];
if (tainted) {
- snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c",
+ snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c",
tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
@@ -169,7 +169,8 @@ const char *print_tainted(void)
tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
tainted & TAINT_BAD_PAGE ? 'B' : ' ',
tainted & TAINT_USER ? 'U' : ' ',
- tainted & TAINT_DIE ? 'D' : ' ');
+ tainted & TAINT_DIE ? 'D' : ' ',
+ tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' ');
}
else
snprintf(buf, sizeof(buf), "Not tainted");
diff --git a/kernel/params.c b/kernel/params.c
index 42fe5e6126c0..e28c70628bb7 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -272,7 +272,7 @@ static int param_array(const char *name,
unsigned int min, unsigned int max,
void *elem, int elemsize,
int (*set)(const char *, struct kernel_param *kp),
- int *num)
+ unsigned int *num)
{
int ret;
struct kernel_param kp;
diff --git a/kernel/pid.c b/kernel/pid.c
index f815455431bf..3b30bccdfcdc 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -368,6 +368,7 @@ struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
}
return result;
}
+EXPORT_SYMBOL(pid_task);
/*
* Must be called under rcu_read_lock() or with tasklist_lock read-held.
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
new file mode 100644
index 000000000000..0afe32be4c85
--- /dev/null
+++ b/kernel/pm_qos_params.c
@@ -0,0 +1,425 @@
+/*
+ * This module exposes the interface to kernel space for specifying
+ * QoS dependencies. It provides infrastructure for registration of:
+ *
+ * Dependents on a QoS value : register requirements
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ *
+ * There are 3 basic classes of QoS parameter: latency, timeout, throughput
+ * each have defined units:
+ * latency: usec
+ * timeout: usec <-- currently not used.
+ * throughput: kbs (kilo byte / sec)
+ *
+ * There are lists of pm_qos_objects, each one wrapping requirements and
+ * notifiers.
+ *
+ * User mode requirements on a QOS parameter register themselves with the
+ * subsystem by opening the device node /dev/... and writing their request
+ * to the node. As long as the process holds a file handle open to the node
+ * the client continues to be accounted for. Upon file release the usermode
+ * requirement is removed and a new qos target is computed, so requirements
+ * are cleaned up when the application closes the file descriptor or exits,
+ * and the pm_qos_object gets an opportunity to clean up.
+ *
+ * mark gross mgross@linux.intel.com
+ */
+
+#include <linux/pm_qos_params.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+
+#include <linux/uaccess.h>
+
+/*
+ * locking rule: all changes to target_value or requirements or notifiers lists
+ * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
+ * held, taken with _irqsave. One lock to rule them all
+ */
+struct requirement_list {
+ struct list_head list;
+ union {
+ s32 value;
+ s32 usec;
+ s32 kbps;
+ };
+ char *name;
+};
+
+static s32 max_compare(s32 v1, s32 v2);
+static s32 min_compare(s32 v1, s32 v2);
+
+struct pm_qos_object {
+ struct requirement_list requirements;
+ struct blocking_notifier_head *notifiers;
+ struct miscdevice pm_qos_power_miscdev;
+ char *name;
+ s32 default_value;
+ s32 target_value;
+ s32 (*comparitor)(s32, s32);
+};
+
+static struct pm_qos_object null_pm_qos;
+static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
+static struct pm_qos_object cpu_dma_pm_qos = {
+ .requirements = {LIST_HEAD_INIT(cpu_dma_pm_qos.requirements.list)},
+ .notifiers = &cpu_dma_lat_notifier,
+ .name = "cpu_dma_latency",
+ .default_value = 2000 * USEC_PER_SEC,
+ .target_value = 2000 * USEC_PER_SEC,
+ .comparitor = min_compare
+};
+
+static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+static struct pm_qos_object network_lat_pm_qos = {
+ .requirements = {LIST_HEAD_INIT(network_lat_pm_qos.requirements.list)},
+ .notifiers = &network_lat_notifier,
+ .name = "network_latency",
+ .default_value = 2000 * USEC_PER_SEC,
+ .target_value = 2000 * USEC_PER_SEC,
+ .comparitor = min_compare
+};
+
+
+static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
+static struct pm_qos_object network_throughput_pm_qos = {
+ .requirements =
+ {LIST_HEAD_INIT(network_throughput_pm_qos.requirements.list)},
+ .notifiers = &network_throughput_notifier,
+ .name = "network_throughput",
+ .default_value = 0,
+ .target_value = 0,
+ .comparitor = max_compare
+};
+
+
+static struct pm_qos_object *pm_qos_array[] = {
+ &null_pm_qos,
+ &cpu_dma_pm_qos,
+ &network_lat_pm_qos,
+ &network_throughput_pm_qos
+};
+
+static DEFINE_SPINLOCK(pm_qos_lock);
+
+static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos);
+static int pm_qos_power_open(struct inode *inode, struct file *filp);
+static int pm_qos_power_release(struct inode *inode, struct file *filp);
+
+static const struct file_operations pm_qos_power_fops = {
+ .write = pm_qos_power_write,
+ .open = pm_qos_power_open,
+ .release = pm_qos_power_release,
+};
+
+/* static helper functions */
+static s32 max_compare(s32 v1, s32 v2)
+{
+ return max(v1, v2);
+}
+
+static s32 min_compare(s32 v1, s32 v2)
+{
+ return min(v1, v2);
+}
+
+
+static void update_target(int target)
+{
+ s32 extreme_value;
+ struct requirement_list *node;
+ unsigned long flags;
+ int call_notifier = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ extreme_value = pm_qos_array[target]->default_value;
+ list_for_each_entry(node,
+ &pm_qos_array[target]->requirements.list, list) {
+ extreme_value = pm_qos_array[target]->comparitor(
+ extreme_value, node->value);
+ }
+ if (pm_qos_array[target]->target_value != extreme_value) {
+ call_notifier = 1;
+ pm_qos_array[target]->target_value = extreme_value;
+ pr_debug("new target for qos %d is %d\n", target,
+ pm_qos_array[target]->target_value);
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ if (call_notifier)
+ blocking_notifier_call_chain(pm_qos_array[target]->notifiers,
+ (unsigned long) extreme_value, NULL);
+}
+
+static int register_pm_qos_misc(struct pm_qos_object *qos)
+{
+ qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
+ qos->pm_qos_power_miscdev.name = qos->name;
+ qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
+
+ return misc_register(&qos->pm_qos_power_miscdev);
+}
+
+static int find_pm_qos_object_by_minor(int minor)
+{
+ int pm_qos_class;
+
+ for (pm_qos_class = 0;
+ pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
+ if (minor ==
+ pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
+ return pm_qos_class;
+ }
+ return -1;
+}
+
+/**
+ * pm_qos_requirement - returns current system wide qos expectation
+ * @pm_qos_class: identification of which qos value is requested
+ *
+ * This function returns the current target value in an atomic manner.
+ */
+int pm_qos_requirement(int pm_qos_class)
+{
+ int ret_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ ret_val = pm_qos_array[pm_qos_class]->target_value;
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ return ret_val;
+}
+EXPORT_SYMBOL_GPL(pm_qos_requirement);
+
+/**
+ * pm_qos_add_requirement - inserts new qos request into the list
+ * @pm_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the pm_qos_class list of requested qos
+ * performance characteristics. It recomputes the aggregate QoS expectations
+ * for the pm_qos_class of parameters.
+ */
+int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value)
+{
+ struct requirement_list *dep;
+ unsigned long flags;
+
+ dep = kzalloc(sizeof(struct requirement_list), GFP_KERNEL);
+ if (dep) {
+ if (value == PM_QOS_DEFAULT_VALUE)
+ dep->value = pm_qos_array[pm_qos_class]->default_value;
+ else
+ dep->value = value;
+ dep->name = kstrdup(name, GFP_KERNEL);
+ if (!dep->name)
+ goto cleanup;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_add(&dep->list,
+ &pm_qos_array[pm_qos_class]->requirements.list);
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ update_target(pm_qos_class);
+
+ return 0;
+ }
+
+cleanup:
+ kfree(dep);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_requirement);
+
+/**
+ * pm_qos_update_requirement - modifies an existing qos request
+ * @pm_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ * @new_value: defines the new qos request value
+ *
+ * Updates an existing qos requirement for the pm_qos_class of parameters
+ * along with updating the target pm_qos_class value.
+ *
+ * If the named request isn't in the list then no change is made.
+ */
+int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_for_each_entry(node,
+ &pm_qos_array[pm_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ if (new_value == PM_QOS_DEFAULT_VALUE)
+ node->value =
+ pm_qos_array[pm_qos_class]->default_value;
+ else
+ node->value = new_value;
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ if (pending_update)
+ update_target(pm_qos_class);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_qos_update_requirement);
+
+/**
+ * pm_qos_remove_requirement - removes an existing qos request
+ * @pm_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ *
+ * Removes the named qos request from the pm_qos_class list of parameters and
+ * recomputes the current target value for the pm_qos_class.
+ */
+void pm_qos_remove_requirement(int pm_qos_class, char *name)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_for_each_entry(node,
+ &pm_qos_array[pm_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ kfree(node->name);
+ list_del(&node->list);
+ kfree(node);
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ if (pending_update)
+ update_target(pm_qos_class);
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
+
+/**
+ * pm_qos_add_notifier - sets notification entry for changes to target value
+ * @pm_qos_class: identifies which qos target changes should be notified.
+ * @notifier: notifier block managed by caller.
+ *
+ * will register the notifier into a notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+ int retval;
+
+ retval = blocking_notifier_chain_register(
+ pm_qos_array[pm_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
+
+/**
+ * pm_qos_remove_notifier - deletes notification entry from chain.
+ * @pm_qos_class: identifies which qos target changes are notified.
+ * @notifier: notifier block to be removed.
+ *
+ * will remove the notifier from the notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+ int retval;
+
+ retval = blocking_notifier_chain_unregister(
+ pm_qos_array[pm_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
+
+#define PID_NAME_LEN sizeof("process_1234567890")
+static char name[PID_NAME_LEN];
+
+static int pm_qos_power_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+ long pm_qos_class;
+
+ pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
+ if (pm_qos_class >= 0) {
+ filp->private_data = (void *)pm_qos_class;
+ sprintf(name, "process_%d", current->pid);
+ ret = pm_qos_add_requirement(pm_qos_class, name,
+ PM_QOS_DEFAULT_VALUE);
+ if (ret >= 0)
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+static int pm_qos_power_release(struct inode *inode, struct file *filp)
+{
+ int pm_qos_class;
+
+ pm_qos_class = (long)filp->private_data;
+ sprintf(name, "process_%d", current->pid);
+ pm_qos_remove_requirement(pm_qos_class, name);
+
+ return 0;
+}
+
+static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ s32 value;
+ int pm_qos_class;
+
+ pm_qos_class = (long)filp->private_data;
+ if (count != sizeof(s32))
+ return -EINVAL;
+ if (copy_from_user(&value, buf, sizeof(s32)))
+ return -EFAULT;
+ sprintf(name, "process_%d", current->pid);
+ pm_qos_update_requirement(pm_qos_class, name, value);
+
+ return sizeof(s32);
+}
+
+
+static int __init pm_qos_power_init(void)
+{
+ int ret = 0;
+
+ ret = register_pm_qos_misc(&cpu_dma_pm_qos);
+ if (ret < 0) {
+ printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&network_lat_pm_qos);
+ if (ret < 0) {
+ printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&network_throughput_pm_qos);
+ if (ret < 0)
+ printk(KERN_ERR
+ "pm_qos_param: network_throughput setup failed\n");
+
+ return ret;
+}
+
+late_initcall(pm_qos_power_init);
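
A hedged sketch of how a driver might use the kernel-side interface added in this file.
The "my_driver" identifier and the latency figures are illustrative, and
PM_QOS_CPU_DMA_LATENCY is assumed to be the class constant declared in
linux/pm_qos_params.h:

#include <linux/pm_qos_params.h>

/* Hold a tight CPU/DMA wakeup-latency constraint while streaming. */
static int my_start_streaming(void)
{
	/* 50 usec is an illustrative bound, not taken from the patch. */
	return pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
				      "my_driver", 50);
}

static void my_relax_streaming(void)
{
	/* Relax the bound; the aggregate target is recomputed. */
	pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "my_driver", 200);
}

static void my_stop_streaming(void)
{
	/* Drop the requirement entirely. */
	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "my_driver");
}

From user space, the equivalent is to open the corresponding misc device (named after
the pm_qos_object, e.g. /dev/cpu_dma_latency) and write a raw s32; the requirement
stays in force while the file descriptor is held and is dropped on release, as
pm_qos_power_open() and pm_qos_power_release() above implement.
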
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 36d563fd9e3b..122d5c787fe2 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -256,8 +256,9 @@ static void schedule_next_timer(struct k_itimer *timr)
if (timr->it.real.interval.tv64 == 0)
return;
- timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
- timr->it.real.interval);
+ timr->it_overrun += (unsigned int) hrtimer_forward(timer,
+ timer->base->get_time(),
+ timr->it.real.interval);
timr->it_overrun_last = timr->it_overrun;
timr->it_overrun = -1;
@@ -386,7 +387,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
now = ktime_add(now, kj);
}
#endif
- timr->it_overrun +=
+ timr->it_overrun += (unsigned int)
hrtimer_forward(timer, now,
timr->it.real.interval);
ret = HRTIMER_RESTART;
@@ -662,7 +663,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
*/
if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
(timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
- timr->it_overrun += hrtimer_forward(timer, now, iv);
+ timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
remaining = ktime_sub(timer->expires, now);
/* Return 0 only, when the timer is expired and not pending */
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index ef9b802738a5..79833170bb9c 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -74,8 +74,8 @@ config PM_TRACE_RTC
RTC across reboots, so that you can debug a machine that just hangs
during suspend (or more commonly, during resume).
- To use this debugging feature you should attempt to suspend the machine,
- then reboot it, then run
+ To use this debugging feature you should attempt to suspend the
+ machine, reboot it and then run
dmesg -s 1000000 | grep 'hash matches'
@@ -123,7 +123,10 @@ config HIBERNATION
called "hibernation" in user interfaces. STD checkpoints the
system and powers it off; and restores that checkpoint on reboot.
- You can suspend your machine with 'echo disk > /sys/power/state'.
+ You can suspend your machine with 'echo disk > /sys/power/state'
+ after placing resume=/dev/swappartition on the kernel command line
+ in your bootloader's configuration file.
+
Alternatively, you can use the additional userland tools available
from <http://suspend.sf.net>.
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index d09da0895174..859a8e59773a 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -26,7 +26,7 @@
static int noresume = 0;
-char resume_file[256] = CONFIG_PM_STD_PARTITION;
+static char resume_file[256] = CONFIG_PM_STD_PARTITION;
dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
@@ -185,7 +185,7 @@ static void platform_restore_cleanup(int platform_mode)
* reappears in this routine after a restore.
*/
-int create_image(int platform_mode)
+static int create_image(int platform_mode)
{
int error;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f6a5df934f8d..95250d7c8d91 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1203,7 +1203,7 @@ asmlinkage int swsusp_save(void)
printk(KERN_INFO "PM: Creating hibernation image: \n");
- drain_local_pages();
+ drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
@@ -1221,7 +1221,7 @@ asmlinkage int swsusp_save(void)
/* During allocating of suspend pagedir, new cold pages may appear.
* Kill them.
*/
- drain_local_pages();
+ drain_local_pages(NULL);
copy_data_pages(&copy_bm, &orig_bm);
/*
diff --git a/kernel/printk.c b/kernel/printk.c
index 29ae1e99cde0..4a090621f379 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -93,16 +93,16 @@ static int console_locked, console_suspended;
*/
static DEFINE_SPINLOCK(logbuf_lock);
-#define LOG_BUF_MASK (log_buf_len-1)
+#define LOG_BUF_MASK (log_buf_len-1)
#define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
/*
* The indices into log_buf are not constrained to log_buf_len - they
* must be masked before subscripting
*/
-static unsigned long log_start; /* Index into log_buf: next char to be read by syslog() */
-static unsigned long con_start; /* Index into log_buf: next char to be sent to consoles */
-static unsigned long log_end; /* Index into log_buf: most-recently-written-char + 1 */
+static unsigned log_start; /* Index into log_buf: next char to be read by syslog() */
+static unsigned con_start; /* Index into log_buf: next char to be sent to consoles */
+static unsigned log_end; /* Index into log_buf: most-recently-written-char + 1 */
/*
* Array of consoles built from command line options (console=)
@@ -128,17 +128,17 @@ static int console_may_schedule;
static char __log_buf[__LOG_BUF_LEN];
static char *log_buf = __log_buf;
static int log_buf_len = __LOG_BUF_LEN;
-static unsigned long logged_chars; /* Number of chars produced since last read+clear operation */
+static unsigned logged_chars; /* Number of chars produced since last read+clear operation */
static int __init log_buf_len_setup(char *str)
{
- unsigned long size = memparse(str, &str);
+ unsigned size = memparse(str, &str);
unsigned long flags;
if (size)
size = roundup_pow_of_two(size);
if (size > log_buf_len) {
- unsigned long start, dest_idx, offset;
+ unsigned start, dest_idx, offset;
char *new_log_buf;
new_log_buf = alloc_bootmem(size);
@@ -295,7 +295,7 @@ int log_buf_read(int idx)
*/
int do_syslog(int type, char __user *buf, int len)
{
- unsigned long i, j, limit, count;
+ unsigned i, j, limit, count;
int do_clear = 0;
char c;
int error = 0;
@@ -436,7 +436,7 @@ asmlinkage long sys_syslog(int type, char __user *buf, int len)
/*
* Call the console drivers on a range of log_buf
*/
-static void __call_console_drivers(unsigned long start, unsigned long end)
+static void __call_console_drivers(unsigned start, unsigned end)
{
struct console *con;
@@ -463,8 +463,8 @@ early_param("ignore_loglevel", ignore_loglevel_setup);
/*
* Write out chars from start to end - 1 inclusive
*/
-static void _call_console_drivers(unsigned long start,
- unsigned long end, int msg_log_level)
+static void _call_console_drivers(unsigned start,
+ unsigned end, int msg_log_level)
{
if ((msg_log_level < console_loglevel || ignore_loglevel) &&
console_drivers && start != end) {
@@ -484,12 +484,12 @@ static void _call_console_drivers(unsigned long start,
* log_buf[start] to log_buf[end - 1].
* The console_sem must be held.
*/
-static void call_console_drivers(unsigned long start, unsigned long end)
+static void call_console_drivers(unsigned start, unsigned end)
{
- unsigned long cur_index, start_print;
+ unsigned cur_index, start_print;
static int msg_level = -1;
- BUG_ON(((long)(start - end)) > 0);
+ BUG_ON(((int)(start - end)) > 0);
cur_index = start;
start_print = start;
@@ -790,7 +790,7 @@ asmlinkage long sys_syslog(int type, char __user *buf, int len)
return -ENOSYS;
}
-static void call_console_drivers(unsigned long start, unsigned long end)
+static void call_console_drivers(unsigned start, unsigned end)
{
}
@@ -983,8 +983,8 @@ void wake_up_klogd(void)
void release_console_sem(void)
{
unsigned long flags;
- unsigned long _con_start, _log_end;
- unsigned long wake_klogd = 0;
+ unsigned _con_start, _log_end;
+ unsigned wake_klogd = 0;
if (console_suspended) {
up(&secondary_console_sem);
@@ -1275,7 +1275,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
{
static DEFINE_SPINLOCK(ratelimit_lock);
- static unsigned long toks = 10 * 5 * HZ;
+ static unsigned toks = 10 * 5 * HZ;
static unsigned long last_msg;
static int missed;
unsigned long flags;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b0d4ab4dfd3d..628b03ab88a5 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -20,6 +20,7 @@
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
+#include <linux/syscalls.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -53,7 +54,7 @@ void ptrace_untrace(struct task_struct *child)
spin_lock(&child->sighand->siglock);
if (task_is_traced(child)) {
if (child->signal->flags & SIGNAL_STOP_STOPPED) {
- child->state = TASK_STOPPED;
+ __set_task_state(child, TASK_STOPPED);
} else {
signal_wake_up(child, 1);
}
@@ -103,18 +104,16 @@ int ptrace_check_attach(struct task_struct *child, int kill)
&& child->signal != NULL) {
ret = 0;
spin_lock_irq(&child->sighand->siglock);
- if (task_is_stopped(child)) {
+ if (task_is_stopped(child))
child->state = TASK_TRACED;
- } else if (!task_is_traced(child) && !kill) {
+ else if (!task_is_traced(child) && !kill)
ret = -ESRCH;
- }
spin_unlock_irq(&child->sighand->siglock);
}
read_unlock(&tasklist_lock);
- if (!ret && !kill) {
+ if (!ret && !kill)
wait_task_inactive(child);
- }
/* All systems go.. */
return ret;
diff --git a/kernel/relay.c b/kernel/relay.c
index 7c0373322f18..d080b9d161a7 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -37,37 +37,31 @@ static void relay_file_mmap_close(struct vm_area_struct *vma)
}
/*
- * nopage() vm_op implementation for relay file mapping.
+ * fault() vm_op implementation for relay file mapping.
*/
-static struct page *relay_buf_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int *type)
+static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page;
struct rchan_buf *buf = vma->vm_private_data;
- unsigned long offset = address - vma->vm_start;
+ pgoff_t pgoff = vmf->pgoff;
- if (address > vma->vm_end)
- return NOPAGE_SIGBUS; /* Disallow mremap */
if (!buf)
- return NOPAGE_OOM;
+ return VM_FAULT_OOM;
- page = vmalloc_to_page(buf->start + offset);
+ page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));
if (!page)
- return NOPAGE_OOM;
+ return VM_FAULT_SIGBUS;
get_page(page);
+ vmf->page = page;
- if (type)
- *type = VM_FAULT_MINOR;
-
- return page;
+ return 0;
}
/*
* vm_ops for relay file mappings.
*/
static struct vm_operations_struct relay_file_mmap_ops = {
- .nopage = relay_buf_nopage,
+ .fault = relay_buf_fault,
.close = relay_file_mmap_close,
};
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
new file mode 100644
index 000000000000..16cbec2d5d60
--- /dev/null
+++ b/kernel/res_counter.c
@@ -0,0 +1,134 @@
+/*
+ * resource cgroups
+ *
+ * Copyright 2007 OpenVZ SWsoft Inc
+ *
+ * Author: Pavel Emelianov <xemul@openvz.org>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/parser.h>
+#include <linux/fs.h>
+#include <linux/res_counter.h>
+#include <linux/uaccess.h>
+
+void res_counter_init(struct res_counter *counter)
+{
+ spin_lock_init(&counter->lock);
+ counter->limit = (unsigned long long)LLONG_MAX;
+}
+
+int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
+{
+ if (counter->usage + val > counter->limit) {
+ counter->failcnt++;
+ return -ENOMEM;
+ }
+
+ counter->usage += val;
+ return 0;
+}
+
+int res_counter_charge(struct res_counter *counter, unsigned long val)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&counter->lock, flags);
+ ret = res_counter_charge_locked(counter, val);
+ spin_unlock_irqrestore(&counter->lock, flags);
+ return ret;
+}
+
+void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
+{
+ if (WARN_ON(counter->usage < val))
+ val = counter->usage;
+
+ counter->usage -= val;
+}
+
+void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&counter->lock, flags);
+ res_counter_uncharge_locked(counter, val);
+ spin_unlock_irqrestore(&counter->lock, flags);
+}
+
+
+static inline unsigned long long *
+res_counter_member(struct res_counter *counter, int member)
+{
+ switch (member) {
+ case RES_USAGE:
+ return &counter->usage;
+ case RES_LIMIT:
+ return &counter->limit;
+ case RES_FAILCNT:
+ return &counter->failcnt;
+ }
+
+ BUG();
+ return NULL;
+}
+
+ssize_t res_counter_read(struct res_counter *counter, int member,
+ const char __user *userbuf, size_t nbytes, loff_t *pos,
+ int (*read_strategy)(unsigned long long val, char *st_buf))
+{
+ unsigned long long *val;
+ char buf[64], *s;
+
+ s = buf;
+ val = res_counter_member(counter, member);
+ if (read_strategy)
+ s += read_strategy(*val, s);
+ else
+ s += sprintf(s, "%llu\n", *val);
+ return simple_read_from_buffer((void __user *)userbuf, nbytes,
+ pos, buf, s - buf);
+}
+
+ssize_t res_counter_write(struct res_counter *counter, int member,
+ const char __user *userbuf, size_t nbytes, loff_t *pos,
+ int (*write_strategy)(char *st_buf, unsigned long long *val))
+{
+ int ret;
+ char *buf, *end;
+ unsigned long flags;
+ unsigned long long tmp, *val;
+
+ buf = kmalloc(nbytes + 1, GFP_KERNEL);
+ ret = -ENOMEM;
+ if (buf == NULL)
+ goto out;
+
+ buf[nbytes] = '\0';
+ ret = -EFAULT;
+ if (copy_from_user(buf, userbuf, nbytes))
+ goto out_free;
+
+ ret = -EINVAL;
+
+ if (write_strategy) {
+ if (write_strategy(buf, &tmp)) {
+ goto out_free;
+ }
+ } else {
+ tmp = simple_strtoull(buf, &end, 10);
+ if (*end != '\0')
+ goto out_free;
+ }
+ spin_lock_irqsave(&counter->lock, flags);
+ val = res_counter_member(counter, member);
+ *val = tmp;
+ spin_unlock_irqrestore(&counter->lock, flags);
+ ret = nbytes;
+out_free:
+ kfree(buf);
+out:
+ return ret;
+}
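
A hedged usage sketch of the res_counter API introduced above, showing the charge/uncharge pattern a controller would follow. Only res_counter_init(), res_counter_charge() and res_counter_uncharge() come from this file; the surrounding "my_controller" structure and the 4096-byte charge size are illustrative assumptions.

	#include <linux/errno.h>
	#include <linux/res_counter.h>

	/* hypothetical controller embedding a res_counter */
	struct my_controller {
		struct res_counter res;
	};

	static void my_controller_init(struct my_controller *ctl)
	{
		/* starts with usage == 0 and limit == LLONG_MAX (unlimited) */
		res_counter_init(&ctl->res);
	}

	static int my_controller_account(struct my_controller *ctl)
	{
		/* fails with -ENOMEM (and bumps res.failcnt) once
		 * usage + 4096 would exceed res.limit */
		return res_counter_charge(&ctl->res, 4096);
	}

	static void my_controller_unaccount(struct my_controller *ctl)
	{
		res_counter_uncharge(&ctl->res, 4096);
	}
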
diff --git a/kernel/signal.c b/kernel/signal.c
index 4333b6dbb424..5d30ff561847 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -911,27 +911,6 @@ __group_complete_signal(int sig, struct task_struct *p)
} while_each_thread(p, t);
return;
}
-
- /*
- * There will be a core dump. We make all threads other
- * than the chosen one go into a group stop so that nothing
- * happens until it gets scheduled, takes the signal off
- * the shared queue, and does the core dump. This is a
- * little more complicated than strictly necessary, but it
- * keeps the signal state that winds up in the core dump
- * unchanged from the death state, e.g. which thread had
- * the core-dump signal unblocked.
- */
- rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
- rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
- p->signal->group_stop_count = 0;
- p->signal->group_exit_task = t;
- p = t;
- do {
- p->signal->group_stop_count++;
- signal_wake_up(t, t == p);
- } while_each_thread(p, t);
- return;
}
/*
@@ -978,7 +957,6 @@ void zap_other_threads(struct task_struct *p)
{
struct task_struct *t;
- p->signal->flags = SIGNAL_GROUP_EXIT;
p->signal->group_stop_count = 0;
for (t = next_thread(p); t != p; t = next_thread(t)) {
@@ -1600,6 +1578,17 @@ static inline int may_ptrace_stop(void)
}
/*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static int sigkill_pending(struct task_struct *tsk)
+{
+ return ((sigismember(&tsk->pending.signal, SIGKILL) ||
+ sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
+ !unlikely(sigismember(&tsk->blocked, SIGKILL)));
+}
+
+/*
* This must be called with current->sighand->siglock held.
*
* This should be the path for all ptrace stops.
@@ -1612,6 +1601,26 @@ static inline int may_ptrace_stop(void)
*/
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
+ int killed = 0;
+
+ if (arch_ptrace_stop_needed(exit_code, info)) {
+ /*
+ * The arch code has something special to do before a
+ * ptrace stop. This is allowed to block, e.g. for faults
+ * on user stack pages. We can't keep the siglock while
+ * calling arch_ptrace_stop, so we must release it now.
+ * To preserve proper semantics, we must do this before
+ * any signal bookkeeping like checking group_stop_count.
+ * Meanwhile, a SIGKILL could come in before we retake the
+ * siglock. That must prevent us from sleeping in TASK_TRACED.
+ * So after regaining the lock, we must check for SIGKILL.
+ */
+ spin_unlock_irq(&current->sighand->siglock);
+ arch_ptrace_stop(exit_code, info);
+ spin_lock_irq(&current->sighand->siglock);
+ killed = sigkill_pending(current);
+ }
+
/*
* If there is a group stop in progress,
* we must participate in the bookkeeping.
@@ -1623,11 +1632,11 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
current->exit_code = exit_code;
/* Let the debugger run. */
- set_current_state(TASK_TRACED);
+ __set_current_state(TASK_TRACED);
spin_unlock_irq(&current->sighand->siglock);
try_to_freeze();
read_lock(&tasklist_lock);
- if (may_ptrace_stop()) {
+ if (!unlikely(killed) && may_ptrace_stop()) {
do_notify_parent_cldstop(current, CLD_TRAPPED);
read_unlock(&tasklist_lock);
schedule();
@@ -1709,9 +1718,6 @@ static int do_signal_stop(int signr)
struct signal_struct *sig = current->signal;
int stop_count;
- if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
- return 0;
-
if (sig->group_stop_count > 0) {
/*
* There is a group stop in progress. We don't need to
@@ -1719,12 +1725,15 @@ static int do_signal_stop(int signr)
*/
stop_count = --sig->group_stop_count;
} else {
+ struct task_struct *t;
+
+ if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+ unlikely(sig->group_exit_task))
+ return 0;
/*
* There is no group stop already in progress.
* We must initiate one now.
*/
- struct task_struct *t;
-
sig->group_exit_code = signr;
stop_count = 0;
@@ -1752,47 +1761,6 @@ static int do_signal_stop(int signr)
return 1;
}
-/*
- * Do appropriate magic when group_stop_count > 0.
- * We return nonzero if we stopped, after releasing the siglock.
- * We return zero if we still hold the siglock and should look
- * for another signal without checking group_stop_count again.
- */
-static int handle_group_stop(void)
-{
- int stop_count;
-
- if (current->signal->group_exit_task == current) {
- /*
- * Group stop is so we can do a core dump,
- * We are the initiating thread, so get on with it.
- */
- current->signal->group_exit_task = NULL;
- return 0;
- }
-
- if (current->signal->flags & SIGNAL_GROUP_EXIT)
- /*
- * Group stop is so another thread can do a core dump,
- * or else we are racing against a death signal.
- * Just punt the stop so we can get the next signal.
- */
- return 0;
-
- /*
- * There is a group stop in progress. We stop
- * without any associated signal being in our queue.
- */
- stop_count = --current->signal->group_stop_count;
- if (stop_count == 0)
- current->signal->flags = SIGNAL_STOP_STOPPED;
- current->exit_code = current->signal->group_exit_code;
- set_current_state(TASK_STOPPED);
- spin_unlock_irq(&current->sighand->siglock);
- finish_stop(stop_count);
- return 1;
-}
-
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
struct pt_regs *regs, void *cookie)
{
@@ -1807,7 +1775,7 @@ relock:
struct k_sigaction *ka;
if (unlikely(current->signal->group_stop_count > 0) &&
- handle_group_stop())
+ do_signal_stop(0))
goto relock;
signr = dequeue_signal(current, mask, info);
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 3507cabe963b..b0aeeaf22ce4 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -74,7 +74,7 @@ static int srcu_readers_active_idx(struct srcu_struct *sp, int idx)
* severe errors when invoked on an active srcu_struct. That said, it
* can be useful as an error check at cleanup time.
*/
-int srcu_readers_active(struct srcu_struct *sp)
+static int srcu_readers_active(struct srcu_struct *sp)
{
return srcu_readers_active_idx(sp, 0) + srcu_readers_active_idx(sp, 1);
}
@@ -255,4 +255,3 @@ EXPORT_SYMBOL_GPL(srcu_read_lock);
EXPORT_SYMBOL_GPL(srcu_read_unlock);
EXPORT_SYMBOL_GPL(synchronize_srcu);
EXPORT_SYMBOL_GPL(srcu_batches_completed);
-EXPORT_SYMBOL_GPL(srcu_readers_active);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 51b5ee53571a..6f4e0e13f70c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -29,7 +29,6 @@ enum stopmachine_state {
static enum stopmachine_state stopmachine_state;
static unsigned int stopmachine_num_threads;
static atomic_t stopmachine_thread_ack;
-static DECLARE_MUTEX(stopmachine_mutex);
static int stopmachine(void *cpu)
{
@@ -170,6 +169,7 @@ static int do_stop(void *_smdata)
struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
unsigned int cpu)
{
+ static DEFINE_MUTEX(stopmachine_mutex);
struct stop_machine_data smdata;
struct task_struct *p;
@@ -177,7 +177,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
smdata.data = data;
init_completion(&smdata.done);
- down(&stopmachine_mutex);
+ mutex_lock(&stopmachine_mutex);
/* If they don't care which CPU fn runs on, bind to any online one. */
if (cpu == NR_CPUS)
@@ -193,7 +193,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
wake_up_process(p);
wait_for_completion(&smdata.done);
}
- up(&stopmachine_mutex);
+ mutex_unlock(&stopmachine_mutex);
return p;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index d1fe71eb4546..e3c08d4324de 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -315,7 +315,7 @@ static void kernel_kexec(void)
#endif
}
-void kernel_shutdown_prepare(enum system_states state)
+static void kernel_shutdown_prepare(enum system_states state)
{
blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
@@ -1145,16 +1145,16 @@ static int groups_to_user(gid_t __user *grouplist,
struct group_info *group_info)
{
int i;
- int count = group_info->ngroups;
+ unsigned int count = group_info->ngroups;
for (i = 0; i < group_info->nblocks; i++) {
- int cp_count = min(NGROUPS_PER_BLOCK, count);
- int off = i * NGROUPS_PER_BLOCK;
- int len = cp_count * sizeof(*grouplist);
+ unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+ unsigned int len = cp_count * sizeof(*grouplist);
- if (copy_to_user(grouplist+off, group_info->blocks[i], len))
+ if (copy_to_user(grouplist, group_info->blocks[i], len))
return -EFAULT;
+ grouplist += NGROUPS_PER_BLOCK;
count -= cp_count;
}
return 0;
@@ -1165,16 +1165,16 @@ static int groups_from_user(struct group_info *group_info,
gid_t __user *grouplist)
{
int i;
- int count = group_info->ngroups;
+ unsigned int count = group_info->ngroups;
for (i = 0; i < group_info->nblocks; i++) {
- int cp_count = min(NGROUPS_PER_BLOCK, count);
- int off = i * NGROUPS_PER_BLOCK;
- int len = cp_count * sizeof(*grouplist);
+ unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+ unsigned int len = cp_count * sizeof(*grouplist);
- if (copy_from_user(group_info->blocks[i], grouplist+off, len))
+ if (copy_from_user(group_info->blocks[i], grouplist, len))
return -EFAULT;
+ grouplist += NGROUPS_PER_BLOCK;
count -= cp_count;
}
return 0;
@@ -1472,7 +1472,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
!capable(CAP_SYS_RESOURCE))
return -EPERM;
- if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
+ if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
return -EPERM;
retval = security_task_setrlimit(resource, &new_rlim);
@@ -1637,7 +1637,7 @@ asmlinkage long sys_umask(int mask)
mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
return mask;
}
-
+
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
@@ -1742,6 +1742,17 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
error = prctl_set_seccomp(arg2);
break;
+ case PR_CAPBSET_READ:
+ if (!cap_valid(arg2))
+ return -EINVAL;
+ return !!cap_raised(current->cap_bset, arg2);
+ case PR_CAPBSET_DROP:
+#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+ return cap_prctl_drop(arg2);
+#else
+ return -EINVAL;
+#endif
+
default:
error = -EINVAL;
break;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index beee5b3b68a2..5b9b467de070 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -154,7 +154,10 @@ cond_syscall(sys_ioprio_get);
/* New file descriptors */
cond_syscall(sys_signalfd);
-cond_syscall(sys_timerfd);
cond_syscall(compat_sys_signalfd);
-cond_syscall(compat_sys_timerfd);
+cond_syscall(sys_timerfd_create);
+cond_syscall(sys_timerfd_settime);
+cond_syscall(sys_timerfd_gettime);
+cond_syscall(compat_sys_timerfd_settime);
+cond_syscall(compat_sys_timerfd_gettime);
cond_syscall(sys_eventfd);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7cb1ac3e6fff..8c98d8147d88 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -67,6 +67,7 @@ extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern int sysctl_panic_on_oom;
extern int sysctl_oom_kill_allocating_task;
+extern int sysctl_oom_dump_tasks;
extern int max_threads;
extern int core_uses_pid;
extern int suid_dumpable;
@@ -84,8 +85,11 @@ extern int sysctl_stat_interval;
extern int latencytop_enabled;
/* Constants used for minimum and maximum */
-#ifdef CONFIG_DETECT_SOFTLOCKUP
+#if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM)
static int one = 1;
+#endif
+
+#ifdef CONFIG_DETECT_SOFTLOCKUP
static int sixty = 60;
#endif
@@ -416,15 +420,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
-#ifdef CONFIG_SECURITY_CAPABILITIES
- {
- .procname = "cap-bound",
- .data = &cap_bset,
- .maxlen = sizeof(kernel_cap_t),
- .mode = 0600,
- .proc_handler = &proc_dointvec_bset,
- },
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
#ifdef CONFIG_BLK_DEV_INITRD
{
.ctl_name = KERN_REALROOTDEV,
@@ -877,6 +872,14 @@ static struct ctl_table vm_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "oom_dump_tasks",
+ .data = &sysctl_oom_dump_tasks,
+ .maxlen = sizeof(sysctl_oom_dump_tasks),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = VM_OVERCOMMIT_RATIO,
.procname = "overcommit_ratio",
.data = &sysctl_overcommit_ratio,
@@ -1150,6 +1153,19 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
#endif
+#ifdef CONFIG_HIGHMEM
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "highmem_is_dirtyable",
+ .data = &vm_highmem_is_dirtyable,
+ .maxlen = sizeof(vm_highmem_is_dirtyable),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
@@ -1196,6 +1212,14 @@ static struct ctl_table fs_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "nr_open",
+ .data = &sysctl_nr_open,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = FS_DENTRY,
.procname = "dentry-state",
.data = &dentry_stat,
@@ -2080,26 +2104,6 @@ static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
return 0;
}
-#ifdef CONFIG_SECURITY_CAPABILITIES
-/*
- * init may raise the set.
- */
-
-int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int op;
-
- if (write && !capable(CAP_SYS_MODULE)) {
- return -EPERM;
- }
-
- op = is_global_init(current) ? OP_SET : OP_AND;
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
- do_proc_dointvec_bset_conv,&op);
-}
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
-
/*
* Taint values can only be increased
*/
@@ -2513,12 +2517,6 @@ int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
return -ENOSYS;
}
-int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return -ENOSYS;
-}
-
int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index c3206fa50048..006365b69eaf 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -37,10 +37,6 @@ static struct trans_ctl_table trans_kern_table[] = {
{ KERN_NODENAME, "hostname" },
{ KERN_DOMAINNAME, "domainname" },
-#ifdef CONFIG_SECURITY_CAPABILITIES
- { KERN_CAP_BSET, "cap-bound" },
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
-
{ KERN_PANIC, "panic" },
{ KERN_REALROOTDEV, "real-root-dev" },
@@ -1498,9 +1494,6 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
(table->strategy == sysctl_ms_jiffies) ||
(table->proc_handler == proc_dostring) ||
(table->proc_handler == proc_dointvec) ||
-#ifdef CONFIG_SECURITY_CAPABILITIES
- (table->proc_handler == proc_dointvec_bset) ||
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
(table->proc_handler == proc_dointvec_minmax) ||
(table->proc_handler == proc_dointvec_jiffies) ||
(table->proc_handler == proc_dointvec_userhz_jiffies) ||
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 88cdb109e13c..06b6395b45b2 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -135,6 +135,12 @@ static int test_jprobe(void)
#ifdef CONFIG_KRETPROBES
static u32 krph_val;
+static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ krph_val = (rand1 / div_factor);
+ return 0;
+}
+
static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long ret = regs_return_value(regs);
@@ -144,13 +150,19 @@ static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
printk(KERN_ERR "Kprobe smoke test failed: "
"incorrect value in kretprobe handler\n");
}
+ if (krph_val == 0) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "call to kretprobe entry handler failed\n");
+ }
- krph_val = (rand1 / div_factor);
+ krph_val = rand1;
return 0;
}
static struct kretprobe rp = {
.handler = return_handler,
+ .entry_handler = entry_handler,
.kp.symbol_name = "kprobe_target"
};
@@ -167,7 +179,7 @@ static int test_kretprobe(void)
ret = kprobe_target(rand1);
unregister_kretprobe(&rp);
- if (krph_val == 0) {
+ if (krph_val != rand1) {
printk(KERN_ERR "Kprobe smoke test failed: "
"kretprobe handler not called\n");
handler_errors++;
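
A hedged sketch of the entry_handler/handler pairing that the updated smoke test exercises above. The struct fields and register/unregister calls mirror the test code; the probed symbol "target_func" and the handler names are hypothetical.

	#include <linux/kprobes.h>

	static int my_entry_handler(struct kretprobe_instance *ri,
				    struct pt_regs *regs)
	{
		/* runs on entry to target_func; returning 0 arms the
		 * return handler for this call */
		return 0;
	}

	static int my_ret_handler(struct kretprobe_instance *ri,
				  struct pt_regs *regs)
	{
		printk(KERN_INFO "target_func returned %lu\n",
		       regs_return_value(regs));
		return 0;
	}

	static struct kretprobe my_rp = {
		.entry_handler	= my_entry_handler,
		.handler	= my_ret_handler,
		.kp.symbol_name	= "target_func",
	};

	/* register_kretprobe(&my_rp) / unregister_kretprobe(&my_rp) in
	 * module init/exit, as in the smoke test above. */
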
diff --git a/kernel/time.c b/kernel/time.c
index 4064c0566e77..33af3e55570d 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -566,7 +566,11 @@ EXPORT_SYMBOL(jiffies_to_timeval);
clock_t jiffies_to_clock_t(long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+ return x * (USER_HZ / HZ);
+# else
return x / (HZ / USER_HZ);
+# endif
#else
u64 tmp = (u64)x * TICK_NSEC;
do_div(tmp, (NSEC_PER_SEC / USER_HZ));
@@ -599,7 +603,14 @@ EXPORT_SYMBOL(clock_t_to_jiffies);
u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+ x *= USER_HZ;
+ do_div(x, HZ);
+# elif HZ > USER_HZ
do_div(x, HZ / USER_HZ);
+# else
+ /* Nothing to do */
+# endif
#else
/*
* There are better ways that don't overflow early,
@@ -611,7 +622,6 @@ u64 jiffies_64_to_clock_t(u64 x)
#endif
return x;
}
-
EXPORT_SYMBOL(jiffies_64_to_clock_t);
u64 nsec_to_clock_t(u64 x)
@@ -646,7 +656,6 @@ u64 get_jiffies_64(void)
} while (read_seqretry(&xtime_lock, seq));
return ret;
}
-
EXPORT_SYMBOL(get_jiffies_64);
#endif
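
A worked example of why the new HZ < USER_HZ branch matters; the values are chosen only for illustration. With HZ = 250 and USER_HZ = 1000, the old expression x / (HZ / USER_HZ) computes 250 / 1000 = 0 in integer arithmetic and the division is nonsensical, while the new branch returns x * (USER_HZ / HZ) = x * 4, which is the correct scaling (x ticks at 250 Hz is x/250 seconds, i.e. 4x user-visible clock ticks). The same reasoning applies to the do_div() variant added to jiffies_64_to_clock_t().
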
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 6e9259a5d501..81afb3927ecc 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -363,15 +363,13 @@ void clocksource_unregister(struct clocksource *cs)
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
{
- char *curr = buf;
+ ssize_t count = 0;
spin_lock_irq(&clocksource_lock);
- curr += sprintf(curr, "%s ", curr_clocksource->name);
+ count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
spin_unlock_irq(&clocksource_lock);
- curr += sprintf(curr, "\n");
-
- return curr - buf;
+ return count;
}
/**
@@ -439,17 +437,20 @@ static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev, char *buf)
{
struct clocksource *src;
- char *curr = buf;
+ ssize_t count = 0;
spin_lock_irq(&clocksource_lock);
list_for_each_entry(src, &clocksource_list, list) {
- curr += sprintf(curr, "%s ", src->name);
+ count += snprintf(buf + count,
+ max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
+ "%s ", src->name);
}
spin_unlock_irq(&clocksource_lock);
- curr += sprintf(curr, "\n");
+ count += snprintf(buf + count,
+ max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
- return curr - buf;
+ return count;
}
/*
diff --git a/kernel/timer.c b/kernel/timer.c
index 9fbb472b8cf0..70b29b59343f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -818,12 +818,14 @@ unsigned long next_timer_interrupt(void)
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
void account_process_tick(struct task_struct *p, int user_tick)
{
+ cputime_t one_jiffy = jiffies_to_cputime(1);
+
if (user_tick) {
- account_user_time(p, jiffies_to_cputime(1));
- account_user_time_scaled(p, jiffies_to_cputime(1));
+ account_user_time(p, one_jiffy);
+ account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
} else {
- account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
- account_system_time_scaled(p, jiffies_to_cputime(1));
+ account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
+ account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
}
}
#endif
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0d385be682db..4f4008fc73e4 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -205,6 +205,19 @@ config SLUB_DEBUG_ON
off in a kernel built with CONFIG_SLUB_DEBUG_ON by specifying
"slub_debug=-".
+config SLUB_STATS
+ default n
+ bool "Enable SLUB performance statistics"
+ depends on SLUB
+ help
+	  SLUB statistics are useful to debug SLUB's allocation behavior in
+	  order to find ways to optimize the allocator. This should never be
+ enabled for production use since keeping statistics slows down
+ the allocator by a few percentage points. The slabinfo command
+ supports the determination of the most active slabs to figure
+ out which slabs are relevant to a particular load.
+ Try running: slabinfo -DA
+
config DEBUG_PREEMPT
bool "Debug preemptible kernel"
depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64)
diff --git a/lib/Makefile b/lib/Makefile
index 543f2502b60a..a18062e4633f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_SMP) += pcounter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
+obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o
diff --git a/lib/extable.c b/lib/extable.c
index 463f4560f16d..179c08745595 100644
--- a/lib/extable.c
+++ b/lib/extable.c
@@ -57,10 +57,10 @@ search_extable(const struct exception_table_entry *first,
while (first <= last) {
const struct exception_table_entry *mid;
- mid = (last - first) / 2 + first;
+ mid = ((last - first) >> 1) + first;
/*
- * careful, the distance between entries can be
- * larger than 2GB:
+	 * careful, the distance between value and insn
+	 * can be larger than LONG_MAX:
*/
if (mid->insn < value)
first = mid + 1;
diff --git a/lib/iommu-helper.c b/lib/iommu-helper.c
new file mode 100644
index 000000000000..495575a59ca6
--- /dev/null
+++ b/lib/iommu-helper.c
@@ -0,0 +1,80 @@
+/*
+ * IOMMU helper functions for the free area management
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+static unsigned long find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr,
+ unsigned long align_mask)
+{
+ unsigned long index, end, i;
+again:
+ index = find_next_zero_bit(map, size, start);
+
+ /* Align allocation */
+ index = (index + align_mask) & ~align_mask;
+
+ end = index + nr;
+ if (end >= size)
+ return -1;
+ for (i = index; i < end; i++) {
+ if (test_bit(i, map)) {
+ start = i+1;
+ goto again;
+ }
+ }
+ return index;
+}
+
+static inline void set_bit_area(unsigned long *map, unsigned long i,
+ int len)
+{
+ unsigned long end = i + len;
+ while (i < end) {
+ __set_bit(i, map);
+ i++;
+ }
+}
+
+static inline int is_span_boundary(unsigned int index, unsigned int nr,
+ unsigned long shift,
+ unsigned long boundary_size)
+{
+ shift = (shift + index) & (boundary_size - 1);
+ return shift + nr > boundary_size;
+}
+
+unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr,
+ unsigned long shift, unsigned long boundary_size,
+ unsigned long align_mask)
+{
+ unsigned long index;
+again:
+ index = find_next_zero_area(map, size, start, nr, align_mask);
+ if (index != -1) {
+ if (is_span_boundary(index, nr, shift, boundary_size)) {
+			/* this could be done more efficiently */
+ start = index + 1;
+ goto again;
+ }
+ set_bit_area(map, index, nr);
+ }
+ return index;
+}
+EXPORT_SYMBOL(iommu_area_alloc);
+
+void iommu_area_free(unsigned long *map, unsigned long start, unsigned int nr)
+{
+ unsigned long end = start + nr;
+
+ while (start < end) {
+ __clear_bit(start, map);
+ start++;
+ }
+}
+EXPORT_SYMBOL(iommu_area_free);
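
A hedged sketch of how an IOMMU driver might call the two exported helpers above. The bitmap size, slot count and boundary below are illustrative, and the include assumes the accompanying linux/iommu-helper.h header from this series declares the prototypes; note that boundary_size must be a power of two because is_span_boundary() masks with (boundary_size - 1).

	#include <linux/iommu-helper.h>	/* assumed to declare the helpers */

	/* carve 16 slots out of a 1024-slot bitmap, never letting the
	 * allocation straddle a 256-slot boundary */
	static unsigned long alloc_slots(unsigned long *bitmap)
	{
		/* returns (unsigned long)-1 when no suitable free area exists */
		return iommu_area_alloc(bitmap, 1024 /* size */, 0 /* start */,
					16 /* nr */, 0 /* shift */,
					256 /* boundary_size */,
					0 /* align_mask */);
	}

	static void free_slots(unsigned long *bitmap, unsigned long idx)
	{
		if (idx != (unsigned long)-1)
			iommu_area_free(bitmap, idx, 16);
	}
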
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 48c250fe2233..65f0e758ec38 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -95,14 +95,17 @@ static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
- struct radix_tree_node *ret;
+ struct radix_tree_node *ret = NULL;
gfp_t gfp_mask = root_gfp_mask(root);
- ret = kmem_cache_alloc(radix_tree_node_cachep,
- set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
- if (ret == NULL && !(gfp_mask & __GFP_WAIT)) {
+ if (!(gfp_mask & __GFP_WAIT)) {
struct radix_tree_preload *rtp;
+ /*
+		 * Provided the caller has preloaded, we will always
+		 * succeed in getting a node here (and never reach
+ * kmem_cache_alloc)
+ */
rtp = &__get_cpu_var(radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes[rtp->nr - 1];
@@ -110,6 +113,10 @@ radix_tree_node_alloc(struct radix_tree_root *root)
rtp->nr--;
}
}
+ if (ret == NULL)
+ ret = kmem_cache_alloc(radix_tree_node_cachep,
+ set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+
BUG_ON(radix_tree_is_indirect_ptr(ret));
return ret;
}
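
A hedged sketch of the caller-side preload pattern that the comment in the hunk above relies on: preload with a sleeping allocation outside the lock, then insert (which cannot sleep) under the lock. "my_tree", "my_lock" and "item" are hypothetical.

	#include <linux/gfp.h>
	#include <linux/radix-tree.h>
	#include <linux/spinlock.h>

	static int insert_item(struct radix_tree_root *my_tree,
			       spinlock_t *my_lock,
			       unsigned long index, void *item)
	{
		int err;

		err = radix_tree_preload(GFP_KERNEL);	/* may sleep */
		if (err)
			return err;

		spin_lock(my_lock);
		err = radix_tree_insert(my_tree, index, item);
		spin_unlock(my_lock);

		radix_tree_preload_end();
		return err;
	}
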
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index eddc9b3d3876..6c90fb90e19c 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -42,7 +42,9 @@ unsigned int debug_smp_processor_id(void)
if (!printk_ratelimit())
goto out_enable;
- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
+ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
+ "code: %s/%d\n",
+ preempt_count() - 1, current->comm, current->pid);
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 1a8050ade861..4bb5a11e18a2 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -282,6 +282,15 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
return (addr & ~mask) != 0;
}
+static inline unsigned int is_span_boundary(unsigned int index,
+ unsigned int nslots,
+ unsigned long offset_slots,
+ unsigned long max_slots)
+{
+ unsigned long offset = (offset_slots + index) & (max_slots - 1);
+ return offset + nslots > max_slots;
+}
+
/*
* Allocates bounce buffer and returns its kernel virtual address.
*/
@@ -292,6 +301,16 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
char *dma_addr;
unsigned int nslots, stride, index, wrap;
int i;
+ unsigned long start_dma_addr;
+ unsigned long mask;
+ unsigned long offset_slots;
+ unsigned long max_slots;
+
+ mask = dma_get_seg_boundary(hwdev);
+ start_dma_addr = virt_to_bus(io_tlb_start) & mask;
+
+ offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+ max_slots = ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
/*
* For mappings greater than a page, we limit the stride (and
@@ -311,10 +330,17 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
*/
spin_lock_irqsave(&io_tlb_lock, flags);
{
- wrap = index = ALIGN(io_tlb_index, stride);
-
+ index = ALIGN(io_tlb_index, stride);
if (index >= io_tlb_nslabs)
- wrap = index = 0;
+ index = 0;
+
+ while (is_span_boundary(index, nslots, offset_slots,
+ max_slots)) {
+ index += stride;
+ if (index >= io_tlb_nslabs)
+ index = 0;
+ }
+ wrap = index;
do {
/*
@@ -341,9 +367,12 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
goto found;
}
- index += stride;
- if (index >= io_tlb_nslabs)
- index = 0;
+ do {
+ index += stride;
+ if (index >= io_tlb_nslabs)
+ index = 0;
+ } while (is_span_boundary(index, nslots, offset_slots,
+ max_slots));
} while (index != wrap);
spin_unlock_irqrestore(&io_tlb_lock, flags);
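
A numeric illustration of the is_span_boundary() check added above, with made-up values: suppose max_slots = 64, offset_slots = 60, index = 2 and nslots = 8. Then offset = (60 + 2) & 63 = 62, and 62 + 8 = 70 > 64, so the candidate slot range would cross the device's DMA segment boundary and the allocator advances index by stride and retries, exactly as the new while loops do.
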
diff --git a/mm/Makefile b/mm/Makefile
index 5c0b0ea7572d..9f117bab5322 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -13,8 +13,10 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
page_isolation.o $(mmu-y)
+obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o
obj-$(CONFIG_BOUNCE) += bounce.o
obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o
+obj-$(CONFIG_HAS_DMA) += dmapool.o
obj-$(CONFIG_HUGETLBFS) += hugetlb.o
obj-$(CONFIG_NUMA) += mempolicy.o
obj-$(CONFIG_SPARSEMEM) += sparse.o
@@ -30,4 +32,5 @@ obj-$(CONFIG_FS_XIP) += filemap_xip.o
obj-$(CONFIG_MIGRATION) += migrate.o
obj-$(CONFIG_SMP) += allocpercpu.o
obj-$(CONFIG_QUICKLIST) += quicklist.o
+obj-$(CONFIG_CGROUP_MEM_CONT) += memcontrol.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 00b02623f008..7e58322b7134 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(__percpu_populate_mask);
*/
void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
{
- void *pdata = kzalloc(sizeof(struct percpu_data), gfp);
+ void *pdata = kzalloc(nr_cpu_ids * sizeof(void *), gfp);
void *__pdata = __percpu_disguise(pdata);
if (unlikely(!pdata))
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 00a96970b237..f6ff4337b424 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -111,11 +111,12 @@ static unsigned long __init init_bootmem_core(pg_data_t *pgdat,
* might be used for boot-time allocations - or it might get added
* to the free page pool later on.
*/
-static void __init reserve_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
- unsigned long size)
+static int __init reserve_bootmem_core(bootmem_data_t *bdata,
+ unsigned long addr, unsigned long size, int flags)
{
unsigned long sidx, eidx;
unsigned long i;
+ int ret;
/*
* round up, partially reserved pages are considered
@@ -133,7 +134,20 @@ static void __init reserve_bootmem_core(bootmem_data_t *bdata, unsigned long add
#ifdef CONFIG_DEBUG_BOOTMEM
printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
#endif
+ if (flags & BOOTMEM_EXCLUSIVE) {
+ ret = -EBUSY;
+ goto err;
+ }
}
+
+ return 0;
+
+err:
+ /* unreserve memory we accidentally reserved */
+ for (i--; i >= sidx; i--)
+ clear_bit(i, bdata->node_bootmem_map);
+
+ return ret;
}
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
@@ -374,9 +388,9 @@ unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
}
void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
- unsigned long size)
+ unsigned long size, int flags)
{
- reserve_bootmem_core(pgdat->bdata, physaddr, size);
+ reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
}
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
@@ -398,9 +412,10 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
}
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-void __init reserve_bootmem(unsigned long addr, unsigned long size)
+int __init reserve_bootmem(unsigned long addr, unsigned long size,
+ int flags)
{
- reserve_bootmem_core(NODE_DATA(0)->bdata, addr, size);
+ return reserve_bootmem_core(NODE_DATA(0)->bdata, addr, size, flags);
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
diff --git a/mm/dmapool.c b/mm/dmapool.c
new file mode 100644
index 000000000000..34aaac451a96
--- /dev/null
+++ b/mm/dmapool.c
@@ -0,0 +1,500 @@
+/*
+ * DMA Pool allocator
+ *
+ * Copyright 2001 David Brownell
+ * Copyright 2007 Intel Corporation
+ * Author: Matthew Wilcox <willy@linux.intel.com>
+ *
+ * This software may be redistributed and/or modified under the terms of
+ * the GNU General Public License ("GPL") version 2 as published by the
+ * Free Software Foundation.
+ *
+ * This allocator returns small blocks of a given size which are DMA-able by
+ * the given device. It uses the dma_alloc_coherent page allocator to get
+ * new pages, then splits them up into blocks of the required size.
+ * Many older drivers still have their own code to do this.
+ *
+ * The current design of this allocator is fairly simple. The pool is
+ * represented by the 'struct dma_pool' which keeps a doubly-linked list of
+ * allocated pages. Each page in the page_list is split into blocks of at
+ * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
+ * list of free blocks within the page. Used blocks aren't tracked, but we
+ * keep a count of how many are currently allocated from each page.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct dma_pool { /* the pool */
+ struct list_head page_list;
+ spinlock_t lock;
+ size_t size;
+ struct device *dev;
+ size_t allocation;
+ size_t boundary;
+ char name[32];
+ wait_queue_head_t waitq;
+ struct list_head pools;
+};
+
+struct dma_page { /* cacheable header for 'allocation' bytes */
+ struct list_head page_list;
+ void *vaddr;
+ dma_addr_t dma;
+ unsigned int in_use;
+ unsigned int offset;
+};
+
+#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
+
+static DEFINE_MUTEX(pools_lock);
+
+static ssize_t
+show_pools(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ unsigned temp;
+ unsigned size;
+ char *next;
+ struct dma_page *page;
+ struct dma_pool *pool;
+
+ next = buf;
+ size = PAGE_SIZE;
+
+ temp = scnprintf(next, size, "poolinfo - 0.1\n");
+ size -= temp;
+ next += temp;
+
+ mutex_lock(&pools_lock);
+ list_for_each_entry(pool, &dev->dma_pools, pools) {
+ unsigned pages = 0;
+ unsigned blocks = 0;
+
+ list_for_each_entry(page, &pool->page_list, page_list) {
+ pages++;
+ blocks += page->in_use;
+ }
+
+ /* per-pool info, no real statistics yet */
+ temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
+ pool->name, blocks,
+ pages * (pool->allocation / pool->size),
+ pool->size, pages);
+ size -= temp;
+ next += temp;
+ }
+ mutex_unlock(&pools_lock);
+
+ return PAGE_SIZE - size;
+}
+
+static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
+
+/**
+ * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
+ * @name: name of pool, for diagnostics
+ * @dev: device that will be doing the DMA
+ * @size: size of the blocks in this pool.
+ * @align: alignment requirement for blocks; must be a power of two
+ * @boundary: returned blocks won't cross this power of two boundary
+ * Context: !in_interrupt()
+ *
+ * Returns a dma allocation pool with the requested characteristics, or
+ * null if one can't be created. Given one of these pools, dma_pool_alloc()
+ * may be used to allocate memory. Such memory will all have "consistent"
+ * DMA mappings, accessible by the device and its driver without using
+ * cache flushing primitives. The actual size of blocks allocated may be
+ * larger than requested because of alignment.
+ *
+ * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
+ * cross that size boundary. This is useful for devices which have
+ * addressing restrictions on individual DMA transfers, such as not crossing
+ * boundaries of 4KBytes.
+ */
+struct dma_pool *dma_pool_create(const char *name, struct device *dev,
+ size_t size, size_t align, size_t boundary)
+{
+ struct dma_pool *retval;
+ size_t allocation;
+
+ if (align == 0) {
+ align = 1;
+ } else if (align & (align - 1)) {
+ return NULL;
+ }
+
+ if (size == 0) {
+ return NULL;
+ } else if (size < 4) {
+ size = 4;
+ }
+
+ if ((size % align) != 0)
+ size = ALIGN(size, align);
+
+ allocation = max_t(size_t, size, PAGE_SIZE);
+
+ if (!boundary) {
+ boundary = allocation;
+ } else if ((boundary < size) || (boundary & (boundary - 1))) {
+ return NULL;
+ }
+
+ retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
+ if (!retval)
+ return retval;
+
+ strlcpy(retval->name, name, sizeof(retval->name));
+
+ retval->dev = dev;
+
+ INIT_LIST_HEAD(&retval->page_list);
+ spin_lock_init(&retval->lock);
+ retval->size = size;
+ retval->boundary = boundary;
+ retval->allocation = allocation;
+ init_waitqueue_head(&retval->waitq);
+
+ if (dev) {
+ int ret;
+
+ mutex_lock(&pools_lock);
+ if (list_empty(&dev->dma_pools))
+ ret = device_create_file(dev, &dev_attr_pools);
+ else
+ ret = 0;
+ /* note: not currently insisting "name" be unique */
+ if (!ret)
+ list_add(&retval->pools, &dev->dma_pools);
+ else {
+ kfree(retval);
+ retval = NULL;
+ }
+ mutex_unlock(&pools_lock);
+ } else
+ INIT_LIST_HEAD(&retval->pools);
+
+ return retval;
+}
+EXPORT_SYMBOL(dma_pool_create);
+
+static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
+{
+ unsigned int offset = 0;
+ unsigned int next_boundary = pool->boundary;
+
+ do {
+ unsigned int next = offset + pool->size;
+ if (unlikely((next + pool->size) >= next_boundary)) {
+ next = next_boundary;
+ next_boundary += pool->boundary;
+ }
+ *(int *)(page->vaddr + offset) = next;
+ offset = next;
+ } while (offset < pool->allocation);
+}
+
+static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
+{
+ struct dma_page *page;
+
+ page = kmalloc(sizeof(*page), mem_flags);
+ if (!page)
+ return NULL;
+ page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
+ &page->dma, mem_flags);
+ if (page->vaddr) {
+#ifdef CONFIG_DEBUG_SLAB
+ memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+#endif
+ pool_initialise_page(pool, page);
+ list_add(&page->page_list, &pool->page_list);
+ page->in_use = 0;
+ page->offset = 0;
+ } else {
+ kfree(page);
+ page = NULL;
+ }
+ return page;
+}
+
+static inline int is_page_busy(struct dma_page *page)
+{
+ return page->in_use != 0;
+}
+
+static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
+{
+ dma_addr_t dma = page->dma;
+
+#ifdef CONFIG_DEBUG_SLAB
+ memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
+#endif
+ dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
+ list_del(&page->page_list);
+ kfree(page);
+}
+
+/**
+ * dma_pool_destroy - destroys a pool of dma memory blocks.
+ * @pool: dma pool that will be destroyed
+ * Context: !in_interrupt()
+ *
+ * Caller guarantees that no more memory from the pool is in use,
+ * and that nothing will try to use the pool after this call.
+ */
+void dma_pool_destroy(struct dma_pool *pool)
+{
+ mutex_lock(&pools_lock);
+ list_del(&pool->pools);
+ if (pool->dev && list_empty(&pool->dev->dma_pools))
+ device_remove_file(pool->dev, &dev_attr_pools);
+ mutex_unlock(&pools_lock);
+
+ while (!list_empty(&pool->page_list)) {
+ struct dma_page *page;
+ page = list_entry(pool->page_list.next,
+ struct dma_page, page_list);
+ if (is_page_busy(page)) {
+ if (pool->dev)
+ dev_err(pool->dev,
+ "dma_pool_destroy %s, %p busy\n",
+ pool->name, page->vaddr);
+ else
+ printk(KERN_ERR
+ "dma_pool_destroy %s, %p busy\n",
+ pool->name, page->vaddr);
+ /* leak the still-in-use consistent memory */
+ list_del(&page->page_list);
+ kfree(page);
+ } else
+ pool_free_page(pool, page);
+ }
+
+ kfree(pool);
+}
+EXPORT_SYMBOL(dma_pool_destroy);
+
+/**
+ * dma_pool_alloc - get a block of consistent memory
+ * @pool: dma pool that will produce the block
+ * @mem_flags: GFP_* bitmask
+ * @handle: pointer to dma address of block
+ *
+ * This returns the kernel virtual address of a currently unused block,
+ * and reports its dma address through the handle.
+ * If such a memory block can't be allocated, %NULL is returned.
+ */
+void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
+ dma_addr_t *handle)
+{
+ unsigned long flags;
+ struct dma_page *page;
+ size_t offset;
+ void *retval;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ restart:
+ list_for_each_entry(page, &pool->page_list, page_list) {
+ if (page->offset < pool->allocation)
+ goto ready;
+ }
+ page = pool_alloc_page(pool, GFP_ATOMIC);
+ if (!page) {
+ if (mem_flags & __GFP_WAIT) {
+ DECLARE_WAITQUEUE(wait, current);
+
+ __set_current_state(TASK_INTERRUPTIBLE);
+ __add_wait_queue(&pool->waitq, &wait);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ schedule_timeout(POOL_TIMEOUT_JIFFIES);
+
+ spin_lock_irqsave(&pool->lock, flags);
+ __remove_wait_queue(&pool->waitq, &wait);
+ goto restart;
+ }
+ retval = NULL;
+ goto done;
+ }
+
+ ready:
+ page->in_use++;
+ offset = page->offset;
+ page->offset = *(int *)(page->vaddr + offset);
+ retval = offset + page->vaddr;
+ *handle = offset + page->dma;
+#ifdef CONFIG_DEBUG_SLAB
+ memset(retval, POOL_POISON_ALLOCATED, pool->size);
+#endif
+ done:
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return retval;
+}
+EXPORT_SYMBOL(dma_pool_alloc);
+
+static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
+{
+ unsigned long flags;
+ struct dma_page *page;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ list_for_each_entry(page, &pool->page_list, page_list) {
+ if (dma < page->dma)
+ continue;
+ if (dma < (page->dma + pool->allocation))
+ goto done;
+ }
+ page = NULL;
+ done:
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return page;
+}
+
+/**
+ * dma_pool_free - put block back into dma pool
+ * @pool: the dma pool holding the block
+ * @vaddr: virtual address of block
+ * @dma: dma address of block
+ *
+ * Caller promises neither device nor driver will again touch this block
+ * unless it is first re-allocated.
+ */
+void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
+{
+ struct dma_page *page;
+ unsigned long flags;
+ unsigned int offset;
+
+ page = pool_find_page(pool, dma);
+ if (!page) {
+ if (pool->dev)
+ dev_err(pool->dev,
+ "dma_pool_free %s, %p/%lx (bad dma)\n",
+ pool->name, vaddr, (unsigned long)dma);
+ else
+ printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
+ pool->name, vaddr, (unsigned long)dma);
+ return;
+ }
+
+ offset = vaddr - page->vaddr;
+#ifdef CONFIG_DEBUG_SLAB
+ if ((dma - page->dma) != offset) {
+ if (pool->dev)
+ dev_err(pool->dev,
+ "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
+ pool->name, vaddr, (unsigned long long)dma);
+ else
+ printk(KERN_ERR
+ "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
+ pool->name, vaddr, (unsigned long long)dma);
+ return;
+ }
+ {
+ unsigned int chain = page->offset;
+ while (chain < pool->allocation) {
+ if (chain != offset) {
+ chain = *(int *)(page->vaddr + chain);
+ continue;
+ }
+ if (pool->dev)
+ dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
+ "already free\n", pool->name,
+ (unsigned long long)dma);
+ else
+ printk(KERN_ERR "dma_pool_free %s, dma %Lx "
+ "already free\n", pool->name,
+ (unsigned long long)dma);
+ return;
+ }
+ }
+ memset(vaddr, POOL_POISON_FREED, pool->size);
+#endif
+
+ spin_lock_irqsave(&pool->lock, flags);
+ page->in_use--;
+ *(int *)vaddr = page->offset;
+ page->offset = offset;
+ if (waitqueue_active(&pool->waitq))
+ wake_up_locked(&pool->waitq);
+ /*
+	 * Resist the temptation to do
+	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
+	 * Better to have a few empty pages hang around.
+ */
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+EXPORT_SYMBOL(dma_pool_free);
+
+/*
+ * Managed DMA pool
+ */
+static void dmam_pool_release(struct device *dev, void *res)
+{
+ struct dma_pool *pool = *(struct dma_pool **)res;
+
+ dma_pool_destroy(pool);
+}
+
+static int dmam_pool_match(struct device *dev, void *res, void *match_data)
+{
+ return *(struct dma_pool **)res == match_data;
+}
+
+/**
+ * dmam_pool_create - Managed dma_pool_create()
+ * @name: name of pool, for diagnostics
+ * @dev: device that will be doing the DMA
+ * @size: size of the blocks in this pool.
+ * @align: alignment requirement for blocks; must be a power of two
+ * @allocation: returned blocks won't cross this boundary (or zero)
+ *
+ * Managed dma_pool_create(). DMA pool created with this function is
+ * automatically destroyed on driver detach.
+ */
+struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
+ size_t size, size_t align, size_t allocation)
+{
+ struct dma_pool **ptr, *pool;
+
+ ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
+ if (pool)
+ devres_add(dev, ptr);
+ else
+ devres_free(ptr);
+
+ return pool;
+}
+EXPORT_SYMBOL(dmam_pool_create);
+
+/**
+ * dmam_pool_destroy - Managed dma_pool_destroy()
+ * @pool: dma pool that will be destroyed
+ *
+ * Managed dma_pool_destroy().
+ */
+void dmam_pool_destroy(struct dma_pool *pool)
+{
+ struct device *dev = pool->dev;
+
+ dma_pool_destroy(pool);
+ WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
+}
+EXPORT_SYMBOL(dmam_pool_destroy);
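
A hedged sketch of typical driver usage of the pool API defined above: 64-byte blocks, 16-byte aligned, never crossing a 4096-byte boundary. "mydev_pool", "mydev_desc" and the sizes are illustrative only.

	#include <linux/dma-mapping.h>
	#include <linux/dmapool.h>
	#include <linux/gfp.h>

	static struct dma_pool *mydev_pool;

	static int mydev_setup(struct device *dev)
	{
		mydev_pool = dma_pool_create("mydev_desc", dev, 64, 16, 4096);
		return mydev_pool ? 0 : -ENOMEM;
	}

	static void *mydev_get_desc(dma_addr_t *dma)
	{
		/* GFP_KERNEL lets dma_pool_alloc() wait on the pool's
		 * waitqueue when all pages are exhausted, as implemented
		 * above */
		return dma_pool_alloc(mydev_pool, GFP_KERNEL, dma);
	}

	static void mydev_put_desc(void *vaddr, dma_addr_t dma)
	{
		dma_pool_free(mydev_pool, vaddr, dma);
	}

	static void mydev_teardown(void)
	{
		dma_pool_destroy(mydev_pool);
	}
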
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 0df4c899e979..3c0f1e99f5e4 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -49,9 +49,21 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
goto out;
}
- if (mapping->a_ops->get_xip_page)
- /* no bad return value, but ignore advice */
+ if (mapping->a_ops->get_xip_page) {
+ switch (advice) {
+ case POSIX_FADV_NORMAL:
+ case POSIX_FADV_RANDOM:
+ case POSIX_FADV_SEQUENTIAL:
+ case POSIX_FADV_WILLNEED:
+ case POSIX_FADV_NOREUSE:
+ case POSIX_FADV_DONTNEED:
+ /* no bad return value, but ignore advice */
+ break;
+ default:
+ ret = -EINVAL;
+ }
goto out;
+ }
/* Careful about overflows. Len == 0 means "as much as possible" */
endbyte = offset + len;
diff --git a/mm/filemap.c b/mm/filemap.c
index 76bea88cbebc..5357fcc4643b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -33,6 +33,7 @@
#include <linux/syscalls.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
+#include <linux/memcontrol.h>
#include "internal.h"
/*
@@ -65,7 +66,6 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
* ->private_lock (__free_pte->__set_page_dirty_buffers)
* ->swap_lock (exclusive_swap_page, others)
* ->mapping->tree_lock
- * ->zone.lock
*
* ->i_mutex
* ->i_mmap_lock (truncate->unmap_mapping_range)
@@ -119,6 +119,7 @@ void __remove_from_page_cache(struct page *page)
{
struct address_space *mapping = page->mapping;
+ mem_cgroup_uncharge_page(page);
radix_tree_delete(&mapping->page_tree, page->index);
page->mapping = NULL;
mapping->nrpages--;
@@ -459,8 +460,12 @@ int filemap_write_and_wait_range(struct address_space *mapping,
int add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t offset, gfp_t gfp_mask)
{
- int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+ int error = mem_cgroup_cache_charge(page, current->mm,
+ gfp_mask & ~__GFP_HIGHMEM);
+ if (error)
+ goto out;
+ error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
if (error == 0) {
write_lock_irq(&mapping->tree_lock);
error = radix_tree_insert(&mapping->page_tree, offset, page);
@@ -471,10 +476,14 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
page->index = offset;
mapping->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
- }
+ } else
+ mem_cgroup_uncharge_page(page);
+
write_unlock_irq(&mapping->tree_lock);
radix_tree_preload_end();
- }
+ } else
+ mem_cgroup_uncharge_page(page);
+out:
return error;
}
EXPORT_SYMBOL(add_to_page_cache);
@@ -528,7 +537,7 @@ static inline void wake_up_page(struct page *page, int bit)
__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
{
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
@@ -552,7 +561,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
* the clear_bit and the read of the waitqueue (to avoid SMP races with a
* parallel wait_on_page_locked()).
*/
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
{
smp_mb__before_clear_bit();
if (!TestClearPageLocked(page))
@@ -586,7 +595,7 @@ EXPORT_SYMBOL(end_page_writeback);
* chances are that on the second loop, the block layer's plug list is empty,
* so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
*/
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
@@ -607,7 +616,7 @@ int fastcall __lock_page_killable(struct page *page)
* Variant of lock_page that does not require the caller to hold a reference
* on the page's mapping.
*/
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
{
DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -1277,7 +1286,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.
*/
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
{
struct address_space *mapping = file->f_mapping;
struct page *page;
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index f874ae818ad3..0420a0292b03 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -431,7 +431,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
else
return PTR_ERR(page);
}
- zero_user_page(page, offset, length, KM_USER0);
+ zero_user(page, offset, length);
return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
diff --git a/mm/fremap.c b/mm/fremap.c
index 14bd3bf7826e..69a37c2bdf81 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -190,10 +190,13 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
*/
if (mapping_cap_account_dirty(mapping)) {
unsigned long addr;
+ struct file *file = vma->vm_file;
flags &= MAP_NONBLOCK;
- addr = mmap_region(vma->vm_file, start, size,
+ get_file(file);
+ addr = mmap_region(file, start, size,
flags, vma->vm_flags, pgoff, 1);
+ fput(file);
if (IS_ERR_VALUE(addr)) {
err = addr;
} else {
diff --git a/mm/highmem.c b/mm/highmem.c
index 7a967bc35152..35d47733cde4 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -163,7 +163,7 @@ start:
return vaddr;
}
-void fastcall *kmap_high(struct page *page)
+void *kmap_high(struct page *page)
{
unsigned long vaddr;
@@ -185,7 +185,7 @@ void fastcall *kmap_high(struct page *page)
EXPORT_SYMBOL(kmap_high);
-void fastcall kunmap_high(struct page *page)
+void kunmap_high(struct page *page)
{
unsigned long vaddr;
unsigned long nr;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index db861d8b6c28..1a5642074e34 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -813,6 +813,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
spin_unlock(&mm->page_table_lock);
copy_huge_page(new_page, old_page, address, vma);
+ __SetPageUptodate(new_page);
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & HPAGE_MASK);
@@ -858,6 +859,7 @@ retry:
goto out;
}
clear_huge_page(page, address);
+ __SetPageUptodate(page);
if (vma->vm_flags & VM_SHARED) {
int err;
diff --git a/mm/internal.h b/mm/internal.h
index 953f941ea867..5a9a6200e034 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -24,7 +24,7 @@ static inline void set_page_count(struct page *page, int v)
*/
static inline void set_page_refcounted(struct page *page)
{
- VM_BUG_ON(PageCompound(page) && PageTail(page));
+ VM_BUG_ON(PageTail(page));
VM_BUG_ON(atomic_read(&page->_count));
set_page_count(page, 1);
}
@@ -34,7 +34,7 @@ static inline void __put_page(struct page *page)
atomic_dec(&page->_count);
}
-extern void fastcall __init __free_pages_bootmem(struct page *page,
+extern void __init __free_pages_bootmem(struct page *page,
unsigned int order);
/*
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
new file mode 100644
index 000000000000..5c2c702af617
--- /dev/null
+++ b/mm/memcontrol.c
@@ -0,0 +1,1192 @@
+/* memcontrol.c - Memory Controller
+ *
+ * Copyright IBM Corporation, 2007
+ * Author Balbir Singh <balbir@linux.vnet.ibm.com>
+ *
+ * Copyright 2007 OpenVZ SWsoft Inc
+ * Author: Pavel Emelianov <xemul@openvz.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/res_counter.h>
+#include <linux/memcontrol.h>
+#include <linux/cgroup.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/page-flags.h>
+#include <linux/backing-dev.h>
+#include <linux/bit_spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/swap.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+
+#include <asm/uaccess.h>
+
+struct cgroup_subsys mem_cgroup_subsys;
+static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
+
+/*
+ * Statistics for memory cgroup.
+ */
+enum mem_cgroup_stat_index {
+ /*
+ * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
+ */
+ MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
+ MEM_CGROUP_STAT_RSS, /* # of pages charged as rss */
+
+ MEM_CGROUP_STAT_NSTATS,
+};
+
+struct mem_cgroup_stat_cpu {
+ s64 count[MEM_CGROUP_STAT_NSTATS];
+} ____cacheline_aligned_in_smp;
+
+struct mem_cgroup_stat {
+ struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
+};
+
+/*
+ * For accounting with irqs disabled, there is no need to increment the preempt count.
+ */
+static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
+ enum mem_cgroup_stat_index idx, int val)
+{
+ int cpu = smp_processor_id();
+ stat->cpustat[cpu].count[idx] += val;
+}
+
+static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
+ enum mem_cgroup_stat_index idx)
+{
+ int cpu;
+ s64 ret = 0;
+ for_each_possible_cpu(cpu)
+ ret += stat->cpustat[cpu].count[idx];
+ return ret;
+}
+
+/*
+ * per-zone information in memory controller.
+ */
+
+enum mem_cgroup_zstat_index {
+ MEM_CGROUP_ZSTAT_ACTIVE,
+ MEM_CGROUP_ZSTAT_INACTIVE,
+
+ NR_MEM_CGROUP_ZSTAT,
+};
+
+struct mem_cgroup_per_zone {
+ /*
+ * spin_lock to protect the per cgroup LRU
+ */
+ spinlock_t lru_lock;
+ struct list_head active_list;
+ struct list_head inactive_list;
+ unsigned long count[NR_MEM_CGROUP_ZSTAT];
+};
+/* Macro for accessing counter */
+#define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
+
+struct mem_cgroup_per_node {
+ struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
+};
+
+struct mem_cgroup_lru_info {
+ struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
+};
+
+/*
+ * The memory controller data structure. The memory controller controls both
+ * page cache and RSS per cgroup. We would eventually like to provide
+ * statistics based on the statistics developed by Rik Van Riel for clock-pro,
+ * to help the administrator determine what knobs to tune.
+ *
+ * TODO: Add a water mark for the memory controller. Reclaim will begin when
+ * we hit the water mark. Maybe even add a low water mark, such that
+ * no reclaim occurs from a cgroup at its low water mark; this is
+ * a feature that will be implemented much later.
+ */
+struct mem_cgroup {
+ struct cgroup_subsys_state css;
+ /*
+ * the counter to account for memory usage
+ */
+ struct res_counter res;
+ /*
+ * Per cgroup active and inactive list, similar to the
+ * per zone LRU lists.
+ */
+ struct mem_cgroup_lru_info info;
+
+ int prev_priority; /* for recording reclaim priority */
+ /*
+ * statistics.
+ */
+ struct mem_cgroup_stat stat;
+};
+
+/*
+ * We use the lower bit of the page->page_cgroup pointer as a bit spin
+ * lock. We need to ensure that page->page_cgroup is at least
+ * two-byte aligned (based on comments from Nick Piggin).
+ */
+#define PAGE_CGROUP_LOCK_BIT 0x0
+#define PAGE_CGROUP_LOCK (1 << PAGE_CGROUP_LOCK_BIT)
+
+/*
+ * A page_cgroup page is associated with every page descriptor. The
+ * page_cgroup helps us identify information about the cgroup
+ */
+struct page_cgroup {
+ struct list_head lru; /* per cgroup LRU list */
+ struct page *page;
+ struct mem_cgroup *mem_cgroup;
+ atomic_t ref_cnt; /* Helpful when pages move b/w */
+ /* mapped and cached states */
+ int flags;
+};
+#define PAGE_CGROUP_FLAG_CACHE (0x1) /* charged as cache */
+#define PAGE_CGROUP_FLAG_ACTIVE (0x2) /* page is active in this cgroup */
+
+static inline int page_cgroup_nid(struct page_cgroup *pc)
+{
+ return page_to_nid(pc->page);
+}
+
+static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
+{
+ return page_zonenum(pc->page);
+}
+
+enum {
+ MEM_CGROUP_TYPE_UNSPEC = 0,
+ MEM_CGROUP_TYPE_MAPPED,
+ MEM_CGROUP_TYPE_CACHED,
+ MEM_CGROUP_TYPE_ALL,
+ MEM_CGROUP_TYPE_MAX,
+};
+
+enum charge_type {
+ MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
+ MEM_CGROUP_CHARGE_TYPE_MAPPED,
+};
+
+
+/*
+ * Always modified under the lru lock, so there is no need to preempt_disable().
+ */
+static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
+ bool charge)
+{
+ int val = (charge)? 1 : -1;
+ struct mem_cgroup_stat *stat = &mem->stat;
+ VM_BUG_ON(!irqs_disabled());
+
+ if (flags & PAGE_CGROUP_FLAG_CACHE)
+ __mem_cgroup_stat_add_safe(stat,
+ MEM_CGROUP_STAT_CACHE, val);
+ else
+ __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
+}
+
+static inline struct mem_cgroup_per_zone *
+mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
+{
+ BUG_ON(!mem->info.nodeinfo[nid]);
+ return &mem->info.nodeinfo[nid]->zoneinfo[zid];
+}
+
+static inline struct mem_cgroup_per_zone *
+page_cgroup_zoneinfo(struct page_cgroup *pc)
+{
+ struct mem_cgroup *mem = pc->mem_cgroup;
+ int nid = page_cgroup_nid(pc);
+ int zid = page_cgroup_zid(pc);
+
+ return mem_cgroup_zoneinfo(mem, nid, zid);
+}
+
+static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
+ enum mem_cgroup_zstat_index idx)
+{
+ int nid, zid;
+ struct mem_cgroup_per_zone *mz;
+ u64 total = 0;
+
+ for_each_online_node(nid)
+ for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+ mz = mem_cgroup_zoneinfo(mem, nid, zid);
+ total += MEM_CGROUP_ZSTAT(mz, idx);
+ }
+ return total;
+}
+
+static struct mem_cgroup init_mem_cgroup;
+
+static inline
+struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
+{
+ return container_of(cgroup_subsys_state(cont,
+ mem_cgroup_subsys_id), struct mem_cgroup,
+ css);
+}
+
+static inline
+struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
+{
+ return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
+ struct mem_cgroup, css);
+}
+
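+/*
+ * Associate a freshly initialized mm with the mem_cgroup of the given
+ * task and take a css reference on it; mm_free_cgroup() drops that
+ * reference when the mm goes away.
+ */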
+void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
+{
+ struct mem_cgroup *mem;
+
+ mem = mem_cgroup_from_task(p);
+ css_get(&mem->css);
+ mm->mem_cgroup = mem;
+}
+
+void mm_free_cgroup(struct mm_struct *mm)
+{
+ css_put(&mm->mem_cgroup->css);
+}
+
+static inline int page_cgroup_locked(struct page *page)
+{
+ return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT,
+ &page->page_cgroup);
+}
+
+void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
+{
+ int locked;
+
+ /*
+ * While resetting the page_cgroup we might not hold the
+ * page_cgroup lock. free_hot_cold_page() is an example
+ * of such a scenario
+ */
+ if (pc)
+ VM_BUG_ON(!page_cgroup_locked(page));
+ locked = (page->page_cgroup & PAGE_CGROUP_LOCK);
+ page->page_cgroup = ((unsigned long)pc | locked);
+}
+
+struct page_cgroup *page_get_page_cgroup(struct page *page)
+{
+ return (struct page_cgroup *)
+ (page->page_cgroup & ~PAGE_CGROUP_LOCK);
+}
+
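+/*
+ * Lock/unlock helpers for the bit spin lock embedded in the low bit of
+ * page->page_cgroup.
+ */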
+static void __always_inline lock_page_cgroup(struct page *page)
+{
+ bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
+ VM_BUG_ON(!page_cgroup_locked(page));
+}
+
+static void __always_inline unlock_page_cgroup(struct page *page)
+{
+ bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
+}
+
+/*
+ * Tie a new page_cgroup to a struct page under lock_page_cgroup().
+ * This can fail if the page is already tied to a page_cgroup.
+ * Returns 0 on success.
+ */
+static int page_cgroup_assign_new_page_cgroup(struct page *page,
+ struct page_cgroup *pc)
+{
+ int ret = 0;
+
+ lock_page_cgroup(page);
+ if (!page_get_page_cgroup(page))
+ page_assign_page_cgroup(page, pc);
+ else /* A page is tied to other pc. */
+ ret = 1;
+ unlock_page_cgroup(page);
+ return ret;
+}
+
+/*
+ * Clear page->page_cgroup member under lock_page_cgroup().
+ * If the given "pc" value is different from the one in page->page_cgroup,
+ * page->page_cgroup is not cleared.
+ * Returns the value of page->page_cgroup at the time the lock was taken.
+ * A caller can detect whether the clear succeeded by checking
+ * clear_page_cgroup(page, pc) == pc
+ */
+
+static struct page_cgroup *clear_page_cgroup(struct page *page,
+ struct page_cgroup *pc)
+{
+ struct page_cgroup *ret;
+ /* lock and clear */
+ lock_page_cgroup(page);
+ ret = page_get_page_cgroup(page);
+ if (likely(ret == pc))
+ page_assign_page_cgroup(page, NULL);
+ unlock_page_cgroup(page);
+ return ret;
+}
+
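+/*
+ * The list/zone-statistics helpers below must be called with the
+ * mem_cgroup_per_zone lru_lock held.
+ */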
+static void __mem_cgroup_remove_list(struct page_cgroup *pc)
+{
+ int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+ struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
+
+ if (from)
+ MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
+ else
+ MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
+
+ mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
+ list_del_init(&pc->lru);
+}
+
+static void __mem_cgroup_add_list(struct page_cgroup *pc)
+{
+ int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+ struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
+
+ if (!to) {
+ MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
+ list_add(&pc->lru, &mz->inactive_list);
+ } else {
+ MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
+ list_add(&pc->lru, &mz->active_list);
+ }
+ mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
+}
+
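+/*
+ * Move a page_cgroup between the per-zone active and inactive lists and
+ * keep the MEM_CGROUP_ZSTAT counters in sync.
+ */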
+static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+{
+ int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
+ struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
+
+ if (from)
+ MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
+ else
+ MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;
+
+ if (active) {
+ MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
+ pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
+ list_move(&pc->lru, &mz->active_list);
+ } else {
+ MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
+ pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
+ list_move(&pc->lru, &mz->inactive_list);
+ }
+}
+
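+/*
+ * Return non-zero if @task's mm is currently charged to @mem.
+ */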
+int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
+{
+ int ret;
+
+ task_lock(task);
+ ret = task->mm && mm_cgroup(task->mm) == mem;
+ task_unlock(task);
+ return ret;
+}
+
+/*
+ * This routine assumes that the appropriate zone's lru lock is already held
+ */
+void mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
+{
+ struct mem_cgroup_per_zone *mz;
+ unsigned long flags;
+
+ if (!pc)
+ return;
+
+ mz = page_cgroup_zoneinfo(pc);
+ spin_lock_irqsave(&mz->lru_lock, flags);
+ __mem_cgroup_move_lists(pc, active);
+ spin_unlock_irqrestore(&mz->lru_lock, flags);
+}
+
+/*
+ * Calculate mapped_ratio under the memory controller. This will be used in
+ * vmscan.c for determining whether we have to reclaim mapped pages.
+ */
+int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
+{
+ long total, rss;
+
+ /*
+ * usage is recorded in bytes. But, here, we assume the number of
+ * physical pages can be represented by "long" on any arch.
+ */
+ total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
+ rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
+ return (int)((rss * 100L) / total);
+}
+/*
+ * This function is called from vmscan.c. In the page reclaiming loop, the
+ * balance between the active and inactive lists is calculated. For memory
+ * controller page reclaiming, we should use the mem_cgroup's imbalance
+ * rather than the zone's global lru imbalance.
+ */
+long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
+{
+ unsigned long active, inactive;
+ /* active and inactive are the number of pages. 'long' is ok.*/
+ active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
+ inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
+ return (long) (active / (inactive + 1));
+}
+
+/*
+ * prev_priority control...this will be used in memory reclaim path.
+ */
+int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
+{
+ return mem->prev_priority;
+}
+
+void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
+{
+ if (priority < mem->prev_priority)
+ mem->prev_priority = priority;
+}
+
+void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
+{
+ mem->prev_priority = priority;
+}
+
+/*
+ * Calculate # of pages to be scanned in this priority/zone.
+ * See also vmscan.c
+ *
+ * priority starts from "DEF_PRIORITY" and is decremented in each loop.
+ * (see include/linux/mmzone.h)
+ */
+
+long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
+ struct zone *zone, int priority)
+{
+ long nr_active;
+ int nid = zone->zone_pgdat->node_id;
+ int zid = zone_idx(zone);
+ struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+
+ nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
+ return (nr_active >> priority);
+}
+
+long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
+ struct zone *zone, int priority)
+{
+ long nr_inactive;
+ int nid = zone->zone_pgdat->node_id;
+ int zid = zone_idx(zone);
+ struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+
+ nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
+
+ return (nr_inactive >> priority);
+}
+
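+/*
+ * Isolate up to nr_to_scan pages from this cgroup's per-zone LRU list
+ * onto @dst for reclaim. Pages found on the wrong list are moved to the
+ * right one instead of being isolated. Returns the number of pages
+ * taken; *scanned is set to the number of pages examined.
+ */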
+unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
+ struct list_head *dst,
+ unsigned long *scanned, int order,
+ int mode, struct zone *z,
+ struct mem_cgroup *mem_cont,
+ int active)
+{
+ unsigned long nr_taken = 0;
+ struct page *page;
+ unsigned long scan;
+ LIST_HEAD(pc_list);
+ struct list_head *src;
+ struct page_cgroup *pc, *tmp;
+ int nid = z->zone_pgdat->node_id;
+ int zid = zone_idx(z);
+ struct mem_cgroup_per_zone *mz;
+
+ mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
+ if (active)
+ src = &mz->active_list;
+ else
+ src = &mz->inactive_list;
+
+
+ spin_lock(&mz->lru_lock);
+ scan = 0;
+ list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
+ if (scan >= nr_to_scan)
+ break;
+ VM_BUG_ON(!pc);
+ page = pc->page;
+
+ if (unlikely(!PageLRU(page)))
+ continue;
+
+ if (PageActive(page) && !active) {
+ __mem_cgroup_move_lists(pc, true);
+ continue;
+ }
+ if (!PageActive(page) && active) {
+ __mem_cgroup_move_lists(pc, false);
+ continue;
+ }
+
+ scan++;
+ list_move(&pc->lru, &pc_list);
+
+ if (__isolate_lru_page(page, mode) == 0) {
+ list_move(&page->lru, dst);
+ nr_taken++;
+ }
+ }
+
+ list_splice(&pc_list, src);
+ spin_unlock(&mz->lru_lock);
+
+ *scanned = scan;
+ return nr_taken;
+}
+
+/*
+ * Charge the memory controller for page usage.
+ * Return
+ * 0 if the charge was successful
+ * < 0 if the cgroup is over its limit
+ */
+static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask, enum charge_type ctype)
+{
+ struct mem_cgroup *mem;
+ struct page_cgroup *pc;
+ unsigned long flags;
+ unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+ struct mem_cgroup_per_zone *mz;
+
+ /*
+ * Should page_cgroups go to their own slab?
+ * One could optimize the performance of the charging routine
+ * by saving a bit in the page_flags and using it as a lock
+ * to see if the cgroup page already has a page_cgroup associated
+ * with it
+ */
+retry:
+ if (page) {
+ lock_page_cgroup(page);
+ pc = page_get_page_cgroup(page);
+ /*
+ * The page_cgroup exists and
+ * the page has already been accounted.
+ */
+ if (pc) {
+ if (unlikely(!atomic_inc_not_zero(&pc->ref_cnt))) {
+ /* is this page currently being uncharged? */
+ unlock_page_cgroup(page);
+ cpu_relax();
+ goto retry;
+ } else {
+ unlock_page_cgroup(page);
+ goto done;
+ }
+ }
+ unlock_page_cgroup(page);
+ }
+
+ pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
+ if (pc == NULL)
+ goto err;
+
+ /*
+ * We always charge the cgroup the mm_struct belongs to.
+ * The mm_struct's mem_cgroup changes on task migration if the
+ * thread group leader migrates. It's possible that mm is not
+ * set, if so charge the init_mm (happens for pagecache usage).
+ */
+ if (!mm)
+ mm = &init_mm;
+
+ rcu_read_lock();
+ mem = rcu_dereference(mm->mem_cgroup);
+ /*
+ * For every charge from the cgroup, increment reference
+ * count
+ */
+ css_get(&mem->css);
+ rcu_read_unlock();
+
+ /*
+ * If we created the page_cgroup, we should free it on exceeding
+ * the cgroup limit.
+ */
+ while (res_counter_charge(&mem->res, PAGE_SIZE)) {
+ if (!(gfp_mask & __GFP_WAIT))
+ goto out;
+
+ if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
+ continue;
+
+ /*
+ * try_to_free_mem_cgroup_pages() might not give us a full
+ * picture of reclaim. Some pages are reclaimed and might be
+ * moved to swap cache or just unmapped from the cgroup.
+ * Check the limit again to see if the reclaim reduced the
+ * current usage of the cgroup before giving up
+ */
+ if (res_counter_check_under_limit(&mem->res))
+ continue;
+
+ if (!nr_retries--) {
+ mem_cgroup_out_of_memory(mem, gfp_mask);
+ goto out;
+ }
+ congestion_wait(WRITE, HZ/10);
+ }
+
+ atomic_set(&pc->ref_cnt, 1);
+ pc->mem_cgroup = mem;
+ pc->page = page;
+ pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
+ if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
+ pc->flags |= PAGE_CGROUP_FLAG_CACHE;
+
+ if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
+ /*
+ * Another charge has been added to this page already.
+ * We take lock_page_cgroup(page) again, read page->page_cgroup and
+ * increment its refcnt... simply retrying is OK.
+ */
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
+ css_put(&mem->css);
+ kfree(pc);
+ if (!page)
+ goto done;
+ goto retry;
+ }
+
+ mz = page_cgroup_zoneinfo(pc);
+ spin_lock_irqsave(&mz->lru_lock, flags);
+ /* Update statistics vector */
+ __mem_cgroup_add_list(pc);
+ spin_unlock_irqrestore(&mz->lru_lock, flags);
+
+done:
+ return 0;
+out:
+ css_put(&mem->css);
+ kfree(pc);
+err:
+ return -ENOMEM;
+}
+
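+/*
+ * Charge a mapped (anonymous or file-mapped) page to the cgroup that
+ * owns @mm.
+ */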
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask)
+{
+ return mem_cgroup_charge_common(page, mm, gfp_mask,
+ MEM_CGROUP_CHARGE_TYPE_MAPPED);
+}
+
+/*
+ * See whether cached pages should be charged at all.
+ */
+int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask)
+{
+ int ret = 0;
+ if (!mm)
+ mm = &init_mm;
+
+ ret = mem_cgroup_charge_common(page, mm, gfp_mask,
+ MEM_CGROUP_CHARGE_TYPE_CACHE);
+ return ret;
+}
+
+/*
+ * Uncharging is always a welcome operation; we never complain, we simply
+ * uncharge. This routine should be called with lock_page_cgroup held.
+ */
+void mem_cgroup_uncharge(struct page_cgroup *pc)
+{
+ struct mem_cgroup *mem;
+ struct mem_cgroup_per_zone *mz;
+ struct page *page;
+ unsigned long flags;
+
+ /*
+ * Check if our page_cgroup is valid
+ */
+ if (!pc)
+ return;
+
+ if (atomic_dec_and_test(&pc->ref_cnt)) {
+ page = pc->page;
+ mz = page_cgroup_zoneinfo(pc);
+ /*
+ * get page->cgroup and clear it under lock.
+ * force_empty can drop page->cgroup without checking refcnt.
+ */
+ unlock_page_cgroup(page);
+ if (clear_page_cgroup(page, pc) == pc) {
+ mem = pc->mem_cgroup;
+ css_put(&mem->css);
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
+ spin_lock_irqsave(&mz->lru_lock, flags);
+ __mem_cgroup_remove_list(pc);
+ spin_unlock_irqrestore(&mz->lru_lock, flags);
+ kfree(pc);
+ }
+ lock_page_cgroup(page);
+ }
+}
+
+void mem_cgroup_uncharge_page(struct page *page)
+{
+ lock_page_cgroup(page);
+ mem_cgroup_uncharge(page_get_page_cgroup(page));
+ unlock_page_cgroup(page);
+}
+
+/*
+ * Returns non-zero if a page (under migration) has a valid page_cgroup member.
+ * The refcnt of the page_cgroup is incremented.
+ */
+
+int mem_cgroup_prepare_migration(struct page *page)
+{
+ struct page_cgroup *pc;
+ int ret = 0;
+ lock_page_cgroup(page);
+ pc = page_get_page_cgroup(page);
+ if (pc && atomic_inc_not_zero(&pc->ref_cnt))
+ ret = 1;
+ unlock_page_cgroup(page);
+ return ret;
+}
+
+void mem_cgroup_end_migration(struct page *page)
+{
+ struct page_cgroup *pc;
+
+ lock_page_cgroup(page);
+ pc = page_get_page_cgroup(page);
+ mem_cgroup_uncharge(pc);
+ unlock_page_cgroup(page);
+}
+/*
+ * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
+ * And there is no race with the uncharge() routines because the page_cgroup
+ * for *page* has one extra reference taken by mem_cgroup_prepare_migration.
+ */
+
+void mem_cgroup_page_migration(struct page *page, struct page *newpage)
+{
+ struct page_cgroup *pc;
+ struct mem_cgroup *mem;
+ unsigned long flags;
+ struct mem_cgroup_per_zone *mz;
+retry:
+ pc = page_get_page_cgroup(page);
+ if (!pc)
+ return;
+ mem = pc->mem_cgroup;
+ mz = page_cgroup_zoneinfo(pc);
+ if (clear_page_cgroup(page, pc) != pc)
+ goto retry;
+ spin_lock_irqsave(&mz->lru_lock, flags);
+
+ __mem_cgroup_remove_list(pc);
+ spin_unlock_irqrestore(&mz->lru_lock, flags);
+
+ pc->page = newpage;
+ lock_page_cgroup(newpage);
+ page_assign_page_cgroup(newpage, pc);
+ unlock_page_cgroup(newpage);
+
+ mz = page_cgroup_zoneinfo(pc);
+ spin_lock_irqsave(&mz->lru_lock, flags);
+ __mem_cgroup_add_list(pc);
+ spin_unlock_irqrestore(&mz->lru_lock, flags);
+ return;
+}
+
+/*
+ * This routine traverses the page_cgroups on the given list and drops them all.
+ * It ignores page_cgroup->ref_cnt.
+ * *And* it doesn't reclaim the pages themselves, it just removes the page_cgroups.
+ */
+#define FORCE_UNCHARGE_BATCH (128)
+static void
+mem_cgroup_force_empty_list(struct mem_cgroup *mem,
+ struct mem_cgroup_per_zone *mz,
+ int active)
+{
+ struct page_cgroup *pc;
+ struct page *page;
+ int count;
+ unsigned long flags;
+ struct list_head *list;
+
+ if (active)
+ list = &mz->active_list;
+ else
+ list = &mz->inactive_list;
+
+ if (list_empty(list))
+ return;
+retry:
+ count = FORCE_UNCHARGE_BATCH;
+ spin_lock_irqsave(&mz->lru_lock, flags);
+
+ while (--count && !list_empty(list)) {
+ pc = list_entry(list->prev, struct page_cgroup, lru);
+ page = pc->page;
+ /* Avoid race with charge */
+ atomic_set(&pc->ref_cnt, 0);
+ if (clear_page_cgroup(page, pc) == pc) {
+ css_put(&mem->css);
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
+ __mem_cgroup_remove_list(pc);
+ kfree(pc);
+ } else /* being uncharged ? ...do relax */
+ break;
+ }
+ spin_unlock_irqrestore(&mz->lru_lock, flags);
+ if (!list_empty(list)) {
+ cond_resched();
+ goto retry;
+ }
+ return;
+}
+
+/*
+ * Make the mem_cgroup's charge 0 if there are no tasks in it.
+ * This enables this mem_cgroup to be deleted.
+ */
+
+int mem_cgroup_force_empty(struct mem_cgroup *mem)
+{
+ int ret = -EBUSY;
+ int node, zid;
+ css_get(&mem->css);
+ /*
+ * page reclaim code (kswapd etc.) will move pages between
+ * active_list <-> inactive_list while we don't take a lock.
+ * So, we have to loop here until all the lists are empty.
+ */
+ while (mem->res.usage > 0) {
+ if (atomic_read(&mem->css.cgroup->count) > 0)
+ goto out;
+ for_each_node_state(node, N_POSSIBLE)
+ for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+ struct mem_cgroup_per_zone *mz;
+ mz = mem_cgroup_zoneinfo(mem, node, zid);
+ /* drop all page_cgroup in active_list */
+ mem_cgroup_force_empty_list(mem, mz, 1);
+ /* drop all page_cgroup in inactive_list */
+ mem_cgroup_force_empty_list(mem, mz, 0);
+ }
+ }
+ ret = 0;
+out:
+ css_put(&mem->css);
+ return ret;
+}
+
+
+
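+/*
+ * Parse a human-readable size (as accepted by memparse()) and round it
+ * up to a multiple of PAGE_SIZE before it is handed to res_counter.
+ */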
+int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
+{
+ *tmp = memparse(buf, &buf);
+ if (*buf != '\0')
+ return -EINVAL;
+
+ /*
+ * Round the value up to the nearest multiple of the page size
+ */
+ *tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
+ return 0;
+}
+
+static ssize_t mem_cgroup_read(struct cgroup *cont,
+ struct cftype *cft, struct file *file,
+ char __user *userbuf, size_t nbytes, loff_t *ppos)
+{
+ return res_counter_read(&mem_cgroup_from_cont(cont)->res,
+ cft->private, userbuf, nbytes, ppos,
+ NULL);
+}
+
+static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
+ struct file *file, const char __user *userbuf,
+ size_t nbytes, loff_t *ppos)
+{
+ return res_counter_write(&mem_cgroup_from_cont(cont)->res,
+ cft->private, userbuf, nbytes, ppos,
+ mem_cgroup_write_strategy);
+}
+
+static ssize_t mem_force_empty_write(struct cgroup *cont,
+ struct cftype *cft, struct file *file,
+ const char __user *userbuf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+ int ret;
+ ret = mem_cgroup_force_empty(mem);
+ if (!ret)
+ ret = nbytes;
+ return ret;
+}
+
+/*
+ * Note: This should be removed once cgroups support write-only files.
+ */
+
+static ssize_t mem_force_empty_read(struct cgroup *cont,
+ struct cftype *cft,
+ struct file *file, char __user *userbuf,
+ size_t nbytes, loff_t *ppos)
+{
+ return -EINVAL;
+}
+
+
+static const struct mem_cgroup_stat_desc {
+ const char *msg;
+ u64 unit;
+} mem_cgroup_stat_desc[] = {
+ [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
+ [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
+};
+
+static int mem_control_stat_show(struct seq_file *m, void *arg)
+{
+ struct cgroup *cont = m->private;
+ struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
+ struct mem_cgroup_stat *stat = &mem_cont->stat;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
+ s64 val;
+
+ val = mem_cgroup_read_stat(stat, i);
+ val *= mem_cgroup_stat_desc[i].unit;
+ seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg,
+ (long long)val);
+ }
+ /* showing # of active pages */
+ {
+ unsigned long active, inactive;
+
+ inactive = mem_cgroup_get_all_zonestat(mem_cont,
+ MEM_CGROUP_ZSTAT_INACTIVE);
+ active = mem_cgroup_get_all_zonestat(mem_cont,
+ MEM_CGROUP_ZSTAT_ACTIVE);
+ seq_printf(m, "active %ld\n", (active) * PAGE_SIZE);
+ seq_printf(m, "inactive %ld\n", (inactive) * PAGE_SIZE);
+ }
+ return 0;
+}
+
+static const struct file_operations mem_control_stat_file_operations = {
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mem_control_stat_open(struct inode *unused, struct file *file)
+{
+ /* XXX __d_cont */
+ struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
+
+ file->f_op = &mem_control_stat_file_operations;
+ return single_open(file, mem_control_stat_show, cont);
+}
+
+
+
+static struct cftype mem_cgroup_files[] = {
+ {
+ .name = "usage_in_bytes",
+ .private = RES_USAGE,
+ .read = mem_cgroup_read,
+ },
+ {
+ .name = "limit_in_bytes",
+ .private = RES_LIMIT,
+ .write = mem_cgroup_write,
+ .read = mem_cgroup_read,
+ },
+ {
+ .name = "failcnt",
+ .private = RES_FAILCNT,
+ .read = mem_cgroup_read,
+ },
+ {
+ .name = "force_empty",
+ .write = mem_force_empty_write,
+ .read = mem_force_empty_read,
+ },
+ {
+ .name = "stat",
+ .open = mem_control_stat_open,
+ },
+};
+
+static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
+{
+ struct mem_cgroup_per_node *pn;
+ struct mem_cgroup_per_zone *mz;
+ int zone;
+ /*
+ * This routine is called against possible nodes.
+ * But it's a BUG to call kmalloc() against an offline node.
+ *
+ * TODO: this routine can waste a lot of memory for nodes which will
+ * never be onlined. It would be better to use a memory hotplug callback
+ * function.
+ */
+ if (node_state(node, N_HIGH_MEMORY))
+ pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, node);
+ else
+ pn = kmalloc(sizeof(*pn), GFP_KERNEL);
+ if (!pn)
+ return 1;
+
+ mem->info.nodeinfo[node] = pn;
+ memset(pn, 0, sizeof(*pn));
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ mz = &pn->zoneinfo[zone];
+ INIT_LIST_HEAD(&mz->active_list);
+ INIT_LIST_HEAD(&mz->inactive_list);
+ spin_lock_init(&mz->lru_lock);
+ }
+ return 0;
+}
+
+static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
+{
+ kfree(mem->info.nodeinfo[node]);
+}
+
+
+static struct mem_cgroup init_mem_cgroup;
+
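+/*
+ * The root cgroup is backed by the statically allocated init_mem_cgroup
+ * and is also wired up as init_mm's memory cgroup; child cgroups are
+ * allocated dynamically.
+ */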
+static struct cgroup_subsys_state *
+mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ struct mem_cgroup *mem;
+ int node;
+
+ if (unlikely((cont->parent) == NULL)) {
+ mem = &init_mem_cgroup;
+ init_mm.mem_cgroup = mem;
+ } else
+ mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
+
+ if (mem == NULL)
+ return NULL;
+
+ res_counter_init(&mem->res);
+
+ memset(&mem->info, 0, sizeof(mem->info));
+
+ for_each_node_state(node, N_POSSIBLE)
+ if (alloc_mem_cgroup_per_zone_info(mem, node))
+ goto free_out;
+
+ return &mem->css;
+free_out:
+ for_each_node_state(node, N_POSSIBLE)
+ free_mem_cgroup_per_zone_info(mem, node);
+ if (cont->parent != NULL)
+ kfree(mem);
+ return NULL;
+}
+
+static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
+ struct cgroup *cont)
+{
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+ mem_cgroup_force_empty(mem);
+}
+
+static void mem_cgroup_destroy(struct cgroup_subsys *ss,
+ struct cgroup *cont)
+{
+ int node;
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+
+ for_each_node_state(node, N_POSSIBLE)
+ free_mem_cgroup_per_zone_info(mem, node);
+
+ kfree(mem_cgroup_from_cont(cont));
+}
+
+static int mem_cgroup_populate(struct cgroup_subsys *ss,
+ struct cgroup *cont)
+{
+ return cgroup_add_files(cont, ss, mem_cgroup_files,
+ ARRAY_SIZE(mem_cgroup_files));
+}
+
+static void mem_cgroup_move_task(struct cgroup_subsys *ss,
+ struct cgroup *cont,
+ struct cgroup *old_cont,
+ struct task_struct *p)
+{
+ struct mm_struct *mm;
+ struct mem_cgroup *mem, *old_mem;
+
+ mm = get_task_mm(p);
+ if (mm == NULL)
+ return;
+
+ mem = mem_cgroup_from_cont(cont);
+ old_mem = mem_cgroup_from_cont(old_cont);
+
+ if (mem == old_mem)
+ goto out;
+
+ /*
+ * Only thread group leaders are allowed to migrate, the mm_struct is
+ * in effect owned by the leader
+ */
+ if (p->tgid != p->pid)
+ goto out;
+
+ css_get(&mem->css);
+ rcu_assign_pointer(mm->mem_cgroup, mem);
+ css_put(&old_mem->css);
+
+out:
+ mmput(mm);
+ return;
+}
+
+struct cgroup_subsys mem_cgroup_subsys = {
+ .name = "memory",
+ .subsys_id = mem_cgroup_subsys_id,
+ .create = mem_cgroup_create,
+ .pre_destroy = mem_cgroup_pre_destroy,
+ .destroy = mem_cgroup_destroy,
+ .populate = mem_cgroup_populate,
+ .attach = mem_cgroup_move_task,
+ .early_init = 0,
+};
diff --git a/mm/memory.c b/mm/memory.c
index d902d0e25edc..153a54b2013c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -50,6 +50,7 @@
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
+#include <linux/memcontrol.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
@@ -82,7 +83,18 @@ void * high_memory;
EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
-int randomize_va_space __read_mostly = 1;
+/*
+ * Randomize the address space (stacks, mmaps, brk, etc.).
+ *
+ * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
+ * as ancient (libc5 based) binaries can segfault. )
+ */
+int randomize_va_space __read_mostly =
+#ifdef CONFIG_COMPAT_BRK
+ 1;
+#else
+ 2;
+#endif
static int __init disable_randmaps(char *s)
{
@@ -305,7 +317,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
spin_lock(&mm->page_table_lock);
if (pmd_present(*pmd)) { /* Another has populated it */
pte_lock_deinit(new);
- pte_free(new);
+ pte_free(mm, new);
} else {
mm->nr_ptes++;
inc_zone_page_state(new, NR_PAGETABLE);
@@ -323,7 +335,7 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
spin_lock(&init_mm.page_table_lock);
if (pmd_present(*pmd)) /* Another has populated it */
- pte_free_kernel(new);
+ pte_free_kernel(&init_mm, new);
else
pmd_populate_kernel(&init_mm, pmd, new);
spin_unlock(&init_mm.page_table_lock);
@@ -1109,7 +1121,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
EXPORT_SYMBOL(get_user_pages);
-pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
+pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+ spinlock_t **ptl)
{
pgd_t * pgd = pgd_offset(mm, addr);
pud_t * pud = pud_alloc(mm, pgd, addr);
@@ -1132,16 +1145,20 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
{
int retval;
pte_t *pte;
- spinlock_t *ptl;
+ spinlock_t *ptl;
+
+ retval = mem_cgroup_charge(page, mm, GFP_KERNEL);
+ if (retval)
+ goto out;
retval = -EINVAL;
if (PageAnon(page))
- goto out;
+ goto out_uncharge;
retval = -ENOMEM;
flush_dcache_page(page);
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
- goto out;
+ goto out_uncharge;
retval = -EBUSY;
if (!pte_none(*pte))
goto out_unlock;
@@ -1153,8 +1170,12 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
set_pte_at(mm, addr, pte, mk_pte(page, prot));
retval = 0;
+ pte_unmap_unlock(pte, ptl);
+ return retval;
out_unlock:
pte_unmap_unlock(pte, ptl);
+out_uncharge:
+ mem_cgroup_uncharge_page(page);
out:
return retval;
}
@@ -1517,10 +1538,8 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
memset(kaddr, 0, PAGE_SIZE);
kunmap_atomic(kaddr, KM_USER0);
flush_dcache_page(dst);
- return;
-
- }
- copy_user_highpage(dst, src, va, vma);
+ } else
+ copy_user_highpage(dst, src, va, vma);
}
/*
@@ -1629,6 +1648,10 @@ gotten:
if (!new_page)
goto oom;
cow_user_page(new_page, old_page, address, vma);
+ __SetPageUptodate(new_page);
+
+ if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
+ goto oom_free_new;
/*
* Re-check the pte - we dropped the lock
@@ -1661,7 +1684,9 @@ gotten:
/* Free the old page.. */
new_page = old_page;
ret |= VM_FAULT_WRITE;
- }
+ } else
+ mem_cgroup_uncharge_page(new_page);
+
if (new_page)
page_cache_release(new_page);
if (old_page)
@@ -1685,6 +1710,8 @@ unlock:
put_page(dirty_page);
}
return ret;
+oom_free_new:
+ __free_page(new_page);
oom:
if (old_page)
page_cache_release(old_page);
@@ -1909,50 +1936,49 @@ EXPORT_SYMBOL(unmap_mapping_range);
*/
int vmtruncate(struct inode * inode, loff_t offset)
{
- struct address_space *mapping = inode->i_mapping;
- unsigned long limit;
+ if (inode->i_size < offset) {
+ unsigned long limit;
- if (inode->i_size < offset)
- goto do_expand;
- /*
- * truncation of in-use swapfiles is disallowed - it would cause
- * subsequent swapout to scribble on the now-freed blocks.
- */
- if (IS_SWAPFILE(inode))
- goto out_busy;
- i_size_write(inode, offset);
+ limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && offset > limit)
+ goto out_sig;
+ if (offset > inode->i_sb->s_maxbytes)
+ goto out_big;
+ i_size_write(inode, offset);
+ } else {
+ struct address_space *mapping = inode->i_mapping;
+
+ /*
+ * truncation of in-use swapfiles is disallowed - it would
+ * cause subsequent swapout to scribble on the now-freed
+ * blocks.
+ */
+ if (IS_SWAPFILE(inode))
+ return -ETXTBSY;
+ i_size_write(inode, offset);
+
+ /*
+ * unmap_mapping_range is called twice, first simply for
+ * efficiency so that truncate_inode_pages does fewer
+ * single-page unmaps. However after this first call, and
+ * before truncate_inode_pages finishes, it is possible for
+ * private pages to be COWed, which remain after
+ * truncate_inode_pages finishes, hence the second
+ * unmap_mapping_range call must be made for correctness.
+ */
+ unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+ truncate_inode_pages(mapping, offset);
+ unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
+ }
- /*
- * unmap_mapping_range is called twice, first simply for efficiency
- * so that truncate_inode_pages does fewer single-page unmaps. However
- * after this first call, and before truncate_inode_pages finishes,
- * it is possible for private pages to be COWed, which remain after
- * truncate_inode_pages finishes, hence the second unmap_mapping_range
- * call must be made for correctness.
- */
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- truncate_inode_pages(mapping, offset);
- unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
- goto out_truncate;
-
-do_expand:
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && offset > limit)
- goto out_sig;
- if (offset > inode->i_sb->s_maxbytes)
- goto out_big;
- i_size_write(inode, offset);
-
-out_truncate:
if (inode->i_op && inode->i_op->truncate)
inode->i_op->truncate(inode);
return 0;
+
out_sig:
send_sig(SIGXFSZ, current, 0);
out_big:
return -EFBIG;
-out_busy:
- return -ETXTBSY;
}
EXPORT_SYMBOL(vmtruncate);
@@ -1980,67 +2006,6 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
return 0;
}
-/**
- * swapin_readahead - swap in pages in hope we need them soon
- * @entry: swap entry of this memory
- * @addr: address to start
- * @vma: user vma this addresses belong to
- *
- * Primitive swap readahead code. We simply read an aligned block of
- * (1 << page_cluster) entries in the swap area. This method is chosen
- * because it doesn't cost us any seek time. We also make sure to queue
- * the 'original' request together with the readahead ones...
- *
- * This has been extended to use the NUMA policies from the mm triggering
- * the readahead.
- *
- * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
- */
-void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
-{
-#ifdef CONFIG_NUMA
- struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
-#endif
- int i, num;
- struct page *new_page;
- unsigned long offset;
-
- /*
- * Get the number of handles we should do readahead io to.
- */
- num = valid_swaphandles(entry, &offset);
- for (i = 0; i < num; offset++, i++) {
- /* Ok, do the async read-ahead now */
- new_page = read_swap_cache_async(swp_entry(swp_type(entry),
- offset), vma, addr);
- if (!new_page)
- break;
- page_cache_release(new_page);
-#ifdef CONFIG_NUMA
- /*
- * Find the next applicable VMA for the NUMA policy.
- */
- addr += PAGE_SIZE;
- if (addr == 0)
- vma = NULL;
- if (vma) {
- if (addr >= vma->vm_end) {
- vma = next_vma;
- next_vma = vma ? vma->vm_next : NULL;
- }
- if (vma && addr < vma->vm_start)
- vma = NULL;
- } else {
- if (next_vma && addr >= next_vma->vm_start) {
- vma = next_vma;
- next_vma = vma->vm_next;
- }
- }
-#endif
- }
- lru_add_drain(); /* Push any new pages onto the LRU now */
-}
-
/*
* We enter with non-exclusive mmap_sem (to exclude vma changes,
* but allow concurrent faults), and pte mapped but not yet locked.
@@ -2068,8 +2033,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
page = lookup_swap_cache(entry);
if (!page) {
grab_swap_token(); /* Contend for token _before_ read-in */
- swapin_readahead(entry, address, vma);
- page = read_swap_cache_async(entry, vma, address);
+ page = swapin_readahead(entry,
+ GFP_HIGHUSER_MOVABLE, vma, address);
if (!page) {
/*
* Back out if somebody else faulted in this pte
@@ -2087,6 +2052,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
count_vm_event(PGMAJFAULT);
}
+ if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+ delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
+
mark_page_accessed(page);
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -2124,8 +2095,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (write_access) {
/* XXX: We could OR the do_wp_page code with this one? */
if (do_wp_page(mm, vma, address,
- page_table, pmd, ptl, pte) & VM_FAULT_OOM)
+ page_table, pmd, ptl, pte) & VM_FAULT_OOM) {
+ mem_cgroup_uncharge_page(page);
ret = VM_FAULT_OOM;
+ }
goto out;
}
@@ -2136,6 +2109,7 @@ unlock:
out:
return ret;
out_nomap:
+ mem_cgroup_uncharge_page(page);
pte_unmap_unlock(page_table, ptl);
unlock_page(page);
page_cache_release(page);
@@ -2163,6 +2137,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
page = alloc_zeroed_user_highpage_movable(vma, address);
if (!page)
goto oom;
+ __SetPageUptodate(page);
+
+ if (mem_cgroup_charge(page, mm, GFP_KERNEL))
+ goto oom_free_page;
entry = mk_pte(page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -2181,8 +2159,11 @@ unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
release:
+ mem_cgroup_uncharge_page(page);
page_cache_release(page);
goto unlock;
+oom_free_page:
+ __free_page(page);
oom:
return VM_FAULT_OOM;
}
@@ -2263,6 +2244,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
goto out;
}
copy_user_highpage(page, vmf.page, address, vma);
+ __SetPageUptodate(page);
} else {
/*
* If the page will be shareable, see if the backing
@@ -2295,6 +2277,11 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
}
+ if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
+
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
/*
@@ -2330,6 +2317,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* no need to invalidate: a not-present page won't be cached */
update_mmu_cache(vma, address, entry);
} else {
+ mem_cgroup_uncharge_page(page);
if (anon)
page_cache_release(page);
else
@@ -2563,7 +2551,7 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
spin_lock(&mm->page_table_lock);
if (pgd_present(*pgd)) /* Another has populated it */
- pud_free(new);
+ pud_free(mm, new);
else
pgd_populate(mm, pgd, new);
spin_unlock(&mm->page_table_lock);
@@ -2585,12 +2573,12 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
if (pud_present(*pud)) /* Another has populated it */
- pmd_free(new);
+ pmd_free(mm, new);
else
pud_populate(mm, pud, new);
#else
if (pgd_present(*pud)) /* Another has populated it */
- pmd_free(new);
+ pmd_free(mm, new);
else
pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
@@ -2618,46 +2606,6 @@ int make_pages_present(unsigned long addr, unsigned long end)
return ret == len ? 0 : -1;
}
-/*
- * Map a vmalloc()-space virtual address to the physical page.
- */
-struct page * vmalloc_to_page(void * vmalloc_addr)
-{
- unsigned long addr = (unsigned long) vmalloc_addr;
- struct page *page = NULL;
- pgd_t *pgd = pgd_offset_k(addr);
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep, pte;
-
- if (!pgd_none(*pgd)) {
- pud = pud_offset(pgd, addr);
- if (!pud_none(*pud)) {
- pmd = pmd_offset(pud, addr);
- if (!pmd_none(*pmd)) {
- ptep = pte_offset_map(pmd, addr);
- pte = *ptep;
- if (pte_present(pte))
- page = pte_page(pte);
- pte_unmap(ptep);
- }
- }
- }
- return page;
-}
-
-EXPORT_SYMBOL(vmalloc_to_page);
-
-/*
- * Map a vmalloc()-space virtual address to the physical page frame number.
- */
-unsigned long vmalloc_to_pfn(void * vmalloc_addr)
-{
- return page_to_pfn(vmalloc_to_page(vmalloc_addr));
-}
-
-EXPORT_SYMBOL(vmalloc_to_pfn);
-
#if !defined(__HAVE_ARCH_GATE_AREA)
#if defined(AT_SYSINFO_EHDR)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9512a544d044..7469c503580d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -481,8 +481,6 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
return offlined;
}
-extern void drain_all_local_pages(void);
-
int offline_pages(unsigned long start_pfn,
unsigned long end_pfn, unsigned long timeout)
{
@@ -540,7 +538,7 @@ repeat:
lru_add_drain_all();
flush_scheduled_work();
cond_resched();
- drain_all_local_pages();
+ drain_all_pages();
}
pfn = scan_lru_pages(start_pfn, end_pfn);
@@ -563,7 +561,7 @@ repeat:
flush_scheduled_work();
yield();
/* drain pcp pages , this is synchrouns. */
- drain_all_local_pages();
+ drain_all_pages();
/* check again */
offlined_pages = check_pages_isolated(start_pfn, end_pfn);
if (offlined_pages < 0) {
diff --git a/mm/migrate.c b/mm/migrate.c
index 6a207e8d17ea..a73504ff5ab9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -29,6 +29,7 @@
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
+#include <linux/memcontrol.h>
#include "internal.h"
@@ -115,11 +116,6 @@ int putback_lru_pages(struct list_head *l)
return count;
}
-static inline int is_swap_pte(pte_t pte)
-{
- return !pte_none(pte) && !pte_present(pte) && !pte_file(pte);
-}
-
/*
* Restore a potential migration pte to a working pte entry
*/
@@ -157,6 +153,11 @@ static void remove_migration_pte(struct vm_area_struct *vma,
return;
}
+ if (mem_cgroup_charge(new, mm, GFP_KERNEL)) {
+ pte_unmap(ptep);
+ return;
+ }
+
ptl = pte_lockptr(mm, pmd);
spin_lock(ptl);
pte = *ptep;
@@ -592,9 +593,10 @@ static int move_to_new_page(struct page *newpage, struct page *page)
else
rc = fallback_migrate_page(mapping, newpage, page);
- if (!rc)
+ if (!rc) {
+ mem_cgroup_page_migration(page, newpage);
remove_migration_ptes(page, newpage);
- else
+ } else
newpage->mapping = NULL;
unlock_page(newpage);
@@ -613,6 +615,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
int *result = NULL;
struct page *newpage = get_new_page(page, private, &result);
int rcu_locked = 0;
+ int charge = 0;
if (!newpage)
return -ENOMEM;
@@ -645,23 +648,46 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
rcu_read_lock();
rcu_locked = 1;
}
+
/*
- * This is a corner case handling.
- * When a new swap-cache is read into, it is linked to LRU
- * and treated as swapcache but has no rmap yet.
- * Calling try_to_unmap() against a page->mapping==NULL page is
- * BUG. So handle it here.
+ * Corner case handling:
+ * 1. When a new swap-cache page is read in, it is added to the LRU
+ * and treated as swapcache but it has no rmap yet.
+ * Calling try_to_unmap() against a page->mapping==NULL page will
+ * trigger a BUG. So handle it here.
+ * 2. An orphaned page (see truncate_complete_page) might have
+ * fs-private metadata. The page can be picked up due to memory
+ * offlining. Everywhere else except page reclaim, the page is
+ * invisible to the vm, so the page can not be migrated. So try to
+ * free the metadata, so the page can be freed.
*/
- if (!page->mapping)
+ if (!page->mapping) {
+ if (!PageAnon(page) && PagePrivate(page)) {
+ /*
+ * Go direct to try_to_free_buffers() here because
+ * a) that's what try_to_release_page() would do anyway
+ * b) we may be under rcu_read_lock() here, so we can't
+ * use GFP_KERNEL which is what try_to_release_page()
+ * needs to be effective.
+ */
+ try_to_free_buffers(page);
+ }
goto rcu_unlock;
+ }
+
+ charge = mem_cgroup_prepare_migration(page);
/* Establish migration ptes or remove ptes */
try_to_unmap(page, 1);
if (!page_mapped(page))
rc = move_to_new_page(newpage, page);
- if (rc)
+ if (rc) {
remove_migration_ptes(page, page);
+ if (charge)
+ mem_cgroup_end_migration(page);
+ } else if (charge)
+ mem_cgroup_end_migration(newpage);
rcu_unlock:
if (rcu_locked)
rcu_read_unlock();
diff --git a/mm/mmap.c b/mm/mmap.c
index 8295577a83b2..ad6e4eaf34f8 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -36,6 +36,10 @@
#define arch_mmap_check(addr, len, flags) (0)
#endif
+#ifndef arch_rebalance_pgtables
+#define arch_rebalance_pgtables(addr, len) (addr)
+#endif
+
static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end);
@@ -241,7 +245,7 @@ asmlinkage unsigned long sys_brk(unsigned long brk)
down_write(&mm->mmap_sem);
- if (brk < mm->end_code)
+ if (brk < mm->start_brk)
goto out;
/*
@@ -1424,7 +1428,7 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
if (addr & ~PAGE_MASK)
return -EINVAL;
- return addr;
+ return arch_rebalance_pgtables(addr, len);
}
EXPORT_SYMBOL(get_unmapped_area);
diff --git a/mm/nommu.c b/mm/nommu.c
index b989cb928a7c..5d8ae086f74e 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -10,6 +10,7 @@
* Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
* Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
* Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
+ * Copyright (c) 2007 Paul Mundt <lethal@linux-sh.org>
*/
#include <linux/module.h>
@@ -167,7 +168,7 @@ EXPORT_SYMBOL(get_user_pages);
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
-void vfree(void *addr)
+void vfree(const void *addr)
{
kfree(addr);
}
@@ -183,13 +184,33 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
}
EXPORT_SYMBOL(__vmalloc);
-struct page * vmalloc_to_page(void *addr)
+void *vmalloc_user(unsigned long size)
+{
+ void *ret;
+
+ ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL);
+ if (ret) {
+ struct vm_area_struct *vma;
+
+ down_write(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, (unsigned long)ret);
+ if (vma)
+ vma->vm_flags |= VM_USERMAP;
+ up_write(&current->mm->mmap_sem);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
+struct page *vmalloc_to_page(const void *addr)
{
return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);
-unsigned long vmalloc_to_pfn(void *addr)
+unsigned long vmalloc_to_pfn(const void *addr)
{
return page_to_pfn(virt_to_page(addr));
}
@@ -253,10 +274,17 @@ EXPORT_SYMBOL(vmalloc_32);
*
* The resulting memory area is 32bit addressable and zeroed so it can be
* mapped to userspace without leaking data.
+ *
+ * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
+ * remap_vmalloc_range() are permissible.
*/
void *vmalloc_32_user(unsigned long size)
{
- return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+ /*
+ * We'll have to sort out the ZONE_DMA bits for 64-bit,
+ * but for now this can simply use vmalloc_user() directly.
+ */
+ return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);
@@ -267,7 +295,7 @@ void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_
}
EXPORT_SYMBOL(vmap);
-void vunmap(void *addr)
+void vunmap(const void *addr)
{
BUG();
}
@@ -1216,6 +1244,21 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
}
EXPORT_SYMBOL(remap_pfn_range);
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+ unsigned long pgoff)
+{
+ unsigned int size = vma->vm_end - vma->vm_start;
+
+ if (!(vma->vm_flags & VM_USERMAP))
+ return -EINVAL;
+
+ vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
+ vma->vm_end = vma->vm_start + size;
+
+ return 0;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 96473b482099..4194b9db0104 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -25,9 +25,11 @@
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
+#include <linux/memcontrol.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
+int sysctl_oom_dump_tasks;
static DEFINE_SPINLOCK(zone_scan_mutex);
/* #define DEBUG */
@@ -50,7 +52,8 @@ static DEFINE_SPINLOCK(zone_scan_mutex);
* of least surprise ... (be careful when you change it)
*/
-unsigned long badness(struct task_struct *p, unsigned long uptime)
+unsigned long badness(struct task_struct *p, unsigned long uptime,
+ struct mem_cgroup *mem)
{
unsigned long points, cpu_time, run_time, s;
struct mm_struct *mm;
@@ -125,8 +128,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
* Superuser processes are usually more important, so we make it
* less likely that we kill those.
*/
- if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_ADMIN) ||
- p->uid == 0 || p->euid == 0)
+ if (__capable(p, CAP_SYS_ADMIN) || __capable(p, CAP_SYS_RESOURCE))
points /= 4;
/*
@@ -135,7 +137,7 @@ unsigned long badness(struct task_struct *p, unsigned long uptime)
* tend to only have this flag set on applications they think
* of as important.
*/
- if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO))
+ if (__capable(p, CAP_SYS_RAWIO))
points /= 4;
/*
@@ -194,7 +196,8 @@ static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
*
* (not docbooked, we don't want this one cluttering up the manual)
*/
-static struct task_struct *select_bad_process(unsigned long *ppoints)
+static struct task_struct *select_bad_process(unsigned long *ppoints,
+ struct mem_cgroup *mem)
{
struct task_struct *g, *p;
struct task_struct *chosen = NULL;
@@ -214,6 +217,8 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
/* skip the init task */
if (is_global_init(p))
continue;
+ if (mem && !task_in_mem_cgroup(p, mem))
+ continue;
/*
* This task already has access to memory reserves and is
@@ -248,7 +253,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
if (p->oomkilladj == OOM_DISABLE)
continue;
- points = badness(p, uptime.tv_sec);
+ points = badness(p, uptime.tv_sec, mem);
if (points > *ppoints || !chosen) {
chosen = p;
*ppoints = points;
@@ -259,6 +264,41 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
}
/**
+ * Dumps the current memory state of all system tasks, excluding kernel threads.
+ * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
+ * score, and name.
+ *
+ * If the memory cgroup argument is non-NULL, only tasks that are members of
+ * that mem_cgroup are shown.
+ *
+ * Call with tasklist_lock read-locked.
+ */
+static void dump_tasks(const struct mem_cgroup *mem)
+{
+ struct task_struct *g, *p;
+
+ printk(KERN_INFO "[ pid ] uid tgid total_vm rss cpu oom_adj "
+ "name\n");
+ do_each_thread(g, p) {
+ /*
+ * total_vm and rss sizes do not exist for tasks with a
+ * detached mm so there's no need to report them.
+ */
+ if (!p->mm)
+ continue;
+ if (mem && !task_in_mem_cgroup(p, mem))
+ continue;
+
+ task_lock(p);
+ printk(KERN_INFO "[%5d] %5d %5d %8lu %8lu %3d %3d %s\n",
+ p->pid, p->uid, p->tgid, p->mm->total_vm,
+ get_mm_rss(p->mm), (int)task_cpu(p), p->oomkilladj,
+ p->comm);
+ task_unlock(p);
+ } while_each_thread(g, p);
+}
+
+/**
* Send SIGKILL to the selected process irrespective of CAP_SYS_RAW_IO
* flag though it's unlikely that we select a process with CAP_SYS_RAW_IO
* set.
@@ -335,7 +375,8 @@ static int oom_kill_task(struct task_struct *p)
}
static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
- unsigned long points, const char *message)
+ unsigned long points, struct mem_cgroup *mem,
+ const char *message)
{
struct task_struct *c;
@@ -345,6 +386,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
current->comm, gfp_mask, order, current->oomkilladj);
dump_stack();
show_mem();
+ if (sysctl_oom_dump_tasks)
+ dump_tasks(mem);
}
/*
@@ -369,6 +412,31 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
return oom_kill_task(p);
}
+#ifdef CONFIG_CGROUP_MEM_CONT
+void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
+{
+ unsigned long points = 0;
+ struct task_struct *p;
+
+ cgroup_lock();
+ rcu_read_lock();
+retry:
+ p = select_bad_process(&points, mem);
+ if (PTR_ERR(p) == -1UL)
+ goto out;
+
+ if (!p)
+ p = current;
+
+ if (oom_kill_process(p, gfp_mask, 0, points, mem,
+ "Memory cgroup out of memory"))
+ goto retry;
+out:
+ rcu_read_unlock();
+ cgroup_unlock();
+}
+#endif
+
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
int register_oom_notifier(struct notifier_block *nb)
@@ -466,7 +534,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
switch (constraint) {
case CONSTRAINT_MEMORY_POLICY:
- oom_kill_process(current, gfp_mask, order, points,
+ oom_kill_process(current, gfp_mask, order, points, NULL,
"No available memory (MPOL_BIND)");
break;
@@ -476,7 +544,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)
/* Fall-through */
case CONSTRAINT_CPUSET:
if (sysctl_oom_kill_allocating_task) {
- oom_kill_process(current, gfp_mask, order, points,
+ oom_kill_process(current, gfp_mask, order, points, NULL,
"Out of memory (oom_kill_allocating_task)");
break;
}
@@ -485,7 +553,7 @@ retry:
* Rambo mode: Shoot down a process and hope it solves whatever
* issues we may have.
*/
- p = select_bad_process(&points);
+ p = select_bad_process(&points, NULL);
if (PTR_ERR(p) == -1UL)
goto out;
@@ -496,7 +564,7 @@ retry:
panic("Out of memory and no killable processes...\n");
}
- if (oom_kill_process(p, gfp_mask, order, points,
+ if (oom_kill_process(p, gfp_mask, order, points, NULL,
"Out of memory"))
goto retry;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3d3848fa6324..5e00f1772c20 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -69,6 +69,12 @@ static inline long sync_writeback_pages(void)
int dirty_background_ratio = 5;
/*
+ * free highmem will not be subtracted from the total free memory
+ * for calculating free ratios if vm_highmem_is_dirtyable is true
+ */
+int vm_highmem_is_dirtyable;
+
+/*
* The generator of dirty data starts writeback at this percentage
*/
int vm_dirty_ratio = 10;
@@ -219,7 +225,7 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
*
* dirty -= (dirty/8) * p_{t}
*/
-void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
{
long numerator, denominator;
long dirty = *pdirty;
@@ -287,7 +293,10 @@ static unsigned long determine_dirtyable_memory(void)
x = global_page_state(NR_FREE_PAGES)
+ global_page_state(NR_INACTIVE)
+ global_page_state(NR_ACTIVE);
- x -= highmem_dirtyable_memory(x);
+
+ if (!vm_highmem_is_dirtyable)
+ x -= highmem_dirtyable_memory(x);
+
return x + 1; /* Ensure that we never return 0 */
}
@@ -558,6 +567,7 @@ static void background_writeout(unsigned long _min_pages)
global_page_state(NR_UNSTABLE_NFS) < background_thresh
&& min_pages <= 0)
break;
+ wbc.more_io = 0;
wbc.encountered_congestion = 0;
wbc.nr_to_write = MAX_WRITEBACK_PAGES;
wbc.pages_skipped = 0;
@@ -565,8 +575,9 @@ static void background_writeout(unsigned long _min_pages)
min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
/* Wrote less than expected */
- congestion_wait(WRITE, HZ/10);
- if (!wbc.encountered_congestion)
+ if (wbc.encountered_congestion || wbc.more_io)
+ congestion_wait(WRITE, HZ/10);
+ else
break;
}
}
@@ -631,11 +642,12 @@ static void wb_kupdate(unsigned long arg)
global_page_state(NR_UNSTABLE_NFS) +
(inodes_stat.nr_inodes - inodes_stat.nr_unused);
while (nr_to_write > 0) {
+ wbc.more_io = 0;
wbc.encountered_congestion = 0;
wbc.nr_to_write = MAX_WRITEBACK_PAGES;
writeback_inodes(&wbc);
if (wbc.nr_to_write > 0) {
- if (wbc.encountered_congestion)
+ if (wbc.encountered_congestion || wbc.more_io)
congestion_wait(WRITE, HZ/10);
else
break; /* All the old data is written */
@@ -1064,7 +1076,7 @@ static int __set_page_dirty(struct page *page)
return 0;
}
-int fastcall set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
{
int ret = __set_page_dirty(page);
if (ret)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b2838c24e582..26a54a17dc9f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -43,6 +43,7 @@
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
+#include <linux/memcontrol.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -537,7 +538,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
/*
* permit the bootmem allocator to evade page validation on high-order frees
*/
-void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
if (order == 0) {
__ClearPageReserved(page);
@@ -890,31 +891,51 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
}
#endif
-static void __drain_pages(unsigned int cpu)
+/*
+ * Drain pages of the indicated processor.
+ *
+ * The processor must either be the current processor and the
+ * thread pinned to the current processor or a processor that
+ * is not online.
+ */
+static void drain_pages(unsigned int cpu)
{
unsigned long flags;
struct zone *zone;
- int i;
for_each_zone(zone) {
struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
if (!populated_zone(zone))
continue;
pset = zone_pcp(zone, cpu);
- for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
- struct per_cpu_pages *pcp;
-
- pcp = &pset->pcp[i];
- local_irq_save(flags);
- free_pages_bulk(zone, pcp->count, &pcp->list, 0);
- pcp->count = 0;
- local_irq_restore(flags);
- }
+
+ pcp = &pset->pcp;
+ local_irq_save(flags);
+ free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+ pcp->count = 0;
+ local_irq_restore(flags);
}
}
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void *arg)
+{
+ drain_pages(smp_processor_id());
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_pages(void)
+{
+ on_each_cpu(drain_local_pages, NULL, 0, 1);
+}
+
#ifdef CONFIG_HIBERNATION
void mark_free_pages(struct zone *zone)
@@ -952,40 +973,9 @@ void mark_free_pages(struct zone *zone)
#endif /* CONFIG_PM */
/*
- * Spill all of this CPU's per-cpu pages back into the buddy allocator.
- */
-void drain_local_pages(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __drain_pages(smp_processor_id());
- local_irq_restore(flags);
-}
-
-void smp_drain_local_pages(void *arg)
-{
- drain_local_pages();
-}
-
-/*
- * Spill all the per-cpu pages from all CPUs back into the buddy allocator
- */
-void drain_all_local_pages(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __drain_pages(smp_processor_id());
- local_irq_restore(flags);
-
- smp_call_function(smp_drain_local_pages, NULL, 0, 1);
-}
-
-/*
* Free a 0-order page
*/
-static void fastcall free_hot_cold_page(struct page *page, int cold)
+static void free_hot_cold_page(struct page *page, int cold)
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
@@ -998,13 +988,17 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
if (!PageHighMem(page))
debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
+ VM_BUG_ON(page_get_page_cgroup(page));
arch_free_page(page, 0);
kernel_map_pages(page, 1, 0);
- pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+ pcp = &zone_pcp(zone, get_cpu())->pcp;
local_irq_save(flags);
__count_vm_event(PGFREE);
- list_add(&page->lru, &pcp->list);
+ if (cold)
+ list_add_tail(&page->lru, &pcp->list);
+ else
+ list_add(&page->lru, &pcp->list);
set_page_private(page, get_pageblock_migratetype(page));
pcp->count++;
if (pcp->count >= pcp->high) {
@@ -1015,12 +1009,12 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
put_cpu();
}
-void fastcall free_hot_page(struct page *page)
+void free_hot_page(struct page *page)
{
free_hot_cold_page(page, 0);
}
-void fastcall free_cold_page(struct page *page)
+void free_cold_page(struct page *page)
{
free_hot_cold_page(page, 1);
}
@@ -1062,7 +1056,7 @@ again:
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
- pcp = &zone_pcp(zone, cpu)->pcp[cold];
+ pcp = &zone_pcp(zone, cpu)->pcp;
local_irq_save(flags);
if (!pcp->count) {
pcp->count = rmqueue_bulk(zone, 0,
@@ -1072,9 +1066,15 @@ again:
}
/* Find a page of the appropriate migrate type */
- list_for_each_entry(page, &pcp->list, lru)
- if (page_private(page) == migratetype)
- break;
+ if (cold) {
+ list_for_each_entry_reverse(page, &pcp->list, lru)
+ if (page_private(page) == migratetype)
+ break;
+ } else {
+ list_for_each_entry(page, &pcp->list, lru)
+ if (page_private(page) == migratetype)
+ break;
+ }
/* Allocate more to the pcp list if necessary */
if (unlikely(&page->lru == &pcp->list)) {
@@ -1569,7 +1569,7 @@ nofail_alloc:
cond_resched();
if (order != 0)
- drain_all_local_pages();
+ drain_all_pages();
if (likely(did_some_progress)) {
page = get_page_from_freelist(gfp_mask, order,
@@ -1643,7 +1643,7 @@ EXPORT_SYMBOL(__alloc_pages);
/*
* Common helper functions.
*/
-fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
struct page * page;
page = alloc_pages(gfp_mask, order);
@@ -1654,7 +1654,7 @@ fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
EXPORT_SYMBOL(__get_free_pages);
-fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
+unsigned long get_zeroed_page(gfp_t gfp_mask)
{
struct page * page;
@@ -1680,7 +1680,7 @@ void __pagevec_free(struct pagevec *pvec)
free_hot_cold_page(pvec->pages[i], pvec->cold);
}
-fastcall void __free_pages(struct page *page, unsigned int order)
+void __free_pages(struct page *page, unsigned int order)
{
if (put_page_testzero(page)) {
if (order == 0)
@@ -1692,7 +1692,7 @@ fastcall void __free_pages(struct page *page, unsigned int order)
EXPORT_SYMBOL(__free_pages);
-fastcall void free_pages(unsigned long addr, unsigned int order)
+void free_pages(unsigned long addr, unsigned int order)
{
if (addr != 0) {
VM_BUG_ON(!virt_addr_valid((void *)addr));
@@ -1801,12 +1801,9 @@ void show_free_areas(void)
pageset = zone_pcp(zone, cpu);
- printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d "
- "Cold: hi:%5d, btch:%4d usd:%4d\n",
- cpu, pageset->pcp[0].high,
- pageset->pcp[0].batch, pageset->pcp[0].count,
- pageset->pcp[1].high, pageset->pcp[1].batch,
- pageset->pcp[1].count);
+ printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
+ cpu, pageset->pcp.high,
+ pageset->pcp.batch, pageset->pcp.count);
}
}
@@ -1879,6 +1876,8 @@ void show_free_areas(void)
printk("= %lukB\n", K(total));
}
+ printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
+
show_swap_cache_info();
}
@@ -2528,6 +2527,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
set_page_links(page, zone, nid, pfn);
init_page_count(page);
reset_page_mapcount(page);
+ page_assign_page_cgroup(page, NULL);
SetPageReserved(page);
/*
@@ -2551,8 +2551,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
}
}
-static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
- struct zone *zone, unsigned long size)
+static void __meminit zone_init_free_lists(struct zone *zone)
{
int order, t;
for_each_migratetype_order(order, t) {
@@ -2604,17 +2603,11 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
memset(p, 0, sizeof(*p));
- pcp = &p->pcp[0]; /* hot */
+ pcp = &p->pcp;
pcp->count = 0;
pcp->high = 6 * batch;
pcp->batch = max(1UL, 1 * batch);
INIT_LIST_HEAD(&pcp->list);
-
- pcp = &p->pcp[1]; /* cold*/
- pcp->count = 0;
- pcp->high = 2 * batch;
- pcp->batch = max(1UL, batch/2);
- INIT_LIST_HEAD(&pcp->list);
}
/*
@@ -2627,7 +2620,7 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
{
struct per_cpu_pages *pcp;
- pcp = &p->pcp[0]; /* hot list */
+ pcp = &p->pcp;
pcp->high = high;
pcp->batch = max(1UL, high/4);
if ((high/4) > (PAGE_SHIFT * 8))
@@ -2831,7 +2824,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
- zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+ zone_init_free_lists(zone);
return 0;
}
@@ -3978,10 +3971,23 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
int cpu = (unsigned long)hcpu;
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
- local_irq_disable();
- __drain_pages(cpu);
+ drain_pages(cpu);
+
+ /*
+ * Spill the event counters of the dead processor
+ * into the current processor's event counters.
+ * This artificially elevates the count of the current
+ * processor.
+ */
vm_events_fold_cpu(cpu);
- local_irq_enable();
+
+ /*
+ * Zero the differential counters of the dead processor
+ * so that the vm statistics are consistent.
+ *
+ * This is only okay since the processor is dead and cannot
+ * race with what we are doing.
+ */
refresh_cpu_vm_stats(cpu);
}
return NOTIFY_OK;
@@ -4480,7 +4486,7 @@ int set_migratetype_isolate(struct page *page)
out:
spin_unlock_irqrestore(&zone->lock, flags);
if (!ret)
- drain_all_local_pages();
+ drain_all_pages();
return ret;
}
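The page_alloc.c hunks above collapse the separate hot and cold per-cpu page lists into a single pcp list, freeing cold pages to the tail and hot pages to the head, and consolidate the drain helpers into drain_pages()/drain_local_pages()/drain_all_pages(). A minimal sketch of the single-list hot/cold policy follows; the demo_* names are stand-ins for illustration, not the kernel's types:

#include <linux/list.h>

/* Hypothetical stand-in for struct per_cpu_pages; not the kernel's layout. */
struct demo_pcp {
	int count;
	struct list_head list;
};

/*
 * Hot frees go to the head so they are reallocated first; cold frees go
 * to the tail so they are reallocated last (mirrors free_hot_cold_page()).
 */
static void demo_pcp_free(struct demo_pcp *pcp, struct list_head *lru, int cold)
{
	if (cold)
		list_add_tail(lru, &pcp->list);
	else
		list_add(lru, &pcp->list);
	pcp->count++;
}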
diff --git a/mm/page_io.c b/mm/page_io.c
index 3b97f6850273..065c4480eaf0 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -126,7 +126,7 @@ int swap_readpage(struct file *file, struct page *page)
int ret = 0;
BUG_ON(!PageLocked(page));
- ClearPageUptodate(page);
+ BUG_ON(PageUptodate(page));
bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
end_swap_bio_read);
if (bio == NULL) {
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
new file mode 100644
index 000000000000..b4f27d22da91
--- /dev/null
+++ b/mm/pagewalk.c
@@ -0,0 +1,131 @@
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/sched.h>
+
+static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ const struct mm_walk *walk, void *private)
+{
+ pte_t *pte;
+ int err = 0;
+
+ pte = pte_offset_map(pmd, addr);
+ do {
+ err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, private);
+ if (err)
+ break;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+
+ pte_unmap(pte);
+ return err;
+}
+
+static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
+ const struct mm_walk *walk, void *private)
+{
+ pmd_t *pmd;
+ unsigned long next;
+ int err = 0;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd)) {
+ if (walk->pte_hole)
+ err = walk->pte_hole(addr, next, private);
+ if (err)
+ break;
+ continue;
+ }
+ if (walk->pmd_entry)
+ err = walk->pmd_entry(pmd, addr, next, private);
+ if (!err && walk->pte_entry)
+ err = walk_pte_range(pmd, addr, next, walk, private);
+ if (err)
+ break;
+ } while (pmd++, addr = next, addr != end);
+
+ return err;
+}
+
+static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
+ const struct mm_walk *walk, void *private)
+{
+ pud_t *pud;
+ unsigned long next;
+ int err = 0;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud)) {
+ if (walk->pte_hole)
+ err = walk->pte_hole(addr, next, private);
+ if (err)
+ break;
+ continue;
+ }
+ if (walk->pud_entry)
+ err = walk->pud_entry(pud, addr, next, private);
+ if (!err && (walk->pmd_entry || walk->pte_entry))
+ err = walk_pmd_range(pud, addr, next, walk, private);
+ if (err)
+ break;
+ } while (pud++, addr = next, addr != end);
+
+ return err;
+}
+
+/**
+ * walk_page_range - walk a memory map's page tables with a callback
+ * @mm - memory map to walk
+ * @addr - starting address
+ * @end - ending address
+ * @walk - set of callbacks to invoke for each level of the tree
+ * @private - private data passed to the callback function
+ *
+ * Recursively walk the page table for the memory area in a VMA,
+ * calling supplied callbacks. Callbacks are called in-order (first
+ * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
+ * etc.). If lower-level callbacks are omitted, walking depth is reduced.
+ *
+ * Each callback receives an entry pointer, the start and end of the
+ * associated range, and a caller-supplied private data pointer.
+ *
+ * No locks are taken, but the bottom level iterator will map PTE
+ * directories from highmem if necessary.
+ *
+ * If any callback returns a non-zero value, the walk is aborted and
+ * the return value is propagated back to the caller. Otherwise 0 is returned.
+ */
+int walk_page_range(const struct mm_struct *mm,
+ unsigned long addr, unsigned long end,
+ const struct mm_walk *walk, void *private)
+{
+ pgd_t *pgd;
+ unsigned long next;
+ int err = 0;
+
+ if (addr >= end)
+ return err;
+
+ pgd = pgd_offset(mm, addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd)) {
+ if (walk->pte_hole)
+ err = walk->pte_hole(addr, next, private);
+ if (err)
+ break;
+ continue;
+ }
+ if (walk->pgd_entry)
+ err = walk->pgd_entry(pgd, addr, next, private);
+ if (!err &&
+ (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
+ err = walk_pud_range(pgd, addr, next, walk, private);
+ if (err)
+ break;
+ } while (pgd++, addr = next, addr != end);
+
+ return err;
+}
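mm/pagewalk.c above adds walk_page_range() plus the mm_walk callback table. The following usage sketch is built only from the signatures shown in this file; count_present() and its callback are illustrative assumptions, not kernel code:

#include <linux/mm.h>

/* Count resident PTEs in a range; the names here are hypothetical. */
static int count_present_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, void *private)
{
	unsigned long *count = private;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* returning non-zero would abort the walk */
}

static const struct mm_walk count_walk = {
	.pte_entry	= count_present_pte,	/* other levels stay NULL */
};

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	walk_page_range(mm, start, end, &count_walk, &count);
	return count;
}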
diff --git a/mm/rmap.c b/mm/rmap.c
index dbc2ca2057a5..a0e92a263d12 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -36,7 +36,6 @@
* mapping->tree_lock (widely used, in set_page_dirty,
* in arch-dependent flush_dcache_mmap_lock,
* within inode_lock in __sync_single_inode)
- * zone->lock (within radix tree node alloc)
*/
#include <linux/mm.h>
@@ -49,6 +48,7 @@
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
+#include <linux/memcontrol.h>
#include <asm/tlbflush.h>
@@ -284,7 +284,10 @@ static int page_referenced_one(struct page *page,
if (!pte)
goto out;
- if (ptep_clear_flush_young(vma, address, pte))
+ if (vma->vm_flags & VM_LOCKED) {
+ referenced++;
+ *mapcount = 1; /* break early from loop */
+ } else if (ptep_clear_flush_young(vma, address, pte))
referenced++;
/* Pretend the page is referenced if the task has the
@@ -299,7 +302,8 @@ out:
return referenced;
}
-static int page_referenced_anon(struct page *page)
+static int page_referenced_anon(struct page *page,
+ struct mem_cgroup *mem_cont)
{
unsigned int mapcount;
struct anon_vma *anon_vma;
@@ -312,6 +316,13 @@ static int page_referenced_anon(struct page *page)
mapcount = page_mapcount(page);
list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+ /*
+ * If we are reclaiming on behalf of a cgroup, skip
+ * counting references from vmas that belong to other
+ * cgroups.
+ */
+ if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+ continue;
referenced += page_referenced_one(page, vma, &mapcount);
if (!mapcount)
break;
@@ -332,7 +343,8 @@ static int page_referenced_anon(struct page *page)
*
* This function is only called from page_referenced for object-based pages.
*/
-static int page_referenced_file(struct page *page)
+static int page_referenced_file(struct page *page,
+ struct mem_cgroup *mem_cont)
{
unsigned int mapcount;
struct address_space *mapping = page->mapping;
@@ -365,6 +377,13 @@ static int page_referenced_file(struct page *page)
mapcount = page_mapcount(page);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+ /*
+ * If we are reclaiming on behalf of a cgroup, skip
+ * counting references from vmas that belong to other
+ * cgroups.
+ */
+ if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+ continue;
if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
== (VM_LOCKED|VM_MAYSHARE)) {
referenced++;
@@ -387,7 +406,8 @@ static int page_referenced_file(struct page *page)
* Quick test_and_clear_referenced for all mappings to a page,
* returns the number of ptes which referenced the page.
*/
-int page_referenced(struct page *page, int is_locked)
+int page_referenced(struct page *page, int is_locked,
+ struct mem_cgroup *mem_cont)
{
int referenced = 0;
@@ -399,14 +419,15 @@ int page_referenced(struct page *page, int is_locked)
if (page_mapped(page) && page->mapping) {
if (PageAnon(page))
- referenced += page_referenced_anon(page);
+ referenced += page_referenced_anon(page, mem_cont);
else if (is_locked)
- referenced += page_referenced_file(page);
+ referenced += page_referenced_file(page, mem_cont);
else if (TestSetPageLocked(page))
referenced++;
else {
if (page->mapping)
- referenced += page_referenced_file(page);
+ referenced +=
+ page_referenced_file(page, mem_cont);
unlock_page(page);
}
}
@@ -552,8 +573,14 @@ void page_add_anon_rmap(struct page *page,
VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
if (atomic_inc_and_test(&page->_mapcount))
__page_set_anon_rmap(page, vma, address);
- else
+ else {
__page_check_anon_rmap(page, vma, address);
+ /*
+ * We unconditionally charged during prepare; we uncharge here.
+ * This takes care of balancing the reference counts.
+ */
+ mem_cgroup_uncharge_page(page);
+ }
}
/*
@@ -584,6 +611,12 @@ void page_add_file_rmap(struct page *page)
{
if (atomic_inc_and_test(&page->_mapcount))
__inc_zone_page_state(page, NR_FILE_MAPPED);
+ else
+ /*
+ * We unconditionally charged during prepare; we uncharge here.
+ * This takes care of balancing the reference counts.
+ */
+ mem_cgroup_uncharge_page(page);
}
#ifdef CONFIG_DEBUG_VM
@@ -644,6 +677,8 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
page_clear_dirty(page);
set_page_dirty(page);
}
+ mem_cgroup_uncharge_page(page);
+
__dec_zone_page_state(page,
PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
}
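The rmap.c changes thread a struct mem_cgroup pointer through page_referenced() so that cgroup-driven reclaim only counts references from vmas in the cgroup being reclaimed, while global reclaim simply passes NULL. A short hedged sketch of that calling convention; the demo_* helper is an assumption:

/* Hypothetical helper showing the NULL-vs-cgroup convention. */
static int demo_was_referenced(struct page *page, int locked,
			       struct mem_cgroup *mem_cont)
{
	/*
	 * mem_cont == NULL: global reclaim, count every mapping.
	 * mem_cont != NULL: count only mappings from that cgroup.
	 */
	return page_referenced(page, locked, mem_cont) != 0;
}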
diff --git a/mm/shmem.c b/mm/shmem.c
index 51b3d6ccddab..85bed948fafc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -78,11 +78,10 @@
/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
- SGP_QUICK, /* don't try more than file page cache lookup */
SGP_READ, /* don't exceed i_size, don't allocate page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
+ SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */
SGP_WRITE, /* may exceed i_size, may allocate page */
- SGP_FAULT, /* same as SGP_CACHE, return with page locked */
};
static int shmem_getpage(struct inode *inode, unsigned long idx,
@@ -194,7 +193,7 @@ static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
};
static LIST_HEAD(shmem_swaplist);
-static DEFINE_SPINLOCK(shmem_swaplist_lock);
+static DEFINE_MUTEX(shmem_swaplist_mutex);
static void shmem_free_blocks(struct inode *inode, long pages)
{
@@ -207,6 +206,31 @@ static void shmem_free_blocks(struct inode *inode, long pages)
}
}
+static int shmem_reserve_inode(struct super_block *sb)
+{
+ struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+ if (sbinfo->max_inodes) {
+ spin_lock(&sbinfo->stat_lock);
+ if (!sbinfo->free_inodes) {
+ spin_unlock(&sbinfo->stat_lock);
+ return -ENOSPC;
+ }
+ sbinfo->free_inodes--;
+ spin_unlock(&sbinfo->stat_lock);
+ }
+ return 0;
+}
+
+static void shmem_free_inode(struct super_block *sb)
+{
+ struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+ if (sbinfo->max_inodes) {
+ spin_lock(&sbinfo->stat_lock);
+ sbinfo->free_inodes++;
+ spin_unlock(&sbinfo->stat_lock);
+ }
+}
+
/*
* shmem_recalc_inode - recalculate the size of an inode
*
@@ -731,6 +755,8 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
(void) shmem_getpage(inode,
attr->ia_size>>PAGE_CACHE_SHIFT,
&page, SGP_READ, NULL);
+ if (page)
+ unlock_page(page);
}
/*
* Reset SHMEM_PAGEIN flag so that shmem_truncate can
@@ -762,7 +788,6 @@ static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
static void shmem_delete_inode(struct inode *inode)
{
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
struct shmem_inode_info *info = SHMEM_I(inode);
if (inode->i_op->truncate == shmem_truncate) {
@@ -771,17 +796,13 @@ static void shmem_delete_inode(struct inode *inode)
inode->i_size = 0;
shmem_truncate(inode);
if (!list_empty(&info->swaplist)) {
- spin_lock(&shmem_swaplist_lock);
+ mutex_lock(&shmem_swaplist_mutex);
list_del_init(&info->swaplist);
- spin_unlock(&shmem_swaplist_lock);
+ mutex_unlock(&shmem_swaplist_mutex);
}
}
BUG_ON(inode->i_blocks);
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- sbinfo->free_inodes++;
- spin_unlock(&sbinfo->stat_lock);
- }
+ shmem_free_inode(inode->i_sb);
clear_inode(inode);
}
@@ -807,19 +828,22 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
struct page *subdir;
swp_entry_t *ptr;
int offset;
+ int error;
idx = 0;
ptr = info->i_direct;
spin_lock(&info->lock);
+ if (!info->swapped) {
+ list_del_init(&info->swaplist);
+ goto lost2;
+ }
limit = info->next_index;
size = limit;
if (size > SHMEM_NR_DIRECT)
size = SHMEM_NR_DIRECT;
offset = shmem_find_swp(entry, ptr, ptr+size);
- if (offset >= 0) {
- shmem_swp_balance_unmap();
+ if (offset >= 0)
goto found;
- }
if (!info->i_indirect)
goto lost2;
@@ -829,6 +853,14 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
if (unlikely(idx == stage)) {
shmem_dir_unmap(dir-1);
+ if (cond_resched_lock(&info->lock)) {
+ /* check it has not been truncated */
+ if (limit > info->next_index) {
+ limit = info->next_index;
+ if (idx >= limit)
+ goto lost2;
+ }
+ }
dir = shmem_dir_map(info->i_indirect) +
ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
while (!*dir) {
@@ -849,11 +881,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
if (size > ENTRIES_PER_PAGE)
size = ENTRIES_PER_PAGE;
offset = shmem_find_swp(entry, ptr, ptr+size);
+ shmem_swp_unmap(ptr);
if (offset >= 0) {
shmem_dir_unmap(dir);
goto found;
}
- shmem_swp_unmap(ptr);
}
}
lost1:
@@ -863,19 +895,69 @@ lost2:
return 0;
found:
idx += offset;
- inode = &info->vfs_inode;
- if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
- info->flags |= SHMEM_PAGEIN;
- shmem_swp_set(info, ptr + offset, 0);
- }
- shmem_swp_unmap(ptr);
+ inode = igrab(&info->vfs_inode);
spin_unlock(&info->lock);
+
/*
- * Decrement swap count even when the entry is left behind:
- * try_to_unuse will skip over mms, then reincrement count.
+ * Move _head_ to start search for next from here.
+ * But be careful: shmem_delete_inode checks list_empty without taking
+ * mutex, and there's an instant in list_move_tail when info->swaplist
+ * would appear empty, if it were the only one on shmem_swaplist. We
+ * could avoid doing it if inode is NULL; or use this minor optimization.
*/
- swap_free(entry);
- return 1;
+ if (shmem_swaplist.next != &info->swaplist)
+ list_move_tail(&shmem_swaplist, &info->swaplist);
+ mutex_unlock(&shmem_swaplist_mutex);
+
+ error = 1;
+ if (!inode)
+ goto out;
+ /* Precharge page while we can wait, compensate afterwards */
+ error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
+ if (error)
+ goto out;
+ error = radix_tree_preload(GFP_KERNEL);
+ if (error)
+ goto uncharge;
+ error = 1;
+
+ spin_lock(&info->lock);
+ ptr = shmem_swp_entry(info, idx, NULL);
+ if (ptr && ptr->val == entry.val)
+ error = add_to_page_cache(page, inode->i_mapping,
+ idx, GFP_NOWAIT);
+ if (error == -EEXIST) {
+ struct page *filepage = find_get_page(inode->i_mapping, idx);
+ error = 1;
+ if (filepage) {
+ /*
+ * There might be a more uptodate page coming down
+ * from a stacked writepage: forget our swappage if so.
+ */
+ if (PageUptodate(filepage))
+ error = 0;
+ page_cache_release(filepage);
+ }
+ }
+ if (!error) {
+ delete_from_swap_cache(page);
+ set_page_dirty(page);
+ info->flags |= SHMEM_PAGEIN;
+ shmem_swp_set(info, ptr, 0);
+ swap_free(entry);
+ error = 1; /* not an error, but entry was found */
+ }
+ if (ptr)
+ shmem_swp_unmap(ptr);
+ spin_unlock(&info->lock);
+ radix_tree_preload_end();
+uncharge:
+ mem_cgroup_uncharge_page(page);
+out:
+ unlock_page(page);
+ page_cache_release(page);
+ iput(inode); /* allows for NULL */
+ return error;
}
/*
@@ -887,20 +969,16 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
struct shmem_inode_info *info;
int found = 0;
- spin_lock(&shmem_swaplist_lock);
+ mutex_lock(&shmem_swaplist_mutex);
list_for_each_safe(p, next, &shmem_swaplist) {
info = list_entry(p, struct shmem_inode_info, swaplist);
- if (!info->swapped)
- list_del_init(&info->swaplist);
- else if (shmem_unuse_inode(info, entry, page)) {
- /* move head to start search for next from here */
- list_move_tail(&shmem_swaplist, &info->swaplist);
- found = 1;
- break;
- }
+ found = shmem_unuse_inode(info, entry, page);
+ cond_resched();
+ if (found)
+ goto out;
}
- spin_unlock(&shmem_swaplist_lock);
- return found;
+ mutex_unlock(&shmem_swaplist_mutex);
+out: return found; /* 0 or 1 or -ENOMEM */
}
/*
@@ -915,54 +993,65 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
struct inode *inode;
BUG_ON(!PageLocked(page));
- /*
- * shmem_backing_dev_info's capabilities prevent regular writeback or
- * sync from ever calling shmem_writepage; but a stacking filesystem
- * may use the ->writepage of its underlying filesystem, in which case
- * we want to do nothing when that underlying filesystem is tmpfs
- * (writing out to swap is useful as a response to memory pressure, but
- * of no use to stabilize the data) - just redirty the page, unlock it
- * and claim success in this case. AOP_WRITEPAGE_ACTIVATE, and the
- * page_mapped check below, must be avoided unless we're in reclaim.
- */
- if (!wbc->for_reclaim) {
- set_page_dirty(page);
- unlock_page(page);
- return 0;
- }
- BUG_ON(page_mapped(page));
-
mapping = page->mapping;
index = page->index;
inode = mapping->host;
info = SHMEM_I(inode);
if (info->flags & VM_LOCKED)
goto redirty;
- swap = get_swap_page();
- if (!swap.val)
+ if (!total_swap_pages)
goto redirty;
+ /*
+ * shmem_backing_dev_info's capabilities prevent regular writeback or
+ * sync from ever calling shmem_writepage; but a stacking filesystem
+ * may use the ->writepage of its underlying filesystem, in which case
+ * tmpfs should write out to swap only in response to memory pressure,
+ * and not for pdflush or sync. However, in those cases, we do still
+ * want to check if there's a redundant swappage to be discarded.
+ */
+ if (wbc->for_reclaim)
+ swap = get_swap_page();
+ else
+ swap.val = 0;
+
spin_lock(&info->lock);
- shmem_recalc_inode(inode);
if (index >= info->next_index) {
BUG_ON(!(info->flags & SHMEM_TRUNCATE));
goto unlock;
}
entry = shmem_swp_entry(info, index, NULL);
- BUG_ON(!entry);
- BUG_ON(entry->val);
+ if (entry->val) {
+ /*
+ * The more uptodate page coming down from a stacked
+ * writepage should replace our old swappage.
+ */
+ free_swap_and_cache(*entry);
+ shmem_swp_set(info, entry, 0);
+ }
+ shmem_recalc_inode(inode);
- if (move_to_swap_cache(page, swap) == 0) {
+ if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
+ remove_from_page_cache(page);
shmem_swp_set(info, entry, swap.val);
shmem_swp_unmap(entry);
+ if (list_empty(&info->swaplist))
+ inode = igrab(inode);
+ else
+ inode = NULL;
spin_unlock(&info->lock);
- if (list_empty(&info->swaplist)) {
- spin_lock(&shmem_swaplist_lock);
+ swap_duplicate(swap);
+ BUG_ON(page_mapped(page));
+ page_cache_release(page); /* pagecache ref */
+ set_page_dirty(page);
+ unlock_page(page);
+ if (inode) {
+ mutex_lock(&shmem_swaplist_mutex);
/* move instead of add in case we're racing */
list_move_tail(&info->swaplist, &shmem_swaplist);
- spin_unlock(&shmem_swaplist_lock);
+ mutex_unlock(&shmem_swaplist_mutex);
+ iput(inode);
}
- unlock_page(page);
return 0;
}
@@ -972,7 +1061,10 @@ unlock:
swap_free(swap);
redirty:
set_page_dirty(page);
- return AOP_WRITEPAGE_ACTIVATE; /* Return with the page locked */
+ if (wbc->for_reclaim)
+ return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
+ unlock_page(page);
+ return 0;
}
#ifdef CONFIG_NUMA
@@ -1025,53 +1117,33 @@ out:
return err;
}
-static struct page *shmem_swapin_async(struct shared_policy *p,
- swp_entry_t entry, unsigned long idx)
+static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
- struct page *page;
struct vm_area_struct pvma;
+ struct page *page;
/* Create a pseudo vma that just contains the policy */
- memset(&pvma, 0, sizeof(struct vm_area_struct));
- pvma.vm_end = PAGE_SIZE;
+ pvma.vm_start = 0;
pvma.vm_pgoff = idx;
- pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
- page = read_swap_cache_async(entry, &pvma, 0);
+ pvma.vm_ops = NULL;
+ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+ page = swapin_readahead(entry, gfp, &pvma, 0);
mpol_free(pvma.vm_policy);
return page;
}
-static struct page *shmem_swapin(struct shmem_inode_info *info,
- swp_entry_t entry, unsigned long idx)
-{
- struct shared_policy *p = &info->policy;
- int i, num;
- struct page *page;
- unsigned long offset;
-
- num = valid_swaphandles(entry, &offset);
- for (i = 0; i < num; offset++, i++) {
- page = shmem_swapin_async(p,
- swp_entry(swp_type(entry), offset), idx);
- if (!page)
- break;
- page_cache_release(page);
- }
- lru_add_drain(); /* Push any new pages onto the LRU now */
- return shmem_swapin_async(p, entry, idx);
-}
-
-static struct page *
-shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
- unsigned long idx)
+static struct page *shmem_alloc_page(gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
struct vm_area_struct pvma;
struct page *page;
- memset(&pvma, 0, sizeof(struct vm_area_struct));
- pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+ /* Create a pseudo vma that just contains the policy */
+ pvma.vm_start = 0;
pvma.vm_pgoff = idx;
- pvma.vm_end = PAGE_SIZE;
+ pvma.vm_ops = NULL;
+ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
page = alloc_page_vma(gfp, &pvma, 0);
mpol_free(pvma.vm_policy);
return page;
@@ -1083,15 +1155,14 @@ static inline int shmem_parse_mpol(char *value, int *policy,
return 1;
}
-static inline struct page *
-shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
+static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
- swapin_readahead(entry, 0, NULL);
- return read_swap_cache_async(entry, NULL, 0);
+ return swapin_readahead(entry, gfp, NULL, 0);
}
-static inline struct page *
-shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
+static inline struct page *shmem_alloc_page(gfp_t gfp,
+ struct shmem_inode_info *info, unsigned long idx)
{
return alloc_page(gfp);
}
@@ -1114,6 +1185,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
struct page *swappage;
swp_entry_t *entry;
swp_entry_t swap;
+ gfp_t gfp;
int error;
if (idx >= SHMEM_MAX_INDEX)
@@ -1126,7 +1198,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
* Normally, filepage is NULL on entry, and either found
* uptodate immediately, or allocated and zeroed, or read
* in under swappage, which is then assigned to filepage.
- * But shmem_readpage and shmem_write_begin pass in a locked
+ * But shmem_readpage (required for splice) passes in a locked
* filepage, which may be found not uptodate by other callers
* too, and may need to be copied from the swappage read in.
*/
@@ -1136,8 +1208,17 @@ repeat:
if (filepage && PageUptodate(filepage))
goto done;
error = 0;
- if (sgp == SGP_QUICK)
- goto failed;
+ gfp = mapping_gfp_mask(mapping);
+ if (!filepage) {
+ /*
+ * Try to preload while we can wait, to not make a habit of
+ * draining atomic reserves; but don't latch on to this cpu.
+ */
+ error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
+ if (error)
+ goto failed;
+ radix_tree_preload_end();
+ }
spin_lock(&info->lock);
shmem_recalc_inode(inode);
@@ -1160,7 +1241,7 @@ repeat:
*type |= VM_FAULT_MAJOR;
}
spin_unlock(&info->lock);
- swappage = shmem_swapin(info, swap, idx);
+ swappage = shmem_swapin(swap, gfp, info, idx);
if (!swappage) {
spin_lock(&info->lock);
entry = shmem_swp_alloc(info, idx, sgp);
@@ -1218,13 +1299,15 @@ repeat:
SetPageUptodate(filepage);
set_page_dirty(filepage);
swap_free(swap);
- } else if (!(error = move_from_swap_cache(
- swappage, idx, mapping))) {
+ } else if (!(error = add_to_page_cache(
+ swappage, mapping, idx, GFP_NOWAIT))) {
info->flags |= SHMEM_PAGEIN;
shmem_swp_set(info, entry, 0);
shmem_swp_unmap(entry);
+ delete_from_swap_cache(swappage);
spin_unlock(&info->lock);
filepage = swappage;
+ set_page_dirty(filepage);
swap_free(swap);
} else {
shmem_swp_unmap(entry);
@@ -1232,8 +1315,11 @@ repeat:
unlock_page(swappage);
page_cache_release(swappage);
if (error == -ENOMEM) {
- /* let kswapd refresh zone for GFP_ATOMICs */
- congestion_wait(WRITE, HZ/50);
+ /* allow reclaim from this memory cgroup */
+ error = mem_cgroup_cache_charge(NULL,
+ current->mm, gfp & ~__GFP_HIGHMEM);
+ if (error)
+ goto failed;
}
goto repeat;
}
@@ -1272,9 +1358,7 @@ repeat:
if (!filepage) {
spin_unlock(&info->lock);
- filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
- info,
- idx);
+ filepage = shmem_alloc_page(gfp, info, idx);
if (!filepage) {
shmem_unacct_blocks(info->flags, 1);
shmem_free_blocks(inode, 1);
@@ -1282,6 +1366,17 @@ repeat:
goto failed;
}
+ /* Precharge page while we can wait, compensate after */
+ error = mem_cgroup_cache_charge(filepage, current->mm,
+ gfp & ~__GFP_HIGHMEM);
+ if (error) {
+ page_cache_release(filepage);
+ shmem_unacct_blocks(info->flags, 1);
+ shmem_free_blocks(inode, 1);
+ filepage = NULL;
+ goto failed;
+ }
+
spin_lock(&info->lock);
entry = shmem_swp_alloc(info, idx, sgp);
if (IS_ERR(entry))
@@ -1291,8 +1386,9 @@ repeat:
shmem_swp_unmap(entry);
}
if (error || swap.val || 0 != add_to_page_cache_lru(
- filepage, mapping, idx, GFP_ATOMIC)) {
+ filepage, mapping, idx, GFP_NOWAIT)) {
spin_unlock(&info->lock);
+ mem_cgroup_uncharge_page(filepage);
page_cache_release(filepage);
shmem_unacct_blocks(info->flags, 1);
shmem_free_blocks(inode, 1);
@@ -1301,6 +1397,7 @@ repeat:
goto failed;
goto repeat;
}
+ mem_cgroup_uncharge_page(filepage);
info->flags |= SHMEM_PAGEIN;
}
@@ -1309,14 +1406,11 @@ repeat:
clear_highpage(filepage);
flush_dcache_page(filepage);
SetPageUptodate(filepage);
+ if (sgp == SGP_DIRTY)
+ set_page_dirty(filepage);
}
done:
- if (*pagep != filepage) {
- *pagep = filepage;
- if (sgp != SGP_FAULT)
- unlock_page(filepage);
-
- }
+ *pagep = filepage;
return 0;
failed:
@@ -1336,7 +1430,7 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
return VM_FAULT_SIGBUS;
- error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_FAULT, &ret);
+ error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
if (error)
return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1399,15 +1493,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
struct shmem_inode_info *info;
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- if (!sbinfo->free_inodes) {
- spin_unlock(&sbinfo->stat_lock);
- return NULL;
- }
- sbinfo->free_inodes--;
- spin_unlock(&sbinfo->stat_lock);
- }
+ if (shmem_reserve_inode(sb))
+ return NULL;
inode = new_inode(sb);
if (inode) {
@@ -1451,11 +1538,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
NULL);
break;
}
- } else if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- sbinfo->free_inodes++;
- spin_unlock(&sbinfo->stat_lock);
- }
+ } else
+ shmem_free_inode(sb);
return inode;
}
@@ -1494,123 +1578,30 @@ shmem_write_end(struct file *file, struct address_space *mapping,
{
struct inode *inode = mapping->host;
+ if (pos + copied > inode->i_size)
+ i_size_write(inode, pos + copied);
+
+ unlock_page(page);
set_page_dirty(page);
page_cache_release(page);
- if (pos+copied > inode->i_size)
- i_size_write(inode, pos+copied);
-
return copied;
}
-static ssize_t
-shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
-{
- struct inode *inode = file->f_path.dentry->d_inode;
- loff_t pos;
- unsigned long written;
- ssize_t err;
-
- if ((ssize_t) count < 0)
- return -EINVAL;
-
- if (!access_ok(VERIFY_READ, buf, count))
- return -EFAULT;
-
- mutex_lock(&inode->i_mutex);
-
- pos = *ppos;
- written = 0;
-
- err = generic_write_checks(file, &pos, &count, 0);
- if (err || !count)
- goto out;
-
- err = remove_suid(file->f_path.dentry);
- if (err)
- goto out;
-
- inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-
- do {
- struct page *page = NULL;
- unsigned long bytes, index, offset;
- char *kaddr;
- int left;
-
- offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
- index = pos >> PAGE_CACHE_SHIFT;
- bytes = PAGE_CACHE_SIZE - offset;
- if (bytes > count)
- bytes = count;
-
- /*
- * We don't hold page lock across copy from user -
- * what would it guard against? - so no deadlock here.
- * But it still may be a good idea to prefault below.
- */
-
- err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
- if (err)
- break;
-
- left = bytes;
- if (PageHighMem(page)) {
- volatile unsigned char dummy;
- __get_user(dummy, buf);
- __get_user(dummy, buf + bytes - 1);
-
- kaddr = kmap_atomic(page, KM_USER0);
- left = __copy_from_user_inatomic(kaddr + offset,
- buf, bytes);
- kunmap_atomic(kaddr, KM_USER0);
- }
- if (left) {
- kaddr = kmap(page);
- left = __copy_from_user(kaddr + offset, buf, bytes);
- kunmap(page);
- }
-
- written += bytes;
- count -= bytes;
- pos += bytes;
- buf += bytes;
- if (pos > inode->i_size)
- i_size_write(inode, pos);
-
- flush_dcache_page(page);
- set_page_dirty(page);
- mark_page_accessed(page);
- page_cache_release(page);
-
- if (left) {
- pos -= left;
- written -= left;
- err = -EFAULT;
- break;
- }
-
- /*
- * Our dirty pages are not counted in nr_dirty,
- * and we do not attempt to balance dirty pages.
- */
-
- cond_resched();
- } while (count);
-
- *ppos = pos;
- if (written)
- err = written;
-out:
- mutex_unlock(&inode->i_mutex);
- return err;
-}
-
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
struct inode *inode = filp->f_path.dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
unsigned long index, offset;
+ enum sgp_type sgp = SGP_READ;
+
+ /*
+ * Might this read be for a stacking filesystem? Then when reading
+ * holes of a sparse file, we actually need to allocate those pages,
+ * and even mark them dirty, so it cannot exceed the max_blocks limit.
+ */
+ if (segment_eq(get_fs(), KERNEL_DS))
+ sgp = SGP_DIRTY;
index = *ppos >> PAGE_CACHE_SHIFT;
offset = *ppos & ~PAGE_CACHE_MASK;
@@ -1629,12 +1620,14 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
break;
}
- desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
+ desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
if (desc->error) {
if (desc->error == -EINVAL)
desc->error = 0;
break;
}
+ if (page)
+ unlock_page(page);
/*
* We must evaluate after, since reads (unlike writes)
@@ -1798,22 +1791,16 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
struct inode *inode = old_dentry->d_inode;
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ int ret;
/*
* No ordinary (disk based) filesystem counts links as inodes;
* but each new link needs a new dentry, pinning lowmem, and
* tmpfs dentries cannot be pruned until they are unlinked.
*/
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- if (!sbinfo->free_inodes) {
- spin_unlock(&sbinfo->stat_lock);
- return -ENOSPC;
- }
- sbinfo->free_inodes--;
- spin_unlock(&sbinfo->stat_lock);
- }
+ ret = shmem_reserve_inode(inode->i_sb);
+ if (ret)
+ goto out;
dir->i_size += BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -1821,21 +1808,16 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
atomic_inc(&inode->i_count); /* New dentry reference */
dget(dentry); /* Extra pinning count for the created dentry */
d_instantiate(dentry, inode);
- return 0;
+out:
+ return ret;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- sbinfo->free_inodes++;
- spin_unlock(&sbinfo->stat_lock);
- }
- }
+ if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
+ shmem_free_inode(inode->i_sb);
dir->i_size -= BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
@@ -1924,6 +1906,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
iput(inode);
return error;
}
+ unlock_page(page);
inode->i_op = &shmem_symlink_inode_operations;
kaddr = kmap_atomic(page, KM_USER0);
memcpy(kaddr, symname, len);
@@ -1951,6 +1934,8 @@ static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
struct page *page = NULL;
int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
+ if (page)
+ unlock_page(page);
return page;
}
@@ -1996,8 +1981,7 @@ static int shmem_xattr_security_get(struct inode *inode, const char *name,
{
if (strcmp(name, "") == 0)
return -EINVAL;
- return security_inode_getsecurity(inode, name, buffer, size,
- -EOPNOTSUPP);
+ return xattr_getsecurity(inode, name, buffer, size);
}
static int shmem_xattr_security_set(struct inode *inode, const char *name,
@@ -2138,7 +2122,7 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid,
}
if (*rest)
goto bad_val;
- *blocks = size >> PAGE_CACHE_SHIFT;
+ *blocks = DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
} else if (!strcmp(this_char,"nr_blocks")) {
*blocks = memparse(value,&rest);
if (*rest)
@@ -2375,7 +2359,8 @@ static const struct file_operations shmem_file_operations = {
#ifdef CONFIG_TMPFS
.llseek = generic_file_llseek,
.read = shmem_file_read,
- .write = shmem_file_write,
+ .write = do_sync_write,
+ .aio_write = generic_file_aio_write,
.fsync = simple_sync_file,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
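The shmem.c hunks factor the free_inodes bookkeeping into shmem_reserve_inode()/shmem_free_inode(), which the later hunks call in matched pairs: reserve before creating an inode or link, free on failure or removal. A sketch of that pairing, mirroring shmem_get_inode() above; demo_shmem_make_inode() is a hypothetical caller:

static struct inode *demo_shmem_make_inode(struct super_block *sb)
{
	struct inode *inode;

	if (shmem_reserve_inode(sb))	/* fails with -ENOSPC when exhausted */
		return NULL;

	inode = new_inode(sb);
	if (!inode)
		shmem_free_inode(sb);	/* hand the reservation back */
	return inode;
}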
diff --git a/mm/slob.c b/mm/slob.c
index 773a7aa80ab5..e2c3c0ec5463 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -12,10 +12,17 @@
* allocator is as little as 2 bytes, however typically most architectures
* will require 4 bytes on 32-bit and 8 bytes on 64-bit.
*
- * The slob heap is a linked list of pages from alloc_pages(), and
- * within each page, there is a singly-linked list of free blocks (slob_t).
- * The heap is grown on demand and allocation from the heap is currently
- * first-fit.
+ * The slob heap is a set of linked lists of pages from alloc_pages(),
+ * and within each page, there is a singly-linked list of free blocks
+ * (slob_t). The heap is grown on demand. To reduce fragmentation,
+ * heap pages are segregated into three lists, with objects less than
+ * 256 bytes, objects less than 1024 bytes, and all other objects.
+ *
+ * Allocation from heap involves first searching for a page with
+ * sufficient free blocks (using a next-fit-like approach) followed by
+ * a first-fit scan of the page. Deallocation inserts objects back
+ * into the free list in address order, so this is effectively an
+ * address-ordered first fit.
*
* Above this is an implementation of kmalloc/kfree. Blocks returned
* from kmalloc are prepended with a 4-byte header with the kmalloc size.
@@ -110,9 +117,13 @@ static inline void free_slob_page(struct slob_page *sp)
}
/*
- * All (partially) free slob pages go on this list.
+ * All partially free slob pages go on these lists.
*/
-static LIST_HEAD(free_slob_pages);
+#define SLOB_BREAK1 256
+#define SLOB_BREAK2 1024
+static LIST_HEAD(free_slob_small);
+static LIST_HEAD(free_slob_medium);
+static LIST_HEAD(free_slob_large);
/*
* slob_page: True for all slob pages (false for bigblock pages)
@@ -140,9 +151,9 @@ static inline int slob_page_free(struct slob_page *sp)
return test_bit(PG_private, &sp->flags);
}
-static inline void set_slob_page_free(struct slob_page *sp)
+static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
- list_add(&sp->list, &free_slob_pages);
+ list_add(&sp->list, list);
__set_bit(PG_private, &sp->flags);
}
@@ -294,12 +305,20 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
struct slob_page *sp;
struct list_head *prev;
+ struct list_head *slob_list;
slob_t *b = NULL;
unsigned long flags;
+ if (size < SLOB_BREAK1)
+ slob_list = &free_slob_small;
+ else if (size < SLOB_BREAK2)
+ slob_list = &free_slob_medium;
+ else
+ slob_list = &free_slob_large;
+
spin_lock_irqsave(&slob_lock, flags);
/* Iterate through each partially free page, try to find room */
- list_for_each_entry(sp, &free_slob_pages, list) {
+ list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
/*
* If there's a node specification, search for a partial
@@ -321,9 +340,9 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
/* Improve fragment distribution and reduce our average
* search time by starting our next search here. (see
* Knuth vol 1, sec 2.5, pg 449) */
- if (prev != free_slob_pages.prev &&
- free_slob_pages.next != prev->next)
- list_move_tail(&free_slob_pages, prev->next);
+ if (prev != slob_list->prev &&
+ slob_list->next != prev->next)
+ list_move_tail(slob_list, prev->next);
break;
}
spin_unlock_irqrestore(&slob_lock, flags);
@@ -341,7 +360,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
sp->free = b;
INIT_LIST_HEAD(&sp->list);
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
- set_slob_page_free(sp);
+ set_slob_page_free(sp, slob_list);
b = slob_page_alloc(sp, size, align);
BUG_ON(!b);
spin_unlock_irqrestore(&slob_lock, flags);
@@ -387,7 +406,7 @@ static void slob_free(void *block, int size)
set_slob(b, units,
(void *)((unsigned long)(b +
SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
- set_slob_page_free(sp);
+ set_slob_page_free(sp, &free_slob_small);
goto out;
}
@@ -398,6 +417,10 @@ static void slob_free(void *block, int size)
sp->units += units;
if (b < sp->free) {
+ if (b + units == sp->free) {
+ units += slob_units(sp->free);
+ sp->free = slob_next(sp->free);
+ }
set_slob(b, units, sp->free);
sp->free = b;
} else {
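slob.c above replaces the single free_slob_pages list with three size-segregated lists split at SLOB_BREAK1 (256 bytes) and SLOB_BREAK2 (1024 bytes). The selection step from slob_alloc(), shown in isolation; pick_slob_list() is an illustrative name only:

static struct list_head *pick_slob_list(size_t size)
{
	if (size < SLOB_BREAK1)		/* objects under 256 bytes */
		return &free_slob_small;
	else if (size < SLOB_BREAK2)	/* objects under 1024 bytes */
		return &free_slob_medium;
	else
		return &free_slob_large;
}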
diff --git a/mm/slub.c b/mm/slub.c
index 3f056677fa8f..e2989ae243b5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -149,6 +149,13 @@ static inline void ClearSlabDebug(struct page *page)
/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST
+/*
+ * Currently fastpath is not supported if preemption is enabled.
+ */
+#if defined(CONFIG_FAST_CMPXCHG_LOCAL) && !defined(CONFIG_PREEMPT)
+#define SLUB_FASTPATH
+#endif
+
#if PAGE_SHIFT <= 12
/*
@@ -243,6 +250,7 @@ enum track_item { TRACK_ALLOC, TRACK_FREE };
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
+
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
@@ -251,8 +259,16 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
{
kfree(s);
}
+
#endif
+static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
+{
+#ifdef CONFIG_SLUB_STATS
+ c->stat[si]++;
+#endif
+}
+
/********************************************************************
* Core slab cache functions
*******************************************************************/
@@ -280,15 +296,32 @@ static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
#endif
}
+/*
+ * The end pointer in a slab is special. It points to the first object in the
+ * slab but has bit 0 set to mark it.
+ *
+ * Note that SLUB relies on page_mapping returning NULL for pages with bit 0
+ * in the mapping set.
+ */
+static inline int is_end(void *addr)
+{
+ return (unsigned long)addr & PAGE_MAPPING_ANON;
+}
+
+void *slab_address(struct page *page)
+{
+ return page->end - PAGE_MAPPING_ANON;
+}
+
static inline int check_valid_pointer(struct kmem_cache *s,
struct page *page, const void *object)
{
void *base;
- if (!object)
+ if (object == page->end)
return 1;
- base = page_address(page);
+ base = slab_address(page);
if (object < base || object >= base + s->objects * s->size ||
(object - base) % s->size) {
return 0;
@@ -321,7 +354,8 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
/* Scan freelist */
#define for_each_free_object(__p, __s, __free) \
- for (__p = (__free); __p; __p = get_freepointer((__s), __p))
+ for (__p = (__free); (__p) != page->end; __p = get_freepointer((__s),\
+ __p))
/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -473,7 +507,7 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
unsigned int off; /* Offset of last byte */
- u8 *addr = page_address(page);
+ u8 *addr = slab_address(page);
print_tracking(s, p);
@@ -651,7 +685,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
if (!(s->flags & SLAB_POISON))
return 1;
- start = page_address(page);
+ start = slab_address(page);
end = start + (PAGE_SIZE << s->order);
length = s->objects * s->size;
remainder = end - (start + length);
@@ -685,9 +719,10 @@ static int check_object(struct kmem_cache *s, struct page *page,
endobject, red, s->inuse - s->objsize))
return 0;
} else {
- if ((s->flags & SLAB_POISON) && s->objsize < s->inuse)
- check_bytes_and_report(s, page, p, "Alignment padding", endobject,
- POISON_INUSE, s->inuse - s->objsize);
+ if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
+ check_bytes_and_report(s, page, p, "Alignment padding",
+ endobject, POISON_INUSE, s->inuse - s->objsize);
+ }
}
if (s->flags & SLAB_POISON) {
@@ -718,7 +753,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
* of the free objects in this slab. May cause
* another error because the object count is now wrong.
*/
- set_freepointer(s, p, NULL);
+ set_freepointer(s, p, page->end);
return 0;
}
return 1;
@@ -752,18 +787,18 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
void *fp = page->freelist;
void *object = NULL;
- while (fp && nr <= s->objects) {
+ while (fp != page->end && nr <= s->objects) {
if (fp == search)
return 1;
if (!check_valid_pointer(s, page, fp)) {
if (object) {
object_err(s, page, object,
"Freechain corrupt");
- set_freepointer(s, object, NULL);
+ set_freepointer(s, object, page->end);
break;
} else {
slab_err(s, page, "Freepointer corrupt");
- page->freelist = NULL;
+ page->freelist = page->end;
page->inuse = s->objects;
slab_fix(s, "Freelist cleared");
return 0;
@@ -869,7 +904,7 @@ bad:
*/
slab_fix(s, "Marking all objects used");
page->inuse = s->objects;
- page->freelist = NULL;
+ page->freelist = page->end;
}
return 0;
}
@@ -894,11 +929,10 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
return 0;
if (unlikely(s != page->slab)) {
- if (!PageSlab(page))
+ if (!PageSlab(page)) {
slab_err(s, page, "Attempt to free object(0x%p) "
"outside of slab", object);
- else
- if (!page->slab) {
+ } else if (!page->slab) {
printk(KERN_ERR
"SLUB <none>: no slab for object 0x%p.\n",
object);
@@ -910,7 +944,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
}
/* Special debug activities for freeing objects */
- if (!SlabFrozen(page) && !page->freelist)
+ if (!SlabFrozen(page) && page->freelist == page->end)
remove_full(s, page);
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_FREE, addr);
@@ -1007,7 +1041,7 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
*/
if (slub_debug && (!slub_debug_slabs ||
strncmp(slub_debug_slabs, name,
- strlen(slub_debug_slabs)) == 0))
+ strlen(slub_debug_slabs)) == 0))
flags |= slub_debug;
}
@@ -1102,6 +1136,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
SetSlabDebug(page);
start = page_address(page);
+ page->end = start + 1;
if (unlikely(s->flags & SLAB_POISON))
memset(start, POISON_INUSE, PAGE_SIZE << s->order);
@@ -1113,7 +1148,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
last = p;
}
setup_object(s, page, last);
- set_freepointer(s, last, NULL);
+ set_freepointer(s, last, page->end);
page->freelist = start;
page->inuse = 0;
@@ -1129,7 +1164,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
void *p;
slab_pad_check(s, page);
- for_each_object(p, s, page_address(page))
+ for_each_object(p, s, slab_address(page))
check_object(s, page, p, 0);
ClearSlabDebug(page);
}
@@ -1139,6 +1174,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-pages);
+ page->mapping = NULL;
__free_pages(page, s->order);
}
@@ -1183,7 +1219,7 @@ static __always_inline void slab_lock(struct page *page)
static __always_inline void slab_unlock(struct page *page)
{
- bit_spin_unlock(PG_locked, &page->flags);
+ __bit_spin_unlock(PG_locked, &page->flags);
}
static __always_inline int slab_trylock(struct page *page)
@@ -1294,8 +1330,8 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
get_cycles() % 1024 > s->remote_node_defrag_ratio)
return NULL;
- zonelist = &NODE_DATA(slab_node(current->mempolicy))
- ->node_zonelists[gfp_zone(flags)];
+ zonelist = &NODE_DATA(
+ slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
for (z = zonelist->zones; *z; z++) {
struct kmem_cache_node *n;
@@ -1337,17 +1373,22 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+ struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
ClearSlabFrozen(page);
if (page->inuse) {
- if (page->freelist)
+ if (page->freelist != page->end) {
add_partial(n, page, tail);
- else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
- add_full(n, page);
+ stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+ } else {
+ stat(c, DEACTIVATE_FULL);
+ if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
+ add_full(n, page);
+ }
slab_unlock(page);
-
} else {
+ stat(c, DEACTIVATE_EMPTY);
if (n->nr_partial < MIN_PARTIAL) {
/*
* Adding an empty slab to the partial slabs in order
@@ -1361,6 +1402,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
slab_unlock(page);
} else {
slab_unlock(page);
+ stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
discard_slab(s, page);
}
}
@@ -1373,12 +1415,19 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
struct page *page = c->page;
int tail = 1;
+
+ if (c->freelist)
+ stat(c, DEACTIVATE_REMOTE_FREES);
/*
* Merge cpu freelist into freelist. Typically we get here
* because both freelists are empty. So this is unlikely
* to occur.
+ *
+ * We need to use is_end() here because deactivate_slab() may
+ * be called for a debug slab. Then c->freelist may contain
+ * a dummy pointer.
*/
- while (unlikely(c->freelist)) {
+ while (unlikely(!is_end(c->freelist))) {
void **object;
tail = 0; /* Hot objects. Put the slab first */
@@ -1398,6 +1447,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
+ stat(c, CPUSLAB_FLUSH);
slab_lock(c->page);
deactivate_slab(s, c);
}
@@ -1469,16 +1519,21 @@ static void *__slab_alloc(struct kmem_cache *s,
{
void **object;
struct page *new;
+#ifdef SLUB_FASTPATH
+ unsigned long flags;
+ local_irq_save(flags);
+#endif
if (!c->page)
goto new_slab;
slab_lock(c->page);
if (unlikely(!node_match(c, node)))
goto another_slab;
+ stat(c, ALLOC_REFILL);
load_freelist:
object = c->page->freelist;
- if (unlikely(!object))
+ if (unlikely(object == c->page->end))
goto another_slab;
if (unlikely(SlabDebug(c->page)))
goto debug;
@@ -1486,9 +1541,15 @@ load_freelist:
object = c->page->freelist;
c->freelist = object[c->offset];
c->page->inuse = s->objects;
- c->page->freelist = NULL;
+ c->page->freelist = c->page->end;
c->node = page_to_nid(c->page);
+unlock_out:
slab_unlock(c->page);
+ stat(c, ALLOC_SLOWPATH);
+out:
+#ifdef SLUB_FASTPATH
+ local_irq_restore(flags);
+#endif
return object;
another_slab:
@@ -1498,6 +1559,7 @@ new_slab:
new = get_partial(s, gfpflags, node);
if (new) {
c->page = new;
+ stat(c, ALLOC_FROM_PARTIAL);
goto load_freelist;
}
@@ -1511,6 +1573,7 @@ new_slab:
if (new) {
c = get_cpu_slab(s, smp_processor_id());
+ stat(c, ALLOC_SLAB);
if (c->page)
flush_slab(s, c);
slab_lock(new);
@@ -1518,7 +1581,8 @@ new_slab:
c->page = new;
goto load_freelist;
}
- return NULL;
+ object = NULL;
+ goto out;
debug:
object = c->page->freelist;
if (!alloc_debug_processing(s, c->page, object, addr))
@@ -1527,8 +1591,7 @@ debug:
c->page->inuse++;
c->page->freelist = object[c->offset];
c->node = -1;
- slab_unlock(c->page);
- return object;
+ goto unlock_out;
}
/*
@@ -1545,20 +1608,50 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
gfp_t gfpflags, int node, void *addr)
{
void **object;
- unsigned long flags;
struct kmem_cache_cpu *c;
+/*
+ * The SLUB_FASTPATH path is provisional and is currently disabled if the
+ * kernel is compiled with preemption or if the arch does not support
+ * fast cmpxchg operations. There are a couple of coming changes that will
+ * simplify matters and allow preemption. Ultimately we may end up making
+ * SLUB_FASTPATH the default.
+ *
+ * 1. The introduction of the per cpu allocator will avoid array lookups
+ * through get_cpu_slab(). A special register can be used instead.
+ *
+ * 2. The introduction of per cpu atomic operations (cpu_ops) means that
+ * we can realize the logic here entirely with per cpu atomics. The
+ * per cpu atomic ops will take care of the preemption issues.
+ */
+
+#ifdef SLUB_FASTPATH
+ c = get_cpu_slab(s, raw_smp_processor_id());
+ do {
+ object = c->freelist;
+ if (unlikely(is_end(object) || !node_match(c, node))) {
+ object = __slab_alloc(s, gfpflags, node, addr, c);
+ break;
+ }
+ stat(c, ALLOC_FASTPATH);
+ } while (cmpxchg_local(&c->freelist, object, object[c->offset])
+ != object);
+#else
+ unsigned long flags;
+
local_irq_save(flags);
c = get_cpu_slab(s, smp_processor_id());
- if (unlikely(!c->freelist || !node_match(c, node)))
+ if (unlikely(is_end(c->freelist) || !node_match(c, node)))
object = __slab_alloc(s, gfpflags, node, addr, c);
else {
object = c->freelist;
c->freelist = object[c->offset];
+ stat(c, ALLOC_FASTPATH);
}
local_irq_restore(flags);
+#endif
if (unlikely((gfpflags & __GFP_ZERO) && object))
memset(object, 0, c->objsize);
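When SLUB_FASTPATH is enabled, slab_alloc() above pops an object off the per-cpu freelist with cmpxchg_local() instead of disabling interrupts, retrying if an interrupt changed c->freelist between the read and the exchange. A stripped-down sketch of that retry pattern on a hypothetical per-cpu stack; the demo_* names are not kernel API:

struct demo_obj {
	struct demo_obj *next;
};

/* Lock-free pop using the same read/retry discipline as the fastpath. */
static struct demo_obj *demo_pop(struct demo_obj **head)
{
	struct demo_obj *old;

	do {
		old = *head;
		if (!old)
			return NULL;	/* empty: a real caller would take the slow path */
	} while (cmpxchg_local(head, old, old->next) != old);

	return old;
}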
@@ -1593,7 +1686,15 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
{
void *prior;
void **object = (void *)x;
+ struct kmem_cache_cpu *c;
+
+#ifdef SLUB_FASTPATH
+ unsigned long flags;
+ local_irq_save(flags);
+#endif
+ c = get_cpu_slab(s, raw_smp_processor_id());
+ stat(c, FREE_SLOWPATH);
slab_lock(page);
if (unlikely(SlabDebug(page)))
@@ -1603,8 +1704,10 @@ checks_ok:
page->freelist = object;
page->inuse--;
- if (unlikely(SlabFrozen(page)))
+ if (unlikely(SlabFrozen(page))) {
+ stat(c, FREE_FROZEN);
goto out_unlock;
+ }
if (unlikely(!page->inuse))
goto slab_empty;
@@ -1614,21 +1717,31 @@ checks_ok:
* was not on the partial list before
* then add it.
*/
- if (unlikely(!prior))
+ if (unlikely(prior == page->end)) {
add_partial(get_node(s, page_to_nid(page)), page, 1);
+ stat(c, FREE_ADD_PARTIAL);
+ }
out_unlock:
slab_unlock(page);
+#ifdef SLUB_FASTPATH
+ local_irq_restore(flags);
+#endif
return;
slab_empty:
- if (prior)
+ if (prior != page->end) {
/*
* Slab still on the partial list.
*/
remove_partial(s, page);
-
+ stat(c, FREE_REMOVE_PARTIAL);
+ }
slab_unlock(page);
+ stat(c, FREE_SLAB);
+#ifdef SLUB_FASTPATH
+ local_irq_restore(flags);
+#endif
discard_slab(s, page);
return;
@@ -1653,19 +1766,49 @@ static __always_inline void slab_free(struct kmem_cache *s,
struct page *page, void *x, void *addr)
{
void **object = (void *)x;
- unsigned long flags;
struct kmem_cache_cpu *c;
+#ifdef SLUB_FASTPATH
+ void **freelist;
+
+ c = get_cpu_slab(s, raw_smp_processor_id());
+ debug_check_no_locks_freed(object, s->objsize);
+ do {
+ freelist = c->freelist;
+ barrier();
+ /*
+ * If the compiler were to reorder the retrieval of c->page so
+ * that it came before c->freelist, an interrupt could change
+ * the cpu slab before we retrieve c->freelist. We
+ * could be matching on a page no longer active and put the
+ * object onto the freelist of the wrong slab.
+ *
+ * On the other hand: If we already have the freelist pointer
+ * then any change of cpu_slab will cause the cmpxchg to fail
+ * since the freelist pointers are unique per slab.
+ */
+ if (unlikely(page != c->page || c->node < 0)) {
+ __slab_free(s, page, x, addr, c->offset);
+ break;
+ }
+ object[c->offset] = freelist;
+ stat(c, FREE_FASTPATH);
+ } while (cmpxchg_local(&c->freelist, freelist, object) != freelist);
+#else
+ unsigned long flags;
+
local_irq_save(flags);
debug_check_no_locks_freed(object, s->objsize);
c = get_cpu_slab(s, smp_processor_id());
if (likely(page == c->page && c->node >= 0)) {
object[c->offset] = c->freelist;
c->freelist = object;
+ stat(c, FREE_FASTPATH);
} else
__slab_free(s, page, x, addr, c->offset);
local_irq_restore(flags);
+#endif
}
void kmem_cache_free(struct kmem_cache *s, void *x)
@@ -1842,7 +1985,7 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
struct kmem_cache_cpu *c)
{
c->page = NULL;
- c->freelist = NULL;
+ c->freelist = (void *)PAGE_MAPPING_ANON;
c->node = 0;
c->offset = s->offset / sizeof(void *);
c->objsize = s->objsize;
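init_kmem_cache_cpu() now seeds the empty freelist with PAGE_MAPPING_ANON rather than NULL, which suggests the new is_end()/page->end checks test a tagged sentinel instead of a NULL pointer. The helper below is only a plausible reading of that encoding; the actual definitions of is_end() and page->end are not part of this hunk.

/* Hypothetical sketch: an end-of-freelist marker is any pointer with
 * the PAGE_MAPPING_ANON bit (bit 0) set, so the empty-list value above
 * already tests as "end". */
static inline int toy_is_end(const void *freelist)
{
	return (unsigned long)freelist & PAGE_MAPPING_ANON;
}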
@@ -2446,7 +2589,8 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
goto unlock_out;
realsize = kmalloc_caches[index].objsize;
- text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", (unsigned int)realsize),
+ text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
+ (unsigned int)realsize);
s = kmalloc(kmem_size, flags & ~SLUB_DMA);
if (!s || !text || !kmem_cache_open(s, flags, text,
@@ -2601,6 +2745,7 @@ EXPORT_SYMBOL(ksize);
void kfree(const void *x)
{
struct page *page;
+ void *object = (void *)x;
if (unlikely(ZERO_OR_NULL_PTR(x)))
return;
@@ -2610,7 +2755,7 @@ void kfree(const void *x)
put_page(page);
return;
}
- slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
+ slab_free(page->slab, page, object, __builtin_return_address(0));
}
EXPORT_SYMBOL(kfree);
@@ -2896,7 +3041,8 @@ void __init kmem_cache_init(void)
#endif
- printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
+ printk(KERN_INFO
+ "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
" CPUs=%d, Nodes=%d\n",
caches, cache_line_size(),
slub_min_order, slub_max_order, slub_min_objects,
@@ -3063,7 +3209,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
}
static struct notifier_block __cpuinitdata slab_notifier = {
- &slab_cpuup_callback, NULL, 0
+ .notifier_call = slab_cpuup_callback
};
#endif
@@ -3104,7 +3250,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
unsigned long *map)
{
void *p;
- void *addr = page_address(page);
+ void *addr = slab_address(page);
if (!check_slab(s, page) ||
!on_freelist(s, page, NULL))
@@ -3221,8 +3367,9 @@ static void resiliency_test(void)
p = kzalloc(32, GFP_KERNEL);
p[32 + sizeof(void *)] = 0x34;
printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
- " 0x34 -> -0x%p\n", p);
- printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
+ " 0x34 -> -0x%p\n", p);
+ printk(KERN_ERR
+ "If allocated object is overwritten then not detectable\n\n");
validate_slab_cache(kmalloc_caches + 5);
p = kzalloc(64, GFP_KERNEL);
@@ -3230,7 +3377,8 @@ static void resiliency_test(void)
*p = 0x56;
printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
p);
- printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
+ printk(KERN_ERR
+ "If allocated object is overwritten then not detectable\n\n");
validate_slab_cache(kmalloc_caches + 6);
printk(KERN_ERR "\nB. Corruption after free\n");
@@ -3243,7 +3391,8 @@ static void resiliency_test(void)
p = kzalloc(256, GFP_KERNEL);
kfree(p);
p[50] = 0x9a;
- printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
+ printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
+ p);
validate_slab_cache(kmalloc_caches + 8);
p = kzalloc(512, GFP_KERNEL);
@@ -3384,7 +3533,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
static void process_slab(struct loc_track *t, struct kmem_cache *s,
struct page *page, enum track_item alloc)
{
- void *addr = page_address(page);
+ void *addr = slab_address(page);
DECLARE_BITMAP(map, s->objects);
void *p;
@@ -3872,6 +4021,62 @@ static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
SLAB_ATTR(remote_node_defrag_ratio);
#endif
+#ifdef CONFIG_SLUB_STATS
+
+static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
+{
+ unsigned long sum = 0;
+ int cpu;
+ int len;
+ int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ for_each_online_cpu(cpu) {
+ unsigned x = get_cpu_slab(s, cpu)->stat[si];
+
+ data[cpu] = x;
+ sum += x;
+ }
+
+ len = sprintf(buf, "%lu", sum);
+
+ for_each_online_cpu(cpu) {
+ if (data[cpu] && len < PAGE_SIZE - 20)
+ len += sprintf(buf + len, " c%d=%u", cpu, data[cpu]);
+ }
+ kfree(data);
+ return len + sprintf(buf + len, "\n");
+}
+
+#define STAT_ATTR(si, text) \
+static ssize_t text##_show(struct kmem_cache *s, char *buf) \
+{ \
+ return show_stat(s, buf, si); \
+} \
+SLAB_ATTR_RO(text);
+
+STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
+STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
+STAT_ATTR(FREE_FASTPATH, free_fastpath);
+STAT_ATTR(FREE_SLOWPATH, free_slowpath);
+STAT_ATTR(FREE_FROZEN, free_frozen);
+STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
+STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
+STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
+STAT_ATTR(ALLOC_SLAB, alloc_slab);
+STAT_ATTR(ALLOC_REFILL, alloc_refill);
+STAT_ATTR(FREE_SLAB, free_slab);
+STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
+STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
+STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
+STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
+STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
+STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
+
+#endif
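Given the two sprintf() formats in show_stat() above, reading one of these files, say /sys/kernel/slab/kmalloc-64/alloc_fastpath on a two-CPU machine, would produce a line of the form below; the numbers are invented, and only non-zero per-cpu counters are listed after the global sum:

1234567 c0=620103 c1=614464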
+
static struct attribute *slab_attrs[] = {
&slab_size_attr.attr,
&object_size_attr.attr,
@@ -3902,6 +4107,25 @@ static struct attribute *slab_attrs[] = {
#ifdef CONFIG_NUMA
&remote_node_defrag_ratio_attr.attr,
#endif
+#ifdef CONFIG_SLUB_STATS
+ &alloc_fastpath_attr.attr,
+ &alloc_slowpath_attr.attr,
+ &free_fastpath_attr.attr,
+ &free_slowpath_attr.attr,
+ &free_frozen_attr.attr,
+ &free_add_partial_attr.attr,
+ &free_remove_partial_attr.attr,
+ &alloc_from_partial_attr.attr,
+ &alloc_slab_attr.attr,
+ &alloc_refill_attr.attr,
+ &free_slab_attr.attr,
+ &cpuslab_flush_attr.attr,
+ &deactivate_full_attr.attr,
+ &deactivate_empty_attr.attr,
+ &deactivate_to_head_attr.attr,
+ &deactivate_to_tail_attr.attr,
+ &deactivate_remote_frees_attr.attr,
+#endif
NULL
};
diff --git a/mm/sparse.c b/mm/sparse.c
index a2183cb5d524..f6a43c09c322 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -237,7 +237,7 @@ static unsigned long *__kmalloc_section_usemap(void)
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
+static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
{
unsigned long *usemap;
struct mem_section *ms = __nr_to_section(pnum);
@@ -353,17 +353,9 @@ static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
return __kmalloc_section_memmap(nr_pages);
}
-static int vaddr_in_vmalloc_area(void *addr)
-{
- if (addr >= (void *)VMALLOC_START &&
- addr < (void *)VMALLOC_END)
- return 1;
- return 0;
-}
-
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
- if (vaddr_in_vmalloc_area(memmap))
+ if (is_vmalloc_addr(memmap))
vfree(memmap);
else
free_pages((unsigned long)memmap,
diff --git a/mm/swap.c b/mm/swap.c
index 9ac88323d237..710a20bb9749 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -29,6 +29,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
+#include <linux/memcontrol.h>
/* How many pages do we try to swap or page in/out together? */
int page_cluster;
@@ -41,7 +42,7 @@ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, };
* This path almost never happens for VM activity - pages are normally
* freed via pagevecs. But it gets used by networking.
*/
-static void fastcall __page_cache_release(struct page *page)
+static void __page_cache_release(struct page *page)
{
if (PageLRU(page)) {
unsigned long flags;
@@ -165,7 +166,7 @@ int rotate_reclaimable_page(struct page *page)
/*
* FIXME: speed this up?
*/
-void fastcall activate_page(struct page *page)
+void activate_page(struct page *page)
{
struct zone *zone = page_zone(page);
@@ -175,6 +176,7 @@ void fastcall activate_page(struct page *page)
SetPageActive(page);
add_page_to_active_list(zone, page);
__count_vm_event(PGACTIVATE);
+ mem_cgroup_move_lists(page_get_page_cgroup(page), true);
}
spin_unlock_irq(&zone->lru_lock);
}
@@ -186,7 +188,7 @@ void fastcall activate_page(struct page *page)
* inactive,referenced -> active,unreferenced
* active,unreferenced -> active,referenced
*/
-void fastcall mark_page_accessed(struct page *page)
+void mark_page_accessed(struct page *page)
{
if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
activate_page(page);
@@ -202,7 +204,7 @@ EXPORT_SYMBOL(mark_page_accessed);
* lru_cache_add: add a page to the page lists
* @page: the page to add
*/
-void fastcall lru_cache_add(struct page *page)
+void lru_cache_add(struct page *page)
{
struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
@@ -212,7 +214,7 @@ void fastcall lru_cache_add(struct page *page)
put_cpu_var(lru_add_pvecs);
}
-void fastcall lru_cache_add_active(struct page *page)
+void lru_cache_add_active(struct page *page)
{
struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index b52635601dfe..ec42f01a8d02 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
+#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
@@ -51,26 +52,22 @@ static struct {
unsigned long del_total;
unsigned long find_success;
unsigned long find_total;
- unsigned long noent_race;
- unsigned long exist_race;
} swap_cache_info;
void show_swap_cache_info(void)
{
- printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
+ printk("Swap cache: add %lu, delete %lu, find %lu/%lu\n",
swap_cache_info.add_total, swap_cache_info.del_total,
- swap_cache_info.find_success, swap_cache_info.find_total,
- swap_cache_info.noent_race, swap_cache_info.exist_race);
+ swap_cache_info.find_success, swap_cache_info.find_total);
printk("Free swap = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
/*
- * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
+ * add_to_swap_cache resembles add_to_page_cache on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
-static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
- gfp_t gfp_mask)
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
int error;
@@ -88,6 +85,7 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
set_page_private(page, entry.val);
total_swapcache_pages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
+ INC_CACHE_INFO(add_total);
}
write_unlock_irq(&swapper_space.tree_lock);
radix_tree_preload_end();
@@ -95,31 +93,6 @@ static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
return error;
}
-static int add_to_swap_cache(struct page *page, swp_entry_t entry)
-{
- int error;
-
- BUG_ON(PageLocked(page));
- if (!swap_duplicate(entry)) {
- INC_CACHE_INFO(noent_race);
- return -ENOENT;
- }
- SetPageLocked(page);
- error = __add_to_swap_cache(page, entry, GFP_KERNEL);
- /*
- * Anon pages are already on the LRU, we don't run lru_cache_add here.
- */
- if (error) {
- ClearPageLocked(page);
- swap_free(entry);
- if (error == -EEXIST)
- INC_CACHE_INFO(exist_race);
- return error;
- }
- INC_CACHE_INFO(add_total);
- return 0;
-}
-
/*
* This must be called only on pages that have
* been verified to be in the swap cache.
@@ -152,6 +125,7 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
int err;
BUG_ON(!PageLocked(page));
+ BUG_ON(!PageUptodate(page));
for (;;) {
entry = get_swap_page();
@@ -169,18 +143,15 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
/*
* Add it to the swap cache and mark it dirty
*/
- err = __add_to_swap_cache(page, entry,
+ err = add_to_swap_cache(page, entry,
gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);
switch (err) {
case 0: /* Success */
- SetPageUptodate(page);
SetPageDirty(page);
- INC_CACHE_INFO(add_total);
return 1;
case -EEXIST:
/* Raced with "speculative" read_swap_cache_async */
- INC_CACHE_INFO(exist_race);
swap_free(entry);
continue;
default:
@@ -211,40 +182,6 @@ void delete_from_swap_cache(struct page *page)
page_cache_release(page);
}
-/*
- * Strange swizzling function only for use by shmem_writepage
- */
-int move_to_swap_cache(struct page *page, swp_entry_t entry)
-{
- int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
- if (!err) {
- remove_from_page_cache(page);
- page_cache_release(page); /* pagecache ref */
- if (!swap_duplicate(entry))
- BUG();
- SetPageDirty(page);
- INC_CACHE_INFO(add_total);
- } else if (err == -EEXIST)
- INC_CACHE_INFO(exist_race);
- return err;
-}
-
-/*
- * Strange swizzling function for shmem_getpage (and shmem_unuse)
- */
-int move_from_swap_cache(struct page *page, unsigned long index,
- struct address_space *mapping)
-{
- int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
- if (!err) {
- delete_from_swap_cache(page);
- /* shift page from clean_pages to dirty_pages list */
- ClearPageDirty(page);
- set_page_dirty(page);
- }
- return err;
-}
-
/*
* If we are the only user, then try to free up the swap cache.
*
@@ -317,7 +254,7 @@ struct page * lookup_swap_cache(swp_entry_t entry)
* A failure return means that either the page allocation failed or that
* the swap entry is no longer in use.
*/
-struct page *read_swap_cache_async(swp_entry_t entry,
+struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr)
{
struct page *found_page, *new_page = NULL;
@@ -337,23 +274,27 @@ struct page *read_swap_cache_async(swp_entry_t entry,
* Get a new page to read into from swap.
*/
if (!new_page) {
- new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
- vma, addr);
+ new_page = alloc_page_vma(gfp_mask, vma, addr);
if (!new_page)
break; /* Out of memory */
}
/*
+ * Swap entry may have been freed since our caller observed it.
+ */
+ if (!swap_duplicate(entry))
+ break;
+
+ /*
* Associate the page with swap entry in the swap cache.
- * May fail (-ENOENT) if swap entry has been freed since
- * our caller observed it. May fail (-EEXIST) if there
- * is already a page associated with this entry in the
- * swap cache: added by a racing read_swap_cache_async,
- * or by try_to_swap_out (or shmem_writepage) re-using
- * the just freed swap entry for an existing page.
+ * May fail (-EEXIST) if there is already a page associated
+ * with this entry in the swap cache: added by a racing
+ * read_swap_cache_async, or add_to_swap or shmem_writepage
+ * re-using the just freed swap entry for an existing page.
* May fail (-ENOMEM) if radix-tree node allocation failed.
*/
- err = add_to_swap_cache(new_page, entry);
+ SetPageLocked(new_page);
+ err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
if (!err) {
/*
* Initiate read into locked page and return.
@@ -362,9 +303,57 @@ struct page *read_swap_cache_async(swp_entry_t entry,
swap_readpage(NULL, new_page);
return new_page;
}
- } while (err != -ENOENT && err != -ENOMEM);
+ ClearPageLocked(new_page);
+ swap_free(entry);
+ } while (err != -ENOMEM);
if (new_page)
page_cache_release(new_page);
return found_page;
}
+
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @vma: user vma this address belongs to
+ * @addr: target address for mempolicy
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * Primitive swap readahead code. We simply read an aligned block of
+ * (1 << page_cluster) entries in the swap area. This method is chosen
+ * because it doesn't cost us any seek time. We also make sure to queue
+ * the 'original' request together with the readahead ones...
+ *
+ * This has been extended to use the NUMA policies from the mm triggering
+ * the readahead.
+ *
+ * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ */
+struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ int nr_pages;
+ struct page *page;
+ unsigned long offset;
+ unsigned long end_offset;
+
+ /*
+ * Get starting offset for readaround, and number of pages to read.
+ * Adjust starting address by readbehind (for NUMA interleave case)?
+ * No, it's very unlikely that swap layout would follow vma layout,
+ * more likely that neighbouring swap pages came from the same node:
+ * so use the same "addr" to choose the same node for each swap read.
+ */
+ nr_pages = valid_swaphandles(entry, &offset);
+ for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
+ /* Ok, do the async read-ahead now */
+ page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
+ gfp_mask, vma, addr);
+ if (!page)
+ break;
+ page_cache_release(page);
+ }
+ lru_add_drain(); /* Push any new pages onto the LRU now */
+ return read_swap_cache_async(entry, gfp_mask, vma, addr);
+}
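A hedged sketch of how a fault path might use the two functions above: check the swap cache first, then let swapin_readahead() queue the surrounding cluster and return the target page. This is not a copy of do_swap_page(); the toy_* name and the error handling are placeholders.

static struct page *toy_swapin(swp_entry_t entry,
			       struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page = lookup_swap_cache(entry);

	if (!page)
		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
					vma, addr);
	return page;	/* NULL: allocation failed or entry already freed */
}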
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f071648e1360..02ccab5ad9d9 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -27,6 +27,7 @@
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
+#include <linux/memcontrol.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -506,9 +507,24 @@ unsigned int count_swap_pages(int type, int free)
* just let do_wp_page work it out if a write is requested later - to
* force COW, vm_page_prot omits write permission from any private vma.
*/
-static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
+static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, swp_entry_t entry, struct page *page)
{
+ spinlock_t *ptl;
+ pte_t *pte;
+ int ret = 1;
+
+ if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
+ ret = -ENOMEM;
+
+ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+ if (ret > 0)
+ mem_cgroup_uncharge_page(page);
+ ret = 0;
+ goto out;
+ }
+
inc_mm_counter(vma->vm_mm, anon_rss);
get_page(page);
set_pte_at(vma->vm_mm, addr, pte,
@@ -520,6 +536,9 @@ static void unuse_pte(struct vm_area_struct *vma, pte_t *pte,
* immediately swapped out again after swapon.
*/
activate_page(page);
+out:
+ pte_unmap_unlock(pte, ptl);
+ return ret;
}
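The rewritten unuse_pte() follows a charge-then-recheck pattern: do the work that may sleep before taking the pte lock, then verify the pte still matches the swap entry and undo on a race. A stripped-down skeleton follows (hypothetical toy_* name; the charge and install steps are elided to comments).

static int toy_replace_swap_pte(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, swp_entry_t entry)
{
	spinlock_t *ptl;
	pte_t *pte;
	int ret = 1;

	/* sleepable work (e.g. a memory-controller charge) goes here */

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!pte_same(*pte, swp_entry_to_pte(entry)))
		ret = 0;	/* raced: undo the charge, report "not found" */
	/* else: install the new pte while still holding the lock */
	pte_unmap_unlock(pte, ptl);
	return ret;
}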
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
@@ -528,23 +547,34 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
{
pte_t swp_pte = swp_entry_to_pte(entry);
pte_t *pte;
- spinlock_t *ptl;
- int found = 0;
+ int ret = 0;
- pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ /*
+ * We don't actually need pte lock while scanning for swp_pte: since
+ * we hold page lock and mmap_sem, swp_pte cannot be inserted into the
+ * page table while we're scanning; though it could get zapped, and on
+ * some architectures (e.g. x86_32 with PAE) we might catch a glimpse
+ * of unmatched parts which look like swp_pte, so unuse_pte must
+ * recheck under pte lock. Scanning without pte lock lets it be
+ * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE.
+ */
+ pte = pte_offset_map(pmd, addr);
do {
/*
* swapoff spends a _lot_ of time in this loop!
* Test inline before going to call unuse_pte.
*/
if (unlikely(pte_same(*pte, swp_pte))) {
- unuse_pte(vma, pte++, addr, entry, page);
- found = 1;
- break;
+ pte_unmap(pte);
+ ret = unuse_pte(vma, pmd, addr, entry, page);
+ if (ret)
+ goto out;
+ pte = pte_offset_map(pmd, addr);
}
} while (pte++, addr += PAGE_SIZE, addr != end);
- pte_unmap_unlock(pte - 1, ptl);
- return found;
+ pte_unmap(pte - 1);
+out:
+ return ret;
}
static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
@@ -553,14 +583,16 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
{
pmd_t *pmd;
unsigned long next;
+ int ret;
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
if (pmd_none_or_clear_bad(pmd))
continue;
- if (unuse_pte_range(vma, pmd, addr, next, entry, page))
- return 1;
+ ret = unuse_pte_range(vma, pmd, addr, next, entry, page);
+ if (ret)
+ return ret;
} while (pmd++, addr = next, addr != end);
return 0;
}
@@ -571,14 +603,16 @@ static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
{
pud_t *pud;
unsigned long next;
+ int ret;
pud = pud_offset(pgd, addr);
do {
next = pud_addr_end(addr, end);
if (pud_none_or_clear_bad(pud))
continue;
- if (unuse_pmd_range(vma, pud, addr, next, entry, page))
- return 1;
+ ret = unuse_pmd_range(vma, pud, addr, next, entry, page);
+ if (ret)
+ return ret;
} while (pud++, addr = next, addr != end);
return 0;
}
@@ -588,6 +622,7 @@ static int unuse_vma(struct vm_area_struct *vma,
{
pgd_t *pgd;
unsigned long addr, end, next;
+ int ret;
if (page->mapping) {
addr = page_address_in_vma(page, vma);
@@ -605,8 +640,9 @@ static int unuse_vma(struct vm_area_struct *vma,
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
continue;
- if (unuse_pud_range(vma, pgd, addr, next, entry, page))
- return 1;
+ ret = unuse_pud_range(vma, pgd, addr, next, entry, page);
+ if (ret)
+ return ret;
} while (pgd++, addr = next, addr != end);
return 0;
}
@@ -615,6 +651,7 @@ static int unuse_mm(struct mm_struct *mm,
swp_entry_t entry, struct page *page)
{
struct vm_area_struct *vma;
+ int ret = 0;
if (!down_read_trylock(&mm->mmap_sem)) {
/*
@@ -627,15 +664,11 @@ static int unuse_mm(struct mm_struct *mm,
lock_page(page);
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (vma->anon_vma && unuse_vma(vma, entry, page))
+ if (vma->anon_vma && (ret = unuse_vma(vma, entry, page)))
break;
}
up_read(&mm->mmap_sem);
- /*
- * Currently unuse_mm cannot fail, but leave error handling
- * at call sites for now, since we change it from time to time.
- */
- return 0;
+ return (ret < 0)? ret: 0;
}
/*
@@ -730,7 +763,8 @@ static int try_to_unuse(unsigned int type)
*/
swap_map = &si->swap_map[i];
entry = swp_entry(type, i);
- page = read_swap_cache_async(entry, NULL, 0);
+ page = read_swap_cache_async(entry,
+ GFP_HIGHUSER_MOVABLE, NULL, 0);
if (!page) {
/*
* Either swap_duplicate() failed because entry
@@ -789,7 +823,7 @@ static int try_to_unuse(unsigned int type)
atomic_inc(&new_start_mm->mm_users);
atomic_inc(&prev_mm->mm_users);
spin_lock(&mmlist_lock);
- while (*swap_map > 1 && !retval &&
+ while (*swap_map > 1 && !retval && !shmem &&
(p = p->next) != &start_mm->mmlist) {
mm = list_entry(p, struct mm_struct, mmlist);
if (!atomic_inc_not_zero(&mm->mm_users))
@@ -821,6 +855,13 @@ static int try_to_unuse(unsigned int type)
mmput(start_mm);
start_mm = new_start_mm;
}
+ if (shmem) {
+ /* page has already been unlocked and released */
+ if (shmem > 0)
+ continue;
+ retval = shmem;
+ break;
+ }
if (retval) {
unlock_page(page);
page_cache_release(page);
@@ -859,12 +900,6 @@ static int try_to_unuse(unsigned int type)
* read from disk into another page. Splitting into two
* pages would be incorrect if swap supported "shared
* private" pages, but they are handled by tmpfs files.
- *
- * Note shmem_unuse already deleted a swappage from
- * the swap cache, unless the move to filepage failed:
- * in which case it left swappage in cache, lowered its
- * swap count to pass quickly through the loops above,
- * and now we must reincrement count to try again later.
*/
if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
struct writeback_control wbc = {
@@ -875,12 +910,8 @@ static int try_to_unuse(unsigned int type)
lock_page(page);
wait_on_page_writeback(page);
}
- if (PageSwapCache(page)) {
- if (shmem)
- swap_duplicate(entry);
- else
- delete_from_swap_cache(page);
- }
+ if (PageSwapCache(page))
+ delete_from_swap_cache(page);
/*
* So we could skip searching mms once swap count went
@@ -1768,31 +1799,48 @@ get_swap_info_struct(unsigned type)
*/
int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
{
+ struct swap_info_struct *si;
int our_page_cluster = page_cluster;
- int ret = 0, i = 1 << our_page_cluster;
- unsigned long toff;
- struct swap_info_struct *swapdev = swp_type(entry) + swap_info;
+ pgoff_t target, toff;
+ pgoff_t base, end;
+ int nr_pages = 0;
if (!our_page_cluster) /* no readahead */
return 0;
- toff = (swp_offset(entry) >> our_page_cluster) << our_page_cluster;
- if (!toff) /* first page is swap header */
- toff++, i--;
- *offset = toff;
+
+ si = &swap_info[swp_type(entry)];
+ target = swp_offset(entry);
+ base = (target >> our_page_cluster) << our_page_cluster;
+ end = base + (1 << our_page_cluster);
+ if (!base) /* first page is swap header */
+ base++;
spin_lock(&swap_lock);
- do {
- /* Don't read-ahead past the end of the swap area */
- if (toff >= swapdev->max)
+ if (end > si->max) /* don't go beyond end of map */
+ end = si->max;
+
+ /* Count contiguous allocated slots above our target */
+ for (toff = target; ++toff < end; nr_pages++) {
+ /* Don't read in free or bad pages */
+ if (!si->swap_map[toff])
break;
+ if (si->swap_map[toff] == SWAP_MAP_BAD)
+ break;
+ }
+ /* Count contiguous allocated slots below our target */
+ for (toff = target; --toff >= base; nr_pages++) {
/* Don't read in free or bad pages */
- if (!swapdev->swap_map[toff])
+ if (!si->swap_map[toff])
break;
- if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
+ if (si->swap_map[toff] == SWAP_MAP_BAD)
break;
- toff++;
- ret++;
- } while (--i);
+ }
spin_unlock(&swap_lock);
- return ret;
+
+ /*
+ * Indicate starting offset, and return number of pages to get:
+ * if only 1, say 0, since there's then no readahead to be done.
+ */
+ *offset = ++toff;
+ return nr_pages? ++nr_pages: 0;
}
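A worked example of the new window computation, with illustrative numbers: with page_cluster = 3 and a target swap offset of 37, base = 32 and end = 40. If slots 35-39 are in use but slot 34 is free, the upward scan counts 38 and 39, the downward scan counts 36 and 35 and stops at 34, so *offset is set to 35 and the function returns 5 (four neighbours plus the target itself); swapin_readahead() will then read offsets 35-39.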
diff --git a/mm/tiny-shmem.c b/mm/tiny-shmem.c
index d436a9c82db7..702083638c16 100644
--- a/mm/tiny-shmem.c
+++ b/mm/tiny-shmem.c
@@ -121,18 +121,6 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
return 0;
}
-#if 0
-int shmem_mmap(struct file *file, struct vm_area_struct *vma)
-{
- file_accessed(file);
-#ifndef CONFIG_MMU
- return ramfs_nommu_mmap(file, vma);
-#else
- return 0;
-#endif
-}
-#endif /* 0 */
-
#ifndef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
unsigned long addr,
diff --git a/mm/truncate.c b/mm/truncate.c
index c3123b08ff6d..c35c49e54fb6 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -48,7 +48,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
- zero_user_page(page, partial, PAGE_CACHE_SIZE - partial, KM_USER0);
+ zero_user_segment(page, partial, PAGE_CACHE_SIZE);
if (PagePrivate(page))
do_invalidatepage(page, partial);
}
@@ -84,7 +84,7 @@ EXPORT_SYMBOL(cancel_dirty_page);
/*
* If truncate cannot remove the fs-private metadata from the page, the page
- * becomes anonymous. It will be left on the LRU and may even be mapped into
+ * becomes orphaned. It will be left on the LRU and may even be mapped into
* user pagetables if we're racing with filemap_fault().
*
* We need to bale out if page->mapping is no longer equal to the original
@@ -98,11 +98,11 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
if (page->mapping != mapping)
return;
- cancel_dirty_page(page, PAGE_CACHE_SIZE);
-
if (PagePrivate(page))
do_invalidatepage(page, 0);
+ cancel_dirty_page(page, PAGE_CACHE_SIZE);
+
remove_from_page_cache(page);
ClearPageUptodate(page);
ClearPageMappedToDisk(page);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index af77e171e339..0536dde139d1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -166,6 +166,44 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
}
EXPORT_SYMBOL_GPL(map_vm_area);
+/*
+ * Map a vmalloc()-space virtual address to the physical page.
+ */
+struct page *vmalloc_to_page(const void *vmalloc_addr)
+{
+ unsigned long addr = (unsigned long) vmalloc_addr;
+ struct page *page = NULL;
+ pgd_t *pgd = pgd_offset_k(addr);
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ if (!pgd_none(*pgd)) {
+ pud = pud_offset(pgd, addr);
+ if (!pud_none(*pud)) {
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd)) {
+ ptep = pte_offset_map(pmd, addr);
+ pte = *ptep;
+ if (pte_present(pte))
+ page = pte_page(pte);
+ pte_unmap(ptep);
+ }
+ }
+ }
+ return page;
+}
+EXPORT_SYMBOL(vmalloc_to_page);
+
+/*
+ * Map a vmalloc()-space virtual address to the physical page frame number.
+ */
+unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
+{
+ return page_to_pfn(vmalloc_to_page(vmalloc_addr));
+}
+EXPORT_SYMBOL(vmalloc_to_pfn);
+
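An illustrative (not authoritative) use of the newly exported helpers: vmalloc memory is backed by individual pages inserted into the kernel page tables, which is exactly what the walk above recovers.

static unsigned long toy_first_pfn(void)
{
	void *buf = vmalloc(PAGE_SIZE);
	unsigned long pfn;

	if (!buf)
		return 0;
	/* equivalent to page_to_pfn(vmalloc_to_page(buf)) */
	pfn = vmalloc_to_pfn(buf);
	vfree(buf);
	return pfn;
}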
static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end,
int node, gfp_t gfp_mask)
@@ -216,6 +254,10 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
if (addr > end - size)
goto out;
}
+ if ((size + addr) < addr)
+ goto out;
+ if (addr > end - size)
+ goto out;
found:
area->next = *p;
@@ -268,7 +310,7 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
}
/* Caller must hold vmlist_lock */
-static struct vm_struct *__find_vm_area(void *addr)
+static struct vm_struct *__find_vm_area(const void *addr)
{
struct vm_struct *tmp;
@@ -281,7 +323,7 @@ static struct vm_struct *__find_vm_area(void *addr)
}
/* Caller must hold vmlist_lock */
-static struct vm_struct *__remove_vm_area(void *addr)
+static struct vm_struct *__remove_vm_area(const void *addr)
{
struct vm_struct **p, *tmp;
@@ -310,7 +352,7 @@ found:
* This function returns the found VM area, but using it is NOT safe
* on SMP machines, except for its size or flags.
*/
-struct vm_struct *remove_vm_area(void *addr)
+struct vm_struct *remove_vm_area(const void *addr)
{
struct vm_struct *v;
write_lock(&vmlist_lock);
@@ -319,7 +361,7 @@ struct vm_struct *remove_vm_area(void *addr)
return v;
}
-static void __vunmap(void *addr, int deallocate_pages)
+static void __vunmap(const void *addr, int deallocate_pages)
{
struct vm_struct *area;
@@ -346,8 +388,10 @@ static void __vunmap(void *addr, int deallocate_pages)
int i;
for (i = 0; i < area->nr_pages; i++) {
- BUG_ON(!area->pages[i]);
- __free_page(area->pages[i]);
+ struct page *page = area->pages[i];
+
+ BUG_ON(!page);
+ __free_page(page);
}
if (area->flags & VM_VPAGES)
@@ -370,7 +414,7 @@ static void __vunmap(void *addr, int deallocate_pages)
*
* Must not be called in interrupt context.
*/
-void vfree(void *addr)
+void vfree(const void *addr)
{
BUG_ON(in_interrupt());
__vunmap(addr, 1);
@@ -386,7 +430,7 @@ EXPORT_SYMBOL(vfree);
*
* Must not be called in interrupt context.
*/
-void vunmap(void *addr)
+void vunmap(const void *addr)
{
BUG_ON(in_interrupt());
__vunmap(addr, 0);
@@ -423,8 +467,8 @@ void *vmap(struct page **pages, unsigned int count,
}
EXPORT_SYMBOL(vmap);
-void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
- pgprot_t prot, int node)
+static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+ pgprot_t prot, int node)
{
struct page **pages;
unsigned int nr_pages, array_size, i;
@@ -451,15 +495,19 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
}
for (i = 0; i < area->nr_pages; i++) {
+ struct page *page;
+
if (node < 0)
- area->pages[i] = alloc_page(gfp_mask);
+ page = alloc_page(gfp_mask);
else
- area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
- if (unlikely(!area->pages[i])) {
+ page = alloc_pages_node(node, gfp_mask, 0);
+
+ if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
area->nr_pages = i;
goto fail;
}
+ area->pages[i] = page;
}
if (map_vm_area(area, prot, &pages))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e5a9597e3bbc..a26dabd62fed 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -37,6 +37,7 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/memcontrol.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -68,6 +69,22 @@ struct scan_control {
int all_unreclaimable;
int order;
+
+ /*
+ * Pages that have (or should have) IO pending. If we run into
+ * a lot of these, we're better off waiting a little for IO to
+ * finish rather than scanning more pages in the VM.
+ */
+ int nr_io_pages;
+
+ /* Which cgroup do we reclaim from */
+ struct mem_cgroup *mem_cgroup;
+
+ /* Pluggable isolate pages callback */
+ unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
+ unsigned long *scanned, int order, int mode,
+ struct zone *z, struct mem_cgroup *mem_cont,
+ int active);
};
#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -109,6 +126,12 @@ long vm_total_pages; /* The total number of pages which the VM controls */
static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
+#ifdef CONFIG_CGROUP_MEM_CONT
+#define scan_global_lru(sc) (!(sc)->mem_cgroup)
+#else
+#define scan_global_lru(sc) (1)
+#endif
+
/*
* Add a shrinker callback to be called from the vm
*/
@@ -489,11 +512,13 @@ static unsigned long shrink_page_list(struct list_head *page_list,
*/
if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
wait_on_page_writeback(page);
- else
+ else {
+ sc->nr_io_pages++;
goto keep_locked;
+ }
}
- referenced = page_referenced(page, 1);
+ referenced = page_referenced(page, 1, sc->mem_cgroup);
/* In active use or really unfreeable? Activate it. */
if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
referenced && page_mapping_inuse(page))
@@ -529,8 +554,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
if (PageDirty(page)) {
if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
goto keep_locked;
- if (!may_enter_fs)
+ if (!may_enter_fs) {
+ sc->nr_io_pages++;
goto keep_locked;
+ }
if (!sc->may_writepage)
goto keep_locked;
@@ -541,8 +568,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
case PAGE_ACTIVATE:
goto activate_locked;
case PAGE_SUCCESS:
- if (PageWriteback(page) || PageDirty(page))
+ if (PageWriteback(page) || PageDirty(page)) {
+ sc->nr_io_pages++;
goto keep;
+ }
/*
* A synchronous write - probably a ramdisk. Go
* ahead and try to reclaim the page.
@@ -626,7 +655,7 @@ keep:
*
* returns 0 on success, -ve errno on failure.
*/
-static int __isolate_lru_page(struct page *page, int mode)
+int __isolate_lru_page(struct page *page, int mode)
{
int ret = -EINVAL;
@@ -760,6 +789,21 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
return nr_taken;
}
+static unsigned long isolate_pages_global(unsigned long nr,
+ struct list_head *dst,
+ unsigned long *scanned, int order,
+ int mode, struct zone *z,
+ struct mem_cgroup *mem_cont,
+ int active)
+{
+ if (active)
+ return isolate_lru_pages(nr, &z->active_list, dst,
+ scanned, order, mode);
+ else
+ return isolate_lru_pages(nr, &z->inactive_list, dst,
+ scanned, order, mode);
+}
+
/*
* clear_active_flags() is a helper for shrink_active_list(), clearing
* any active bits from the pages in the list.
@@ -801,18 +845,19 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
unsigned long nr_freed;
unsigned long nr_active;
- nr_taken = isolate_lru_pages(sc->swap_cluster_max,
- &zone->inactive_list,
+ nr_taken = sc->isolate_pages(sc->swap_cluster_max,
&page_list, &nr_scan, sc->order,
(sc->order > PAGE_ALLOC_COSTLY_ORDER)?
- ISOLATE_BOTH : ISOLATE_INACTIVE);
+ ISOLATE_BOTH : ISOLATE_INACTIVE,
+ zone, sc->mem_cgroup, 0);
nr_active = clear_active_flags(&page_list);
__count_vm_events(PGDEACTIVATE, nr_active);
__mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
__mod_zone_page_state(zone, NR_INACTIVE,
-(nr_taken - nr_active));
- zone->pages_scanned += nr_scan;
+ if (scan_global_lru(sc))
+ zone->pages_scanned += nr_scan;
spin_unlock_irq(&zone->lru_lock);
nr_scanned += nr_scan;
@@ -844,8 +889,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
if (current_is_kswapd()) {
__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
__count_vm_events(KSWAPD_STEAL, nr_freed);
- } else
+ } else if (scan_global_lru(sc))
__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
+
__count_zone_vm_events(PGSTEAL, zone, nr_freed);
if (nr_taken == 0)
@@ -899,6 +945,113 @@ static inline int zone_is_near_oom(struct zone *zone)
}
/*
+ * Determine whether we should try to reclaim mapped pages.
+ * This is called only when sc->mem_cgroup is NULL.
+ */
+static int calc_reclaim_mapped(struct scan_control *sc, struct zone *zone,
+ int priority)
+{
+ long mapped_ratio;
+ long distress;
+ long swap_tendency;
+ long imbalance;
+ int reclaim_mapped = 0;
+ int prev_priority;
+
+ if (scan_global_lru(sc) && zone_is_near_oom(zone))
+ return 1;
+ /*
+ * `distress' is a measure of how much trouble we're having
+ * reclaiming pages. 0 -> no problems. 100 -> great trouble.
+ */
+ if (scan_global_lru(sc))
+ prev_priority = zone->prev_priority;
+ else
+ prev_priority = mem_cgroup_get_reclaim_priority(sc->mem_cgroup);
+
+ distress = 100 >> min(prev_priority, priority);
+
+ /*
+ * The point of this algorithm is to decide when to start
+ * reclaiming mapped memory instead of just pagecache. Work out
+ * how much memory is mapped.
+ */
+ if (scan_global_lru(sc))
+ mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
+ global_page_state(NR_ANON_PAGES)) * 100) /
+ vm_total_pages;
+ else
+ mapped_ratio = mem_cgroup_calc_mapped_ratio(sc->mem_cgroup);
+
+ /*
+ * Now decide how much we really want to unmap some pages. The
+ * mapped ratio is downgraded - just because there's a lot of
+ * mapped memory doesn't necessarily mean that page reclaim
+ * isn't succeeding.
+ *
+ * The distress ratio is important - we don't want to start
+ * going oom.
+ *
+ * A 100% value of vm_swappiness overrides this algorithm
+ * altogether.
+ */
+ swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
+
+ /*
+ * If there's huge imbalance between active and inactive
+ * (think active 100 times larger than inactive) we should
+ * become more permissive, or the system will take too much
+ * cpu before it starts swapping during memory pressure.
+ * Distress is about avoiding early-oom, this is about
+ * making swappiness graceful despite setting it to low
+ * values.
+ *
+ * Avoid div by zero with nr_inactive+1, and max resulting
+ * value is vm_total_pages.
+ */
+ if (scan_global_lru(sc)) {
+ imbalance = zone_page_state(zone, NR_ACTIVE);
+ imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
+ } else
+ imbalance = mem_cgroup_reclaim_imbalance(sc->mem_cgroup);
+
+ /*
+ * Reduce the effect of imbalance if swappiness is low:
+ * this means that for a very low swappiness, the imbalance
+ * must be much higher than 100 for this logic to make
+ * a difference.
+ *
+ * Max temporary value is vm_total_pages*100.
+ */
+ imbalance *= (vm_swappiness + 1);
+ imbalance /= 100;
+
+ /*
+ * If not much of the RAM is mapped, make the imbalance
+ * less relevant: refilling the inactive list with mapped
+ * pages is only a high priority in the presence of a high
+ * ratio of mapped pages.
+ *
+ * Max temporary value is vm_total_pages*100.
+ */
+ imbalance *= mapped_ratio;
+ imbalance /= 100;
+
+ /* apply imbalance feedback to swap_tendency */
+ swap_tendency += imbalance;
+
+ /*
+ * Now use this metric to decide whether to start moving mapped
+ * memory onto the inactive list.
+ */
+ if (swap_tendency >= 100)
+ reclaim_mapped = 1;
+
+ return reclaim_mapped;
+}
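To make the heuristic concrete (illustrative numbers, assuming the default vm_swappiness of 60 and DEF_PRIORITY of 12): with mapped_ratio = 40 and no reclaim trouble, distress = 100 >> 12 = 0 and swap_tendency = 40/2 + 0 + 60 = 80, which stays below 100, so mapped pages are left alone. If reclaim struggles and priority drops to 1, distress becomes 100 >> 1 = 50 and swap_tendency = 20 + 50 + 60 = 130, so reclaim_mapped is set; the imbalance feedback can push it over 100 earlier.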
+
+/*
* This moves pages from the active list to the inactive list.
*
* We move them the other way if the page is referenced by one or more
@@ -915,6 +1068,8 @@ static inline int zone_is_near_oom(struct zone *zone)
* The downside is that we have to touch page->_count against each page.
* But we had to alter page->flags anyway.
*/
+
+
static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
struct scan_control *sc, int priority)
{
@@ -928,99 +1083,21 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
struct pagevec pvec;
int reclaim_mapped = 0;
- if (sc->may_swap) {
- long mapped_ratio;
- long distress;
- long swap_tendency;
- long imbalance;
-
- if (zone_is_near_oom(zone))
- goto force_reclaim_mapped;
-
- /*
- * `distress' is a measure of how much trouble we're having
- * reclaiming pages. 0 -> no problems. 100 -> great trouble.
- */
- distress = 100 >> min(zone->prev_priority, priority);
-
- /*
- * The point of this algorithm is to decide when to start
- * reclaiming mapped memory instead of just pagecache. Work out
- * how much memory
- * is mapped.
- */
- mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
- global_page_state(NR_ANON_PAGES)) * 100) /
- vm_total_pages;
-
- /*
- * Now decide how much we really want to unmap some pages. The
- * mapped ratio is downgraded - just because there's a lot of
- * mapped memory doesn't necessarily mean that page reclaim
- * isn't succeeding.
- *
- * The distress ratio is important - we don't want to start
- * going oom.
- *
- * A 100% value of vm_swappiness overrides this algorithm
- * altogether.
- */
- swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
-
- /*
- * If there's huge imbalance between active and inactive
- * (think active 100 times larger than inactive) we should
- * become more permissive, or the system will take too much
- * cpu before it start swapping during memory pressure.
- * Distress is about avoiding early-oom, this is about
- * making swappiness graceful despite setting it to low
- * values.
- *
- * Avoid div by zero with nr_inactive+1, and max resulting
- * value is vm_total_pages.
- */
- imbalance = zone_page_state(zone, NR_ACTIVE);
- imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
-
- /*
- * Reduce the effect of imbalance if swappiness is low,
- * this means for a swappiness very low, the imbalance
- * must be much higher than 100 for this logic to make
- * the difference.
- *
- * Max temporary value is vm_total_pages*100.
- */
- imbalance *= (vm_swappiness + 1);
- imbalance /= 100;
-
- /*
- * If not much of the ram is mapped, makes the imbalance
- * less relevant, it's high priority we refill the inactive
- * list with mapped pages only in presence of high ratio of
- * mapped pages.
- *
- * Max temporary value is vm_total_pages*100.
- */
- imbalance *= mapped_ratio;
- imbalance /= 100;
-
- /* apply imbalance feedback to swap_tendency */
- swap_tendency += imbalance;
-
- /*
- * Now use this metric to decide whether to start moving mapped
- * memory onto the inactive list.
- */
- if (swap_tendency >= 100)
-force_reclaim_mapped:
- reclaim_mapped = 1;
- }
+ if (sc->may_swap)
+ reclaim_mapped = calc_reclaim_mapped(sc, zone, priority);
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
- pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
- &l_hold, &pgscanned, sc->order, ISOLATE_ACTIVE);
- zone->pages_scanned += pgscanned;
+ pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
+ ISOLATE_ACTIVE, zone,
+ sc->mem_cgroup, 1);
+ /*
+ * zone->pages_scanned is used to detect a zone's OOM state;
+ * mem_cgroup remembers nr_scan by itself.
+ */
+ if (scan_global_lru(sc))
+ zone->pages_scanned += pgscanned;
+
__mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
spin_unlock_irq(&zone->lru_lock);
@@ -1031,7 +1108,7 @@ force_reclaim_mapped:
if (page_mapped(page)) {
if (!reclaim_mapped ||
(total_swap_pages == 0 && PageAnon(page)) ||
- page_referenced(page, 0)) {
+ page_referenced(page, 0, sc->mem_cgroup)) {
list_add(&page->lru, &l_active);
continue;
}
@@ -1051,6 +1128,7 @@ force_reclaim_mapped:
ClearPageActive(page);
list_move(&page->lru, &zone->inactive_list);
+ mem_cgroup_move_lists(page_get_page_cgroup(page), false);
pgmoved++;
if (!pagevec_add(&pvec, page)) {
__mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
@@ -1079,6 +1157,7 @@ force_reclaim_mapped:
SetPageLRU(page);
VM_BUG_ON(!PageActive(page));
list_move(&page->lru, &zone->active_list);
+ mem_cgroup_move_lists(page_get_page_cgroup(page), true);
pgmoved++;
if (!pagevec_add(&pvec, page)) {
__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
@@ -1108,25 +1187,39 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
unsigned long nr_to_scan;
unsigned long nr_reclaimed = 0;
- /*
- * Add one to `nr_to_scan' just to make sure that the kernel will
- * slowly sift through the active list.
- */
- zone->nr_scan_active +=
- (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
- nr_active = zone->nr_scan_active;
- if (nr_active >= sc->swap_cluster_max)
- zone->nr_scan_active = 0;
- else
- nr_active = 0;
+ if (scan_global_lru(sc)) {
+ /*
+ * Add one to nr_to_scan just to make sure that the kernel
+ * will slowly sift through the active list.
+ */
+ zone->nr_scan_active +=
+ (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
+ nr_active = zone->nr_scan_active;
+ zone->nr_scan_inactive +=
+ (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
+ nr_inactive = zone->nr_scan_inactive;
+ if (nr_inactive >= sc->swap_cluster_max)
+ zone->nr_scan_inactive = 0;
+ else
+ nr_inactive = 0;
+
+ if (nr_active >= sc->swap_cluster_max)
+ zone->nr_scan_active = 0;
+ else
+ nr_active = 0;
+ } else {
+ /*
+ * This reclaim occurs not because of a zone memory shortage
+ * but because the memory controller has hit its limit.
+ * So don't modify zone-reclaim-related data.
+ */
+ nr_active = mem_cgroup_calc_reclaim_active(sc->mem_cgroup,
+ zone, priority);
+
+ nr_inactive = mem_cgroup_calc_reclaim_inactive(sc->mem_cgroup,
+ zone, priority);
+ }
- zone->nr_scan_inactive +=
- (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
- nr_inactive = zone->nr_scan_inactive;
- if (nr_inactive >= sc->swap_cluster_max)
- zone->nr_scan_inactive = 0;
- else
- nr_inactive = 0;
while (nr_active || nr_inactive) {
if (nr_active) {
@@ -1171,25 +1264,39 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
unsigned long nr_reclaimed = 0;
int i;
+
sc->all_unreclaimable = 1;
for (i = 0; zones[i] != NULL; i++) {
struct zone *zone = zones[i];
if (!populated_zone(zone))
continue;
+ /*
+ * Take care that memory-controller reclaim has only a small
+ * influence on the global LRU.
+ */
+ if (scan_global_lru(sc)) {
+ if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+ continue;
+ note_zone_scanning_priority(zone, priority);
- if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
- continue;
-
- note_zone_scanning_priority(zone, priority);
-
- if (zone_is_all_unreclaimable(zone) && priority != DEF_PRIORITY)
- continue; /* Let kswapd poll it */
-
- sc->all_unreclaimable = 0;
+ if (zone_is_all_unreclaimable(zone) &&
+ priority != DEF_PRIORITY)
+ continue; /* Let kswapd poll it */
+ sc->all_unreclaimable = 0;
+ } else {
+ /*
+ * Ignore the cpuset limitation here. We just want to reduce
+ * the number of pages used by this cgroup, regardless of
+ * memory shortage.
+ */
+ sc->all_unreclaimable = 0;
+ mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
+ priority);
+ }
nr_reclaimed += shrink_zone(priority, zone, sc);
}
+
return nr_reclaimed;
}
@@ -1206,7 +1313,8 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
* holds filesystem locks which prevent writeout this might not work, and the
* allocation attempt will fail.
*/
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
+ struct scan_control *sc)
{
int priority;
int ret = 0;
@@ -1215,39 +1323,43 @@ unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
int i;
- struct scan_control sc = {
- .gfp_mask = gfp_mask,
- .may_writepage = !laptop_mode,
- .swap_cluster_max = SWAP_CLUSTER_MAX,
- .may_swap = 1,
- .swappiness = vm_swappiness,
- .order = order,
- };
-
- count_vm_event(ALLOCSTALL);
- for (i = 0; zones[i] != NULL; i++) {
- struct zone *zone = zones[i];
+ if (scan_global_lru(sc))
+ count_vm_event(ALLOCSTALL);
+ /*
+ * mem_cgroup will not do shrink_slab.
+ */
+ if (scan_global_lru(sc)) {
+ for (i = 0; zones[i] != NULL; i++) {
+ struct zone *zone = zones[i];
- if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
- continue;
+ if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+ continue;
- lru_pages += zone_page_state(zone, NR_ACTIVE)
- + zone_page_state(zone, NR_INACTIVE);
+ lru_pages += zone_page_state(zone, NR_ACTIVE)
+ + zone_page_state(zone, NR_INACTIVE);
+ }
}
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
- sc.nr_scanned = 0;
+ sc->nr_scanned = 0;
+ sc->nr_io_pages = 0;
if (!priority)
disable_swap_token();
- nr_reclaimed += shrink_zones(priority, zones, &sc);
- shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
- if (reclaim_state) {
- nr_reclaimed += reclaim_state->reclaimed_slab;
- reclaim_state->reclaimed_slab = 0;
+ nr_reclaimed += shrink_zones(priority, zones, sc);
+ /*
+ * Don't shrink slabs when reclaiming memory from
+ * over-limit cgroups.
+ */
+ if (scan_global_lru(sc)) {
+ shrink_slab(sc->nr_scanned, gfp_mask, lru_pages);
+ if (reclaim_state) {
+ nr_reclaimed += reclaim_state->reclaimed_slab;
+ reclaim_state->reclaimed_slab = 0;
+ }
}
- total_scanned += sc.nr_scanned;
- if (nr_reclaimed >= sc.swap_cluster_max) {
+ total_scanned += sc->nr_scanned;
+ if (nr_reclaimed >= sc->swap_cluster_max) {
ret = 1;
goto out;
}
@@ -1259,18 +1371,19 @@ unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
* that's undesirable in laptop mode, where we *want* lumpy
* writeout. So in laptop mode, write out the whole world.
*/
- if (total_scanned > sc.swap_cluster_max +
- sc.swap_cluster_max / 2) {
+ if (total_scanned > sc->swap_cluster_max +
+ sc->swap_cluster_max / 2) {
wakeup_pdflush(laptop_mode ? 0 : total_scanned);
- sc.may_writepage = 1;
+ sc->may_writepage = 1;
}
/* Take a nap, wait for some writeback to complete */
- if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
+ if (sc->nr_scanned && priority < DEF_PRIORITY - 2 &&
+ sc->nr_io_pages > sc->swap_cluster_max)
congestion_wait(WRITE, HZ/10);
}
/* top priority shrink_caches still had more to do? don't OOM, then */
- if (!sc.all_unreclaimable)
+ if (!sc->all_unreclaimable && scan_global_lru(sc))
ret = 1;
out:
/*
@@ -1282,17 +1395,63 @@ out:
*/
if (priority < 0)
priority = 0;
- for (i = 0; zones[i] != NULL; i++) {
- struct zone *zone = zones[i];
- if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
- continue;
+ if (scan_global_lru(sc)) {
+ for (i = 0; zones[i] != NULL; i++) {
+ struct zone *zone = zones[i];
+
+ if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+ continue;
+
+ zone->prev_priority = priority;
+ }
+ } else
+ mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
- zone->prev_priority = priority;
- }
return ret;
}
+unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+{
+ struct scan_control sc = {
+ .gfp_mask = gfp_mask,
+ .may_writepage = !laptop_mode,
+ .swap_cluster_max = SWAP_CLUSTER_MAX,
+ .may_swap = 1,
+ .swappiness = vm_swappiness,
+ .order = order,
+ .mem_cgroup = NULL,
+ .isolate_pages = isolate_pages_global,
+ };
+
+ return do_try_to_free_pages(zones, gfp_mask, &sc);
+}
+
+#ifdef CONFIG_CGROUP_MEM_CONT
+
+unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
+ gfp_t gfp_mask)
+{
+ struct scan_control sc = {
+ .gfp_mask = gfp_mask,
+ .may_writepage = !laptop_mode,
+ .may_swap = 1,
+ .swap_cluster_max = SWAP_CLUSTER_MAX,
+ .swappiness = vm_swappiness,
+ .order = 0,
+ .mem_cgroup = mem_cont,
+ .isolate_pages = mem_cgroup_isolate_pages,
+ };
+ struct zone **zones;
+ int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
+
+ zones = NODE_DATA(numa_node_id())->node_zonelists[target_zone].zones;
+ if (do_try_to_free_pages(zones, sc.gfp_mask, &sc))
+ return 1;
+ return 0;
+}
+#endif
+
/*
* For kswapd, balance_pgdat() will work across all this node's zones until
* they are all at pages_high.
@@ -1328,6 +1487,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
.swap_cluster_max = SWAP_CLUSTER_MAX,
.swappiness = vm_swappiness,
.order = order,
+ .mem_cgroup = NULL,
+ .isolate_pages = isolate_pages_global,
};
/*
* temp_priority is used to remember the scanning priority at which
@@ -1352,6 +1513,7 @@ loop_again:
if (!priority)
disable_swap_token();
+ sc.nr_io_pages = 0;
all_zones_ok = 1;
/*
@@ -1444,7 +1606,8 @@ loop_again:
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
- if (total_scanned && priority < DEF_PRIORITY - 2)
+ if (total_scanned && priority < DEF_PRIORITY - 2 &&
+ sc.nr_io_pages > sc.swap_cluster_max)
congestion_wait(WRITE, HZ/10);
/*
@@ -1649,6 +1812,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
.swap_cluster_max = nr_pages,
.may_writepage = 1,
.swappiness = vm_swappiness,
+ .isolate_pages = isolate_pages_global,
};
current->reclaim_state = &reclaim_state;
@@ -1834,6 +1998,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
SWAP_CLUSTER_MAX),
.gfp_mask = gfp_mask,
.swappiness = vm_swappiness,
+ .isolate_pages = isolate_pages_global,
};
unsigned long slab_reclaimable;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index e8d846f57774..422d960ffcd8 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -21,21 +21,14 @@ EXPORT_PER_CPU_SYMBOL(vm_event_states);
static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
- int cpu = 0;
+ int cpu;
int i;
memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
- cpu = first_cpu(*cpumask);
- while (cpu < NR_CPUS) {
+ for_each_cpu_mask(cpu, *cpumask) {
struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
- cpu = next_cpu(cpu, *cpumask);
-
- if (cpu < NR_CPUS)
- prefetch(&per_cpu(vm_event_states, cpu));
-
-
for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
ret[i] += this->event[i];
}
@@ -284,6 +277,10 @@ EXPORT_SYMBOL(dec_zone_page_state);
/*
* Update the zone counters for one cpu.
*
+ * The cpu specified must be either the current cpu or a processor that
+ * is not online. If it is the current cpu then the execution thread must
+ * be pinned to the current cpu.
+ *
* Note that refresh_cpu_vm_stats strives to only access
* node local memory. The per cpu pagesets on remote zones are placed
* in the memory local to the processor using that pageset. So the
@@ -299,7 +296,7 @@ void refresh_cpu_vm_stats(int cpu)
{
struct zone *zone;
int i;
- unsigned long flags;
+ int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
for_each_zone(zone) {
struct per_cpu_pageset *p;
@@ -311,15 +308,19 @@ void refresh_cpu_vm_stats(int cpu)
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
if (p->vm_stat_diff[i]) {
+ unsigned long flags;
+ int v;
+
local_irq_save(flags);
- zone_page_state_add(p->vm_stat_diff[i],
- zone, i);
+ v = p->vm_stat_diff[i];
p->vm_stat_diff[i] = 0;
+ local_irq_restore(flags);
+ atomic_long_add(v, &zone->vm_stat[i]);
+ global_diff[i] += v;
#ifdef CONFIG_NUMA
/* 3 seconds idle till flush */
p->expire = 3;
#endif
- local_irq_restore(flags);
}
#ifdef CONFIG_NUMA
/*
@@ -329,7 +330,7 @@ void refresh_cpu_vm_stats(int cpu)
* Check if there are pages remaining in this pageset
* if not then there is nothing to expire.
*/
- if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
+ if (!p->expire || !p->pcp.count)
continue;
/*
@@ -344,13 +345,14 @@ void refresh_cpu_vm_stats(int cpu)
if (p->expire)
continue;
- if (p->pcp[0].count)
- drain_zone_pages(zone, p->pcp + 0);
-
- if (p->pcp[1].count)
- drain_zone_pages(zone, p->pcp + 1);
+ if (p->pcp.count)
+ drain_zone_pages(zone, &p->pcp);
#endif
}
+
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ if (global_diff[i])
+ atomic_long_add(global_diff[i], &vm_stat[i]);
}
#endif
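The point of the global_diff[] accumulation above, stated briefly: the old code updated both the zone counter and the shared global vm_stat[] counter inside every irq-disabled window, once per non-zero per-cpu delta per zone. The new code disables interrupts only long enough to snapshot and clear the per-cpu delta, folds it into the zone counter with one atomic add, and applies the global part just once per item after the zone loop, so updates to the shared array are bounded by NR_VM_ZONE_STAT_ITEMS per call rather than growing with the number of zones.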
@@ -681,20 +683,17 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
"\n pagesets");
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
- int j;
pageset = zone_pcp(zone, i);
- for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
- seq_printf(m,
- "\n cpu: %i pcp: %i"
- "\n count: %i"
- "\n high: %i"
- "\n batch: %i",
- i, j,
- pageset->pcp[j].count,
- pageset->pcp[j].high,
- pageset->pcp[j].batch);
- }
+ seq_printf(m,
+ "\n cpu: %i"
+ "\n count: %i"
+ "\n high: %i"
+ "\n batch: %i",
+ i,
+ pageset->pcp.count,
+ pageset->pcp.high,
+ pageset->pcp.batch);
#ifdef CONFIG_SMP
seq_printf(m, "\n vm stats threshold: %d",
pageset->stat_threshold);
diff --git a/net/9p/Makefile b/net/9p/Makefile
index d3abb246ccab..8a1051101898 100644
--- a/net/9p/Makefile
+++ b/net/9p/Makefile
@@ -4,7 +4,6 @@ obj-$(CONFIG_NET_9P_VIRTIO) += 9pnet_virtio.o
9pnet-objs := \
mod.o \
- mux.o \
client.o \
conv.o \
error.o \
diff --git a/net/9p/client.c b/net/9p/client.c
index af9199364049..84e087e24146 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -3,6 +3,7 @@
*
* 9P Client
*
+ * Copyright (C) 2008 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
*
* This program is free software; you can redistribute it and/or modify
@@ -25,6 +26,7 @@
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/sched.h>
@@ -32,15 +34,97 @@
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <net/9p/transport.h>
-#include <net/9p/conn.h>
#include <net/9p/client.h>
static struct p9_fid *p9_fid_create(struct p9_client *clnt);
static void p9_fid_destroy(struct p9_fid *fid);
static struct p9_stat *p9_clone_stat(struct p9_stat *st, int dotu);
-struct p9_client *p9_client_create(struct p9_trans *trans, int msize,
- int dotu)
+/*
+ * Client Option Parsing (code inspired by NFS code)
+ * - a little lazy - parse all client options
+ */
+
+enum {
+ Opt_msize,
+ Opt_trans,
+ Opt_legacy,
+ Opt_err,
+};
+
+static match_table_t tokens = {
+ {Opt_msize, "msize=%u"},
+ {Opt_legacy, "noextend"},
+ {Opt_trans, "trans=%s"},
+ {Opt_err, NULL},
+};
+
+/**
+ * parse_opts - parse 9P client mount options into the client structure
+ * @options: options string passed from mount
+ * @clnt: existing p9_client to fill in
+ *
+ */
+
+static void parse_opts(char *options, struct p9_client *clnt)
+{
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ int option;
+ int ret;
+
+ clnt->trans_mod = v9fs_default_trans();
+ clnt->dotu = 1;
+ clnt->msize = 8192;
+
+ if (!options)
+ return;
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token;
+ if (!*p)
+ continue;
+ token = match_token(p, tokens, args);
+ if (token < Opt_trans) {
+ ret = match_int(&args[0], &option);
+ if (ret < 0) {
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "integer field, but no integer?\n");
+ continue;
+ }
+ }
+ switch (token) {
+ case Opt_msize:
+ clnt->msize = option;
+ break;
+ case Opt_trans:
+ clnt->trans_mod = v9fs_match_trans(&args[0]);
+ break;
+ case Opt_legacy:
+ clnt->dotu = 0;
+ break;
+ default:
+ continue;
+ }
+ }
+}
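
parse_opts() above is the standard strsep()/match_token() loop. A small userspace sketch of the same idea, with a hand-rolled token match standing in for the kernel's match_table_t machinery and defaults mirroring the ones set above (msize=8192, dotu=1):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct client_opts {
        int msize;
        int dotu;
        char trans[16];
    };

    /* userspace stand-in for the strsep()/match_token() loop in parse_opts() */
    static void parse_opts(char *options, struct client_opts *c)
    {
        char *p;

        /* defaults, as in the code above */
        c->msize = 8192;
        c->dotu = 1;
        strcpy(c->trans, "fd");

        if (!options)
            return;

        while ((p = strsep(&options, ",")) != NULL) {
            if (!*p)
                continue;
            if (strncmp(p, "msize=", 6) == 0)
                c->msize = atoi(p + 6);
            else if (strncmp(p, "trans=", 6) == 0)
                snprintf(c->trans, sizeof(c->trans), "%s", p + 6);
            else if (strcmp(p, "noextend") == 0)
                c->dotu = 0;
            /* unknown options are silently skipped, as above */
        }
    }

    int main(void)
    {
        char opts[] = "msize=16384,noextend,trans=tcp";
        struct client_opts c;

        parse_opts(opts, &c);
        printf("msize=%d dotu=%d trans=%s\n", c.msize, c.dotu, c.trans);
        return 0;
    }
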
+
+
+/**
+ * p9_client_rpc - sends a 9P request and waits until a response is available.
+ * The function can be interrupted.
+ * @c: client data
+ * @tc: request to be sent
+ * @rc: pointer where a pointer to the response is stored
+ */
+int
+p9_client_rpc(struct p9_client *c, struct p9_fcall *tc,
+ struct p9_fcall **rc)
+{
+ return c->trans->rpc(c->trans, tc, rc);
+}
+
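
With the mux gone from the client, p9_client_rpc() is just an indirection through the transport's rpc callback. A tiny sketch of that dispatch shape, with hypothetical, trimmed-down types (the real struct p9_trans carries much more state):

    #include <stdio.h>

    struct fcall { const char *name; };

    /* hypothetical transport: the real struct p9_trans also carries
     * status, msize, extended, priv, close(), etc. */
    struct trans {
        int (*rpc)(struct trans *t, struct fcall *tc, struct fcall **rc);
    };

    struct client {
        struct trans *trans;
    };

    static int client_rpc(struct client *c, struct fcall *tc, struct fcall **rc)
    {
        /* same shape as p9_client_rpc(): defer entirely to the transport */
        return c->trans->rpc(c->trans, tc, rc);
    }

    static int fake_rpc(struct trans *t, struct fcall *tc, struct fcall **rc)
    {
        static struct fcall reply = { "Rversion" };

        printf("sent %s\n", tc->name);
        *rc = &reply;
        return 0;
    }

    int main(void)
    {
        struct trans t = { .rpc = fake_rpc };
        struct client c = { .trans = &t };
        struct fcall tc = { "Tversion" };
        struct fcall *rc;

        client_rpc(&c, &tc, &rc);
        printf("got %s\n", rc->name);
        return 0;
    }
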
+struct p9_client *p9_client_create(const char *dev_name, char *options)
{
int err, n;
struct p9_client *clnt;
@@ -54,12 +138,7 @@ struct p9_client *p9_client_create(struct p9_trans *trans, int msize,
if (!clnt)
return ERR_PTR(-ENOMEM);
- P9_DPRINTK(P9_DEBUG_9P, "clnt %p trans %p msize %d dotu %d\n",
- clnt, trans, msize, dotu);
spin_lock_init(&clnt->lock);
- clnt->trans = trans;
- clnt->msize = msize;
- clnt->dotu = dotu;
INIT_LIST_HEAD(&clnt->fidlist);
clnt->fidpool = p9_idpool_create();
if (!clnt->fidpool) {
@@ -68,13 +147,29 @@ struct p9_client *p9_client_create(struct p9_trans *trans, int msize,
goto error;
}
- clnt->conn = p9_conn_create(clnt->trans, clnt->msize, &clnt->dotu);
- if (IS_ERR(clnt->conn)) {
- err = PTR_ERR(clnt->conn);
- clnt->conn = NULL;
+ parse_opts(options, clnt);
+ if (clnt->trans_mod == NULL) {
+ err = -EPROTONOSUPPORT;
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "No transport defined or default transport\n");
goto error;
}
+ P9_DPRINTK(P9_DEBUG_9P, "clnt %p trans %p msize %d dotu %d\n",
+ clnt, clnt->trans_mod, clnt->msize, clnt->dotu);
+
+
+ clnt->trans = clnt->trans_mod->create(dev_name, options, clnt->msize,
+ clnt->dotu);
+ if (IS_ERR(clnt->trans)) {
+ err = PTR_ERR(clnt->trans);
+ clnt->trans = NULL;
+ goto error;
+ }
+
+ if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
+ clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
+
tc = p9_create_tversion(clnt->msize, clnt->dotu?"9P2000.u":"9P2000");
if (IS_ERR(tc)) {
err = PTR_ERR(tc);
@@ -82,7 +177,7 @@ struct p9_client *p9_client_create(struct p9_trans *trans, int msize,
goto error;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto error;
@@ -117,10 +212,6 @@ void p9_client_destroy(struct p9_client *clnt)
struct p9_fid *fid, *fidptr;
P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
- if (clnt->conn) {
- p9_conn_destroy(clnt->conn);
- clnt->conn = NULL;
- }
if (clnt->trans) {
clnt->trans->close(clnt->trans);
@@ -142,7 +233,6 @@ void p9_client_disconnect(struct p9_client *clnt)
{
P9_DPRINTK(P9_DEBUG_9P, "clnt %p\n", clnt);
clnt->trans->status = Disconnected;
- p9_conn_cancel(clnt->conn, -EIO);
}
EXPORT_SYMBOL(p9_client_disconnect);
@@ -174,7 +264,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
goto error;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto error;
@@ -219,7 +309,7 @@ struct p9_fid *p9_client_auth(struct p9_client *clnt, char *uname,
goto error;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto error;
@@ -270,7 +360,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
goto error;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err) {
if (rc && rc->id == P9_RWALK)
goto clunk_fid;
@@ -305,7 +395,7 @@ clunk_fid:
goto error;
}
- p9_conn_rpc(clnt->conn, tc, &rc);
+ p9_client_rpc(clnt, tc, &rc);
error:
kfree(tc);
@@ -339,7 +429,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
goto done;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto done;
@@ -378,7 +468,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
goto done;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto done;
@@ -411,7 +501,7 @@ int p9_client_clunk(struct p9_fid *fid)
goto done;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto done;
@@ -443,7 +533,7 @@ int p9_client_remove(struct p9_fid *fid)
goto done;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto done;
@@ -485,7 +575,7 @@ int p9_client_read(struct p9_fid *fid, char *data, u64 offset, u32 count)
goto error;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto error;
@@ -542,7 +632,7 @@ int p9_client_write(struct p9_fid *fid, char *data, u64 offset, u32 count)
goto error;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto error;
@@ -596,7 +686,7 @@ p9_client_uread(struct p9_fid *fid, char __user *data, u64 offset, u32 count)
goto error;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto error;
@@ -660,7 +750,7 @@ p9_client_uwrite(struct p9_fid *fid, const char __user *data, u64 offset,
goto error;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto error;
@@ -731,7 +821,7 @@ struct p9_stat *p9_client_stat(struct p9_fid *fid)
goto error;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto error;
@@ -773,7 +863,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
goto done;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
done:
kfree(tc);
@@ -830,7 +920,7 @@ struct p9_stat *p9_client_dirread(struct p9_fid *fid, u64 offset)
goto error;
}
- err = p9_conn_rpc(clnt->conn, tc, &rc);
+ err = p9_client_rpc(clnt, tc, &rc);
if (err)
goto error;
@@ -901,16 +991,21 @@ static struct p9_stat *p9_clone_stat(struct p9_stat *st, int dotu)
memmove(ret, st, sizeof(struct p9_stat));
p = ((char *) ret) + sizeof(struct p9_stat);
memmove(p, st->name.str, st->name.len);
+ ret->name.str = p;
p += st->name.len;
memmove(p, st->uid.str, st->uid.len);
+ ret->uid.str = p;
p += st->uid.len;
memmove(p, st->gid.str, st->gid.len);
+ ret->gid.str = p;
p += st->gid.len;
memmove(p, st->muid.str, st->muid.len);
+ ret->muid.str = p;
p += st->muid.len;
if (dotu) {
memmove(p, st->extension.str, st->extension.len);
+ ret->extension.str = p;
p += st->extension.len;
}
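
The p9_clone_stat() hunk is a real fix: the strings are copied into the tail of the new allocation, but before this change the str pointers in the returned structure still pointed into the caller's buffer. A userspace sketch of the copy-then-repoint pattern:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct str {
        int len;
        char *s;
    };

    struct stat_like {
        struct str name;
        struct str uid;
    };

    /* clone st and its strings into one allocation, then repoint the
     * string fields at the copies (the step the hunk above adds) */
    static struct stat_like *clone_stat(const struct stat_like *st)
    {
        struct stat_like *ret;
        char *p;

        ret = malloc(sizeof(*ret) + st->name.len + st->uid.len);
        if (!ret)
            return NULL;

        memcpy(ret, st, sizeof(*ret));
        p = (char *)ret + sizeof(*ret);

        memcpy(p, st->name.s, st->name.len);
        ret->name.s = p;        /* without this, ret->name.s still */
        p += st->name.len;      /* points into the old buffer      */

        memcpy(p, st->uid.s, st->uid.len);
        ret->uid.s = p;

        return ret;
    }

    int main(void)
    {
        struct stat_like st = {
            .name = { 5, "hello" },
            .uid  = { 4, "root" },
        };
        struct stat_like *c = clone_stat(&st);

        printf("%.*s %.*s\n", c->name.len, c->name.s, c->uid.len, c->uid.s);
        free(c);
        return 0;
    }
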
diff --git a/net/9p/fcprint.c b/net/9p/fcprint.c
index b1ae8ec57d54..40244fbd9b0d 100644
--- a/net/9p/fcprint.c
+++ b/net/9p/fcprint.c
@@ -347,12 +347,12 @@ p9_printfcall(char *buf, int buflen, struct p9_fcall *fc, int extended)
return ret;
}
-
#else
int
p9_printfcall(char *buf, int buflen, struct p9_fcall *fc, int extended)
{
return 0;
}
-EXPORT_SYMBOL(p9_printfcall);
#endif /* CONFIG_NET_9P_DEBUG */
+EXPORT_SYMBOL(p9_printfcall);
+
diff --git a/net/9p/mod.c b/net/9p/mod.c
index 8f9763a9dc12..c285aab2af04 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -106,15 +106,10 @@ EXPORT_SYMBOL(v9fs_default_trans);
*/
static int __init init_p9(void)
{
- int ret;
+ int ret = 0;
p9_error_init();
printk(KERN_INFO "Installing 9P2000 support\n");
- ret = p9_mux_global_init();
- if (ret) {
- printk(KERN_WARNING "9p: starting mux failed\n");
- return ret;
- }
return ret;
}
@@ -126,7 +121,7 @@ static int __init init_p9(void)
static void __exit exit_p9(void)
{
- p9_mux_global_exit();
+ printk(KERN_INFO "Unloading 9P2000 support\n");
}
module_init(init_p9)
diff --git a/net/9p/mux.c b/net/9p/mux.c
deleted file mode 100644
index c9f0805048e4..000000000000
--- a/net/9p/mux.c
+++ /dev/null
@@ -1,1060 +0,0 @@
-/*
- * net/9p/mux.c
- *
- * Protocol Multiplexer
- *
- * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
- * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to:
- * Free Software Foundation
- * 51 Franklin Street, Fifth Floor
- * Boston, MA 02111-1301 USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
-#include <linux/kthread.h>
-#include <linux/idr.h>
-#include <linux/mutex.h>
-#include <net/9p/9p.h>
-#include <linux/parser.h>
-#include <net/9p/transport.h>
-#include <net/9p/conn.h>
-
-#define ERREQFLUSH 1
-#define SCHED_TIMEOUT 10
-#define MAXPOLLWADDR 2
-
-enum {
- Rworksched = 1, /* read work scheduled or running */
- Rpending = 2, /* can read */
- Wworksched = 4, /* write work scheduled or running */
- Wpending = 8, /* can write */
-};
-
-enum {
- None,
- Flushing,
- Flushed,
-};
-
-struct p9_mux_poll_task;
-
-struct p9_req {
- spinlock_t lock; /* protect request structure */
- int tag;
- struct p9_fcall *tcall;
- struct p9_fcall *rcall;
- int err;
- p9_conn_req_callback cb;
- void *cba;
- int flush;
- struct list_head req_list;
-};
-
-struct p9_conn {
- spinlock_t lock; /* protect lock structure */
- struct list_head mux_list;
- struct p9_mux_poll_task *poll_task;
- int msize;
- unsigned char *extended;
- struct p9_trans *trans;
- struct p9_idpool *tagpool;
- int err;
- wait_queue_head_t equeue;
- struct list_head req_list;
- struct list_head unsent_req_list;
- struct p9_fcall *rcall;
- int rpos;
- char *rbuf;
- int wpos;
- int wsize;
- char *wbuf;
- wait_queue_t poll_wait[MAXPOLLWADDR];
- wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
- poll_table pt;
- struct work_struct rq;
- struct work_struct wq;
- unsigned long wsched;
-};
-
-struct p9_mux_poll_task {
- struct task_struct *task;
- struct list_head mux_list;
- int muxnum;
-};
-
-struct p9_mux_rpc {
- struct p9_conn *m;
- int err;
- struct p9_fcall *tcall;
- struct p9_fcall *rcall;
- wait_queue_head_t wqueue;
-};
-
-static int p9_poll_proc(void *);
-static void p9_read_work(struct work_struct *work);
-static void p9_write_work(struct work_struct *work);
-static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
- poll_table * p);
-static u16 p9_mux_get_tag(struct p9_conn *);
-static void p9_mux_put_tag(struct p9_conn *, u16);
-
-static DEFINE_MUTEX(p9_mux_task_lock);
-static struct workqueue_struct *p9_mux_wq;
-
-static int p9_mux_num;
-static int p9_mux_poll_task_num;
-static struct p9_mux_poll_task p9_mux_poll_tasks[100];
-
-int p9_mux_global_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++)
- p9_mux_poll_tasks[i].task = NULL;
-
- p9_mux_wq = create_workqueue("v9fs");
- if (!p9_mux_wq) {
- printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void p9_mux_global_exit(void)
-{
- destroy_workqueue(p9_mux_wq);
-}
-
-/**
- * p9_mux_calc_poll_procs - calculates the number of polling procs
- * based on the number of mounted v9fs filesystems.
- *
- * The current implementation returns sqrt of the number of mounts.
- */
-static int p9_mux_calc_poll_procs(int muxnum)
-{
- int n;
-
- if (p9_mux_poll_task_num)
- n = muxnum / p9_mux_poll_task_num +
- (muxnum % p9_mux_poll_task_num ? 1 : 0);
- else
- n = 1;
-
- if (n > ARRAY_SIZE(p9_mux_poll_tasks))
- n = ARRAY_SIZE(p9_mux_poll_tasks);
-
- return n;
-}
-
-static int p9_mux_poll_start(struct p9_conn *m)
-{
- int i, n;
- struct p9_mux_poll_task *vpt, *vptlast;
- struct task_struct *pproc;
-
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num,
- p9_mux_poll_task_num);
- mutex_lock(&p9_mux_task_lock);
-
- n = p9_mux_calc_poll_procs(p9_mux_num + 1);
- if (n > p9_mux_poll_task_num) {
- for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
- if (p9_mux_poll_tasks[i].task == NULL) {
- vpt = &p9_mux_poll_tasks[i];
- P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n",
- vpt);
- pproc = kthread_create(p9_poll_proc, vpt,
- "v9fs-poll");
-
- if (!IS_ERR(pproc)) {
- vpt->task = pproc;
- INIT_LIST_HEAD(&vpt->mux_list);
- vpt->muxnum = 0;
- p9_mux_poll_task_num++;
- wake_up_process(vpt->task);
- }
- break;
- }
- }
-
- if (i >= ARRAY_SIZE(p9_mux_poll_tasks))
- P9_DPRINTK(P9_DEBUG_ERROR,
- "warning: no free poll slots\n");
- }
-
- n = (p9_mux_num + 1) / p9_mux_poll_task_num +
- ((p9_mux_num + 1) % p9_mux_poll_task_num ? 1 : 0);
-
- vptlast = NULL;
- for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
- vpt = &p9_mux_poll_tasks[i];
- if (vpt->task != NULL) {
- vptlast = vpt;
- if (vpt->muxnum < n) {
- P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
- list_add(&m->mux_list, &vpt->mux_list);
- vpt->muxnum++;
- m->poll_task = vpt;
- memset(&m->poll_waddr, 0,
- sizeof(m->poll_waddr));
- init_poll_funcptr(&m->pt, p9_pollwait);
- break;
- }
- }
- }
-
- if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) {
- if (vptlast == NULL) {
- mutex_unlock(&p9_mux_task_lock);
- return -ENOMEM;
- }
-
- P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
- list_add(&m->mux_list, &vptlast->mux_list);
- vptlast->muxnum++;
- m->poll_task = vptlast;
- memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
- init_poll_funcptr(&m->pt, p9_pollwait);
- }
-
- p9_mux_num++;
- mutex_unlock(&p9_mux_task_lock);
-
- return 0;
-}
-
-static void p9_mux_poll_stop(struct p9_conn *m)
-{
- int i;
- struct p9_mux_poll_task *vpt;
-
- mutex_lock(&p9_mux_task_lock);
- vpt = m->poll_task;
- list_del(&m->mux_list);
- for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
- if (m->poll_waddr[i] != NULL) {
- remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
- m->poll_waddr[i] = NULL;
- }
- }
- vpt->muxnum--;
- if (!vpt->muxnum) {
- P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt);
- kthread_stop(vpt->task);
- vpt->task = NULL;
- p9_mux_poll_task_num--;
- }
- p9_mux_num--;
- mutex_unlock(&p9_mux_task_lock);
-}
-
-/**
- * p9_conn_create - allocate and initialize the per-session mux data
- * Creates the polling task if this is the first session.
- *
- * @trans - transport structure
- * @msize - maximum message size
- * @extended - pointer to the extended flag
- */
-struct p9_conn *p9_conn_create(struct p9_trans *trans, int msize,
- unsigned char *extended)
-{
- int i, n;
- struct p9_conn *m, *mtmp;
-
- P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, msize);
- m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL);
- if (!m)
- return ERR_PTR(-ENOMEM);
-
- spin_lock_init(&m->lock);
- INIT_LIST_HEAD(&m->mux_list);
- m->msize = msize;
- m->extended = extended;
- m->trans = trans;
- m->tagpool = p9_idpool_create();
- if (IS_ERR(m->tagpool)) {
- mtmp = ERR_PTR(-ENOMEM);
- kfree(m);
- return mtmp;
- }
-
- m->err = 0;
- init_waitqueue_head(&m->equeue);
- INIT_LIST_HEAD(&m->req_list);
- INIT_LIST_HEAD(&m->unsent_req_list);
- m->rcall = NULL;
- m->rpos = 0;
- m->rbuf = NULL;
- m->wpos = m->wsize = 0;
- m->wbuf = NULL;
- INIT_WORK(&m->rq, p9_read_work);
- INIT_WORK(&m->wq, p9_write_work);
- m->wsched = 0;
- memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
- m->poll_task = NULL;
- n = p9_mux_poll_start(m);
- if (n) {
- kfree(m);
- return ERR_PTR(n);
- }
-
- n = trans->poll(trans, &m->pt);
- if (n & POLLIN) {
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
- set_bit(Rpending, &m->wsched);
- }
-
- if (n & POLLOUT) {
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
- set_bit(Wpending, &m->wsched);
- }
-
- for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
- if (IS_ERR(m->poll_waddr[i])) {
- p9_mux_poll_stop(m);
- mtmp = (void *)m->poll_waddr; /* the error code */
- kfree(m);
- m = mtmp;
- break;
- }
- }
-
- return m;
-}
-EXPORT_SYMBOL(p9_conn_create);
-
-/**
- * p9_mux_destroy - cancels all pending requests and frees mux resources
- */
-void p9_conn_destroy(struct p9_conn *m)
-{
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
- m->mux_list.prev, m->mux_list.next);
- p9_conn_cancel(m, -ECONNRESET);
-
- if (!list_empty(&m->req_list)) {
- /* wait until all processes waiting on this session exit */
- P9_DPRINTK(P9_DEBUG_MUX,
- "mux %p waiting for empty request queue\n", m);
- wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m,
- list_empty(&m->req_list));
- }
-
- p9_mux_poll_stop(m);
- m->trans = NULL;
- p9_idpool_destroy(m->tagpool);
- kfree(m);
-}
-EXPORT_SYMBOL(p9_conn_destroy);
-
-/**
- * p9_pollwait - called by files poll operation to add v9fs-poll task
- * to files wait queue
- */
-static void
-p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
- poll_table * p)
-{
- int i;
- struct p9_conn *m;
-
- m = container_of(p, struct p9_conn, pt);
- for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
- if (m->poll_waddr[i] == NULL)
- break;
-
- if (i >= ARRAY_SIZE(m->poll_waddr)) {
- P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
- return;
- }
-
- m->poll_waddr[i] = wait_address;
-
- if (!wait_address) {
- P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
- m->poll_waddr[i] = ERR_PTR(-EIO);
- return;
- }
-
- init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
- add_wait_queue(wait_address, &m->poll_wait[i]);
-}
-
-/**
- * p9_poll_mux - polls a mux and schedules read or write works if necessary
- */
-static void p9_poll_mux(struct p9_conn *m)
-{
- int n;
-
- if (m->err < 0)
- return;
-
- n = m->trans->poll(m->trans, NULL);
- if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
- P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
- if (n >= 0)
- n = -ECONNRESET;
- p9_conn_cancel(m, n);
- }
-
- if (n & POLLIN) {
- set_bit(Rpending, &m->wsched);
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
- if (!test_and_set_bit(Rworksched, &m->wsched)) {
- P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
- queue_work(p9_mux_wq, &m->rq);
- }
- }
-
- if (n & POLLOUT) {
- set_bit(Wpending, &m->wsched);
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
- if ((m->wsize || !list_empty(&m->unsent_req_list))
- && !test_and_set_bit(Wworksched, &m->wsched)) {
- P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
- queue_work(p9_mux_wq, &m->wq);
- }
- }
-}
-
-/**
- * p9_poll_proc - polls all v9fs transports for new events and queues
- * the appropriate work to the work queue
- */
-static int p9_poll_proc(void *a)
-{
- struct p9_conn *m, *mtmp;
- struct p9_mux_poll_task *vpt;
-
- vpt = a;
- P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt);
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
- p9_poll_mux(m);
- }
-
- P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
- schedule_timeout(SCHED_TIMEOUT * HZ);
- }
-
- __set_current_state(TASK_RUNNING);
- P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
- return 0;
-}
-
-/**
- * p9_write_work - called when a transport can send some data
- */
-static void p9_write_work(struct work_struct *work)
-{
- int n, err;
- struct p9_conn *m;
- struct p9_req *req;
-
- m = container_of(work, struct p9_conn, wq);
-
- if (m->err < 0) {
- clear_bit(Wworksched, &m->wsched);
- return;
- }
-
- if (!m->wsize) {
- if (list_empty(&m->unsent_req_list)) {
- clear_bit(Wworksched, &m->wsched);
- return;
- }
-
- spin_lock(&m->lock);
-again:
- req = list_entry(m->unsent_req_list.next, struct p9_req,
- req_list);
- list_move_tail(&req->req_list, &m->req_list);
- if (req->err == ERREQFLUSH)
- goto again;
-
- m->wbuf = req->tcall->sdata;
- m->wsize = req->tcall->size;
- m->wpos = 0;
- spin_unlock(&m->lock);
- }
-
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
- m->wsize);
- clear_bit(Wpending, &m->wsched);
- err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
- if (err == -EAGAIN) {
- clear_bit(Wworksched, &m->wsched);
- return;
- }
-
- if (err < 0)
- goto error;
- else if (err == 0) {
- err = -EREMOTEIO;
- goto error;
- }
-
- m->wpos += err;
- if (m->wpos == m->wsize)
- m->wpos = m->wsize = 0;
-
- if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
- if (test_and_clear_bit(Wpending, &m->wsched))
- n = POLLOUT;
- else
- n = m->trans->poll(m->trans, NULL);
-
- if (n & POLLOUT) {
- P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
- queue_work(p9_mux_wq, &m->wq);
- } else
- clear_bit(Wworksched, &m->wsched);
- } else
- clear_bit(Wworksched, &m->wsched);
-
- return;
-
-error:
- p9_conn_cancel(m, err);
- clear_bit(Wworksched, &m->wsched);
-}
-
-static void process_request(struct p9_conn *m, struct p9_req *req)
-{
- int ecode;
- struct p9_str *ename;
-
- if (!req->err && req->rcall->id == P9_RERROR) {
- ecode = req->rcall->params.rerror.errno;
- ename = &req->rcall->params.rerror.error;
-
- P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
- ename->str);
-
- if (*m->extended)
- req->err = -ecode;
-
- if (!req->err) {
- req->err = p9_errstr2errno(ename->str, ename->len);
-
- if (!req->err) { /* string match failed */
- PRINT_FCALL_ERROR("unknown error", req->rcall);
- }
-
- if (!req->err)
- req->err = -ESERVERFAULT;
- }
- } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "fcall mismatch: expected %d, got %d\n",
- req->tcall->id + 1, req->rcall->id);
- if (!req->err)
- req->err = -EIO;
- }
-}
-
-/**
- * p9_read_work - called when there is some data to be read from a transport
- */
-static void p9_read_work(struct work_struct *work)
-{
- int n, err;
- struct p9_conn *m;
- struct p9_req *req, *rptr, *rreq;
- struct p9_fcall *rcall;
- char *rbuf;
-
- m = container_of(work, struct p9_conn, rq);
-
- if (m->err < 0)
- return;
-
- rcall = NULL;
- P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);
-
- if (!m->rcall) {
- m->rcall =
- kmalloc(sizeof(struct p9_fcall) + m->msize, GFP_KERNEL);
- if (!m->rcall) {
- err = -ENOMEM;
- goto error;
- }
-
- m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
- m->rpos = 0;
- }
-
- clear_bit(Rpending, &m->wsched);
- err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
- if (err == -EAGAIN) {
- clear_bit(Rworksched, &m->wsched);
- return;
- }
-
- if (err <= 0)
- goto error;
-
- m->rpos += err;
- while (m->rpos > 4) {
- n = le32_to_cpu(*(__le32 *) m->rbuf);
- if (n >= m->msize) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "requested packet size too big: %d\n", n);
- err = -EIO;
- goto error;
- }
-
- if (m->rpos < n)
- break;
-
- err =
- p9_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);
- if (err < 0) {
- goto error;
- }
-
-#ifdef CONFIG_NET_9P_DEBUG
- if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
- char buf[150];
-
- p9_printfcall(buf, sizeof(buf), m->rcall,
- *m->extended);
- printk(KERN_NOTICE ">>> %p %s\n", m, buf);
- }
-#endif
-
- rcall = m->rcall;
- rbuf = m->rbuf;
- if (m->rpos > n) {
- m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize,
- GFP_KERNEL);
- if (!m->rcall) {
- err = -ENOMEM;
- goto error;
- }
-
- m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
- memmove(m->rbuf, rbuf + n, m->rpos - n);
- m->rpos -= n;
- } else {
- m->rcall = NULL;
- m->rbuf = NULL;
- m->rpos = 0;
- }
-
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
- rcall->id, rcall->tag);
-
- req = NULL;
- spin_lock(&m->lock);
- list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
- if (rreq->tag == rcall->tag) {
- req = rreq;
- if (req->flush != Flushing)
- list_del(&req->req_list);
- break;
- }
- }
- spin_unlock(&m->lock);
-
- if (req) {
- req->rcall = rcall;
- process_request(m, req);
-
- if (req->flush != Flushing) {
- if (req->cb)
- (*req->cb) (req, req->cba);
- else
- kfree(req->rcall);
-
- wake_up(&m->equeue);
- }
- } else {
- if (err >= 0 && rcall->id != P9_RFLUSH)
- P9_DPRINTK(P9_DEBUG_ERROR,
- "unexpected response mux %p id %d tag %d\n",
- m, rcall->id, rcall->tag);
- kfree(rcall);
- }
- }
-
- if (!list_empty(&m->req_list)) {
- if (test_and_clear_bit(Rpending, &m->wsched))
- n = POLLIN;
- else
- n = m->trans->poll(m->trans, NULL);
-
- if (n & POLLIN) {
- P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
- queue_work(p9_mux_wq, &m->rq);
- } else
- clear_bit(Rworksched, &m->wsched);
- } else
- clear_bit(Rworksched, &m->wsched);
-
- return;
-
-error:
- p9_conn_cancel(m, err);
- clear_bit(Rworksched, &m->wsched);
-}
-
-/**
- * p9_send_request - send 9P request
- * The function can sleep until the request is scheduled for sending.
- * The function can be interrupted. Return from the function is not
- * a guarantee that the request is sent successfully. Can return errors
- * that can be retrieved by PTR_ERR macros.
- *
- * @m: mux data
- * @tc: request to be sent
- * @cb: callback function to call when response is received
- * @cba: parameter to pass to the callback function
- */
-static struct p9_req *p9_send_request(struct p9_conn *m,
- struct p9_fcall *tc,
- p9_conn_req_callback cb, void *cba)
-{
- int n;
- struct p9_req *req;
-
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
- tc, tc->id);
- if (m->err < 0)
- return ERR_PTR(m->err);
-
- req = kmalloc(sizeof(struct p9_req), GFP_KERNEL);
- if (!req)
- return ERR_PTR(-ENOMEM);
-
- if (tc->id == P9_TVERSION)
- n = P9_NOTAG;
- else
- n = p9_mux_get_tag(m);
-
- if (n < 0)
- return ERR_PTR(-ENOMEM);
-
- p9_set_tag(tc, n);
-
-#ifdef CONFIG_NET_9P_DEBUG
- if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
- char buf[150];
-
- p9_printfcall(buf, sizeof(buf), tc, *m->extended);
- printk(KERN_NOTICE "<<< %p %s\n", m, buf);
- }
-#endif
-
- spin_lock_init(&req->lock);
- req->tag = n;
- req->tcall = tc;
- req->rcall = NULL;
- req->err = 0;
- req->cb = cb;
- req->cba = cba;
- req->flush = None;
-
- spin_lock(&m->lock);
- list_add_tail(&req->req_list, &m->unsent_req_list);
- spin_unlock(&m->lock);
-
- if (test_and_clear_bit(Wpending, &m->wsched))
- n = POLLOUT;
- else
- n = m->trans->poll(m->trans, NULL);
-
- if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
- queue_work(p9_mux_wq, &m->wq);
-
- return req;
-}
-
-static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
-{
- p9_mux_put_tag(m, req->tag);
- kfree(req);
-}
-
-static void p9_mux_flush_cb(struct p9_req *freq, void *a)
-{
- p9_conn_req_callback cb;
- int tag;
- struct p9_conn *m;
- struct p9_req *req, *rreq, *rptr;
-
- m = a;
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
- freq->tcall, freq->rcall, freq->err,
- freq->tcall->params.tflush.oldtag);
-
- spin_lock(&m->lock);
- cb = NULL;
- tag = freq->tcall->params.tflush.oldtag;
- req = NULL;
- list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
- if (rreq->tag == tag) {
- req = rreq;
- list_del(&req->req_list);
- break;
- }
- }
- spin_unlock(&m->lock);
-
- if (req) {
- spin_lock(&req->lock);
- req->flush = Flushed;
- spin_unlock(&req->lock);
-
- if (req->cb)
- (*req->cb) (req, req->cba);
- else
- kfree(req->rcall);
-
- wake_up(&m->equeue);
- }
-
- kfree(freq->tcall);
- kfree(freq->rcall);
- p9_mux_free_request(m, freq);
-}
-
-static int
-p9_mux_flush_request(struct p9_conn *m, struct p9_req *req)
-{
- struct p9_fcall *fc;
- struct p9_req *rreq, *rptr;
-
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);
-
- /* if a response was received for a request, do nothing */
- spin_lock(&req->lock);
- if (req->rcall || req->err) {
- spin_unlock(&req->lock);
- P9_DPRINTK(P9_DEBUG_MUX,
- "mux %p req %p response already received\n", m, req);
- return 0;
- }
-
- req->flush = Flushing;
- spin_unlock(&req->lock);
-
- spin_lock(&m->lock);
- /* if the request is not sent yet, just remove it from the list */
- list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
- if (rreq->tag == req->tag) {
- P9_DPRINTK(P9_DEBUG_MUX,
- "mux %p req %p request is not sent yet\n", m, req);
- list_del(&rreq->req_list);
- req->flush = Flushed;
- spin_unlock(&m->lock);
- if (req->cb)
- (*req->cb) (req, req->cba);
- return 0;
- }
- }
- spin_unlock(&m->lock);
-
- clear_thread_flag(TIF_SIGPENDING);
- fc = p9_create_tflush(req->tag);
- p9_send_request(m, fc, p9_mux_flush_cb, m);
- return 1;
-}
-
-static void
-p9_conn_rpc_cb(struct p9_req *req, void *a)
-{
- struct p9_mux_rpc *r;
-
- P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a);
- r = a;
- r->rcall = req->rcall;
- r->err = req->err;
-
- if (req->flush != None && !req->err)
- r->err = -ERESTARTSYS;
-
- wake_up(&r->wqueue);
-}
-
-/**
- * p9_mux_rpc - sends 9P request and waits until a response is available.
- * The function can be interrupted.
- * @m: mux data
- * @tc: request to be sent
- * @rc: pointer where a pointer to the response is stored
- */
-int
-p9_conn_rpc(struct p9_conn *m, struct p9_fcall *tc,
- struct p9_fcall **rc)
-{
- int err, sigpending;
- unsigned long flags;
- struct p9_req *req;
- struct p9_mux_rpc r;
-
- r.err = 0;
- r.tcall = tc;
- r.rcall = NULL;
- r.m = m;
- init_waitqueue_head(&r.wqueue);
-
- if (rc)
- *rc = NULL;
-
- sigpending = 0;
- if (signal_pending(current)) {
- sigpending = 1;
- clear_thread_flag(TIF_SIGPENDING);
- }
-
- req = p9_send_request(m, tc, p9_conn_rpc_cb, &r);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
- return err;
- }
-
- err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
- if (r.err < 0)
- err = r.err;
-
- if (err == -ERESTARTSYS && m->trans->status == Connected
- && m->err == 0) {
- if (p9_mux_flush_request(m, req)) {
- /* wait until we get response of the flush message */
- do {
- clear_thread_flag(TIF_SIGPENDING);
- err = wait_event_interruptible(r.wqueue,
- r.rcall || r.err);
- } while (!r.rcall && !r.err && err == -ERESTARTSYS &&
- m->trans->status == Connected && !m->err);
-
- err = -ERESTARTSYS;
- }
- sigpending = 1;
- }
-
- if (sigpending) {
- spin_lock_irqsave(&current->sighand->siglock, flags);
- recalc_sigpending();
- spin_unlock_irqrestore(&current->sighand->siglock, flags);
- }
-
- if (rc)
- *rc = r.rcall;
- else
- kfree(r.rcall);
-
- p9_mux_free_request(m, req);
- if (err > 0)
- err = -EIO;
-
- return err;
-}
-EXPORT_SYMBOL(p9_conn_rpc);
-
-#ifdef P9_NONBLOCK
-/**
- * p9_conn_rpcnb - sends 9P request without waiting for response.
- * @m: mux data
- * @tc: request to be sent
- * @cb: callback function to be called when response arrives
- * @cba: value to pass to the callback function
- */
-int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
- p9_conn_req_callback cb, void *a)
-{
- int err;
- struct p9_req *req;
-
- req = p9_send_request(m, tc, cb, a);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
- return PTR_ERR(req);
- }
-
- P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
- return 0;
-}
-EXPORT_SYMBOL(p9_conn_rpcnb);
-#endif /* P9_NONBLOCK */
-
-/**
- * p9_conn_cancel - cancel all pending requests with error
- * @m: mux data
- * @err: error code
- */
-void p9_conn_cancel(struct p9_conn *m, int err)
-{
- struct p9_req *req, *rtmp;
- LIST_HEAD(cancel_list);
-
- P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
- m->err = err;
- spin_lock(&m->lock);
- list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
- list_move(&req->req_list, &cancel_list);
- }
- list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
- list_move(&req->req_list, &cancel_list);
- }
- spin_unlock(&m->lock);
-
- list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
- list_del(&req->req_list);
- if (!req->err)
- req->err = err;
-
- if (req->cb)
- (*req->cb) (req, req->cba);
- else
- kfree(req->rcall);
- }
-
- wake_up(&m->equeue);
-}
-EXPORT_SYMBOL(p9_conn_cancel);
-
-static u16 p9_mux_get_tag(struct p9_conn *m)
-{
- int tag;
-
- tag = p9_idpool_get(m->tagpool);
- if (tag < 0)
- return P9_NOTAG;
- else
- return (u16) tag;
-}
-
-static void p9_mux_put_tag(struct p9_conn *m, u16 tag)
-{
- if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool))
- p9_idpool_put(tag, m->tagpool);
-}
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 62332ed9da4a..1aa9d5175398 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
* Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
- * Copyright (C) 2004-2007 by Eric Van Hensbergen <ericvh@gmail.com>
+ * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
+#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
@@ -42,7 +43,9 @@
#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
-
+#define ERREQFLUSH 1
+#define SCHED_TIMEOUT 10
+#define MAXPOLLWADDR 2
struct p9_fd_opts {
int rfd;
@@ -53,6 +56,7 @@ struct p9_fd_opts {
struct p9_trans_fd {
struct file *rd;
struct file *wr;
+ struct p9_conn *conn;
};
/*
@@ -72,6 +76,1028 @@ static match_table_t tokens = {
{Opt_err, NULL},
};
+enum {
+ Rworksched = 1, /* read work scheduled or running */
+ Rpending = 2, /* can read */
+ Wworksched = 4, /* write work scheduled or running */
+ Wpending = 8, /* can write */
+};
+
+enum {
+ None,
+ Flushing,
+ Flushed,
+};
+
+struct p9_req;
+
+typedef void (*p9_conn_req_callback)(struct p9_req *req, void *a);
+struct p9_req {
+ spinlock_t lock; /* protect request structure */
+ int tag;
+ struct p9_fcall *tcall;
+ struct p9_fcall *rcall;
+ int err;
+ p9_conn_req_callback cb;
+ void *cba;
+ int flush;
+ struct list_head req_list;
+};
+
+struct p9_mux_poll_task;
+
+struct p9_conn {
+ spinlock_t lock; /* protect lock structure */
+ struct list_head mux_list;
+ struct p9_mux_poll_task *poll_task;
+ int msize;
+ unsigned char extended;
+ struct p9_trans *trans;
+ struct p9_idpool *tagpool;
+ int err;
+ wait_queue_head_t equeue;
+ struct list_head req_list;
+ struct list_head unsent_req_list;
+ struct p9_fcall *rcall;
+ int rpos;
+ char *rbuf;
+ int wpos;
+ int wsize;
+ char *wbuf;
+ wait_queue_t poll_wait[MAXPOLLWADDR];
+ wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
+ poll_table pt;
+ struct work_struct rq;
+ struct work_struct wq;
+ unsigned long wsched;
+};
+
+struct p9_mux_poll_task {
+ struct task_struct *task;
+ struct list_head mux_list;
+ int muxnum;
+};
+
+struct p9_mux_rpc {
+ struct p9_conn *m;
+ int err;
+ struct p9_fcall *tcall;
+ struct p9_fcall *rcall;
+ wait_queue_head_t wqueue;
+};
+
+static int p9_poll_proc(void *);
+static void p9_read_work(struct work_struct *work);
+static void p9_write_work(struct work_struct *work);
+static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address,
+ poll_table *p);
+static int p9_fd_write(struct p9_trans *trans, void *v, int len);
+static int p9_fd_read(struct p9_trans *trans, void *v, int len);
+
+static DEFINE_MUTEX(p9_mux_task_lock);
+static struct workqueue_struct *p9_mux_wq;
+
+static int p9_mux_num;
+static int p9_mux_poll_task_num;
+static struct p9_mux_poll_task p9_mux_poll_tasks[100];
+
+static void p9_conn_destroy(struct p9_conn *);
+static unsigned int p9_fd_poll(struct p9_trans *trans,
+ struct poll_table_struct *pt);
+
+#ifdef P9_NONBLOCK
+static int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
+ p9_conn_req_callback cb, void *a);
+#endif /* P9_NONBLOCK */
+
+static void p9_conn_cancel(struct p9_conn *m, int err);
+
+static int p9_mux_global_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++)
+ p9_mux_poll_tasks[i].task = NULL;
+
+ p9_mux_wq = create_workqueue("v9fs");
+ if (!p9_mux_wq) {
+ printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static u16 p9_mux_get_tag(struct p9_conn *m)
+{
+ int tag;
+
+ tag = p9_idpool_get(m->tagpool);
+ if (tag < 0)
+ return P9_NOTAG;
+ else
+ return (u16) tag;
+}
+
+static void p9_mux_put_tag(struct p9_conn *m, u16 tag)
+{
+ if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool))
+ p9_idpool_put(tag, m->tagpool);
+}
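
p9_mux_get_tag()/p9_mux_put_tag() wrap the shared idpool: hand out a small unique id per in-flight request, fall back to P9_NOTAG when the pool is exhausted, and only release tags the pool actually issued. A minimal userspace analogue with a bitmap standing in for p9_idpool:

    #include <stdio.h>

    #define POOL_SIZE 64
    #define NOTAG 0xffffU

    static unsigned long long pool_bits;    /* bit set => tag in use */

    static unsigned int get_tag(void)
    {
        int i;

        for (i = 0; i < POOL_SIZE; i++)
            if (!(pool_bits & (1ULL << i))) {
                pool_bits |= 1ULL << i;
                return (unsigned int)i;
            }
        return NOTAG;                   /* pool exhausted */
    }

    static void put_tag(unsigned int tag)
    {
        /* only release tags the pool handed out, as p9_mux_put_tag() does */
        if (tag != NOTAG && tag < POOL_SIZE && (pool_bits & (1ULL << tag)))
            pool_bits &= ~(1ULL << tag);
    }

    int main(void)
    {
        unsigned int a = get_tag(), b = get_tag();

        printf("a=%u b=%u\n", a, b);
        put_tag(a);
        printf("reused=%u\n", get_tag());   /* gets a's slot back */
        return 0;
    }
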
+
+/**
+ * p9_mux_calc_poll_procs - calculates the number of polling procs
+ * based on the number of mounted v9fs filesystems.
+ *
+ * The current implementation returns the number of mounts divided by the
+ * number of existing poll tasks, rounded up and capped at the size of
+ * the poll task array.
+ */
+static int p9_mux_calc_poll_procs(int muxnum)
+{
+ int n;
+
+ if (p9_mux_poll_task_num)
+ n = muxnum / p9_mux_poll_task_num +
+ (muxnum % p9_mux_poll_task_num ? 1 : 0);
+ else
+ n = 1;
+
+ if (n > ARRAY_SIZE(p9_mux_poll_tasks))
+ n = ARRAY_SIZE(p9_mux_poll_tasks);
+
+ return n;
+}
+
+static int p9_mux_poll_start(struct p9_conn *m)
+{
+ int i, n;
+ struct p9_mux_poll_task *vpt, *vptlast;
+ struct task_struct *pproc;
+
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num,
+ p9_mux_poll_task_num);
+ mutex_lock(&p9_mux_task_lock);
+
+ n = p9_mux_calc_poll_procs(p9_mux_num + 1);
+ if (n > p9_mux_poll_task_num) {
+ for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
+ if (p9_mux_poll_tasks[i].task == NULL) {
+ vpt = &p9_mux_poll_tasks[i];
+ P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n",
+ vpt);
+ pproc = kthread_create(p9_poll_proc, vpt,
+ "v9fs-poll");
+
+ if (!IS_ERR(pproc)) {
+ vpt->task = pproc;
+ INIT_LIST_HEAD(&vpt->mux_list);
+ vpt->muxnum = 0;
+ p9_mux_poll_task_num++;
+ wake_up_process(vpt->task);
+ }
+ break;
+ }
+ }
+
+ if (i >= ARRAY_SIZE(p9_mux_poll_tasks))
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "warning: no free poll slots\n");
+ }
+
+ n = (p9_mux_num + 1) / p9_mux_poll_task_num +
+ ((p9_mux_num + 1) % p9_mux_poll_task_num ? 1 : 0);
+
+ vptlast = NULL;
+ for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) {
+ vpt = &p9_mux_poll_tasks[i];
+ if (vpt->task != NULL) {
+ vptlast = vpt;
+ if (vpt->muxnum < n) {
+ P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
+ list_add(&m->mux_list, &vpt->mux_list);
+ vpt->muxnum++;
+ m->poll_task = vpt;
+ memset(&m->poll_waddr, 0,
+ sizeof(m->poll_waddr));
+ init_poll_funcptr(&m->pt, p9_pollwait);
+ break;
+ }
+ }
+ }
+
+ if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) {
+ if (vptlast == NULL) {
+ mutex_unlock(&p9_mux_task_lock);
+ return -ENOMEM;
+ }
+
+ P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i);
+ list_add(&m->mux_list, &vptlast->mux_list);
+ vptlast->muxnum++;
+ m->poll_task = vptlast;
+ memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
+ init_poll_funcptr(&m->pt, p9_pollwait);
+ }
+
+ p9_mux_num++;
+ mutex_unlock(&p9_mux_task_lock);
+
+ return 0;
+}
+
+static void p9_mux_poll_stop(struct p9_conn *m)
+{
+ int i;
+ struct p9_mux_poll_task *vpt;
+
+ mutex_lock(&p9_mux_task_lock);
+ vpt = m->poll_task;
+ list_del(&m->mux_list);
+ for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
+ if (m->poll_waddr[i] != NULL) {
+ remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
+ m->poll_waddr[i] = NULL;
+ }
+ }
+ vpt->muxnum--;
+ if (!vpt->muxnum) {
+ P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt);
+ kthread_stop(vpt->task);
+ vpt->task = NULL;
+ p9_mux_poll_task_num--;
+ }
+ p9_mux_num--;
+ mutex_unlock(&p9_mux_task_lock);
+}
+
+/**
+ * p9_conn_create - allocate and initialize the per-session mux data
+ * Creates the polling task if this is the first session.
+ *
+ * @trans - transport structure (supplies msize and the extended flag)
+ */
+static struct p9_conn *p9_conn_create(struct p9_trans *trans)
+{
+ int i, n;
+ struct p9_conn *m, *mtmp;
+
+ P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans,
+ trans->msize);
+ m = kmalloc(sizeof(struct p9_conn), GFP_KERNEL);
+ if (!m)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&m->lock);
+ INIT_LIST_HEAD(&m->mux_list);
+ m->msize = trans->msize;
+ m->extended = trans->extended;
+ m->trans = trans;
+ m->tagpool = p9_idpool_create();
+ if (IS_ERR(m->tagpool)) {
+ mtmp = ERR_PTR(-ENOMEM);
+ kfree(m);
+ return mtmp;
+ }
+
+ m->err = 0;
+ init_waitqueue_head(&m->equeue);
+ INIT_LIST_HEAD(&m->req_list);
+ INIT_LIST_HEAD(&m->unsent_req_list);
+ m->rcall = NULL;
+ m->rpos = 0;
+ m->rbuf = NULL;
+ m->wpos = m->wsize = 0;
+ m->wbuf = NULL;
+ INIT_WORK(&m->rq, p9_read_work);
+ INIT_WORK(&m->wq, p9_write_work);
+ m->wsched = 0;
+ memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
+ m->poll_task = NULL;
+ n = p9_mux_poll_start(m);
+ if (n) {
+ kfree(m);
+ return ERR_PTR(n);
+ }
+
+ n = p9_fd_poll(trans, &m->pt);
+ if (n & POLLIN) {
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
+ set_bit(Rpending, &m->wsched);
+ }
+
+ if (n & POLLOUT) {
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
+ set_bit(Wpending, &m->wsched);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
+ if (IS_ERR(m->poll_waddr[i])) {
+ p9_mux_poll_stop(m);
+ mtmp = (void *)m->poll_waddr; /* the error code */
+ kfree(m);
+ m = mtmp;
+ break;
+ }
+ }
+
+ return m;
+}
+
+/**
+ * p9_conn_destroy - cancels all pending requests and frees mux resources
+ */
+static void p9_conn_destroy(struct p9_conn *m)
+{
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m,
+ m->mux_list.prev, m->mux_list.next);
+ p9_conn_cancel(m, -ECONNRESET);
+
+ if (!list_empty(&m->req_list)) {
+ /* wait until all processes waiting on this session exit */
+ P9_DPRINTK(P9_DEBUG_MUX,
+ "mux %p waiting for empty request queue\n", m);
+ wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p request queue empty: %d\n", m,
+ list_empty(&m->req_list));
+ }
+
+ p9_mux_poll_stop(m);
+ m->trans = NULL;
+ p9_idpool_destroy(m->tagpool);
+ kfree(m);
+}
+
+/**
+ * p9_pollwait - called by a file's poll operation to add the v9fs-poll
+ * task to the file's wait queue
+ */
+static void
+p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
+{
+ int i;
+ struct p9_conn *m;
+
+ m = container_of(p, struct p9_conn, pt);
+ for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
+ if (m->poll_waddr[i] == NULL)
+ break;
+
+ if (i >= ARRAY_SIZE(m->poll_waddr)) {
+ P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
+ return;
+ }
+
+ m->poll_waddr[i] = wait_address;
+
+ if (!wait_address) {
+ P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
+ m->poll_waddr[i] = ERR_PTR(-EIO);
+ return;
+ }
+
+ init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
+ add_wait_queue(wait_address, &m->poll_wait[i]);
+}
+
+/**
+ * p9_poll_mux - polls a mux and schedules read or write works if necessary
+ */
+static void p9_poll_mux(struct p9_conn *m)
+{
+ int n;
+
+ if (m->err < 0)
+ return;
+
+ n = p9_fd_poll(m->trans, NULL);
+ if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
+ P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
+ if (n >= 0)
+ n = -ECONNRESET;
+ p9_conn_cancel(m, n);
+ }
+
+ if (n & POLLIN) {
+ set_bit(Rpending, &m->wsched);
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
+ if (!test_and_set_bit(Rworksched, &m->wsched)) {
+ P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
+ queue_work(p9_mux_wq, &m->rq);
+ }
+ }
+
+ if (n & POLLOUT) {
+ set_bit(Wpending, &m->wsched);
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
+ if ((m->wsize || !list_empty(&m->unsent_req_list))
+ && !test_and_set_bit(Wworksched, &m->wsched)) {
+ P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
+ queue_work(p9_mux_wq, &m->wq);
+ }
+ }
+}
+
+/**
+ * p9_poll_proc - polls all v9fs transports for new events and queues
+ * the appropriate work to the work queue
+ */
+static int p9_poll_proc(void *a)
+{
+ struct p9_conn *m, *mtmp;
+ struct p9_mux_poll_task *vpt;
+
+ vpt = a;
+ P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
+ p9_poll_mux(m);
+ }
+
+ P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n");
+ schedule_timeout(SCHED_TIMEOUT * HZ);
+ }
+
+ __set_current_state(TASK_RUNNING);
+ P9_DPRINTK(P9_DEBUG_MUX, "finish\n");
+ return 0;
+}
+
+/**
+ * p9_write_work - called when a transport can send some data
+ */
+static void p9_write_work(struct work_struct *work)
+{
+ int n, err;
+ struct p9_conn *m;
+ struct p9_req *req;
+
+ m = container_of(work, struct p9_conn, wq);
+
+ if (m->err < 0) {
+ clear_bit(Wworksched, &m->wsched);
+ return;
+ }
+
+ if (!m->wsize) {
+ if (list_empty(&m->unsent_req_list)) {
+ clear_bit(Wworksched, &m->wsched);
+ return;
+ }
+
+ spin_lock(&m->lock);
+again:
+ req = list_entry(m->unsent_req_list.next, struct p9_req,
+ req_list);
+ list_move_tail(&req->req_list, &m->req_list);
+ if (req->err == ERREQFLUSH)
+ goto again;
+
+ m->wbuf = req->tcall->sdata;
+ m->wsize = req->tcall->size;
+ m->wpos = 0;
+ spin_unlock(&m->lock);
+ }
+
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
+ m->wsize);
+ clear_bit(Wpending, &m->wsched);
+ err = p9_fd_write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
+ if (err == -EAGAIN) {
+ clear_bit(Wworksched, &m->wsched);
+ return;
+ }
+
+ if (err < 0)
+ goto error;
+ else if (err == 0) {
+ err = -EREMOTEIO;
+ goto error;
+ }
+
+ m->wpos += err;
+ if (m->wpos == m->wsize)
+ m->wpos = m->wsize = 0;
+
+ if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
+ if (test_and_clear_bit(Wpending, &m->wsched))
+ n = POLLOUT;
+ else
+ n = p9_fd_poll(m->trans, NULL);
+
+ if (n & POLLOUT) {
+ P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
+ queue_work(p9_mux_wq, &m->wq);
+ } else
+ clear_bit(Wworksched, &m->wsched);
+ } else
+ clear_bit(Wworksched, &m->wsched);
+
+ return;
+
+error:
+ p9_conn_cancel(m, err);
+ clear_bit(Wworksched, &m->wsched);
+}
+
+static void process_request(struct p9_conn *m, struct p9_req *req)
+{
+ int ecode;
+ struct p9_str *ename;
+
+ if (!req->err && req->rcall->id == P9_RERROR) {
+ ecode = req->rcall->params.rerror.errno;
+ ename = &req->rcall->params.rerror.error;
+
+ P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
+ ename->str);
+
+ if (m->extended)
+ req->err = -ecode;
+
+ if (!req->err) {
+ req->err = p9_errstr2errno(ename->str, ename->len);
+
+ /* string match failed */
+ if (!req->err) {
+ PRINT_FCALL_ERROR("unknown error", req->rcall);
+ req->err = -ESERVERFAULT;
+ }
+ }
+ } else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "fcall mismatch: expected %d, got %d\n",
+ req->tcall->id + 1, req->rcall->id);
+ if (!req->err)
+ req->err = -EIO;
+ }
+}
+
+/**
+ * p9_read_work - called when there is some data to be read from a transport
+ */
+static void p9_read_work(struct work_struct *work)
+{
+ int n, err;
+ struct p9_conn *m;
+ struct p9_req *req, *rptr, *rreq;
+ struct p9_fcall *rcall;
+ char *rbuf;
+
+ m = container_of(work, struct p9_conn, rq);
+
+ if (m->err < 0)
+ return;
+
+ rcall = NULL;
+ P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);
+
+ if (!m->rcall) {
+ m->rcall =
+ kmalloc(sizeof(struct p9_fcall) + m->msize, GFP_KERNEL);
+ if (!m->rcall) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
+ m->rpos = 0;
+ }
+
+ clear_bit(Rpending, &m->wsched);
+ err = p9_fd_read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
+ if (err == -EAGAIN) {
+ clear_bit(Rworksched, &m->wsched);
+ return;
+ }
+
+ if (err <= 0)
+ goto error;
+
+ m->rpos += err;
+ while (m->rpos > 4) {
+ n = le32_to_cpu(*(__le32 *) m->rbuf);
+ if (n >= m->msize) {
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "requested packet size too big: %d\n", n);
+ err = -EIO;
+ goto error;
+ }
+
+ if (m->rpos < n)
+ break;
+
+ err =
+ p9_deserialize_fcall(m->rbuf, n, m->rcall, m->extended);
+ if (err < 0)
+ goto error;
+
+#ifdef CONFIG_NET_9P_DEBUG
+ if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
+ char buf[150];
+
+ p9_printfcall(buf, sizeof(buf), m->rcall,
+ m->extended);
+ printk(KERN_NOTICE ">>> %p %s\n", m, buf);
+ }
+#endif
+
+ rcall = m->rcall;
+ rbuf = m->rbuf;
+ if (m->rpos > n) {
+ m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize,
+ GFP_KERNEL);
+ if (!m->rcall) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
+ memmove(m->rbuf, rbuf + n, m->rpos - n);
+ m->rpos -= n;
+ } else {
+ m->rcall = NULL;
+ m->rbuf = NULL;
+ m->rpos = 0;
+ }
+
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
+ rcall->id, rcall->tag);
+
+ req = NULL;
+ spin_lock(&m->lock);
+ list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+ if (rreq->tag == rcall->tag) {
+ req = rreq;
+ if (req->flush != Flushing)
+ list_del(&req->req_list);
+ break;
+ }
+ }
+ spin_unlock(&m->lock);
+
+ if (req) {
+ req->rcall = rcall;
+ process_request(m, req);
+
+ if (req->flush != Flushing) {
+ if (req->cb)
+ (*req->cb) (req, req->cba);
+ else
+ kfree(req->rcall);
+
+ wake_up(&m->equeue);
+ }
+ } else {
+ if (err >= 0 && rcall->id != P9_RFLUSH)
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "unexpected response mux %p id %d tag %d\n",
+ m, rcall->id, rcall->tag);
+ kfree(rcall);
+ }
+ }
+
+ if (!list_empty(&m->req_list)) {
+ if (test_and_clear_bit(Rpending, &m->wsched))
+ n = POLLIN;
+ else
+ n = p9_fd_poll(m->trans, NULL);
+
+ if (n & POLLIN) {
+ P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
+ queue_work(p9_mux_wq, &m->rq);
+ } else
+ clear_bit(Rworksched, &m->wsched);
+ } else
+ clear_bit(Rworksched, &m->wsched);
+
+ return;
+
+error:
+ p9_conn_cancel(m, err);
+ clear_bit(Rworksched, &m->wsched);
+}
+
+/**
+ * p9_send_request - send 9P request
+ * The function can sleep until the request is scheduled for sending.
+ * The function can be interrupted. Return from the function is not
+ * a guarantee that the request is sent successfully. Can return errors
+ * that can be retrieved by PTR_ERR macros.
+ *
+ * @m: mux data
+ * @tc: request to be sent
+ * @cb: callback function to call when response is received
+ * @cba: parameter to pass to the callback function
+ */
+static struct p9_req *p9_send_request(struct p9_conn *m,
+ struct p9_fcall *tc,
+ p9_conn_req_callback cb, void *cba)
+{
+ int n;
+ struct p9_req *req;
+
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
+ tc, tc->id);
+ if (m->err < 0)
+ return ERR_PTR(m->err);
+
+ req = kmalloc(sizeof(struct p9_req), GFP_KERNEL);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ if (tc->id == P9_TVERSION)
+ n = P9_NOTAG;
+ else
+ n = p9_mux_get_tag(m);
+
+ if (n < 0)
+ return ERR_PTR(-ENOMEM);
+
+ p9_set_tag(tc, n);
+
+#ifdef CONFIG_NET_9P_DEBUG
+ if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
+ char buf[150];
+
+ p9_printfcall(buf, sizeof(buf), tc, m->extended);
+ printk(KERN_NOTICE "<<< %p %s\n", m, buf);
+ }
+#endif
+
+ spin_lock_init(&req->lock);
+ req->tag = n;
+ req->tcall = tc;
+ req->rcall = NULL;
+ req->err = 0;
+ req->cb = cb;
+ req->cba = cba;
+ req->flush = None;
+
+ spin_lock(&m->lock);
+ list_add_tail(&req->req_list, &m->unsent_req_list);
+ spin_unlock(&m->lock);
+
+ if (test_and_clear_bit(Wpending, &m->wsched))
+ n = POLLOUT;
+ else
+ n = p9_fd_poll(m->trans, NULL);
+
+ if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
+ queue_work(p9_mux_wq, &m->wq);
+
+ return req;
+}
+
+static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
+{
+ p9_mux_put_tag(m, req->tag);
+ kfree(req);
+}
+
+static void p9_mux_flush_cb(struct p9_req *freq, void *a)
+{
+ p9_conn_req_callback cb;
+ int tag;
+ struct p9_conn *m;
+ struct p9_req *req, *rreq, *rptr;
+
+ m = a;
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
+ freq->tcall, freq->rcall, freq->err,
+ freq->tcall->params.tflush.oldtag);
+
+ spin_lock(&m->lock);
+ cb = NULL;
+ tag = freq->tcall->params.tflush.oldtag;
+ req = NULL;
+ list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
+ if (rreq->tag == tag) {
+ req = rreq;
+ list_del(&req->req_list);
+ break;
+ }
+ }
+ spin_unlock(&m->lock);
+
+ if (req) {
+ spin_lock(&req->lock);
+ req->flush = Flushed;
+ spin_unlock(&req->lock);
+
+ if (req->cb)
+ (*req->cb) (req, req->cba);
+ else
+ kfree(req->rcall);
+
+ wake_up(&m->equeue);
+ }
+
+ kfree(freq->tcall);
+ kfree(freq->rcall);
+ p9_mux_free_request(m, freq);
+}
+
+static int
+p9_mux_flush_request(struct p9_conn *m, struct p9_req *req)
+{
+ struct p9_fcall *fc;
+ struct p9_req *rreq, *rptr;
+
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);
+
+ /* if a response was received for a request, do nothing */
+ spin_lock(&req->lock);
+ if (req->rcall || req->err) {
+ spin_unlock(&req->lock);
+ P9_DPRINTK(P9_DEBUG_MUX,
+ "mux %p req %p response already received\n", m, req);
+ return 0;
+ }
+
+ req->flush = Flushing;
+ spin_unlock(&req->lock);
+
+ spin_lock(&m->lock);
+ /* if the request is not sent yet, just remove it from the list */
+ list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
+ if (rreq->tag == req->tag) {
+ P9_DPRINTK(P9_DEBUG_MUX,
+ "mux %p req %p request is not sent yet\n", m, req);
+ list_del(&rreq->req_list);
+ req->flush = Flushed;
+ spin_unlock(&m->lock);
+ if (req->cb)
+ (*req->cb) (req, req->cba);
+ return 0;
+ }
+ }
+ spin_unlock(&m->lock);
+
+ clear_thread_flag(TIF_SIGPENDING);
+ fc = p9_create_tflush(req->tag);
+ p9_send_request(m, fc, p9_mux_flush_cb, m);
+ return 1;
+}
+
+static void
+p9_conn_rpc_cb(struct p9_req *req, void *a)
+{
+ struct p9_mux_rpc *r;
+
+ P9_DPRINTK(P9_DEBUG_MUX, "req %p r %p\n", req, a);
+ r = a;
+ r->rcall = req->rcall;
+ r->err = req->err;
+
+ if (req->flush != None && !req->err)
+ r->err = -ERESTARTSYS;
+
+ wake_up(&r->wqueue);
+}
+
+/**
+ * p9_fd_rpc - sends a 9P request and waits until a response is available.
+ * The function can be interrupted.
+ * @t: transport data
+ * @tc: request to be sent
+ * @rc: pointer where a pointer to the response is stored
+ */
+int
+p9_fd_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc)
+{
+ struct p9_trans_fd *p = t->priv;
+ struct p9_conn *m = p->conn;
+ int err, sigpending;
+ unsigned long flags;
+ struct p9_req *req;
+ struct p9_mux_rpc r;
+
+ r.err = 0;
+ r.tcall = tc;
+ r.rcall = NULL;
+ r.m = m;
+ init_waitqueue_head(&r.wqueue);
+
+ if (rc)
+ *rc = NULL;
+
+ sigpending = 0;
+ if (signal_pending(current)) {
+ sigpending = 1;
+ clear_thread_flag(TIF_SIGPENDING);
+ }
+
+ req = p9_send_request(m, tc, p9_conn_rpc_cb, &r);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
+ return err;
+ }
+
+ err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
+ if (r.err < 0)
+ err = r.err;
+
+ if (err == -ERESTARTSYS && m->trans->status == Connected
+ && m->err == 0) {
+ if (p9_mux_flush_request(m, req)) {
+ /* wait until we get response of the flush message */
+ do {
+ clear_thread_flag(TIF_SIGPENDING);
+ err = wait_event_interruptible(r.wqueue,
+ r.rcall || r.err);
+ } while (!r.rcall && !r.err && err == -ERESTARTSYS &&
+ m->trans->status == Connected && !m->err);
+
+ err = -ERESTARTSYS;
+ }
+ sigpending = 1;
+ }
+
+ if (sigpending) {
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ recalc_sigpending();
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ }
+
+ if (rc)
+ *rc = r.rcall;
+ else
+ kfree(r.rcall);
+
+ p9_mux_free_request(m, req);
+ if (err > 0)
+ err = -EIO;
+
+ return err;
+}
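
If the wait for the reply in p9_fd_rpc() is interrupted while the connection is still healthy, a Tflush is issued for the pending tag and the caller keeps waiting until the flush resolves, then reports -ERESTARTSYS. A control-flow sketch of that handling; the wait and flush calls here are canned stand-ins, not the real primitives:

    #include <stdio.h>

    #define ERESTARTSYS 512
    #define EIO 5

    static int wait_calls;

    static int wait_for_reply(void)
    {
        /* pretend the first wait is interrupted, the second completes */
        return ++wait_calls == 1 ? -ERESTARTSYS : 0;
    }

    static int flush_request(void)
    {
        printf("sending Tflush for the pending tag\n");
        return 1;               /* flush was needed and sent */
    }

    static int rpc(void)
    {
        int err = wait_for_reply();

        if (err == -ERESTARTSYS /* && still connected, no prior error */) {
            if (flush_request()) {
                /* wait until the flush (or the original reply) resolves,
                 * then report the interruption to the caller */
                while (wait_for_reply() == -ERESTARTSYS)
                    ;
                err = -ERESTARTSYS;
            }
        }

        if (err > 0)
            err = -EIO;
        return err;
    }

    int main(void)
    {
        printf("rpc() = %d\n", rpc());
        return 0;
    }
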
+
+#ifdef P9_NONBLOCK
+/**
+ * p9_conn_rpcnb - sends 9P request without waiting for response.
+ * @m: mux data
+ * @tc: request to be sent
+ * @cb: callback function to be called when response arrives
+ * @a: value to pass to the callback function
+ */
+int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc,
+ p9_conn_req_callback cb, void *a)
+{
+ int err;
+ struct p9_req *req;
+
+ req = p9_send_request(m, tc, cb, a);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
+ return PTR_ERR(req);
+ }
+
+ P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
+ return 0;
+}
+#endif /* P9_NONBLOCK */
+
+/**
+ * p9_conn_cancel - cancel all pending requests with error
+ * @m: mux data
+ * @err: error code
+ */
+void p9_conn_cancel(struct p9_conn *m, int err)
+{
+ struct p9_req *req, *rtmp;
+ LIST_HEAD(cancel_list);
+
+ P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
+ m->err = err;
+ spin_lock(&m->lock);
+ list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
+ list_move(&req->req_list, &cancel_list);
+ }
+ list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
+ list_move(&req->req_list, &cancel_list);
+ }
+ spin_unlock(&m->lock);
+
+ list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
+ list_del(&req->req_list);
+ if (!req->err)
+ req->err = err;
+
+ if (req->cb)
+ (*req->cb) (req, req->cba);
+ else
+ kfree(req->rcall);
+ }
+
+ wake_up(&m->equeue);
+}
+
/**
* v9fs_parse_options - parse mount options into session structure
* @options: options string passed from mount
@@ -268,7 +1294,7 @@ end:
}
/**
- * p9_sock_close - shutdown socket
+ * p9_fd_close - shutdown socket
* @trans: private socket structure
*
*/
@@ -284,6 +1310,8 @@ static void p9_fd_close(struct p9_trans *trans)
if (!ts)
return;
+ p9_conn_destroy(ts->conn);
+
trans->status = Disconnected;
if (ts->rd)
fput(ts->rd);
@@ -292,13 +1320,15 @@ static void p9_fd_close(struct p9_trans *trans)
kfree(ts);
}
-static struct p9_trans *p9_trans_create_tcp(const char *addr, char *args)
+static struct p9_trans *
+p9_trans_create_tcp(const char *addr, char *args, int msize, unsigned char dotu)
{
int err;
struct p9_trans *trans;
struct socket *csocket;
struct sockaddr_in sin_server;
struct p9_fd_opts opts;
+ struct p9_trans_fd *p;
parse_opts(args, &opts);
@@ -306,11 +1336,10 @@ static struct p9_trans *p9_trans_create_tcp(const char *addr, char *args)
trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL);
if (!trans)
return ERR_PTR(-ENOMEM);
-
- trans->write = p9_fd_write;
- trans->read = p9_fd_read;
+ trans->msize = msize;
+ trans->extended = dotu;
+ trans->rpc = p9_fd_rpc;
trans->close = p9_fd_close;
- trans->poll = p9_fd_poll;
sin_server.sin_family = AF_INET;
sin_server.sin_addr.s_addr = in_aton(addr);
@@ -337,6 +1366,14 @@ static struct p9_trans *p9_trans_create_tcp(const char *addr, char *args)
if (err < 0)
goto error;
+ p = (struct p9_trans_fd *) trans->priv;
+ p->conn = p9_conn_create(trans);
+ if (IS_ERR(p->conn)) {
+ err = PTR_ERR(p->conn);
+ p->conn = NULL;
+ goto error;
+ }
+
return trans;
error:
@@ -347,22 +1384,23 @@ error:
return ERR_PTR(err);
}
-static struct p9_trans *p9_trans_create_unix(const char *addr, char *args)
+static struct p9_trans *
+p9_trans_create_unix(const char *addr, char *args, int msize,
+ unsigned char dotu)
{
int err;
struct socket *csocket;
struct sockaddr_un sun_server;
struct p9_trans *trans;
+ struct p9_trans_fd *p;
csocket = NULL;
trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL);
if (!trans)
return ERR_PTR(-ENOMEM);
- trans->write = p9_fd_write;
- trans->read = p9_fd_read;
+ trans->rpc = p9_fd_rpc;
trans->close = p9_fd_close;
- trans->poll = p9_fd_poll;
if (strlen(addr) > UNIX_PATH_MAX) {
P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
@@ -387,6 +1425,16 @@ static struct p9_trans *p9_trans_create_unix(const char *addr, char *args)
if (err < 0)
goto error;
+ trans->msize = msize;
+ trans->extended = dotu;
+ p = (struct p9_trans_fd *) trans->priv;
+ p->conn = p9_conn_create(trans);
+ if (IS_ERR(p->conn)) {
+ err = PTR_ERR(p->conn);
+ p->conn = NULL;
+ goto error;
+ }
+
return trans;
error:
@@ -397,11 +1445,14 @@ error:
return ERR_PTR(err);
}
-static struct p9_trans *p9_trans_create_fd(const char *name, char *args)
+static struct p9_trans *
+p9_trans_create_fd(const char *name, char *args, int msize,
+ unsigned char extended)
{
int err;
struct p9_trans *trans;
struct p9_fd_opts opts;
+ struct p9_trans_fd *p;
parse_opts(args, &opts);
@@ -414,15 +1465,23 @@ static struct p9_trans *p9_trans_create_fd(const char *name, char *args)
if (!trans)
return ERR_PTR(-ENOMEM);
- trans->write = p9_fd_write;
- trans->read = p9_fd_read;
+ trans->rpc = p9_fd_rpc;
trans->close = p9_fd_close;
- trans->poll = p9_fd_poll;
err = p9_fd_open(trans, opts.rfd, opts.wfd);
if (err < 0)
goto error;
+ trans->msize = msize;
+ trans->extended = extended;
+ p = (struct p9_trans_fd *) trans->priv;
+ p->conn = p9_conn_create(trans);
+ if (IS_ERR(p->conn)) {
+ err = PTR_ERR(p->conn);
+ p->conn = NULL;
+ goto error;
+ }
+
return trans;
error:
@@ -453,6 +1512,12 @@ static struct p9_trans_module p9_fd_trans = {
static int __init p9_trans_fd_init(void)
{
+ int ret = p9_mux_global_init();
+ if (ret) {
+ printk(KERN_WARNING "9p: starting mux failed\n");
+ return ret;
+ }
+
v9fs_register_trans(&p9_tcp_trans);
v9fs_register_trans(&p9_unix_trans);
v9fs_register_trans(&p9_fd_trans);
@@ -460,13 +1525,7 @@ static int __init p9_trans_fd_init(void)
return 1;
}
-static void __exit p9_trans_fd_exit(void) {
- printk(KERN_ERR "Removal of 9p transports not implemented\n");
- BUG();
-}
-
module_init(p9_trans_fd_init);
-module_exit(p9_trans_fd_exit);
MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>");
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 42eea5fe2628..0117b9fb8480 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -1,17 +1,8 @@
/*
* The Guest 9p transport driver
*
- * This is a trivial pipe-based transport driver based on the lguest console
- * code: we use lguest's DMA mechanism to send bytes out, and register a
- * DMA buffer to receive bytes in. It is assumed to be present and available
- * from the very beginning of boot.
- *
- * This may be have been done by just instaniating another HVC console,
- * but HVC's blocksize of 16 bytes is annoying and painful to performance.
- *
- * A more efficient transport could be built based on the virtio block driver
- * but it requires some changes in the 9p transport model (which are in
- * progress)
+ * This is a block based transport driver based on the lguest block driver
+ * code.
*
*/
/*
@@ -55,11 +46,25 @@
#include <linux/virtio.h>
#include <linux/virtio_9p.h>
+#define VIRTQUEUE_NUM 128
+
/* a single mutex to manage channel initialization and attachment */
static DECLARE_MUTEX(virtio_9p_lock);
/* global which tracks highest initialized channel */
static int chan_index;
+#define P9_INIT_MAXTAG 16
+
+#define REQ_STATUS_IDLE 0
+#define REQ_STATUS_SENT 1
+#define REQ_STATUS_RCVD 2
+#define REQ_STATUS_FLSH 3
+
+struct p9_req_t {
+ int status;
+ wait_queue_head_t *wq;
+};
+
/* We keep all per-channel information in a structure.
* This structure is allocated within the devices dev->mem space.
* A pointer to the structure will get put in the transport private.
@@ -68,146 +73,198 @@ static struct virtio_chan {
bool initialized; /* channel is initialized */
bool inuse; /* channel is in use */
- struct virtqueue *in_vq, *out_vq;
+ spinlock_t lock;
+
struct virtio_device *vdev;
+ struct virtqueue *vq;
- /* This is our input buffer, and how much data is left in it. */
- unsigned int in_len;
- char *in, *inbuf;
+ struct p9_idpool *tagpool;
+ struct p9_req_t *reqs;
+ int max_tag;
- wait_queue_head_t wq; /* waitq for buffer */
+ /* Scatterlist: can be too big for stack. */
+ struct scatterlist sg[VIRTQUEUE_NUM];
} channels[MAX_9P_CHAN];
+/* Lookup requests by tag */
+static struct p9_req_t *p9_lookup_tag(struct virtio_chan *c, u16 tag)
+{
+ /* This looks up the original request by tag so we know which
+ * buffer to read the data into */
+ tag++;
+
+ while (tag >= c->max_tag) {
+ int old_max = c->max_tag;
+ int count;
+
+ if (c->max_tag)
+ c->max_tag *= 2;
+ else
+ c->max_tag = P9_INIT_MAXTAG;
+
+ c->reqs = krealloc(c->reqs, sizeof(struct p9_req_t)*c->max_tag,
+ GFP_ATOMIC);
+ if (!c->reqs) {
+ printk(KERN_ERR "Couldn't grow tag array\n");
+ BUG();
+ }
+ for (count = old_max; count < c->max_tag; count++) {
+ c->reqs[count].status = REQ_STATUS_IDLE;
+ c->reqs[count].wq = kmalloc(sizeof(wait_queue_t),
+ GFP_ATOMIC);
+ if (!c->reqs[count].wq) {
+ printk(KERN_ERR "Couldn't grow tag array\n");
+ BUG();
+ }
+ init_waitqueue_head(c->reqs[count].wq);
+ }
+ }
+
+ return &c->reqs[tag];
+}
+
+
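p9_lookup_tag() above grows the per-channel request table by doubling, starting from P9_INIT_MAXTAG, so repeated growth amortizes to constant cost per tag. As a minimal userspace sketch of the same doubling pattern (all names hypothetical, plain realloc() standing in for krealloc()):

    #include <stdio.h>
    #include <stdlib.h>

    #define INIT_MAX 16                     /* mirrors P9_INIT_MAXTAG */

    struct req { int status; };

    static struct req *reqs;
    static int max_tag;

    /* Grow the table until 'tag' fits, doubling the size each time. */
    static struct req *lookup_tag(int tag)
    {
        while (tag >= max_tag) {
            int old_max = max_tag;
            int i;

            max_tag = max_tag ? max_tag * 2 : INIT_MAX;
            reqs = realloc(reqs, sizeof(*reqs) * max_tag);
            if (!reqs)
                abort();
            for (i = old_max; i < max_tag; i++)
                reqs[i].status = 0;
        }
        return &reqs[tag];
    }

    int main(void)
    {
        lookup_tag(40);                     /* grows 0 -> 16 -> 32 -> 64 */
        printf("table grew to %d entries\n", max_tag);
        free(reqs);
        return 0;
    }

The kernel version does the same work under chan->lock with GFP_ATOMIC, which is why an allocation failure there is handled with BUG() rather than a sleeping retry.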
/* How many bytes left in this page. */
static unsigned int rest_of_page(void *data)
{
return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE);
}
-static int p9_virtio_write(struct p9_trans *trans, void *buf, int count)
+static void p9_virtio_close(struct p9_trans *trans)
{
- struct virtio_chan *chan = (struct virtio_chan *) trans->priv;
- struct virtqueue *out_vq = chan->out_vq;
- struct scatterlist sg[1];
- unsigned int len;
+ struct virtio_chan *chan = trans->priv;
+ int count;
+ unsigned int flags;
- P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio write (%d)\n", count);
+ spin_lock_irqsave(&chan->lock, flags);
+ p9_idpool_destroy(chan->tagpool);
+ for (count = 0; count < chan->max_tag; count++)
+ kfree(chan->reqs[count].wq);
+ kfree(chan->reqs);
+ chan->max_tag = 0;
+ spin_unlock_irqrestore(&chan->lock, flags);
- /* keep it simple - make sure we don't overflow a page */
- if (rest_of_page(buf) < count)
- count = rest_of_page(buf);
+ down(&virtio_9p_lock);
+ chan->inuse = false;
+ up(&virtio_9p_lock);
- sg_init_one(sg, buf, count);
+ kfree(trans);
+}
- /* add_buf wants a token to identify this buffer: we hand it any
- * non-NULL pointer, since there's only ever one buffer. */
- if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) == 0) {
- /* Tell Host to go! */
- out_vq->vq_ops->kick(out_vq);
- /* Chill out until it's done with the buffer. */
- while (!out_vq->vq_ops->get_buf(out_vq, &len))
- cpu_relax();
+static void req_done(struct virtqueue *vq)
+{
+ struct virtio_chan *chan = vq->vdev->priv;
+ struct p9_fcall *rc;
+ unsigned int len;
+ unsigned long flags;
+ struct p9_req_t *req;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
+ req = p9_lookup_tag(chan, rc->tag);
+ req->status = REQ_STATUS_RCVD;
+ wake_up(req->wq);
}
-
- P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio wrote (%d)\n", count);
-
- /* We're expected to return the amount of data we wrote: all of it. */
- return count;
+ /* In case queue is stopped waiting for more buffers. */
+ spin_unlock_irqrestore(&chan->lock, flags);
}
-/* Create a scatter-gather list representing our input buffer and put it in the
- * queue. */
-static void add_inbuf(struct virtio_chan *chan)
+static int
+pack_sg_list(struct scatterlist *sg, int start, int limit, char *data,
+ int count)
{
- struct scatterlist sg[1];
-
- sg_init_one(sg, chan->inbuf, PAGE_SIZE);
+ int s;
+ int index = start;
+
+ while (count) {
+ s = rest_of_page(data);
+ if (s > count)
+ s = count;
+ sg_set_buf(&sg[index++], data, s);
+ count -= s;
+ data += s;
+ if (index > limit)
+ BUG();
+ }
- /* We should always be able to add one buffer to an empty queue. */
- if (chan->in_vq->vq_ops->add_buf(chan->in_vq, sg, 0, 1, chan->inbuf))
- BUG();
- chan->in_vq->vq_ops->kick(chan->in_vq);
+ return index-start;
}
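pack_sg_list() above never lets a scatterlist entry cross a page boundary: each segment is capped by rest_of_page(). A self-contained sketch of just that arithmetic, assuming 4 KiB pages and printing segment sizes instead of filling a struct scatterlist:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Bytes left in the page that 'data' points into (as in rest_of_page()). */
    static unsigned long rest_of_page(void *data)
    {
        return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE);
    }

    int main(void)
    {
        char buf[10000];
        char *data = buf;
        unsigned long count = sizeof(buf);
        int index = 0;

        while (count) {
            unsigned long s = rest_of_page(data);

            if (s > count)
                s = count;
            printf("sg[%d]: %lu bytes\n", index++, s);
            data += s;
            count -= s;
        }
        return 0;
    }

The BUG() in pack_sg_list() fires if a caller ever packs more segments than the slots reserved by VIRTQUEUE_NUM.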
-static int p9_virtio_read(struct p9_trans *trans, void *buf, int count)
+static int
+p9_virtio_rpc(struct p9_trans *t, struct p9_fcall *tc, struct p9_fcall **rc)
{
- struct virtio_chan *chan = (struct virtio_chan *) trans->priv;
- struct virtqueue *in_vq = chan->in_vq;
-
- P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio read (%d)\n", count);
+ int in, out;
+ int n, err, size;
+ struct virtio_chan *chan = t->priv;
+ char *rdata;
+ struct p9_req_t *req;
+ unsigned long flags;
+
+ if (*rc == NULL) {
+ *rc = kmalloc(sizeof(struct p9_fcall) + t->msize, GFP_KERNEL);
+ if (!*rc)
+ return -ENOMEM;
+ }
- /* If we don't have an input queue yet, we can't get input. */
- BUG_ON(!in_vq);
+ rdata = (char *)*rc+sizeof(struct p9_fcall);
- /* No buffer? Try to get one. */
- if (!chan->in_len) {
- chan->in = in_vq->vq_ops->get_buf(in_vq, &chan->in_len);
- if (!chan->in)
- return 0;
+ n = P9_NOTAG;
+ if (tc->id != P9_TVERSION) {
+ n = p9_idpool_get(chan->tagpool);
+ if (n < 0)
+ return -ENOMEM;
}
- /* You want more than we have to give? Well, try wanting less! */
- if (chan->in_len < count)
- count = chan->in_len;
+ spin_lock_irqsave(&chan->lock, flags);
+ req = p9_lookup_tag(chan, n);
+ spin_unlock_irqrestore(&chan->lock, flags);
- /* Copy across to their buffer and increment offset. */
- memcpy(buf, chan->in, count);
- chan->in += count;
- chan->in_len -= count;
+ p9_set_tag(tc, n);
- /* Finished? Re-register buffer so Host will use it again. */
- if (chan->in_len == 0)
- add_inbuf(chan);
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio rpc tag %d\n", n);
- P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio finished read (%d)\n",
- count);
+ out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, tc->sdata, tc->size);
+ in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM-out, rdata, t->msize);
- return count;
-}
+ req->status = REQ_STATUS_SENT;
-/* The poll function is used by 9p transports to determine if there
- * is there is activity available on a particular channel. In our case
- * we use it to wait for a callback from the input routines.
- */
-static unsigned int
-p9_virtio_poll(struct p9_trans *trans, struct poll_table_struct *pt)
-{
- struct virtio_chan *chan = (struct virtio_chan *)trans->priv;
- struct virtqueue *in_vq = chan->in_vq;
- int ret = POLLOUT; /* we can always handle more output */
+ if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, tc)) {
+ P9_DPRINTK(P9_DEBUG_TRANS,
+ "9p debug: virtio rpc add_buf returned failure");
+ return -EIO;
+ }
- poll_wait(NULL, &chan->wq, pt);
+ chan->vq->vq_ops->kick(chan->vq);
- /* No buffer? Try to get one. */
- if (!chan->in_len)
- chan->in = in_vq->vq_ops->get_buf(in_vq, &chan->in_len);
+ wait_event(*req->wq, req->status == REQ_STATUS_RCVD);
- if (chan->in_len)
- ret |= POLLIN;
+ size = le32_to_cpu(*(__le32 *) rdata);
- return ret;
-}
+ err = p9_deserialize_fcall(rdata, size, *rc, t->extended);
+ if (err < 0) {
+ P9_DPRINTK(P9_DEBUG_TRANS,
+ "9p debug: virtio rpc deserialize returned %d\n", err);
+ return err;
+ }
-static void p9_virtio_close(struct p9_trans *trans)
-{
- struct virtio_chan *chan = trans->priv;
+#ifdef CONFIG_NET_9P_DEBUG
+ if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
+ char buf[150];
- down(&virtio_9p_lock);
- chan->inuse = false;
- up(&virtio_9p_lock);
+ p9_printfcall(buf, sizeof(buf), *rc, t->extended);
+ printk(KERN_NOTICE ">>> %p %s\n", t, buf);
+ }
+#endif
- kfree(trans);
-}
+ if (n != P9_NOTAG && p9_idpool_check(n, chan->tagpool))
+ p9_idpool_put(n, chan->tagpool);
-static void p9_virtio_intr(struct virtqueue *q)
-{
- struct virtio_chan *chan = q->vdev->priv;
+ req->status = REQ_STATUS_IDLE;
- P9_DPRINTK(P9_DEBUG_TRANS, "9p poll_wakeup: %p\n", &chan->wq);
- wake_up_interruptible(&chan->wq);
+ return 0;
}
-static int p9_virtio_probe(struct virtio_device *dev)
+static int p9_virtio_probe(struct virtio_device *vdev)
{
int err;
struct virtio_chan *chan;
@@ -221,44 +278,29 @@ static int p9_virtio_probe(struct virtio_device *dev)
if (chan_index > MAX_9P_CHAN) {
printk(KERN_ERR "9p: virtio: Maximum channels exceeded\n");
BUG();
- }
-
- chan->vdev = dev;
-
- /* This is the scratch page we use to receive console input */
- chan->inbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!chan->inbuf) {
err = -ENOMEM;
goto fail;
}
- /* Find the input queue. */
- dev->priv = chan;
- chan->in_vq = dev->config->find_vq(dev, 0, p9_virtio_intr);
- if (IS_ERR(chan->in_vq)) {
- err = PTR_ERR(chan->in_vq);
- goto free;
- }
+ chan->vdev = vdev;
- chan->out_vq = dev->config->find_vq(dev, 1, NULL);
- if (IS_ERR(chan->out_vq)) {
- err = PTR_ERR(chan->out_vq);
- goto free_in_vq;
+ /* We expect one virtqueue, for requests. */
+ chan->vq = vdev->config->find_vq(vdev, 0, req_done);
+ if (IS_ERR(chan->vq)) {
+ err = PTR_ERR(chan->vq);
+ goto out_free_vq;
}
+ chan->vq->vdev->priv = chan;
+ spin_lock_init(&chan->lock);
- init_waitqueue_head(&chan->wq);
+ sg_init_table(chan->sg, VIRTQUEUE_NUM);
- /* Register the input buffer the first time. */
- add_inbuf(chan);
chan->inuse = false;
chan->initialized = true;
-
return 0;
-free_in_vq:
- dev->config->del_vq(chan->in_vq);
-free:
- kfree(chan->inbuf);
+out_free_vq:
+ vdev->config->del_vq(chan->vq);
fail:
down(&virtio_9p_lock);
chan_index--;
@@ -271,11 +313,13 @@ fail:
* alternate channels by matching devname versus a virtio_config entry.
* We use a simple reference count mechanism to ensure that only a single
* mount has a channel open at a time. */
-static struct p9_trans *p9_virtio_create(const char *devname, char *args)
+static struct p9_trans *
+p9_virtio_create(const char *devname, char *args, int msize,
+ unsigned char extended)
{
struct p9_trans *trans;
- int index = 0;
struct virtio_chan *chan = channels;
+ int index = 0;
down(&virtio_9p_lock);
while (index < MAX_9P_CHAN) {
@@ -290,25 +334,45 @@ static struct p9_trans *p9_virtio_create(const char *devname, char *args)
up(&virtio_9p_lock);
if (index >= MAX_9P_CHAN) {
- printk(KERN_ERR "9p: virtio: couldn't find a free channel\n");
- return NULL;
+ printk(KERN_ERR "9p: no channels available\n");
+ return ERR_PTR(-ENODEV);
}
+ chan->tagpool = p9_idpool_create();
+ if (IS_ERR(chan->tagpool)) {
+ printk(KERN_ERR "9p: couldn't allocate tagpool\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ p9_idpool_get(chan->tagpool); /* reserve tag 0 */
+ chan->max_tag = 0;
+ chan->reqs = NULL;
+
trans = kmalloc(sizeof(struct p9_trans), GFP_KERNEL);
if (!trans) {
printk(KERN_ERR "9p: couldn't allocate transport\n");
return ERR_PTR(-ENOMEM);
}
-
- trans->write = p9_virtio_write;
- trans->read = p9_virtio_read;
+ trans->extended = extended;
+ trans->msize = msize;
trans->close = p9_virtio_close;
- trans->poll = p9_virtio_poll;
+ trans->rpc = p9_virtio_rpc;
trans->priv = chan;
return trans;
}
+static void p9_virtio_remove(struct virtio_device *vdev)
+{
+ struct virtio_chan *chan = vdev->priv;
+
+ BUG_ON(chan->inuse);
+
+ if (chan->initialized) {
+ vdev->config->del_vq(chan->vq);
+ chan->initialized = false;
+ }
+}
+
#define VIRTIO_ID_9P 9
static struct virtio_device_id id_table[] = {
@@ -322,12 +386,13 @@ static struct virtio_driver p9_virtio_drv = {
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = p9_virtio_probe,
+ .remove = p9_virtio_remove,
};
static struct p9_trans_module p9_virtio_trans = {
.name = "virtio",
.create = p9_virtio_create,
- .maxsize = PAGE_SIZE,
+ .maxsize = PAGE_SIZE*16,
.def = 0,
};
@@ -343,7 +408,13 @@ static int __init p9_virtio_init(void)
return register_virtio_driver(&p9_virtio_drv);
}
+static void __exit p9_virtio_cleanup(void)
+{
+ unregister_virtio_driver(&p9_virtio_drv);
+}
+
module_init(p9_virtio_init);
+module_exit(p9_virtio_cleanup);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
diff --git a/net/9p/util.c b/net/9p/util.c
index 22077b79395d..ef7215565d88 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -33,7 +33,7 @@
#include <net/9p/9p.h>
struct p9_idpool {
- struct semaphore lock;
+ spinlock_t lock;
struct idr pool;
};
@@ -45,7 +45,7 @@ struct p9_idpool *p9_idpool_create(void)
if (!p)
return ERR_PTR(-ENOMEM);
- init_MUTEX(&p->lock);
+ spin_lock_init(&p->lock);
idr_init(&p->pool);
return p;
@@ -71,19 +71,17 @@ int p9_idpool_get(struct p9_idpool *p)
{
int i = 0;
int error;
+ unsigned int flags;
retry:
if (idr_pre_get(&p->pool, GFP_KERNEL) == 0)
return 0;
- if (down_interruptible(&p->lock) == -EINTR) {
- P9_EPRINTK(KERN_WARNING, "Interrupted while locking\n");
- return -1;
- }
+ spin_lock_irqsave(&p->lock, flags);
/* no need to store exactly p, we just need something non-null */
error = idr_get_new(&p->pool, p, &i);
- up(&p->lock);
+ spin_unlock_irqrestore(&p->lock, flags);
if (error == -EAGAIN)
goto retry;
@@ -104,12 +102,10 @@ EXPORT_SYMBOL(p9_idpool_get);
void p9_idpool_put(int id, struct p9_idpool *p)
{
- if (down_interruptible(&p->lock) == -EINTR) {
- P9_EPRINTK(KERN_WARNING, "Interrupted while locking\n");
- return;
- }
+ unsigned int flags;
+ spin_lock_irqsave(&p->lock, flags);
idr_remove(&p->pool, id);
- up(&p->lock);
+ spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(p9_idpool_put);
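With the semaphore replaced by a spinlock, the idpool calls can no longer fail because a signal interrupted the lock, so callers can pair them straightforwardly. A hedged in-kernel usage sketch built only from the functions visible in this patch (p9_idpool_create/get/put/destroy); the wrapper function is hypothetical, error handling is minimal, and the usual 9p and err.h includes are assumed:

    /* Hypothetical round trip: allocate one id from a fresh pool, then free it. */
    static int example_idpool_roundtrip(void)
    {
        struct p9_idpool *pool;
        int id;

        pool = p9_idpool_create();
        if (IS_ERR(pool))
            return PTR_ERR(pool);

        id = p9_idpool_get(pool);   /* callers in this patch treat < 0 as -ENOMEM */
        if (id < 0) {
            p9_idpool_destroy(pool);
            return -ENOMEM;
        }

        p9_idpool_put(id, pool);
        p9_idpool_destroy(pool);
        return 0;
    }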
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 782a22602b86..519cdb920f93 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -135,8 +135,8 @@ static void __hidp_copy_session(struct hidp_session *session, struct hidp_connin
}
}
-static inline int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
+static int hidp_queue_event(struct hidp_session *session, struct input_dev *dev,
+ unsigned int type, unsigned int code, int value)
{
unsigned char newleds;
struct sk_buff *skb;
@@ -243,7 +243,8 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
input_sync(dev);
}
-static inline int hidp_queue_report(struct hidp_session *session, unsigned char *data, int size)
+static int hidp_queue_report(struct hidp_session *session,
+ unsigned char *data, int size)
{
struct sk_buff *skb;
@@ -287,7 +288,7 @@ static void hidp_idle_timeout(unsigned long arg)
hidp_schedule(session);
}
-static inline void hidp_set_timer(struct hidp_session *session)
+static void hidp_set_timer(struct hidp_session *session)
{
if (session->idle_to > 0)
mod_timer(&session->timer, jiffies + HZ * session->idle_to);
@@ -332,7 +333,8 @@ static inline int hidp_send_ctrl_message(struct hidp_session *session,
return err;
}
-static inline void hidp_process_handshake(struct hidp_session *session, unsigned char param)
+static void hidp_process_handshake(struct hidp_session *session,
+ unsigned char param)
{
BT_DBG("session %p param 0x%02x", session, param);
@@ -365,38 +367,23 @@ static inline void hidp_process_handshake(struct hidp_session *session, unsigned
}
}
-static inline void hidp_process_hid_control(struct hidp_session *session, unsigned char param)
+static void hidp_process_hid_control(struct hidp_session *session,
+ unsigned char param)
{
BT_DBG("session %p param 0x%02x", session, param);
- switch (param) {
- case HIDP_CTRL_NOP:
- break;
-
- case HIDP_CTRL_VIRTUAL_CABLE_UNPLUG:
+ if (param == HIDP_CTRL_VIRTUAL_CABLE_UNPLUG) {
/* Flush the transmit queues */
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
/* Kill session thread */
atomic_inc(&session->terminate);
- break;
-
- case HIDP_CTRL_HARD_RESET:
- case HIDP_CTRL_SOFT_RESET:
- case HIDP_CTRL_SUSPEND:
- case HIDP_CTRL_EXIT_SUSPEND:
- /* FIXME: We have to parse these and return no error */
- break;
-
- default:
- __hidp_send_ctrl_message(session,
- HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0);
- break;
}
}
-static inline void hidp_process_data(struct hidp_session *session, struct sk_buff *skb, unsigned char param)
+static void hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
+ unsigned char param)
{
BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param);
@@ -423,7 +410,8 @@ static inline void hidp_process_data(struct hidp_session *session, struct sk_buf
}
}
-static inline void hidp_recv_ctrl_frame(struct hidp_session *session, struct sk_buff *skb)
+static void hidp_recv_ctrl_frame(struct hidp_session *session,
+ struct sk_buff *skb)
{
unsigned char hdr, type, param;
@@ -457,7 +445,8 @@ static inline void hidp_recv_ctrl_frame(struct hidp_session *session, struct sk_
kfree_skb(skb);
}
-static inline void hidp_recv_intr_frame(struct hidp_session *session, struct sk_buff *skb)
+static void hidp_recv_intr_frame(struct hidp_session *session,
+ struct sk_buff *skb)
{
unsigned char hdr;
@@ -625,7 +614,8 @@ static struct device *hidp_get_device(struct hidp_session *session)
return conn ? &conn->dev : NULL;
}
-static inline int hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req)
+static int hidp_setup_input(struct hidp_session *session,
+ struct hidp_connadd_req *req)
{
struct input_dev *input = session->input;
int i;
@@ -702,7 +692,8 @@ static void hidp_setup_quirks(struct hid_device *hid)
hid->quirks = hidp_blacklist[n].quirks;
}
-static inline void hidp_setup_hid(struct hidp_session *session, struct hidp_connadd_req *req)
+static void hidp_setup_hid(struct hidp_session *session,
+ struct hidp_connadd_req *req)
{
struct hid_device *hid = session->hid;
struct hid_report *report;
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 788c70321858..e4c779bb8d76 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -429,7 +429,8 @@ static int rfcomm_release_dev(void __user *arg)
if (dev->tty)
tty_vhangup(dev->tty);
- rfcomm_dev_del(dev);
+ if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
+ rfcomm_dev_del(dev);
rfcomm_dev_put(dev);
return 0;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index ddbdde82a700..61ac8d06292c 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -82,32 +82,6 @@ int rtnl_trylock(void)
return mutex_trylock(&rtnl_mutex);
}
-int rtattr_parse(struct rtattr *tb[], int maxattr, struct rtattr *rta, int len)
-{
- memset(tb, 0, sizeof(struct rtattr*)*maxattr);
-
- while (RTA_OK(rta, len)) {
- unsigned flavor = rta->rta_type;
- if (flavor && flavor <= maxattr)
- tb[flavor-1] = rta;
- rta = RTA_NEXT(rta, len);
- }
- return 0;
-}
-
-int __rtattr_parse_nested_compat(struct rtattr *tb[], int maxattr,
- struct rtattr *rta, int len)
-{
- if (RTA_PAYLOAD(rta) < len)
- return -1;
- if (RTA_PAYLOAD(rta) >= RTA_ALIGN(len) + sizeof(struct rtattr)) {
- rta = RTA_DATA(rta) + RTA_ALIGN(len);
- return rtattr_parse_nested(tb, maxattr, rta);
- }
- memset(tb, 0, sizeof(struct rtattr *) * maxattr);
- return 0;
-}
-
static struct rtnl_link *rtnl_msg_handlers[NPROTO];
static inline int rtm_msgindex(int msgtype)
@@ -442,21 +416,6 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data
memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
}
-size_t rtattr_strlcpy(char *dest, const struct rtattr *rta, size_t size)
-{
- size_t ret = RTA_PAYLOAD(rta);
- char *src = RTA_DATA(rta);
-
- if (ret > 0 && src[ret - 1] == '\0')
- ret--;
- if (size > 0) {
- size_t len = (ret >= size) ? size - 1 : ret;
- memset(dest, 0, size);
- memcpy(dest, src, len);
- }
- return ret;
-}
-
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo)
{
struct sock *rtnl = net->rtnl;
@@ -1411,9 +1370,6 @@ void __init rtnetlink_init(void)
}
EXPORT_SYMBOL(__rta_fill);
-EXPORT_SYMBOL(rtattr_strlcpy);
-EXPORT_SYMBOL(rtattr_parse);
-EXPORT_SYMBOL(__rtattr_parse_nested_compat);
EXPORT_SYMBOL(rtnetlink_put_metrics);
EXPORT_SYMBOL(rtnl_lock);
EXPORT_SYMBOL(rtnl_trylock);
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index a2241060113b..8cd357f41283 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -547,8 +547,8 @@ int cipso_v4_doi_remove(u32 doi,
rcu_read_lock();
list_for_each_entry_rcu(dom_iter, &doi_def->dom_list, list)
if (dom_iter->valid)
- netlbl_domhsh_remove(dom_iter->domain,
- audit_info);
+ netlbl_cfg_map_del(dom_iter->domain,
+ audit_info);
rcu_read_unlock();
cipso_v4_cache_invalidate();
call_rcu(&doi_def->rcu, callback);
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 35851c96bdfb..f5fba3f71c06 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2431,8 +2431,7 @@ static int fib_trie_seq_show(struct seq_file *seq, void *v)
rtn_type(buf2, sizeof(buf2),
fa->fa_type));
if (fa->fa_tos)
- seq_printf(seq, "tos =%d\n",
- fa->fa_tos);
+ seq_printf(seq, " tos=%d", fa->fa_tos);
seq_putc(seq, '\n');
}
}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a7321a82df6d..a13c074dac09 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1015,7 +1015,8 @@ int icmp_rcv(struct sk_buff *skb)
goto error;
}
- __skb_pull(skb, sizeof(*icmph));
+ if (!pskb_pull(skb, sizeof(*icmph)))
+ goto error;
icmph = icmp_hdr(skb);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 90f422c9447b..9cac6c034abd 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -398,7 +398,7 @@ out:
EXPORT_SYMBOL_GPL(inet_unhash);
int __inet_hash_connect(struct inet_timewait_death_row *death_row,
- struct sock *sk,
+ struct sock *sk, u32 port_offset,
int (*check_established)(struct inet_timewait_death_row *,
struct sock *, __u16, struct inet_timewait_sock **),
void (*hash)(struct sock *sk))
@@ -413,7 +413,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
if (!snum) {
int i, remaining, low, high, port;
static u32 hint;
- u32 offset = hint + inet_sk_port_offset(sk);
+ u32 offset = hint + port_offset;
struct hlist_node *node;
struct inet_timewait_sock *tw = NULL;
@@ -502,7 +502,7 @@ EXPORT_SYMBOL_GPL(__inet_hash_connect);
int inet_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk)
{
- return __inet_hash_connect(death_row, sk,
+ return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
__inet_check_established, __inet_hash_nolisten);
}
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 749fa044eca5..85c680add6df 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/net.h>
#include <net/ip_vs.h>
@@ -169,7 +170,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
*/
if (mark->cw == 0) {
mark->cl = &svc->destinations;
- IP_VS_INFO("ip_vs_wrr_schedule(): "
+ IP_VS_ERR_RL("ip_vs_wrr_schedule(): "
"no available servers\n");
dest = NULL;
goto out;
diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c
index e093a7b59e18..b47030ba162b 100644
--- a/net/ipv4/xfrm4_mode_beet.c
+++ b/net/ipv4/xfrm4_mode_beet.c
@@ -102,7 +102,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb)
XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;
- if (!pskb_may_pull(skb, phlen));
+ if (!pskb_may_pull(skb, phlen))
goto out;
__skb_pull(skb, phlen);
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index cbb5b9cf84ad..121d517bf91c 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -683,7 +683,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
}
}
- __skb_pull(skb, sizeof(*hdr));
+ if (!pskb_pull(skb, sizeof(*hdr)))
+ goto discard_it;
hdr = icmp6_hdr(skb);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 43f3993e1f30..99fd25f7f005 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -236,7 +236,7 @@ static inline u32 inet6_sk_port_offset(const struct sock *sk)
int inet6_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk)
{
- return __inet_hash_connect(death_row, sk,
+ return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk),
__inet6_check_established, __inet6_hash);
}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index e77592d050ce..45c7c0c3875e 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -1,6 +1,5 @@
config MAC80211
tristate "Generic IEEE 802.11 Networking Stack (mac80211)"
- depends on EXPERIMENTAL
select CRYPTO
select CRYPTO_ECB
select CRYPTO_ARC4
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index becf91a952ae..c7ad64d664ad 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -90,7 +90,7 @@ static const struct nla_policy netlbl_cipsov4_genl_policy[NLBL_CIPSOV4_A_MAX + 1
* safely.
*
*/
-static void netlbl_cipsov4_doi_free(struct rcu_head *entry)
+void netlbl_cipsov4_doi_free(struct rcu_head *entry)
{
struct cipso_v4_doi *ptr;
diff --git a/net/netlabel/netlabel_cipso_v4.h b/net/netlabel/netlabel_cipso_v4.h
index f03cf9b78286..220cb9d06b49 100644
--- a/net/netlabel/netlabel_cipso_v4.h
+++ b/net/netlabel/netlabel_cipso_v4.h
@@ -163,4 +163,7 @@ enum {
/* NetLabel protocol functions */
int netlbl_cipsov4_genl_init(void);
+/* Free the memory associated with a CIPSOv4 DOI definition */
+void netlbl_cipsov4_doi_free(struct rcu_head *entry);
+
#endif
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index 3689956c3436..8220990ceb96 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -61,6 +61,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
struct netlbl_audit *audit_info);
int netlbl_domhsh_add_default(struct netlbl_dom_map *entry,
struct netlbl_audit *audit_info);
+int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info);
struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain);
int netlbl_domhsh_walk(u32 *skip_bkt,
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index c69e3e1f05c3..39793a1a93aa 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/audit.h>
#include <net/ip.h>
#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
@@ -38,10 +39,186 @@
#include "netlabel_domainhash.h"
#include "netlabel_unlabeled.h"
+#include "netlabel_cipso_v4.h"
#include "netlabel_user.h"
#include "netlabel_mgmt.h"
/*
+ * Configuration Functions
+ */
+
+/**
+ * netlbl_cfg_map_del - Remove a NetLabel/LSM domain mapping
+ * @domain: the domain mapping to remove
+ * @audit_info: NetLabel audit information
+ *
+ * Description:
+ * Removes a NetLabel/LSM domain mapping. A @domain value of NULL causes the
+ * default domain mapping to be removed. Returns zero on success, negative
+ * values on failure.
+ *
+ */
+int netlbl_cfg_map_del(const char *domain, struct netlbl_audit *audit_info)
+{
+ return netlbl_domhsh_remove(domain, audit_info);
+}
+
+/**
+ * netlbl_cfg_unlbl_add_map - Add an unlabeled NetLabel/LSM domain mapping
+ * @domain: the domain mapping to add
+ * @audit_info: NetLabel audit information
+ *
+ * Description:
+ * Adds a new unlabeled NetLabel/LSM domain mapping. A @domain value of NULL
+ * causes a new default domain mapping to be added. Returns zero on success,
+ * negative values on failure.
+ *
+ */
+int netlbl_cfg_unlbl_add_map(const char *domain,
+ struct netlbl_audit *audit_info)
+{
+ int ret_val = -ENOMEM;
+ struct netlbl_dom_map *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (entry == NULL)
+ goto cfg_unlbl_add_map_failure;
+ if (domain != NULL) {
+ entry->domain = kstrdup(domain, GFP_ATOMIC);
+ if (entry->domain == NULL)
+ goto cfg_unlbl_add_map_failure;
+ }
+ entry->type = NETLBL_NLTYPE_UNLABELED;
+
+ ret_val = netlbl_domhsh_add(entry, audit_info);
+ if (ret_val != 0)
+ goto cfg_unlbl_add_map_failure;
+
+ return 0;
+
+cfg_unlbl_add_map_failure:
+ if (entry != NULL)
+ kfree(entry->domain);
+ kfree(entry);
+ return ret_val;
+}
+
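Together, netlbl_cfg_map_del() and netlbl_cfg_unlbl_add_map() above give an in-kernel caller (typically an LSM) a way to manage domain mappings without going through the netlink interface. A hedged sketch of how the pair might be used; the wrapper function and the domain string are hypothetical, and audit_info is assumed to be filled in by the caller:

    /* Hypothetical: map a domain to the unlabeled protocol, then remove it again. */
    static int example_unlbl_mapping(struct netlbl_audit *audit_info)
    {
        int ret;

        ret = netlbl_cfg_unlbl_add_map("example_domain", audit_info);
        if (ret != 0)
            return ret;

        /* ... the mapping is live here ... */

        return netlbl_cfg_map_del("example_domain", audit_info);
    }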
+/**
+ * netlbl_cfg_cipsov4_add - Add a new CIPSOv4 DOI definition
+ * @doi_def: the DOI definition
+ * @audit_info: NetLabel audit information
+ *
+ * Description:
+ * Add a new CIPSOv4 DOI definition to the NetLabel subsystem. Returns zero on
+ * success, negative values on failure.
+ *
+ */
+int netlbl_cfg_cipsov4_add(struct cipso_v4_doi *doi_def,
+ struct netlbl_audit *audit_info)
+{
+ int ret_val;
+ const char *type_str;
+ struct audit_buffer *audit_buf;
+
+ ret_val = cipso_v4_doi_add(doi_def);
+
+ audit_buf = netlbl_audit_start_common(AUDIT_MAC_CIPSOV4_ADD,
+ audit_info);
+ if (audit_buf != NULL) {
+ switch (doi_def->type) {
+ case CIPSO_V4_MAP_STD:
+ type_str = "std";
+ break;
+ case CIPSO_V4_MAP_PASS:
+ type_str = "pass";
+ break;
+ default:
+ type_str = "(unknown)";
+ }
+ audit_log_format(audit_buf,
+ " cipso_doi=%u cipso_type=%s res=%u",
+ doi_def->doi,
+ type_str,
+ ret_val == 0 ? 1 : 0);
+ audit_log_end(audit_buf);
+ }
+
+ return ret_val;
+}
+
+/**
+ * netlbl_cfg_cipsov4_add_map - Add a new CIPSOv4 DOI definition and mapping
+ * @doi_def: the DOI definition
+ * @domain: the domain mapping to add
+ * @audit_info: NetLabel audit information
+ *
+ * Description:
+ * Add a new CIPSOv4 DOI definition and NetLabel/LSM domain mapping for this
+ * new DOI definition to the NetLabel subsystem. A @domain value of NULL adds
+ * a new default domain mapping. Returns zero on success, negative values on
+ * failure.
+ *
+ */
+int netlbl_cfg_cipsov4_add_map(struct cipso_v4_doi *doi_def,
+ const char *domain,
+ struct netlbl_audit *audit_info)
+{
+ int ret_val = -ENOMEM;
+ struct netlbl_dom_map *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (entry == NULL)
+ goto cfg_cipsov4_add_map_failure;
+ if (domain != NULL) {
+ entry->domain = kstrdup(domain, GFP_ATOMIC);
+ if (entry->domain == NULL)
+ goto cfg_cipsov4_add_map_failure;
+ }
+ entry->type = NETLBL_NLTYPE_CIPSOV4;
+ entry->type_def.cipsov4 = doi_def;
+
+ /* Grab an RCU read lock here so nothing happens to the doi_def variable
+ * between adding it to the CIPSOv4 protocol engine and adding a
+ * domain mapping for it. */
+
+ rcu_read_lock();
+ ret_val = netlbl_cfg_cipsov4_add(doi_def, audit_info);
+ if (ret_val != 0)
+ goto cfg_cipsov4_add_map_failure_unlock;
+ ret_val = netlbl_domhsh_add(entry, audit_info);
+ if (ret_val != 0)
+ goto cfg_cipsov4_add_map_failure_remove_doi;
+ rcu_read_unlock();
+
+ return 0;
+
+cfg_cipsov4_add_map_failure_remove_doi:
+ cipso_v4_doi_remove(doi_def->doi, audit_info, netlbl_cipsov4_doi_free);
+cfg_cipsov4_add_map_failure_unlock:
+ rcu_read_unlock();
+cfg_cipsov4_add_map_failure:
+ if (entry != NULL)
+ kfree(entry->domain);
+ kfree(entry);
+ return ret_val;
+}
+
+/**
+ * netlbl_cfg_cipsov4_del - Remove an existing CIPSOv4 DOI definition
+ * @doi: the CIPSO DOI value
+ * @audit_info: NetLabel audit information
+ *
+ * Description:
+ * Removes an existing CIPSOv4 DOI definition from the NetLabel subsystem.
+ * Returns zero on success, negative values on failure.
+ *
+ */
+int netlbl_cfg_cipsov4_del(u32 doi, struct netlbl_audit *audit_info)
+{
+ return cipso_v4_doi_remove(doi, audit_info, netlbl_cipsov4_doi_free);
+}
+
+/*
* Security Attribute Functions
*/
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 5e82f1c0afbb..2d0c29c837f7 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -239,7 +239,7 @@ static struct rxrpc_transport *rxrpc_name_to_transport(struct socket *sock,
/* find a remote transport endpoint from the local one */
peer = rxrpc_get_peer(srx, gfp);
if (IS_ERR(peer))
- return ERR_PTR(PTR_ERR(peer));
+ return ERR_CAST(peer);
/* find a transport */
trans = rxrpc_get_transport(rx->local, peer, gfp);
@@ -282,7 +282,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
trans = rxrpc_name_to_transport(sock, (struct sockaddr *) srx,
sizeof(*srx), 0, gfp);
if (IS_ERR(trans)) {
- call = ERR_PTR(PTR_ERR(trans));
+ call = ERR_CAST(trans);
trans = NULL;
goto out;
}
@@ -306,7 +306,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
bundle = rxrpc_get_bundle(rx, trans, key, service_id, gfp);
if (IS_ERR(bundle)) {
- call = ERR_PTR(PTR_ERR(bundle));
+ call = ERR_CAST(bundle);
goto out;
}
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 5a7f6a3060fc..971b867e0484 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -19,6 +19,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
@@ -270,6 +271,15 @@ static u32 flow_get_skgid(const struct sk_buff *skb)
return 0;
}
+static u32 flow_get_vlan_tag(const struct sk_buff *skb)
+{
+ u16 uninitialized_var(tag);
+
+ if (vlan_get_tag(skb, &tag) < 0)
+ return 0;
+ return tag & VLAN_VID_MASK;
+}
+
static u32 flow_key_get(const struct sk_buff *skb, int key)
{
switch (key) {
@@ -305,6 +315,8 @@ static u32 flow_key_get(const struct sk_buff *skb, int key)
return flow_get_skuid(skb);
case FLOW_KEY_SKGID:
return flow_get_skgid(skb);
+ case FLOW_KEY_VLAN_TAG:
+ return flow_get_vlan_tag(skb);
default:
WARN_ON(1);
return 0;
@@ -402,12 +414,13 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
if (tb[TCA_FLOW_KEYS]) {
keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);
- if (fls(keymask) - 1 > FLOW_KEY_MAX)
- return -EOPNOTSUPP;
nkeys = hweight32(keymask);
if (nkeys == 0)
return -EINVAL;
+
+ if (fls(keymask) - 1 > FLOW_KEY_MAX)
+ return -EOPNOTSUPP;
}
err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
@@ -594,11 +607,11 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh,
if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
goto nla_put_failure;
-
+#ifdef CONFIG_NET_EMATCH
if (f->ematches.hdr.nmatches &&
tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
goto nla_put_failure;
-
+#endif
nla_nest_end(skb, nest);
if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0)
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index a1e5619b1876..2a7e648fbcf4 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -65,6 +65,7 @@
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/random.h>
+#include <linux/if_vlan.h>
#include <linux/tc_ematch/tc_em_meta.h>
#include <net/dst.h>
#include <net/route.h>
@@ -170,6 +171,21 @@ META_COLLECTOR(var_dev)
}
/**************************************************************************
+ * vlan tag
+ **************************************************************************/
+
+META_COLLECTOR(int_vlan_tag)
+{
+ unsigned short uninitialized_var(tag);
+ if (vlan_get_tag(skb, &tag) < 0)
+ *err = -1;
+ else
+ dst->value = tag;
+}
+
+
+
+/**************************************************************************
* skb attributes
**************************************************************************/
@@ -520,6 +536,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
[META_ID(SK_SNDTIMEO)] = META_FUNC(int_sk_sndtimeo),
[META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off),
[META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend),
+ [META_ID(VLAN_TAG)] = META_FUNC(int_vlan_tag),
}
};
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 97e6ebd14500..ae367c82e512 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -420,15 +420,15 @@ struct sctp_shared_key *sctp_auth_get_shkey(
const struct sctp_association *asoc,
__u16 key_id)
{
- struct sctp_shared_key *key = NULL;
+ struct sctp_shared_key *key;
/* First search associations set of endpoint pair shared keys */
key_for_each(key, &asoc->endpoint_shared_keys) {
if (key->key_id == key_id)
- break;
+ return key;
}
- return key;
+ return NULL;
}
/*
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 5df0c4bd415b..f98658782d4f 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3865,6 +3865,10 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
struct sctp_chunk *err_chunk;
sctp_ierror_t error;
+ /* Make sure that the peer is AUTH capable */
+ if (!asoc->peer.auth_capable)
+ return sctp_sf_unk_chunk(ep, asoc, type, arg, commands);
+
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index d716b76098bb..340ad6920511 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -2,7 +2,7 @@
# Check the stack usage of functions
#
-# Copyright Joern Engel <joern@wh.fh-wedel.de>
+# Copyright Joern Engel <joern@lazybastard.org>
# Inspired by Linus Torvalds
# Original idea maybe from Keith Owens
# s390 port and big speedup by Arnd Bergmann <arnd@bergmann-dalldorf.de>
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 1f11d848532a..c912137f80e2 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -31,17 +31,16 @@
#define KSYM_NAME_LEN 128
-
struct sym_entry {
unsigned long long addr;
unsigned int len;
+ unsigned int start_pos;
unsigned char *sym;
};
-
static struct sym_entry *table;
static unsigned int table_size, table_cnt;
-static unsigned long long _text, _stext, _etext, _sinittext, _einittext, _sextratext, _eextratext;
+static unsigned long long _text, _stext, _etext, _sinittext, _einittext;
static int all_symbols = 0;
static char symbol_prefix_char = '\0';
@@ -99,10 +98,6 @@ static int read_symbol(FILE *in, struct sym_entry *s)
_sinittext = s->addr;
else if (strcmp(sym, "_einittext") == 0)
_einittext = s->addr;
- else if (strcmp(sym, "_sextratext") == 0)
- _sextratext = s->addr;
- else if (strcmp(sym, "_eextratext") == 0)
- _eextratext = s->addr;
else if (toupper(stype) == 'A')
{
/* Keep these useful absolute symbols */
@@ -165,18 +160,18 @@ static int symbol_valid(struct sym_entry *s)
* and inittext sections are discarded */
if (!all_symbols) {
if ((s->addr < _stext || s->addr > _etext)
- && (s->addr < _sinittext || s->addr > _einittext)
- && (s->addr < _sextratext || s->addr > _eextratext))
+ && (s->addr < _sinittext || s->addr > _einittext))
return 0;
/* Corner case. Discard any symbols with the same value as
- * _etext _einittext or _eextratext; they can move between pass
- * 1 and 2 when the kallsyms data are added. If these symbols
- * move then they may get dropped in pass 2, which breaks the
- * kallsyms rules.
+ * _etext _einittext; they can move between pass 1 and 2 when
+ * the kallsyms data are added. If these symbols move then
+ * they may get dropped in pass 2, which breaks the kallsyms
+ * rules.
*/
- if ((s->addr == _etext && strcmp((char*)s->sym + offset, "_etext")) ||
- (s->addr == _einittext && strcmp((char*)s->sym + offset, "_einittext")) ||
- (s->addr == _eextratext && strcmp((char*)s->sym + offset, "_eextratext")))
+ if ((s->addr == _etext &&
+ strcmp((char *)s->sym + offset, "_etext")) ||
+ (s->addr == _einittext &&
+ strcmp((char *)s->sym + offset, "_einittext")))
return 0;
}
@@ -202,8 +197,10 @@ static void read_map(FILE *in)
exit (1);
}
}
- if (read_symbol(in, &table[table_cnt]) == 0)
+ if (read_symbol(in, &table[table_cnt]) == 0) {
+ table[table_cnt].start_pos = table_cnt;
table_cnt++;
+ }
}
}
@@ -506,6 +503,35 @@ static void optimize_token_table(void)
optimize_result();
}
+static int compare_symbols(const void *a, const void *b)
+{
+ const struct sym_entry *sa;
+ const struct sym_entry *sb;
+ int wa, wb;
+
+ sa = a;
+ sb = b;
+
+ /* sort by address first */
+ if (sa->addr > sb->addr)
+ return 1;
+ if (sa->addr < sb->addr)
+ return -1;
+
+ /* sort by "weakness" type */
+ wa = (sa->sym[0] == 'w') || (sa->sym[0] == 'W');
+ wb = (sb->sym[0] == 'w') || (sb->sym[0] == 'W');
+ if (wa != wb)
+ return wa - wb;
+
+ /* sort by initial order, so that other symbols are left undisturbed */
+ return sa->start_pos - sb->start_pos;
+}
+
+static void sort_symbols(void)
+{
+ qsort(table, table_cnt, sizeof(struct sym_entry), compare_symbols);
+}
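qsort() gives no stability guarantee, so compare_symbols() above records start_pos in read_map() and uses it as the final tie-break, keeping equal-address symbols in input order while pushing weak symbols after strong ones. A standalone illustration of the tie-break with hypothetical data (the weak/strong step is omitted here):

    #include <stdio.h>
    #include <stdlib.h>

    struct item {
        unsigned long addr;
        unsigned int start_pos;     /* original index, recorded before sorting */
        const char *name;
    };

    static int compare_items(const void *a, const void *b)
    {
        const struct item *ia = a, *ib = b;

        if (ia->addr > ib->addr)
            return 1;
        if (ia->addr < ib->addr)
            return -1;
        /* equal addresses: fall back to the original input order */
        return (int)ia->start_pos - (int)ib->start_pos;
    }

    int main(void)
    {
        struct item items[] = {
            { 0x200, 0, "b" }, { 0x100, 1, "a" },
            { 0x200, 2, "c" }, { 0x100, 3, "d" },
        };
        int i;

        qsort(items, 4, sizeof(items[0]), compare_items);
        for (i = 0; i < 4; i++)
            printf("%#lx %s\n", items[i].addr, items[i].name);
        return 0;
    }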
int main(int argc, char **argv)
{
@@ -527,6 +553,7 @@ int main(int argc, char **argv)
usage();
read_map(stdin);
+ sort_symbols();
optimize_token_table();
write_src();
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index ec54f12f57b0..6c18a14386a4 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -218,6 +218,7 @@ sub usage {
print " [ -function funcname [ -function funcname ...] ]\n";
print " [ -nofunction funcname [ -nofunction funcname ...] ]\n";
print " c source file(s) > outputfile\n";
+ print " -v : verbose output, more warnings & other info listed\n";
exit 1;
}
@@ -1654,7 +1655,7 @@ sub dump_function($$) {
$prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
$prototype =~ m/^(\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
$prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
- $prototype =~ m/^(\w+\s+\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
+ $prototype =~ m/^(\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
$prototype =~ m/^(\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
$prototype =~ m/^(\w+\s+\w+\s+\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
$prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ ||
@@ -1881,6 +1882,13 @@ sub process_file($) {
} else {
$declaration_purpose = "";
}
+
+ if (($declaration_purpose eq "") && $verbose) {
+ print STDERR "Warning(${file}:$.): missing initial short description on line:\n";
+ print STDERR $_;
+ ++$warnings;
+ }
+
if ($identifier =~ m/^struct/) {
$decl_type = 'struct';
} elsif ($identifier =~ m/^union/) {
@@ -1907,7 +1915,7 @@ sub process_file($) {
$newsection = $1;
$newcontents = $2;
- if ($contents ne "") {
+ if (($contents ne "") && ($contents ne "\n")) {
if (!$in_doc_sect && $verbose) {
print STDERR "Warning(${file}:$.): contents before sections\n";
++$warnings;
diff --git a/security/Kconfig b/security/Kconfig
index 389e151e3b68..5dfc206748cf 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -104,7 +104,26 @@ config SECURITY_ROOTPLUG
If you are unsure how to answer this question, answer N.
+config SECURITY_DEFAULT_MMAP_MIN_ADDR
+ int "Low address space to protect from user allocation"
+ depends on SECURITY
+ default 0
+ help
+ This is the portion of low virtual memory which should be protected
+ from userspace allocation. Keeping a user from writing to low pages
+ can help reduce the impact of kernel NULL pointer bugs.
+
+ For most users with lots of address space a value of 65536 is
+ reasonable and should cause no problems. Programs which use vm86
+ functionality would either need additional permissions from either
+ the LSM or the capabilities module or have this protection disabled.
+
+ This value can be changed after boot using the
+ /proc/sys/vm/mmap_min_addr tunable.
+
+
source security/selinux/Kconfig
+source security/smack/Kconfig
endmenu
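The help text above points at /proc/sys/vm/mmap_min_addr as the runtime tunable. A small userspace sketch that simply reads the current value back; the path comes from the help text, everything else is illustrative:

    #include <stdio.h>

    int main(void)
    {
        unsigned long min_addr;
        FILE *f = fopen("/proc/sys/vm/mmap_min_addr", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fscanf(f, "%lu", &min_addr) != 1) {
            fclose(f);
            return 1;
        }
        fclose(f);
        printf("lowest address userspace may mmap(): %lu\n", min_addr);
        return 0;
    }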
diff --git a/security/Makefile b/security/Makefile
index ef87df2f50a4..9e8b02525014 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_KEYS) += keys/
subdir-$(CONFIG_SECURITY_SELINUX) += selinux
+subdir-$(CONFIG_SECURITY_SMACK) += smack
# if we don't select a security model, use the default capabilities
ifneq ($(CONFIG_SECURITY),y)
@@ -14,5 +15,6 @@ endif
obj-$(CONFIG_SECURITY) += security.o dummy.o inode.o
# Must precede capability.o in order to stack properly.
obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
+obj-$(CONFIG_SECURITY_SMACK) += commoncap.o smack/built-in.o
obj-$(CONFIG_SECURITY_CAPABILITIES) += commoncap.o capability.o
obj-$(CONFIG_SECURITY_ROOTPLUG) += commoncap.o root_plug.o
diff --git a/security/commoncap.c b/security/commoncap.c
index ea61bc73f6d3..5aba82679a0b 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -1,4 +1,4 @@
-/* Common capabilities, needed by capability.o and root_plug.o
+/* Common capabilities, needed by capability.o and root_plug.o
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,20 +25,6 @@
#include <linux/mount.h>
#include <linux/sched.h>
-#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
-/*
- * Because of the reduced scope of CAP_SETPCAP when filesystem
- * capabilities are in effect, it is safe to allow this capability to
- * be available in the default configuration.
- */
-# define CAP_INIT_BSET CAP_FULL_SET
-#else /* ie. ndef CONFIG_SECURITY_FILE_CAPABILITIES */
-# define CAP_INIT_BSET CAP_INIT_EFF_SET
-#endif /* def CONFIG_SECURITY_FILE_CAPABILITIES */
-
-kernel_cap_t cap_bset = CAP_INIT_BSET; /* systemwide capability bound */
-EXPORT_SYMBOL(cap_bset);
-
/* Global security state */
unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
@@ -93,9 +79,9 @@ int cap_capget (struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
/* Derived from kernel/capability.c:sys_capget. */
- *effective = cap_t (target->cap_effective);
- *inheritable = cap_t (target->cap_inheritable);
- *permitted = cap_t (target->cap_permitted);
+ *effective = target->cap_effective;
+ *inheritable = target->cap_inheritable;
+ *permitted = target->cap_permitted;
return 0;
}
@@ -140,6 +126,12 @@ int cap_capset_check (struct task_struct *target, kernel_cap_t *effective,
/* incapable of using this inheritable set */
return -EPERM;
}
+ if (!cap_issubset(*inheritable,
+ cap_combine(target->cap_inheritable,
+ current->cap_bset))) {
+ /* no new pI capabilities outside bounding set */
+ return -EPERM;
+ }
/* verify restrictions on target's new Permitted set */
if (!cap_issubset (*permitted,
@@ -198,28 +190,50 @@ int cap_inode_killpriv(struct dentry *dentry)
}
static inline int cap_from_disk(struct vfs_cap_data *caps,
- struct linux_binprm *bprm,
- int size)
+ struct linux_binprm *bprm, unsigned size)
{
__u32 magic_etc;
+ unsigned tocopy, i;
- if (size != XATTR_CAPS_SZ)
+ if (size < sizeof(magic_etc))
return -EINVAL;
magic_etc = le32_to_cpu(caps->magic_etc);
switch ((magic_etc & VFS_CAP_REVISION_MASK)) {
- case VFS_CAP_REVISION:
- if (magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
- bprm->cap_effective = true;
- else
- bprm->cap_effective = false;
- bprm->cap_permitted = to_cap_t(le32_to_cpu(caps->permitted));
- bprm->cap_inheritable = to_cap_t(le32_to_cpu(caps->inheritable));
- return 0;
+ case VFS_CAP_REVISION_1:
+ if (size != XATTR_CAPS_SZ_1)
+ return -EINVAL;
+ tocopy = VFS_CAP_U32_1;
+ break;
+ case VFS_CAP_REVISION_2:
+ if (size != XATTR_CAPS_SZ_2)
+ return -EINVAL;
+ tocopy = VFS_CAP_U32_2;
+ break;
default:
return -EINVAL;
}
+
+ if (magic_etc & VFS_CAP_FLAGS_EFFECTIVE) {
+ bprm->cap_effective = true;
+ } else {
+ bprm->cap_effective = false;
+ }
+
+ for (i = 0; i < tocopy; ++i) {
+ bprm->cap_permitted.cap[i] =
+ le32_to_cpu(caps->data[i].permitted);
+ bprm->cap_inheritable.cap[i] =
+ le32_to_cpu(caps->data[i].inheritable);
+ }
+ while (i < VFS_CAP_U32) {
+ bprm->cap_permitted.cap[i] = 0;
+ bprm->cap_inheritable.cap[i] = 0;
+ i++;
+ }
+
+ return 0;
}
/* Locate any VFS capabilities: */
@@ -227,7 +241,7 @@ static int get_file_caps(struct linux_binprm *bprm)
{
struct dentry *dentry;
int rc = 0;
- struct vfs_cap_data incaps;
+ struct vfs_cap_data vcaps;
struct inode *inode;
if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID) {
@@ -240,14 +254,8 @@ static int get_file_caps(struct linux_binprm *bprm)
if (!inode->i_op || !inode->i_op->getxattr)
goto out;
- rc = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS, NULL, 0);
- if (rc > 0) {
- if (rc == XATTR_CAPS_SZ)
- rc = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS,
- &incaps, XATTR_CAPS_SZ);
- else
- rc = -EINVAL;
- }
+ rc = inode->i_op->getxattr(dentry, XATTR_NAME_CAPS, &vcaps,
+ XATTR_CAPS_SZ);
if (rc == -ENODATA || rc == -EOPNOTSUPP) {
/* no data, that's ok */
rc = 0;
@@ -256,7 +264,7 @@ static int get_file_caps(struct linux_binprm *bprm)
if (rc < 0)
goto out;
- rc = cap_from_disk(&incaps, bprm, rc);
+ rc = cap_from_disk(&vcaps, bprm, rc);
if (rc)
printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
__FUNCTION__, rc, bprm->filename);
@@ -321,10 +329,11 @@ void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe)
/* Derived from fs/exec.c:compute_creds. */
kernel_cap_t new_permitted, working;
- new_permitted = cap_intersect (bprm->cap_permitted, cap_bset);
- working = cap_intersect (bprm->cap_inheritable,
+ new_permitted = cap_intersect(bprm->cap_permitted,
+ current->cap_bset);
+ working = cap_intersect(bprm->cap_inheritable,
current->cap_inheritable);
- new_permitted = cap_combine (new_permitted, working);
+ new_permitted = cap_combine(new_permitted, working);
if (bprm->e_uid != current->uid || bprm->e_gid != current->gid ||
!cap_issubset (new_permitted, current->cap_permitted)) {
@@ -351,8 +360,10 @@ void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe)
* capability rules */
if (!is_global_init(current)) {
current->cap_permitted = new_permitted;
- current->cap_effective = bprm->cap_effective ?
- new_permitted : 0;
+ if (bprm->cap_effective)
+ current->cap_effective = new_permitted;
+ else
+ cap_clear(current->cap_effective);
}
/* AUD: Audit candidate if current->cap_effective is set */
@@ -474,13 +485,15 @@ int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid,
if (!issecure (SECURE_NO_SETUID_FIXUP)) {
if (old_fsuid == 0 && current->fsuid != 0) {
- cap_t (current->cap_effective) &=
- ~CAP_FS_MASK;
+ current->cap_effective =
+ cap_drop_fs_set(
+ current->cap_effective);
}
if (old_fsuid != 0 && current->fsuid == 0) {
- cap_t (current->cap_effective) |=
- (cap_t (current->cap_permitted) &
- CAP_FS_MASK);
+ current->cap_effective =
+ cap_raise_fs_set(
+ current->cap_effective,
+ current->cap_permitted);
}
}
break;
@@ -561,6 +574,23 @@ int cap_task_kill(struct task_struct *p, struct siginfo *info,
return -EPERM;
}
+
+/*
+ * called from kernel/sys.c for prctl(PR_CAPBSET_DROP)
+ * done without task_capability_lock() because it introduces
+ * no new races - i.e. only another task doing capget() on
+ * this task could get inconsistent info. There can be no
+ * racing writer because a task can only change its own caps.
+ */
+long cap_prctl_drop(unsigned long cap)
+{
+ if (!capable(CAP_SETPCAP))
+ return -EPERM;
+ if (!cap_valid(cap))
+ return -EINVAL;
+ cap_lower(current->cap_bset, cap);
+ return 0;
+}
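+
+/*
+ * For example, a task holding CAP_SETPCAP can remove CAP_NET_RAW from
+ * its bounding set with prctl(PR_CAPBSET_DROP, CAP_NET_RAW); later
+ * execve()s then cannot pick CAP_NET_RAW back up from file
+ * capabilities, since cap_bset is intersected with the file's
+ * permitted set above.
+ */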
#else
int cap_task_setscheduler (struct task_struct *p, int policy,
struct sched_param *lp)
@@ -584,9 +614,9 @@ int cap_task_kill(struct task_struct *p, struct siginfo *info,
void cap_task_reparent_to_init (struct task_struct *p)
{
- p->cap_effective = CAP_INIT_EFF_SET;
- p->cap_inheritable = CAP_INIT_INH_SET;
- p->cap_permitted = CAP_FULL_SET;
+ cap_set_init_eff(p->cap_effective);
+ cap_clear(p->cap_inheritable);
+ cap_set_full(p->cap_permitted);
p->keep_capabilities = 0;
return;
}
diff --git a/security/dummy.c b/security/dummy.c
index 48d4b0a52737..649326bf64ea 100644
--- a/security/dummy.c
+++ b/security/dummy.c
@@ -36,14 +36,19 @@ static int dummy_ptrace (struct task_struct *parent, struct task_struct *child)
static int dummy_capget (struct task_struct *target, kernel_cap_t * effective,
kernel_cap_t * inheritable, kernel_cap_t * permitted)
{
- *effective = *inheritable = *permitted = 0;
if (target->euid == 0) {
- *permitted |= (~0 & ~CAP_FS_MASK);
- *effective |= (~0 & ~CAP_TO_MASK(CAP_SETPCAP) & ~CAP_FS_MASK);
+ cap_set_full(*permitted);
+ cap_set_init_eff(*effective);
+ } else {
+ cap_clear(*permitted);
+ cap_clear(*effective);
}
- if (target->fsuid == 0) {
- *permitted |= CAP_FS_MASK;
- *effective |= CAP_FS_MASK;
+
+ cap_clear(*inheritable);
+
+ if (target->fsuid != 0) {
+ *permitted = cap_drop_fs_set(*permitted);
+ *effective = cap_drop_fs_set(*effective);
}
return 0;
}
@@ -402,7 +407,7 @@ static int dummy_inode_killpriv(struct dentry *dentry)
return 0;
}
-static int dummy_inode_getsecurity(const struct inode *inode, const char *name, void *buffer, size_t size, int err)
+static int dummy_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
{
return -EOPNOTSUPP;
}
diff --git a/security/keys/key.c b/security/keys/key.c
index fdd5ca6d89fc..654d23baf352 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -820,7 +820,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key = key_alloc(ktype, description, current->fsuid, current->fsgid,
current, perm, flags);
if (IS_ERR(key)) {
- key_ref = ERR_PTR(PTR_ERR(key));
+ key_ref = ERR_CAST(key);
goto error_3;
}
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 2a0eb946fc7e..c886a2bb792a 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -660,7 +660,7 @@ key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id,
key = key_lookup(id);
if (IS_ERR(key)) {
- key_ref = ERR_PTR(PTR_ERR(key));
+ key_ref = ERR_CAST(key);
goto error;
}
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 6381e616c477..5ecc5057fb54 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -389,7 +389,7 @@ struct key *request_key_and_link(struct key_type *type,
if (!IS_ERR(key_ref)) {
key = key_ref_to_ptr(key_ref);
} else if (PTR_ERR(key_ref) != -EAGAIN) {
- key = ERR_PTR(PTR_ERR(key_ref));
+ key = ERR_CAST(key_ref);
} else {
/* the search failed, but the keyrings were searchable, so we
* should consult userspace if we can */
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 510f7be73a2d..e42b5252486f 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -261,7 +261,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
current);
if (IS_ERR(authkey_ref)) {
- authkey = ERR_PTR(PTR_ERR(authkey_ref));
+ authkey = ERR_CAST(authkey_ref);
goto error;
}
diff --git a/security/security.c b/security/security.c
index ca475ca206e4..d15e56cbaade 100644
--- a/security/security.c
+++ b/security/security.c
@@ -23,7 +23,9 @@ extern struct security_operations dummy_security_ops;
extern void security_fixup_ops(struct security_operations *ops);
struct security_operations *security_ops; /* Initialized to NULL */
-unsigned long mmap_min_addr; /* 0 means no protection */
+
+/* amount of vm to protect from userspace access */
+unsigned long mmap_min_addr = CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR;
static inline int verify(struct security_operations *ops)
{
@@ -493,11 +495,11 @@ int security_inode_killpriv(struct dentry *dentry)
return security_ops->inode_killpriv(dentry);
}
-int security_inode_getsecurity(const struct inode *inode, const char *name, void *buffer, size_t size, int err)
+int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
{
if (unlikely(IS_PRIVATE(inode)))
return 0;
- return security_ops->inode_getsecurity(inode, name, buffer, size, err);
+ return security_ops->inode_getsecurity(inode, name, buffer, alloc);
}
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index be6de0b8734f..e5ed07510309 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -136,32 +136,6 @@ static DEFINE_SPINLOCK(sb_security_lock);
static struct kmem_cache *sel_inode_cache;
-/* Return security context for a given sid or just the context
- length if the buffer is null or length is 0 */
-static int selinux_getsecurity(u32 sid, void *buffer, size_t size)
-{
- char *context;
- unsigned len;
- int rc;
-
- rc = security_sid_to_context(sid, &context, &len);
- if (rc)
- return rc;
-
- if (!buffer || !size)
- goto getsecurity_exit;
-
- if (size < len) {
- len = -ERANGE;
- goto getsecurity_exit;
- }
- memcpy(buffer, context, len);
-
-getsecurity_exit:
- kfree(context);
- return len;
-}
-
/**
* selinux_secmark_enabled - Check to see if SECMARK is currently enabled
*
@@ -2675,14 +2649,27 @@ static int selinux_inode_removexattr (struct dentry *dentry, char *name)
*
* Permission check is handled by selinux_inode_getxattr hook.
*/
-static int selinux_inode_getsecurity(const struct inode *inode, const char *name, void *buffer, size_t size, int err)
+static int selinux_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
{
+ u32 size;
+ int error;
+ char *context = NULL;
struct inode_security_struct *isec = inode->i_security;
if (strcmp(name, XATTR_SELINUX_SUFFIX))
return -EOPNOTSUPP;
- return selinux_getsecurity(isec->sid, buffer, size);
+ error = security_sid_to_context(isec->sid, &context, &size);
+ if (error)
+ return error;
+ error = size;
+ if (alloc) {
+ *buffer = context;
+ goto out_nofree;
+ }
+ kfree(context);
+out_nofree:
+ return error;
}
static int selinux_inode_setsecurity(struct inode *inode, const char *name,
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 23137c17f917..837ce420d2f6 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -107,7 +107,6 @@ int security_get_classes(char ***classes, int *nclasses);
int security_get_permissions(char *class, char ***perms, int *nperms);
int security_get_reject_unknown(void);
int security_get_allow_unknown(void);
-int security_get_policycaps(int *len, int **values);
#define SECURITY_FS_USE_XATTR 1 /* use xattr */
#define SECURITY_FS_USE_TRANS 2 /* use transition SIDs, e.g. devpts/tmpfs */
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index fced6bccee76..f37418601215 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2246,39 +2246,6 @@ int security_get_allow_unknown(void)
}
/**
- * security_get_policycaps - Query the loaded policy for its capabilities
- * @len: the number of capability bits
- * @values: the capability bit array
- *
- * Description:
- * Get an array of the policy capabilities in @values where each entry in
- * @values is either true (1) or false (0) depending the policy's support of
- * that feature. The policy capabilities are defined by the
- * POLICYDB_CAPABILITY_* enums. The size of the array is stored in @len and it
- * is up to the caller to free the array in @values. Returns zero on success,
- * negative values on failure.
- *
- */
-int security_get_policycaps(int *len, int **values)
-{
- int rc = -ENOMEM;
- unsigned int iter;
-
- POLICY_RDLOCK;
-
- *values = kcalloc(POLICYDB_CAPABILITY_MAX, sizeof(int), GFP_ATOMIC);
- if (*values == NULL)
- goto out;
- for (iter = 0; iter < POLICYDB_CAPABILITY_MAX; iter++)
- (*values)[iter] = ebitmap_get_bit(&policydb.policycaps, iter);
- *len = POLICYDB_CAPABILITY_MAX;
-
-out:
- POLICY_RDUNLOCK;
- return rc;
-}
-
-/**
* security_policycap_supported - Check for a specific policy capability
* @req_cap: capability
*
diff --git a/security/smack/Kconfig b/security/smack/Kconfig
new file mode 100644
index 000000000000..603b08784341
--- /dev/null
+++ b/security/smack/Kconfig
@@ -0,0 +1,10 @@
+config SECURITY_SMACK
+ bool "Simplified Mandatory Access Control Kernel Support"
+ depends on NETLABEL && SECURITY_NETWORK
+ default n
+ help
+ This selects the Simplified Mandatory Access Control Kernel.
+ Smack is useful for sensitivity, integrity, and a variety
+ of other mandatory security schemes.
+ If you are unsure how to answer this question, answer N.
+
diff --git a/security/smack/Makefile b/security/smack/Makefile
new file mode 100644
index 000000000000..67a63aaec827
--- /dev/null
+++ b/security/smack/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the SMACK LSM
+#
+
+obj-$(CONFIG_SECURITY_SMACK) := smack.o
+
+smack-y := smack_lsm.o smack_access.o smackfs.o
diff --git a/security/smack/smack.h b/security/smack/smack.h
new file mode 100644
index 000000000000..a21a0e907ab3
--- /dev/null
+++ b/security/smack/smack.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2.
+ *
+ * Author:
+ * Casey Schaufler <casey@schaufler-ca.com>
+ *
+ */
+
+#ifndef _SECURITY_SMACK_H
+#define _SECURITY_SMACK_H
+
+#include <linux/capability.h>
+#include <linux/spinlock.h>
+#include <net/netlabel.h>
+
+/*
+ * Why 23? CIPSO is constrained to 30, so a 32 byte buffer is
+ * bigger than can be used, and 24 is the next lower multiple
+ * of 8, and there are too many issues if there isn't space set
+ * aside for the terminating null byte.
+ */
+#define SMK_MAXLEN 23
+#define SMK_LABELLEN (SMK_MAXLEN+1)
+
+/*
+ * How many kinds of access are there?
+ * Here's your answer.
+ */
+#define SMK_ACCESSDASH '-'
+#define SMK_ACCESSLOW "rwxa"
+#define SMK_ACCESSKINDS (sizeof(SMK_ACCESSLOW) - 1)
+
+struct superblock_smack {
+ char *smk_root;
+ char *smk_floor;
+ char *smk_hat;
+ char *smk_default;
+ int smk_initialized;
+ spinlock_t smk_sblock; /* for initialization */
+};
+
+struct socket_smack {
+ char *smk_out; /* outbound label */
+ char *smk_in; /* inbound label */
+ char smk_packet[SMK_LABELLEN]; /* TCP peer label */
+};
+
+/*
+ * Inode smack data
+ */
+struct inode_smack {
+ char *smk_inode; /* label of the fso */
+ struct mutex smk_lock; /* initialization lock */
+ int smk_flags; /* smack inode flags */
+};
+
+#define SMK_INODE_INSTANT 0x01 /* inode is instantiated */
+
+/*
+ * A label access rule.
+ */
+struct smack_rule {
+ char *smk_subject;
+ char *smk_object;
+ int smk_access;
+};
+
+/*
+ * An entry in the table of permitted label accesses.
+ */
+struct smk_list_entry {
+ struct smk_list_entry *smk_next;
+ struct smack_rule smk_rule;
+};
+
+/*
+ * An entry in the table mapping smack values to
+ * CIPSO level/category-set values.
+ */
+struct smack_cipso {
+ int smk_level;
+ char smk_catset[SMK_LABELLEN];
+};
+
+/*
+ * This is the repository for labels seen so that it is
+ * not necessary to keep allocating tiny chunks of memory
+ * and so that they can be shared.
+ *
+ * Labels are never modified in place. Anytime a label
+ * is imported (e.g. xattrset on a file) the list is checked
+ * for it and it is added if it doesn't exist. The address
+ * is passed out in either case. Entries are added, but
+ * never deleted.
+ *
+ * Since labels are hanging around anyway it doesn't
+ * hurt to maintain a secid for those awkward situations
+ * where kernel components that ought to use LSM independent
+ * interfaces don't. The secid should go away when all of
+ * these components have been repaired.
+ *
+ * If there is a cipso value associated with the label it
+ * gets stored here, too. This will most likely be rare as
+ * the cipso direct mapping is used internally.
+ */
+struct smack_known {
+ struct smack_known *smk_next;
+ char smk_known[SMK_LABELLEN];
+ u32 smk_secid;
+ struct smack_cipso *smk_cipso;
+ spinlock_t smk_cipsolock; /* for changing cipso map */
+};
+
+/*
+ * Mount options
+ */
+#define SMK_FSDEFAULT "smackfsdef="
+#define SMK_FSFLOOR "smackfsfloor="
+#define SMK_FSHAT "smackfshat="
+#define SMK_FSROOT "smackfsroot="
+
+/*
+ * xattr names
+ */
+#define XATTR_SMACK_SUFFIX "SMACK64"
+#define XATTR_SMACK_IPIN "SMACK64IPIN"
+#define XATTR_SMACK_IPOUT "SMACK64IPOUT"
+#define XATTR_NAME_SMACK XATTR_SECURITY_PREFIX XATTR_SMACK_SUFFIX
+#define XATTR_NAME_SMACKIPIN XATTR_SECURITY_PREFIX XATTR_SMACK_IPIN
+#define XATTR_NAME_SMACKIPOUT XATTR_SECURITY_PREFIX XATTR_SMACK_IPOUT
+
+/*
+ * smackfs magic number
+ */
+#define SMACK_MAGIC 0x43415d53 /* "SMAC" */
+
+/*
+ * A limit on the number of entries in the lists
+ * makes some of the list administration easier.
+ */
+#define SMACK_LIST_MAX 10000
+
+/*
+ * CIPSO defaults.
+ */
+#define SMACK_CIPSO_DOI_DEFAULT 3 /* Historical */
+#define SMACK_CIPSO_DIRECT_DEFAULT 250 /* Arbitrary */
+#define SMACK_CIPSO_MAXCATVAL 63 /* Bigger gets harder */
+#define SMACK_CIPSO_MAXLEVEL 255 /* CIPSO 2.2 standard */
+#define SMACK_CIPSO_MAXCATNUM 239 /* CIPSO 2.2 standard */
+
+/*
+ * Just to make the common cases easier to deal with
+ */
+#define MAY_ANY (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
+#define MAY_ANYREAD (MAY_READ | MAY_EXEC)
+#define MAY_ANYWRITE (MAY_WRITE | MAY_APPEND)
+#define MAY_READWRITE (MAY_READ | MAY_WRITE)
+#define MAY_NOT 0
+
+/*
+ * These functions are in smack_lsm.c
+ */
+struct inode_smack *new_inode_smack(char *);
+
+/*
+ * These functions are in smack_access.c
+ */
+int smk_access(char *, char *, int);
+int smk_curacc(char *, u32);
+int smack_to_cipso(const char *, struct smack_cipso *);
+void smack_from_cipso(u32, char *, char *);
+char *smack_from_secid(const u32);
+char *smk_import(const char *, int);
+struct smack_known *smk_import_entry(const char *, int);
+u32 smack_to_secid(const char *);
+
+/*
+ * Shared data.
+ */
+extern int smack_cipso_direct;
+extern int smack_net_nltype;
+extern char *smack_net_ambient;
+
+extern struct smack_known *smack_known;
+extern struct smack_known smack_known_floor;
+extern struct smack_known smack_known_hat;
+extern struct smack_known smack_known_huh;
+extern struct smack_known smack_known_invalid;
+extern struct smack_known smack_known_star;
+extern struct smack_known smack_known_unset;
+
+extern struct smk_list_entry *smack_list;
+
+/*
+ * Strictly for CIPSO level manipulation.
+ * Set the category bit number in a smack label sized buffer.
+ */
+static inline void smack_catset_bit(int cat, char *catsetp)
+{
+ if (cat > SMK_LABELLEN * 8)
+ return;
+
+ catsetp[(cat - 1) / 8] |= 0x80 >> ((cat - 1) % 8);
+}
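+
+/*
+ * For example, smack_catset_bit(1, catsetp) sets the most significant
+ * bit of catsetp[0], and smack_catset_bit(9, catsetp) sets the most
+ * significant bit of catsetp[1]; categories are numbered from 1.
+ */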
+
+/*
+ * Present a pointer to the smack label in an inode blob.
+ */
+static inline char *smk_of_inode(const struct inode *isp)
+{
+ struct inode_smack *sip = isp->i_security;
+ return sip->smk_inode;
+}
+
+#endif /* _SECURITY_SMACK_H */
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
new file mode 100644
index 000000000000..f6b5f6eed6dd
--- /dev/null
+++ b/security/smack/smack_access.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2.
+ *
+ * Author:
+ * Casey Schaufler <casey@schaufler-ca.com>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include "smack.h"
+
+struct smack_known smack_known_unset = {
+ .smk_next = NULL,
+ .smk_known = "UNSET",
+ .smk_secid = 1,
+ .smk_cipso = NULL,
+};
+
+struct smack_known smack_known_huh = {
+ .smk_next = &smack_known_unset,
+ .smk_known = "?",
+ .smk_secid = 2,
+ .smk_cipso = NULL,
+};
+
+struct smack_known smack_known_hat = {
+ .smk_next = &smack_known_huh,
+ .smk_known = "^",
+ .smk_secid = 3,
+ .smk_cipso = NULL,
+};
+
+struct smack_known smack_known_star = {
+ .smk_next = &smack_known_hat,
+ .smk_known = "*",
+ .smk_secid = 4,
+ .smk_cipso = NULL,
+};
+
+struct smack_known smack_known_floor = {
+ .smk_next = &smack_known_star,
+ .smk_known = "_",
+ .smk_secid = 5,
+ .smk_cipso = NULL,
+};
+
+struct smack_known smack_known_invalid = {
+ .smk_next = &smack_known_floor,
+ .smk_known = "",
+ .smk_secid = 6,
+ .smk_cipso = NULL,
+};
+
+struct smack_known *smack_known = &smack_known_invalid;
+
+/*
+ * The initial value needs to be bigger than any of the
+ * known values above.
+ */
+static u32 smack_next_secid = 10;
+
+/**
+ * smk_access - determine if a subject has a specific access to an object
+ * @subject_label: a pointer to the subject's Smack label
+ * @object_label: a pointer to the object's Smack label
+ * @request: the access requested, in "MAY" format
+ *
+ * This function looks up the subject/object pair in the
+ * access rule list and returns 0 if the access is permitted,
+ * non zero otherwise.
+ *
+ * Even though Smack labels are usually shared on smack_list,
+ * labels that come in off the network can't be imported
+ * and added to the list for locking reasons.
+ *
+ * Therefore, it is necessary to check the contents of the labels,
+ * not just the pointer values. Of course, in most cases the labels
+ * will be on the list, so checking the pointers may be a worthwhile
+ * optimization.
+ */
+int smk_access(char *subject_label, char *object_label, int request)
+{
+ u32 may = MAY_NOT;
+ struct smk_list_entry *sp;
+ struct smack_rule *srp;
+
+ /*
+ * Hardcoded comparisons.
+ *
+ * A star subject can't access any object.
+ */
+ if (subject_label == smack_known_star.smk_known ||
+ strcmp(subject_label, smack_known_star.smk_known) == 0)
+ return -EACCES;
+ /*
+ * A star object can be accessed by any subject.
+ */
+ if (object_label == smack_known_star.smk_known ||
+ strcmp(object_label, smack_known_star.smk_known) == 0)
+ return 0;
+ /*
+ * An object can be accessed in any way by a subject
+ * with the same label.
+ */
+ if (subject_label == object_label ||
+ strcmp(subject_label, object_label) == 0)
+ return 0;
+ /*
+ * A hat subject can read any object.
+ * A floor object can be read by any subject.
+ */
+ if ((request & MAY_ANYREAD) == request) {
+ if (object_label == smack_known_floor.smk_known ||
+ strcmp(object_label, smack_known_floor.smk_known) == 0)
+ return 0;
+ if (subject_label == smack_known_hat.smk_known ||
+ strcmp(subject_label, smack_known_hat.smk_known) == 0)
+ return 0;
+ }
+ /*
+ * Beyond here an explicit relationship is required.
+ * If the requested access is contained in the available
+ * access (e.g. read is included in readwrite) it's
+ * good.
+ */
+ for (sp = smack_list; sp != NULL; sp = sp->smk_next) {
+ srp = &sp->smk_rule;
+
+ if (srp->smk_subject == subject_label ||
+ strcmp(srp->smk_subject, subject_label) == 0) {
+ if (srp->smk_object == object_label ||
+ strcmp(srp->smk_object, object_label) == 0) {
+ may = srp->smk_access;
+ break;
+ }
+ }
+ }
+ /*
+ * This is a bit map operation.
+ */
+ if ((request & may) == request)
+ return 0;
+
+ return -EACCES;
+}
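+
+/*
+ * For example, if the list holds a rule with smk_subject "Secret",
+ * smk_object "Unclass" and smk_access set to MAY_READ | MAY_EXEC,
+ * then smk_access("Secret", "Unclass", MAY_READ) returns 0, while a
+ * MAY_WRITE request is not covered by the rule and gets -EACCES.
+ */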
+
+/**
+ * smk_curacc - determine if current has a specific access to an object
+ * @obj_label: a pointer to the object's Smack label
+ * @mode: the access requested, in "MAY" format
+ *
+ * This function checks the current subject label/object label pair
+ * in the access rule list and returns 0 if the access is permitted,
+ * non zero otherwise. It allows that current may have the capability
+ * to override the rules.
+ */
+int smk_curacc(char *obj_label, u32 mode)
+{
+ int rc;
+
+ rc = smk_access(current->security, obj_label, mode);
+ if (rc == 0)
+ return 0;
+
+ if (capable(CAP_MAC_OVERRIDE))
+ return 0;
+
+ return rc;
+}
+
+static DEFINE_MUTEX(smack_known_lock);
+
+/**
+ * smk_import_entry - import a label, return the list entry
+ * @string: a text string that might be a Smack label
+ * @len: the maximum size, or zero if it is NULL terminated.
+ *
+ * Returns a pointer to the entry in the label list that
+ * matches the passed string, adding it if necessary.
+ */
+struct smack_known *smk_import_entry(const char *string, int len)
+{
+ struct smack_known *skp;
+ char smack[SMK_LABELLEN];
+ int found;
+ int i;
+
+ if (len <= 0 || len > SMK_MAXLEN)
+ len = SMK_MAXLEN;
+
+ for (i = 0, found = 0; i < SMK_LABELLEN; i++) {
+ if (found)
+ smack[i] = '\0';
+ else if (i >= len || string[i] > '~' || string[i] <= ' ' ||
+ string[i] == '/') {
+ smack[i] = '\0';
+ found = 1;
+ } else
+ smack[i] = string[i];
+ }
+
+ if (smack[0] == '\0')
+ return NULL;
+
+ mutex_lock(&smack_known_lock);
+
+ for (skp = smack_known; skp != NULL; skp = skp->smk_next)
+ if (strncmp(skp->smk_known, smack, SMK_MAXLEN) == 0)
+ break;
+
+ if (skp == NULL) {
+ skp = kzalloc(sizeof(struct smack_known), GFP_KERNEL);
+ if (skp != NULL) {
+ skp->smk_next = smack_known;
+ strncpy(skp->smk_known, smack, SMK_MAXLEN);
+ skp->smk_secid = smack_next_secid++;
+ skp->smk_cipso = NULL;
+ spin_lock_init(&skp->smk_cipsolock);
+ /*
+ * Make sure that the entry is actually
+ * filled before putting it on the list.
+ */
+ smp_mb();
+ smack_known = skp;
+ }
+ }
+
+ mutex_unlock(&smack_known_lock);
+
+ return skp;
+}
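+
+/*
+ * For example, smk_import_entry("Secret label", 0) stops at the
+ * embedded space and files the entry as "Secret", while a string
+ * beginning with a disallowed character such as '/' yields NULL.
+ */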
+
+/**
+ * smk_import - import a smack label
+ * @string: a text string that might be a Smack label
+ * @len: the maximum size, or zero if it is NULL terminated.
+ *
+ * Returns a pointer to the label in the label list that
+ * matches the passed string, adding it if necessary.
+ */
+char *smk_import(const char *string, int len)
+{
+ struct smack_known *skp;
+
+ skp = smk_import_entry(string, len);
+ if (skp == NULL)
+ return NULL;
+ return skp->smk_known;
+}
+
+/**
+ * smack_from_secid - find the Smack label associated with a secid
+ * @secid: an integer that might be associated with a Smack label
+ *
+ * Returns a pointer to the appropriate Smack label if there is one,
+ * otherwise a pointer to the invalid Smack label.
+ */
+char *smack_from_secid(const u32 secid)
+{
+ struct smack_known *skp;
+
+ for (skp = smack_known; skp != NULL; skp = skp->smk_next)
+ if (skp->smk_secid == secid)
+ return skp->smk_known;
+
+ /*
+ * If we got this far someone asked for the translation
+ * of a secid that is not on the list.
+ */
+ return smack_known_invalid.smk_known;
+}
+
+/**
+ * smack_to_secid - find the secid associated with a Smack label
+ * @smack: the Smack label
+ *
+ * Returns the appropriate secid if there is one,
+ * otherwise 0
+ */
+u32 smack_to_secid(const char *smack)
+{
+ struct smack_known *skp;
+
+ for (skp = smack_known; skp != NULL; skp = skp->smk_next)
+ if (strncmp(skp->smk_known, smack, SMK_MAXLEN) == 0)
+ return skp->smk_secid;
+ return 0;
+}
+
+/**
+ * smack_from_cipso - find the Smack label associated with a CIPSO option
+ * @level: Bell & LaPadula level from the network
+ * @cp: Bell & LaPadula categories from the network
+ * @result: where to put the Smack value
+ *
+ * This is a simple lookup in the label table.
+ *
+ * This is an odd duck as far as smack handling goes in that
+ * it sends back a copy of the smack label rather than a pointer
+ * to the master list. This is done because it is possible for
+ * a foreign host to send a smack label that is new to this
+ * machine and hence not on the list. That would not be an
+ * issue except that adding an entry to the master list can't
+ * be done at that point.
+ */
+void smack_from_cipso(u32 level, char *cp, char *result)
+{
+ struct smack_known *kp;
+ char *final = NULL;
+
+ for (kp = smack_known; final == NULL && kp != NULL; kp = kp->smk_next) {
+ if (kp->smk_cipso == NULL)
+ continue;
+
+ spin_lock_bh(&kp->smk_cipsolock);
+
+ if (kp->smk_cipso->smk_level == level &&
+ memcmp(kp->smk_cipso->smk_catset, cp, SMK_LABELLEN) == 0)
+ final = kp->smk_known;
+
+ spin_unlock_bh(&kp->smk_cipsolock);
+ }
+ if (final == NULL)
+ final = smack_known_huh.smk_known;
+ strncpy(result, final, SMK_MAXLEN);
+ return;
+}
+
+/**
+ * smack_to_cipso - find the CIPSO option to go with a Smack label
+ * @smack: a pointer to the smack label in question
+ * @cp: where to put the result
+ *
+ * Returns zero if a value is available, non-zero otherwise.
+ */
+int smack_to_cipso(const char *smack, struct smack_cipso *cp)
+{
+ struct smack_known *kp;
+
+ for (kp = smack_known; kp != NULL; kp = kp->smk_next)
+ if (kp->smk_known == smack ||
+ strcmp(kp->smk_known, smack) == 0)
+ break;
+
+ if (kp == NULL || kp->smk_cipso == NULL)
+ return -ENOENT;
+
+ memcpy(cp, kp->smk_cipso, sizeof(struct smack_cipso));
+ return 0;
+}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
new file mode 100644
index 000000000000..1c11e4245859
--- /dev/null
+++ b/security/smack/smack_lsm.c
@@ -0,0 +1,2518 @@
+/*
+ * Simplified MAC Kernel (smack) security module
+ *
+ * This file contains the smack hook function implementations.
+ *
+ * Author:
+ * Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/xattr.h>
+#include <linux/pagemap.h>
+#include <linux/mount.h>
+#include <linux/stat.h>
+#include <linux/ext2_fs.h>
+#include <linux/kd.h>
+#include <asm/ioctls.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/mutex.h>
+#include <linux/pipe_fs_i.h>
+#include <net/netlabel.h>
+#include <net/cipso_ipv4.h>
+
+#include "smack.h"
+
+/*
+ * I hope these are the hokeyist lines of code in the module. Casey.
+ */
+#define DEVPTS_SUPER_MAGIC 0x1cd1
+#define SOCKFS_MAGIC 0x534F434B
+#define TMPFS_MAGIC 0x01021994
+
+/**
+ * smk_fetch - Fetch the smack label from a file.
+ * @ip: a pointer to the inode
+ * @dp: a pointer to the dentry
+ *
+ * Returns a pointer to the master list entry for the Smack label
+ * or NULL if there was no label to fetch.
+ */
+static char *smk_fetch(struct inode *ip, struct dentry *dp)
+{
+ int rc;
+ char in[SMK_LABELLEN];
+
+ if (ip->i_op->getxattr == NULL)
+ return NULL;
+
+ rc = ip->i_op->getxattr(dp, XATTR_NAME_SMACK, in, SMK_LABELLEN);
+ if (rc < 0)
+ return NULL;
+
+ return smk_import(in, rc);
+}
+
+/**
+ * new_inode_smack - allocate an inode security blob
+ * @smack: a pointer to the Smack label to use in the blob
+ *
+ * Returns the new blob or NULL if there's no memory available
+ */
+struct inode_smack *new_inode_smack(char *smack)
+{
+ struct inode_smack *isp;
+
+ isp = kzalloc(sizeof(struct inode_smack), GFP_KERNEL);
+ if (isp == NULL)
+ return NULL;
+
+ isp->smk_inode = smack;
+ isp->smk_flags = 0;
+ mutex_init(&isp->smk_lock);
+
+ return isp;
+}
+
+/*
+ * LSM hooks.
+ * We he, that is fun!
+ */
+
+/**
+ * smack_ptrace - Smack approval on ptrace
+ * @ptp: parent task pointer
+ * @ctp: child task pointer
+ *
+ * Returns 0 if access is OK, an error code otherwise
+ *
+ * Do the capability checks, and require read and write.
+ */
+static int smack_ptrace(struct task_struct *ptp, struct task_struct *ctp)
+{
+ int rc;
+
+ rc = cap_ptrace(ptp, ctp);
+ if (rc != 0)
+ return rc;
+
+ rc = smk_access(ptp->security, ctp->security, MAY_READWRITE);
+ if (rc != 0 && __capable(ptp, CAP_MAC_OVERRIDE))
+ return 0;
+
+ return rc;
+}
+
+/**
+ * smack_syslog - Smack approval on syslog
+ * @type: message type
+ *
+ * Require that the task has the floor label
+ *
+ * Returns 0 on success, error code otherwise.
+ */
+static int smack_syslog(int type)
+{
+ int rc;
+ char *sp = current->security;
+
+ rc = cap_syslog(type);
+ if (rc != 0)
+ return rc;
+
+ if (capable(CAP_MAC_OVERRIDE))
+ return 0;
+
+ if (sp != smack_known_floor.smk_known)
+ rc = -EACCES;
+
+ return rc;
+}
+
+
+/*
+ * Superblock Hooks.
+ */
+
+/**
+ * smack_sb_alloc_security - allocate a superblock blob
+ * @sb: the superblock getting the blob
+ *
+ * Returns 0 on success or -ENOMEM on error.
+ */
+static int smack_sb_alloc_security(struct super_block *sb)
+{
+ struct superblock_smack *sbsp;
+
+ sbsp = kzalloc(sizeof(struct superblock_smack), GFP_KERNEL);
+
+ if (sbsp == NULL)
+ return -ENOMEM;
+
+ sbsp->smk_root = smack_known_floor.smk_known;
+ sbsp->smk_default = smack_known_floor.smk_known;
+ sbsp->smk_floor = smack_known_floor.smk_known;
+ sbsp->smk_hat = smack_known_hat.smk_known;
+ sbsp->smk_initialized = 0;
+ spin_lock_init(&sbsp->smk_sblock);
+
+ sb->s_security = sbsp;
+
+ return 0;
+}
+
+/**
+ * smack_sb_free_security - free a superblock blob
+ * @sb: the superblock getting the blob
+ *
+ */
+static void smack_sb_free_security(struct super_block *sb)
+{
+ kfree(sb->s_security);
+ sb->s_security = NULL;
+}
+
+/**
+ * smack_sb_copy_data - copy mount options data for processing
+ * @type: file system type
+ * @orig: where to start
+ * @smackopts: where to put the Smack specific mount options
+ *
+ * Returns 0 on success or -ENOMEM on error.
+ *
+ * Copy the Smack specific mount options out of the mount
+ * options list.
+ */
+static int smack_sb_copy_data(struct file_system_type *type, void *orig,
+ void *smackopts)
+{
+ char *cp, *commap, *otheropts, *dp;
+
+ /* Binary mount data: just copy */
+ if (type->fs_flags & FS_BINARY_MOUNTDATA) {
+ copy_page(smackopts, orig);
+ return 0;
+ }
+
+ otheropts = (char *)get_zeroed_page(GFP_KERNEL);
+ if (otheropts == NULL)
+ return -ENOMEM;
+
+ for (cp = orig, commap = orig; commap != NULL; cp = commap + 1) {
+ if (strstr(cp, SMK_FSDEFAULT) == cp)
+ dp = smackopts;
+ else if (strstr(cp, SMK_FSFLOOR) == cp)
+ dp = smackopts;
+ else if (strstr(cp, SMK_FSHAT) == cp)
+ dp = smackopts;
+ else if (strstr(cp, SMK_FSROOT) == cp)
+ dp = smackopts;
+ else
+ dp = otheropts;
+
+ commap = strchr(cp, ',');
+ if (commap != NULL)
+ *commap = '\0';
+
+ if (*dp != '\0')
+ strcat(dp, ",");
+ strcat(dp, cp);
+ }
+
+ strcpy(orig, otheropts);
+ free_page((unsigned long)otheropts);
+
+ return 0;
+}
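+
+/*
+ * For example, with orig pointing at "smackfsdef=Floor,errors=continue"
+ * the loop copies "smackfsdef=Floor" into smackopts and rewrites orig
+ * as "errors=continue" for the filesystem to parse on its own.
+ */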
+
+/**
+ * smack_sb_kern_mount - Smack specific mount processing
+ * @sb: the file system superblock
+ * @data: the smack mount options
+ *
+ * Returns 0 on success, an error code on failure
+ */
+static int smack_sb_kern_mount(struct super_block *sb, void *data)
+{
+ struct dentry *root = sb->s_root;
+ struct inode *inode = root->d_inode;
+ struct superblock_smack *sp = sb->s_security;
+ struct inode_smack *isp;
+ char *op;
+ char *commap;
+ char *nsp;
+
+ spin_lock(&sp->smk_sblock);
+ if (sp->smk_initialized != 0) {
+ spin_unlock(&sp->smk_sblock);
+ return 0;
+ }
+ sp->smk_initialized = 1;
+ spin_unlock(&sp->smk_sblock);
+
+ for (op = data; op != NULL; op = commap) {
+ commap = strchr(op, ',');
+ if (commap != NULL)
+ *commap++ = '\0';
+
+ if (strncmp(op, SMK_FSHAT, strlen(SMK_FSHAT)) == 0) {
+ op += strlen(SMK_FSHAT);
+ nsp = smk_import(op, 0);
+ if (nsp != NULL)
+ sp->smk_hat = nsp;
+ } else if (strncmp(op, SMK_FSFLOOR, strlen(SMK_FSFLOOR)) == 0) {
+ op += strlen(SMK_FSFLOOR);
+ nsp = smk_import(op, 0);
+ if (nsp != NULL)
+ sp->smk_floor = nsp;
+ } else if (strncmp(op, SMK_FSDEFAULT,
+ strlen(SMK_FSDEFAULT)) == 0) {
+ op += strlen(SMK_FSDEFAULT);
+ nsp = smk_import(op, 0);
+ if (nsp != NULL)
+ sp->smk_default = nsp;
+ } else if (strncmp(op, SMK_FSROOT, strlen(SMK_FSROOT)) == 0) {
+ op += strlen(SMK_FSROOT);
+ nsp = smk_import(op, 0);
+ if (nsp != NULL)
+ sp->smk_root = nsp;
+ }
+ }
+
+ /*
+ * Initialize the root inode.
+ */
+ isp = inode->i_security;
+ if (isp == NULL)
+ inode->i_security = new_inode_smack(sp->smk_root);
+ else
+ isp->smk_inode = sp->smk_root;
+
+ return 0;
+}
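+
+/*
+ * Illustrative mount invocation (userspace, not part of this file):
+ *
+ *   mount -o smackfsroot=System,smackfsdef=System /dev/sda2 /mnt
+ *
+ * sets the superblock's smk_root and smk_default labels to "System"
+ * and stamps the root inode with smk_root.
+ */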
+
+/**
+ * smack_sb_statfs - Smack check on statfs
+ * @dentry: identifies the file system in question
+ *
+ * Returns 0 if current can read the floor of the filesystem,
+ * and error code otherwise
+ */
+static int smack_sb_statfs(struct dentry *dentry)
+{
+ struct superblock_smack *sbp = dentry->d_sb->s_security;
+
+ return smk_curacc(sbp->smk_floor, MAY_READ);
+}
+
+/**
+ * smack_sb_mount - Smack check for mounting
+ * @dev_name: unused
+ * @nd: mount point
+ * @type: unused
+ * @flags: unused
+ * @data: unused
+ *
+ * Returns 0 if current can write the floor of the filesystem
+ * being mounted on, an error code otherwise.
+ */
+static int smack_sb_mount(char *dev_name, struct nameidata *nd,
+ char *type, unsigned long flags, void *data)
+{
+ struct superblock_smack *sbp = nd->mnt->mnt_sb->s_security;
+
+ return smk_curacc(sbp->smk_floor, MAY_WRITE);
+}
+
+/**
+ * smack_sb_umount - Smack check for unmounting
+ * @mnt: file system to unmount
+ * @flags: unused
+ *
+ * Returns 0 if current can write the floor of the filesystem
+ * being unmounted, an error code otherwise.
+ */
+static int smack_sb_umount(struct vfsmount *mnt, int flags)
+{
+ struct superblock_smack *sbp;
+
+ sbp = mnt->mnt_sb->s_security;
+
+ return smk_curacc(sbp->smk_floor, MAY_WRITE);
+}
+
+/*
+ * Inode hooks
+ */
+
+/**
+ * smack_inode_alloc_security - allocate an inode blob
+ * @inode: the inode in need of a blob
+ *
+ * Returns 0 if it gets a blob, -ENOMEM otherwise
+ */
+static int smack_inode_alloc_security(struct inode *inode)
+{
+ inode->i_security = new_inode_smack(current->security);
+ if (inode->i_security == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * smack_inode_free_security - free an inode blob
+ * @inode: the inode with a blob
+ *
+ * Clears the blob pointer in inode
+ */
+static void smack_inode_free_security(struct inode *inode)
+{
+ kfree(inode->i_security);
+ inode->i_security = NULL;
+}
+
+/**
+ * smack_inode_init_security - copy out the smack from an inode
+ * @inode: the inode
+ * @dir: unused
+ * @name: where to put the attribute name
+ * @value: where to put the attribute value
+ * @len: where to put the length of the attribute
+ *
+ * Returns 0 if it all works out, -ENOMEM if there's no memory
+ */
+static int smack_inode_init_security(struct inode *inode, struct inode *dir,
+ char **name, void **value, size_t *len)
+{
+ char *isp = smk_of_inode(inode);
+
+ if (name) {
+ *name = kstrdup(XATTR_SMACK_SUFFIX, GFP_KERNEL);
+ if (*name == NULL)
+ return -ENOMEM;
+ }
+
+ if (value) {
+ *value = kstrdup(isp, GFP_KERNEL);
+ if (*value == NULL)
+ return -ENOMEM;
+ }
+
+ if (len)
+ *len = strlen(isp) + 1;
+
+ return 0;
+}
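+
+/*
+ * The name handed back is just the "SMACK64" suffix; callers that
+ * write the attribute out are expected to prepend the "security."
+ * namespace, giving XATTR_NAME_SMACK ("security.SMACK64") on disk.
+ */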
+
+/**
+ * smack_inode_link - Smack check on link
+ * @old_dentry: the existing object
+ * @dir: unused
+ * @new_dentry: the new object
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry)
+{
+ int rc;
+ char *isp;
+
+ isp = smk_of_inode(old_dentry->d_inode);
+ rc = smk_curacc(isp, MAY_WRITE);
+
+ if (rc == 0 && new_dentry->d_inode != NULL) {
+ isp = smk_of_inode(new_dentry->d_inode);
+ rc = smk_curacc(isp, MAY_WRITE);
+ }
+
+ return rc;
+}
+
+/**
+ * smack_inode_unlink - Smack check on inode deletion
+ * @dir: containing directory object
+ * @dentry: file to unlink
+ *
+ * Returns 0 if current can write the containing directory
+ * and the object, error code otherwise
+ */
+static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *ip = dentry->d_inode;
+ int rc;
+
+ /*
+ * You need write access to the thing you're unlinking
+ */
+ rc = smk_curacc(smk_of_inode(ip), MAY_WRITE);
+ if (rc == 0)
+ /*
+ * You also need write access to the containing directory
+ */
+ rc = smk_curacc(smk_of_inode(dir), MAY_WRITE);
+
+ return rc;
+}
+
+/**
+ * smack_inode_rmdir - Smack check on directory deletion
+ * @dir: containing directory object
+ * @dentry: directory to unlink
+ *
+ * Returns 0 if current can write the containing directory
+ * and the directory, error code otherwise
+ */
+static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ int rc;
+
+ /*
+ * You need write access to the thing you're removing
+ */
+ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE);
+ if (rc == 0)
+ /*
+ * You also need write access to the containing directory
+ */
+ rc = smk_curacc(smk_of_inode(dir), MAY_WRITE);
+
+ return rc;
+}
+
+/**
+ * smack_inode_rename - Smack check on rename
+ * @old_inode: unused
+ * @old_dentry: the old object
+ * @new_inode: unused
+ * @new_dentry: the new object
+ *
+ * Read and write access is required on both the old and
+ * new objects.
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_rename(struct inode *old_inode,
+ struct dentry *old_dentry,
+ struct inode *new_inode,
+ struct dentry *new_dentry)
+{
+ int rc;
+ char *isp;
+
+ isp = smk_of_inode(old_dentry->d_inode);
+ rc = smk_curacc(isp, MAY_READWRITE);
+
+ if (rc == 0 && new_dentry->d_inode != NULL) {
+ isp = smk_of_inode(new_dentry->d_inode);
+ rc = smk_curacc(isp, MAY_READWRITE);
+ }
+
+ return rc;
+}
+
+/**
+ * smack_inode_permission - Smack version of permission()
+ * @inode: the inode in question
+ * @mask: the access requested
+ * @nd: unused
+ *
+ * This is the important Smack hook.
+ *
+ * Returns 0 if access is permitted, -EACCES otherwise
+ */
+static int smack_inode_permission(struct inode *inode, int mask,
+ struct nameidata *nd)
+{
+ /*
+ * No permission to check. Existence test. Yup, it's there.
+ */
+ if (mask == 0)
+ return 0;
+
+ return smk_curacc(smk_of_inode(inode), mask);
+}
+
+/**
+ * smack_inode_setattr - Smack check for setting attributes
+ * @dentry: the object
+ * @iattr: for the force flag
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+ /*
+ * Need to allow for clearing the setuid bit.
+ */
+ if (iattr->ia_valid & ATTR_FORCE)
+ return 0;
+
+ return smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE);
+}
+
+/**
+ * smack_inode_getattr - Smack check for getting attributes
+ * @mnt: unused
+ * @dentry: the object
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+{
+ return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ);
+}
+
+/**
+ * smack_inode_setxattr - Smack check for setting xattrs
+ * @dentry: the object
+ * @name: name of the attribute
+ * @value: unused
+ * @size: unused
+ * @flags: unused
+ *
+ * This protects the Smack attribute explicitly.
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_setxattr(struct dentry *dentry, char *name,
+ void *value, size_t size, int flags)
+{
+ if (!capable(CAP_MAC_ADMIN)) {
+ if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKIPOUT) == 0)
+ return -EPERM;
+ }
+
+ return smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE);
+}
+
+/**
+ * smack_inode_post_setxattr - Apply the Smack update approved above
+ * @dentry: object
+ * @name: attribute name
+ * @value: attribute value
+ * @size: attribute size
+ * @flags: unused
+ *
+ * Set the pointer in the inode blob to the entry found
+ * in the master label list.
+ */
+static void smack_inode_post_setxattr(struct dentry *dentry, char *name,
+ void *value, size_t size, int flags)
+{
+ struct inode_smack *isp;
+ char *nsp;
+
+ /*
+ * Not SMACK
+ */
+ if (strcmp(name, XATTR_NAME_SMACK))
+ return;
+
+ if (size >= SMK_LABELLEN)
+ return;
+
+ isp = dentry->d_inode->i_security;
+
+ /*
+ * No locking is done here. This is a pointer
+ * assignment.
+ */
+ nsp = smk_import(value, size);
+ if (nsp != NULL)
+ isp->smk_inode = nsp;
+ else
+ isp->smk_inode = smack_known_invalid.smk_known;
+
+ return;
+}
+
+/*
+ * smack_inode_getxattr - Smack check on getxattr
+ * @dentry: the object
+ * @name: unused
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_getxattr(struct dentry *dentry, char *name)
+{
+ return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ);
+}
+
+/*
+ * smack_inode_removexattr - Smack check on removexattr
+ * @dentry: the object
+ * @name: name of the attribute
+ *
+ * Removing the Smack attribute requires CAP_MAC_ADMIN
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+static int smack_inode_removexattr(struct dentry *dentry, char *name)
+{
+ if (strcmp(name, XATTR_NAME_SMACK) == 0 && !capable(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ return smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE);
+}
+
+/**
+ * smack_inode_getsecurity - get smack xattrs
+ * @inode: the object
+ * @name: attribute name
+ * @buffer: where to put the result
+ * @alloc: unused
+ *
+ * Returns the size of the attribute or an error code
+ */
+static int smack_inode_getsecurity(const struct inode *inode,
+ const char *name, void **buffer,
+ bool alloc)
+{
+ struct socket_smack *ssp;
+ struct socket *sock;
+ struct super_block *sbp;
+ struct inode *ip = (struct inode *)inode;
+ char *isp;
+ int ilen;
+ int rc = 0;
+
+ if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
+ isp = smk_of_inode(inode);
+ ilen = strlen(isp) + 1;
+ *buffer = isp;
+ return ilen;
+ }
+
+ /*
+ * The rest of the Smack xattrs are only on sockets.
+ */
+ sbp = ip->i_sb;
+ if (sbp->s_magic != SOCKFS_MAGIC)
+ return -EOPNOTSUPP;
+
+ sock = SOCKET_I(ip);
+ if (sock == NULL)
+ return -EOPNOTSUPP;
+
+ ssp = sock->sk->sk_security;
+
+ if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+ isp = ssp->smk_in;
+ else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
+ isp = ssp->smk_out;
+ else
+ return -EOPNOTSUPP;
+
+ ilen = strlen(isp) + 1;
+ if (rc == 0) {
+ *buffer = isp;
+ rc = ilen;
+ }
+
+ return rc;
+}
+
+
+/**
+ * smack_inode_listsecurity - list the Smack attributes
+ * @inode: the object
+ * @buffer: where they go
+ * @buffer_size: size of buffer
+ *
+ * Returns 0 on success, -EINVAL otherwise
+ */
+static int smack_inode_listsecurity(struct inode *inode, char *buffer,
+ size_t buffer_size)
+{
+ int len = strlen(XATTR_NAME_SMACK);
+
+ if (buffer != NULL && len <= buffer_size) {
+ memcpy(buffer, XATTR_NAME_SMACK, len);
+ return len;
+ }
+ return -EINVAL;
+}
+
+/*
+ * File Hooks
+ */
+
+/**
+ * smack_file_permission - Smack check on file operations
+ * @file: unused
+ * @mask: unused
+ *
+ * Returns 0
+ *
+ * Should access checks be done on each read or write?
+ * UNICOS and SELinux say yes.
+ * Trusted Solaris, Trusted Irix, and just about everyone else says no.
+ *
+ * I'll say no for now. Smack does not do the frequent
+ * label changing that SELinux does.
+ */
+static int smack_file_permission(struct file *file, int mask)
+{
+ return 0;
+}
+
+/**
+ * smack_file_alloc_security - assign a file security blob
+ * @file: the object
+ *
+ * The security blob for a file is a pointer to the master
+ * label list, so no allocation is done.
+ *
+ * Returns 0
+ */
+static int smack_file_alloc_security(struct file *file)
+{
+ file->f_security = current->security;
+ return 0;
+}
+
+/**
+ * smack_file_free_security - clear a file security blob
+ * @file: the object
+ *
+ * The security blob for a file is a pointer to the master
+ * label list, so no memory is freed.
+ */
+static void smack_file_free_security(struct file *file)
+{
+ file->f_security = NULL;
+}
+
+/**
+ * smack_file_ioctl - Smack check on ioctls
+ * @file: the object
+ * @cmd: what to do
+ * @arg: unused
+ *
+ * Relies heavily on the correct use of the ioctl command conventions.
+ *
+ * Returns 0 if allowed, error code otherwise
+ */
+static int smack_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc = 0;
+
+ if (_IOC_DIR(cmd) & _IOC_WRITE)
+ rc = smk_curacc(file->f_security, MAY_WRITE);
+
+ if (rc == 0 && (_IOC_DIR(cmd) & _IOC_READ))
+ rc = smk_curacc(file->f_security, MAY_READ);
+
+ return rc;
+}
+
+/**
+ * smack_file_lock - Smack check on file locking
+ * @file: the object
+ * @cmd: unused
+ *
+ * Returns 0 if current has write access, error code otherwise
+ */
+static int smack_file_lock(struct file *file, unsigned int cmd)
+{
+ return smk_curacc(file->f_security, MAY_WRITE);
+}
+
+/**
+ * smack_file_fcntl - Smack check on fcntl
+ * @file: the object
+ * @cmd: what action to check
+ * @arg: unused
+ *
+ * Returns 0 if current has access, error code otherwise
+ */
+static int smack_file_fcntl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+
+ switch (cmd) {
+ case F_DUPFD:
+ case F_GETFD:
+ case F_GETFL:
+ case F_GETLK:
+ case F_GETOWN:
+ case F_GETSIG:
+ rc = smk_curacc(file->f_security, MAY_READ);
+ break;
+ case F_SETFD:
+ case F_SETFL:
+ case F_SETLK:
+ case F_SETLKW:
+ case F_SETOWN:
+ case F_SETSIG:
+ rc = smk_curacc(file->f_security, MAY_WRITE);
+ break;
+ default:
+ rc = smk_curacc(file->f_security, MAY_READWRITE);
+ }
+
+ return rc;
+}
+
+/**
+ * smack_file_set_fowner - set the file security blob value
+ * @file: object in question
+ *
+ * Returns 0
+ * Further research may be required on this one.
+ */
+static int smack_file_set_fowner(struct file *file)
+{
+ file->f_security = current->security;
+ return 0;
+}
+
+/**
+ * smack_file_send_sigiotask - Smack on sigio
+ * @tsk: The target task
+ * @fown: the object the signal come from
+ * @signum: unused
+ *
+ * Allow a privileged task to get signals even if it shouldn't
+ *
+ * Returns 0 if a subject with the object's smack could
+ * write to the task, an error code otherwise.
+ */
+static int smack_file_send_sigiotask(struct task_struct *tsk,
+ struct fown_struct *fown, int signum)
+{
+ struct file *file;
+ int rc;
+
+ /*
+ * struct fown_struct is never outside the context of a struct file
+ */
+ file = container_of(fown, struct file, f_owner);
+ rc = smk_access(file->f_security, tsk->security, MAY_WRITE);
+ if (rc != 0 && __capable(tsk, CAP_MAC_OVERRIDE))
+ return 0;
+ return rc;
+}
+
+/**
+ * smack_file_receive - Smack file receive check
+ * @file: the object
+ *
+ * Returns 0 if current has access, error code otherwise
+ */
+static int smack_file_receive(struct file *file)
+{
+ int may = 0;
+
+ /*
+ * This code relies on bitmasks.
+ */
+ if (file->f_mode & FMODE_READ)
+ may = MAY_READ;
+ if (file->f_mode & FMODE_WRITE)
+ may |= MAY_WRITE;
+
+ return smk_curacc(file->f_security, may);
+}
+
+/*
+ * Task hooks
+ */
+
+/**
+ * smack_task_alloc_security - "allocate" a task blob
+ * @tsk: the task in need of a blob
+ *
+ * Smack isn't using copies of blobs. Everyone
+ * points to an immutable list. No alloc required.
+ * No data copy required.
+ *
+ * Always returns 0
+ */
+static int smack_task_alloc_security(struct task_struct *tsk)
+{
+ tsk->security = current->security;
+
+ return 0;
+}
+
+/**
+ * smack_task_free_security - "free" a task blob
+ * @task: the task with the blob
+ *
+ * Smack isn't using copies of blobs. Everyone
+ * points to an immutable list. The blobs never go away.
+ * There is no leak here.
+ */
+static void smack_task_free_security(struct task_struct *task)
+{
+ task->security = NULL;
+}
+
+/**
+ * smack_task_setpgid - Smack check on setting pgid
+ * @p: the task object
+ * @pgid: unused
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
+{
+ return smk_curacc(p->security, MAY_WRITE);
+}
+
+/**
+ * smack_task_getpgid - Smack access check for getpgid
+ * @p: the object task
+ *
+ * Returns 0 if current can read the object task, error code otherwise
+ */
+static int smack_task_getpgid(struct task_struct *p)
+{
+ return smk_curacc(p->security, MAY_READ);
+}
+
+/**
+ * smack_task_getsid - Smack access check for getsid
+ * @p: the object task
+ *
+ * Returns 0 if current can read the object task, error code otherwise
+ */
+static int smack_task_getsid(struct task_struct *p)
+{
+ return smk_curacc(p->security, MAY_READ);
+}
+
+/**
+ * smack_task_getsecid - get the secid of the task
+ * @p: the object task
+ * @secid: where to put the result
+ *
+ * Sets the secid to contain a u32 version of the smack label.
+ */
+static void smack_task_getsecid(struct task_struct *p, u32 *secid)
+{
+ *secid = smack_to_secid(p->security);
+}
+
+/**
+ * smack_task_setnice - Smack check on setting nice
+ * @p: the task object
+ * @nice: unused
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setnice(struct task_struct *p, int nice)
+{
+ return smk_curacc(p->security, MAY_WRITE);
+}
+
+/**
+ * smack_task_setioprio - Smack check on setting ioprio
+ * @p: the task object
+ * @ioprio: unused
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setioprio(struct task_struct *p, int ioprio)
+{
+ return smk_curacc(p->security, MAY_WRITE);
+}
+
+/**
+ * smack_task_getioprio - Smack check on reading ioprio
+ * @p: the task object
+ *
+ * Return 0 if read access is permitted
+ */
+static int smack_task_getioprio(struct task_struct *p)
+{
+ return smk_curacc(p->security, MAY_READ);
+}
+
+/**
+ * smack_task_setscheduler - Smack check on setting scheduler
+ * @p: the task object
+ * @policy: unused
+ * @lp: unused
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_setscheduler(struct task_struct *p, int policy,
+ struct sched_param *lp)
+{
+ return smk_curacc(p->security, MAY_WRITE);
+}
+
+/**
+ * smack_task_getscheduler - Smack check on reading scheduler
+ * @p: the task object
+ *
+ * Return 0 if read access is permitted
+ */
+static int smack_task_getscheduler(struct task_struct *p)
+{
+ return smk_curacc(p->security, MAY_READ);
+}
+
+/**
+ * smack_task_movememory - Smack check on moving memory
+ * @p: the task object
+ *
+ * Return 0 if write access is permitted
+ */
+static int smack_task_movememory(struct task_struct *p)
+{
+ return smk_curacc(p->security, MAY_WRITE);
+}
+
+/**
+ * smack_task_kill - Smack check on signal delivery
+ * @p: the task object
+ * @info: signal info; kernel-generated signals are always allowed
+ * @sig: unused
+ * @secid: identifies the smack to use in lieu of current's
+ *
+ * Return 0 if write access is permitted
+ *
+ * The secid behavior is an artifact of an SELinux hack
+ * in the USB code. Someday it may go away.
+ */
+static int smack_task_kill(struct task_struct *p, struct siginfo *info,
+ int sig, u32 secid)
+{
+ /*
+ * Special cases where signals really ought to go through
+ * in spite of policy. Stephen Smalley suggests it may
+ * make sense to change the caller so that it doesn't
+ * bother with the LSM hook in these cases.
+ */
+ if (info != SEND_SIG_NOINFO &&
+ (is_si_special(info) || SI_FROMKERNEL(info)))
+ return 0;
+ /*
+ * Sending a signal requires that the sender
+ * can write the receiver.
+ */
+ if (secid == 0)
+ return smk_curacc(p->security, MAY_WRITE);
+ /*
+ * If the secid isn't 0 we're dealing with some USB IO
+ * specific behavior. This is not clean. For one thing
+ * we can't take privilege into account.
+ */
+ return smk_access(smack_from_secid(secid), p->security, MAY_WRITE);
+}
+
+/**
+ * smack_task_wait - Smack access check for waiting
+ * @p: task to wait for
+ *
+ * Returns 0 if current can wait for p, error code otherwise
+ */
+static int smack_task_wait(struct task_struct *p)
+{
+ int rc;
+
+ rc = smk_access(current->security, p->security, MAY_WRITE);
+ if (rc == 0)
+ return 0;
+
+ /*
+ * Allow the operation to succeed if either task
+ * has privilege to perform operations that might
+ * account for the smack labels having gotten to
+ * be different in the first place.
+ *
+ * This breaks the strict subject/object access
+ * control ideal, taking the object's privilege
+ * state into account in the decision as well as
+ * the smack value.
+ */
+ if (capable(CAP_MAC_OVERRIDE) || __capable(p, CAP_MAC_OVERRIDE))
+ return 0;
+
+ return rc;
+}
+
+/**
+ * smack_task_to_inode - copy task smack into the inode blob
+ * @p: task to copy from
+ * @inode: inode to copy to
+ *
+ * Sets the smack pointer in the inode security blob
+ */
+static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
+{
+ struct inode_smack *isp = inode->i_security;
+ isp->smk_inode = p->security;
+}
+
+/*
+ * Socket hooks.
+ */
+
+/**
+ * smack_sk_alloc_security - Allocate a socket blob
+ * @sk: the socket
+ * @family: unused
+ * @priority: memory allocation priority
+ *
+ * Assign the current task's Smack label to the new socket blob
+ *
+ * Returns 0 on success, -ENOMEM if there's no memory
+ */
+static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
+{
+ char *csp = current->security;
+ struct socket_smack *ssp;
+
+ ssp = kzalloc(sizeof(struct socket_smack), gfp_flags);
+ if (ssp == NULL)
+ return -ENOMEM;
+
+ ssp->smk_in = csp;
+ ssp->smk_out = csp;
+ ssp->smk_packet[0] = '\0';
+
+ sk->sk_security = ssp;
+
+ return 0;
+}
+
+/**
+ * smack_sk_free_security - Free a socket blob
+ * @sk: the socket
+ *
+ * Clears the blob pointer
+ */
+static void smack_sk_free_security(struct sock *sk)
+{
+ kfree(sk->sk_security);
+}
+
+/**
+ * smack_set_catset - convert a catset to netlabel MLS categories
+ * @catset: the Smack categories
+ * @sap: where to put the netlabel categories
+ *
+ * Allocates and fills attr.mls.cat
+ */
+static void smack_set_catset(char *catset, struct netlbl_lsm_secattr *sap)
+{
+ unsigned char *cp;
+ unsigned char m;
+ int cat;
+ int rc;
+ int byte;
+
+ if (catset == 0)
+ return;
+
+ sap->flags |= NETLBL_SECATTR_MLS_CAT;
+ sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+ sap->attr.mls.cat->startbit = 0;
+
+ for (cat = 1, cp = catset, byte = 0; byte < SMK_LABELLEN; cp++, byte++)
+ for (m = 0x80; m != 0; m >>= 1, cat++) {
+ if ((m & *cp) == 0)
+ continue;
+ rc = netlbl_secattr_catmap_setbit(sap->attr.mls.cat,
+ cat, GFP_ATOMIC);
+ }
+}
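+
+/*
+ * For example, a catset whose only set bit is the high bit of byte 0
+ * (as written by smack_catset_bit(1, ...)) yields a netlabel category
+ * map containing just category 1.
+ */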
+
+/**
+ * smack_to_secattr - fill a secattr from a smack value
+ * @smack: the smack value
+ * @nlsp: where the result goes
+ *
+ * Casey says that CIPSO is good enough for now.
+ * It can be used to effect.
+ * It can also be abused to effect when necessary.
+ * Apologies to the TSIG group in general and GW in particular.
+ */
+static void smack_to_secattr(char *smack, struct netlbl_lsm_secattr *nlsp)
+{
+ struct smack_cipso cipso;
+ int rc;
+
+ switch (smack_net_nltype) {
+ case NETLBL_NLTYPE_CIPSOV4:
+ nlsp->domain = NULL;
+ nlsp->flags = NETLBL_SECATTR_DOMAIN;
+ nlsp->flags |= NETLBL_SECATTR_MLS_LVL;
+
+ rc = smack_to_cipso(smack, &cipso);
+ if (rc == 0) {
+ nlsp->attr.mls.lvl = cipso.smk_level;
+ smack_set_catset(cipso.smk_catset, nlsp);
+ } else {
+ nlsp->attr.mls.lvl = smack_cipso_direct;
+ smack_set_catset(smack, nlsp);
+ }
+ break;
+ default:
+ break;
+ }
+}
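+
+/*
+ * That is, a label with a recorded struct smack_cipso mapping is sent
+ * with its own level and category set, while any other label goes out
+ * at the smack_cipso_direct level with the label bytes themselves used
+ * as the category bitmap.
+ */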
+
+/**
+ * smack_netlabel - Set the secattr on a socket
+ * @sk: the socket
+ *
+ * Convert the outbound smack value (smk_out) to a
+ * secattr and attach it to the socket.
+ *
+ * Returns 0 on success or an error code
+ */
+static int smack_netlabel(struct sock *sk)
+{
+ struct socket_smack *ssp = sk->sk_security;
+ struct netlbl_lsm_secattr secattr;
+ int rc = 0;
+
+ netlbl_secattr_init(&secattr);
+ smack_to_secattr(ssp->smk_out, &secattr);
+ if (secattr.flags != NETLBL_SECATTR_NONE)
+ rc = netlbl_sock_setattr(sk, &secattr);
+
+ netlbl_secattr_destroy(&secattr);
+ return rc;
+}
+
+/**
+ * smack_inode_setsecurity - set smack xattrs
+ * @inode: the object
+ * @name: attribute name
+ * @value: attribute value
+ * @size: size of the attribute
+ * @flags: unused
+ *
+ * Sets the named attribute in the appropriate blob
+ *
+ * Returns 0 on success, or an error code
+ */
+static int smack_inode_setsecurity(struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
+{
+ char *sp;
+ struct inode_smack *nsp = inode->i_security;
+ struct socket_smack *ssp;
+ struct socket *sock;
+
+ if (value == NULL || size > SMK_LABELLEN)
+ return -EACCES;
+
+ sp = smk_import(value, size);
+ if (sp == NULL)
+ return -EINVAL;
+
+ if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
+ nsp->smk_inode = sp;
+ return 0;
+ }
+ /*
+ * The rest of the Smack xattrs are only on sockets.
+ */
+ if (inode->i_sb->s_magic != SOCKFS_MAGIC)
+ return -EOPNOTSUPP;
+
+ sock = SOCKET_I(inode);
+ if (sock == NULL)
+ return -EOPNOTSUPP;
+
+ ssp = sock->sk->sk_security;
+
+ if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+ ssp->smk_in = sp;
+ else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) {
+ ssp->smk_out = sp;
+ return smack_netlabel(sock->sk);
+ } else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * smack_socket_post_create - finish socket setup
+ * @sock: the socket
+ * @family: protocol family
+ * @type: unused
+ * @protocol: unused
+ * @kern: unused
+ *
+ * Sets the netlabel information on the socket
+ *
+ * Returns 0 on success, and error code otherwise
+ */
+static int smack_socket_post_create(struct socket *sock, int family,
+ int type, int protocol, int kern)
+{
+ if (family != PF_INET)
+ return 0;
+ /*
+ * Set the outbound netlbl.
+ */
+ return smack_netlabel(sock->sk);
+}
+
+/**
+ * smack_flags_to_may - convert S_ to MAY_ values
+ * @flags: the S_ value
+ *
+ * Returns the equivalent MAY_ value
+ */
+static int smack_flags_to_may(int flags)
+{
+ int may = 0;
+
+ if (flags & S_IRUGO)
+ may |= MAY_READ;
+ if (flags & S_IWUGO)
+ may |= MAY_WRITE;
+ if (flags & S_IXUGO)
+ may |= MAY_EXEC;
+
+ return may;
+}
+
+/**
+ * smack_msg_msg_alloc_security - Set the security blob for msg_msg
+ * @msg: the object
+ *
+ * Returns 0
+ */
+static int smack_msg_msg_alloc_security(struct msg_msg *msg)
+{
+ msg->security = current->security;
+ return 0;
+}
+
+/**
+ * smack_msg_msg_free_security - Clear the security blob for msg_msg
+ * @msg: the object
+ *
+ * Clears the blob pointer
+ */
+static void smack_msg_msg_free_security(struct msg_msg *msg)
+{
+ msg->security = NULL;
+}
+
+/**
+ * smack_of_shm - the smack pointer for the shm
+ * @shp: the object
+ *
+ * Returns a pointer to the smack value
+ */
+static char *smack_of_shm(struct shmid_kernel *shp)
+{
+ return (char *)shp->shm_perm.security;
+}
+
+/**
+ * smack_shm_alloc_security - Set the security blob for shm
+ * @shp: the object
+ *
+ * Returns 0
+ */
+static int smack_shm_alloc_security(struct shmid_kernel *shp)
+{
+ struct kern_ipc_perm *isp = &shp->shm_perm;
+
+ isp->security = current->security;
+ return 0;
+}
+
+/**
+ * smack_shm_free_security - Clear the security blob for shm
+ * @shp: the object
+ *
+ * Clears the blob pointer
+ */
+static void smack_shm_free_security(struct shmid_kernel *shp)
+{
+ struct kern_ipc_perm *isp = &shp->shm_perm;
+
+ isp->security = NULL;
+}
+
+/**
+ * smack_shm_associate - Smack access check for shm
+ * @shp: the object
+ * @shmflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_shm_associate(struct shmid_kernel *shp, int shmflg)
+{
+ char *ssp = smack_of_shm(shp);
+ int may;
+
+ may = smack_flags_to_may(shmflg);
+ return smk_curacc(ssp, may);
+}
+
+/**
+ * smack_shm_shmctl - Smack access check for shm
+ * @shp: the object
+ * @cmd: what it wants to do
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_shm_shmctl(struct shmid_kernel *shp, int cmd)
+{
+ char *ssp = smack_of_shm(shp);
+ int may;
+
+ switch (cmd) {
+ case IPC_STAT:
+ case SHM_STAT:
+ may = MAY_READ;
+ break;
+ case IPC_SET:
+ case SHM_LOCK:
+ case SHM_UNLOCK:
+ case IPC_RMID:
+ may = MAY_READWRITE;
+ break;
+ case IPC_INFO:
+ case SHM_INFO:
+ /*
+ * System level information.
+ */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return smk_curacc(ssp, may);
+}
+
+/**
+ * smack_shm_shmat - Smack access for shmat
+ * @shp: the object
+ * @shmaddr: unused
+ * @shmflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_shm_shmat(struct shmid_kernel *shp, char __user *shmaddr,
+ int shmflg)
+{
+ char *ssp = smack_of_shm(shp);
+ int may;
+
+ may = smack_flags_to_may(shmflg);
+ return smk_curacc(ssp, may);
+}
+
+/**
+ * smack_of_sem - the smack pointer for the sem
+ * @sma: the object
+ *
+ * Returns a pointer to the smack value
+ */
+static char *smack_of_sem(struct sem_array *sma)
+{
+ return (char *)sma->sem_perm.security;
+}
+
+/**
+ * smack_sem_alloc_security - Set the security blob for sem
+ * @sma: the object
+ *
+ * Returns 0
+ */
+static int smack_sem_alloc_security(struct sem_array *sma)
+{
+ struct kern_ipc_perm *isp = &sma->sem_perm;
+
+ isp->security = current->security;
+ return 0;
+}
+
+/**
+ * smack_sem_free_security - Clear the security blob for sem
+ * @sma: the object
+ *
+ * Clears the blob pointer
+ */
+static void smack_sem_free_security(struct sem_array *sma)
+{
+ struct kern_ipc_perm *isp = &sma->sem_perm;
+
+ isp->security = NULL;
+}
+
+/**
+ * smack_sem_associate - Smack access check for sem
+ * @sma: the object
+ * @semflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_sem_associate(struct sem_array *sma, int semflg)
+{
+ char *ssp = smack_of_sem(sma);
+ int may;
+
+ may = smack_flags_to_may(semflg);
+ return smk_curacc(ssp, may);
+}
+
+/**
+ * smack_sem_semctl - Smack access check for sem
+ * @sma: the object
+ * @cmd: what it wants to do
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_sem_semctl(struct sem_array *sma, int cmd)
+{
+ char *ssp = smack_of_sem(sma);
+ int may;
+
+ switch (cmd) {
+ case GETPID:
+ case GETNCNT:
+ case GETZCNT:
+ case GETVAL:
+ case GETALL:
+ case IPC_STAT:
+ case SEM_STAT:
+ may = MAY_READ;
+ break;
+ case SETVAL:
+ case SETALL:
+ case IPC_RMID:
+ case IPC_SET:
+ may = MAY_READWRITE;
+ break;
+ case IPC_INFO:
+ case SEM_INFO:
+ /*
+ * System level information
+ */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return smk_curacc(ssp, may);
+}
+
+/**
+ * smack_sem_semop - Smack checks of semaphore operations
+ * @sma: the object
+ * @sops: unused
+ * @nsops: unused
+ * @alter: unused
+ *
+ * Treated as read and write in all cases.
+ *
+ * Returns 0 if access is allowed, error code otherwise
+ */
+static int smack_sem_semop(struct sem_array *sma, struct sembuf *sops,
+ unsigned nsops, int alter)
+{
+ char *ssp = smack_of_sem(sma);
+
+ return smk_curacc(ssp, MAY_READWRITE);
+}
+
+/**
+ * smack_msg_queue_alloc_security - Set the security blob for msg
+ * @msq: the object
+ *
+ * Returns 0
+ */
+static int smack_msg_queue_alloc_security(struct msg_queue *msq)
+{
+ struct kern_ipc_perm *kisp = &msq->q_perm;
+
+ kisp->security = current->security;
+ return 0;
+}
+
+/**
+ * smack_msg_queue_free_security - Clear the security blob for msg
+ * @msq: the object
+ *
+ * Clears the blob pointer
+ */
+static void smack_msg_queue_free_security(struct msg_queue *msq)
+{
+ struct kern_ipc_perm *kisp = &msq->q_perm;
+
+ kisp->security = NULL;
+}
+
+/**
+ * smack_of_msq - the smack pointer for the msq
+ * @msq: the object
+ *
+ * Returns a pointer to the smack value
+ */
+static char *smack_of_msq(struct msg_queue *msq)
+{
+ return (char *)msq->q_perm.security;
+}
+
+/**
+ * smack_msg_queue_associate - Smack access check for msg_queue
+ * @msq: the object
+ * @msqflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_msg_queue_associate(struct msg_queue *msq, int msqflg)
+{
+ char *msp = smack_of_msq(msq);
+ int may;
+
+ may = smack_flags_to_may(msqflg);
+ return smk_curacc(msp, may);
+}
+
+/**
+ * smack_msg_queue_msgctl - Smack access check for msg_queue
+ * @msq: the object
+ * @cmd: what it wants to do
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_msg_queue_msgctl(struct msg_queue *msq, int cmd)
+{
+ char *msp = smack_of_msq(msq);
+ int may;
+
+ switch (cmd) {
+ case IPC_STAT:
+ case MSG_STAT:
+ may = MAY_READ;
+ break;
+ case IPC_SET:
+ case IPC_RMID:
+ may = MAY_READWRITE;
+ break;
+ case IPC_INFO:
+ case MSG_INFO:
+ /*
+ * System level information
+ */
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return smk_curacc(msp, may);
+}
+
+/**
+ * smack_msg_queue_msgsnd - Smack access check for msg_queue
+ * @msq: the object
+ * @msg: unused
+ * @msqflg: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
+ int msqflg)
+{
+ char *msp = smack_of_msq(msq);
+ int rc;
+
+ rc = smack_flags_to_may(msqflg);
+ return smk_curacc(msp, rc);
+}
+
+/**
+ * smack_msg_queue_msgrcv - Smack access check for msg_queue
+ * @msq: the object
+ * @msg: unused
+ * @target: unused
+ * @type: unused
+ * @mode: unused
+ *
+ * Returns 0 if current has read and write access, error code otherwise
+ */
+static int smack_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
+ struct task_struct *target, long type, int mode)
+{
+ char *msp = smack_of_msq(msq);
+
+ return smk_curacc(msp, MAY_READWRITE);
+}
+
+/**
+ * smack_ipc_permission - Smack access for ipc_permission()
+ * @ipp: the object permissions
+ * @flag: access requested
+ *
+ * Returns 0 if current has the requested access, error code otherwise
+ */
+static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag)
+{
+ char *isp = ipp->security;
+ int may;
+
+ may = smack_flags_to_may(flag);
+ return smk_curacc(isp, may);
+}
+
+/**
+ * smack_d_instantiate - Make sure the blob is correct on an inode
+ * @opt_dentry: unused
+ * @inode: the object
+ *
+ * Set the inode's security blob if it hasn't been done already.
+ */
+static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
+{
+ struct super_block *sbp;
+ struct superblock_smack *sbsp;
+ struct inode_smack *isp;
+ char *csp = current->security;
+ char *fetched;
+ char *final;
+ struct dentry *dp;
+
+ if (inode == NULL)
+ return;
+
+ isp = inode->i_security;
+
+ mutex_lock(&isp->smk_lock);
+ /*
+ * If the inode is already instantiated
+ * take the quick way out
+ */
+ if (isp->smk_flags & SMK_INODE_INSTANT)
+ goto unlockandout;
+
+ sbp = inode->i_sb;
+ sbsp = sbp->s_security;
+ /*
+ * We're going to use the superblock default label
+ * if there's no label on the file.
+ */
+ final = sbsp->smk_default;
+
+ /*
+ * This is pretty hackish.
+ * Casey says that we shouldn't have to do
+ * file system specific code, but it does help
+ * with keeping it simple.
+ */
+ switch (sbp->s_magic) {
+ case SMACK_MAGIC:
+ /*
+ * Casey says that it's a little embarrassing
+ * that the smack file system doesn't do
+ * extended attributes.
+ */
+ final = smack_known_star.smk_known;
+ break;
+ case PIPEFS_MAGIC:
+ /*
+ * Casey says pipes are easy (?)
+ */
+ final = smack_known_star.smk_known;
+ break;
+ case DEVPTS_SUPER_MAGIC:
+ /*
+ * devpts seems content with the label of the task.
+ * Programs that change smack have to treat the
+ * pty with respect.
+ */
+ final = csp;
+ break;
+ case SOCKFS_MAGIC:
+ /*
+ * Casey says sockets get the smack of the task.
+ */
+ final = csp;
+ break;
+ case PROC_SUPER_MAGIC:
+ /*
+ * Casey says procfs appears not to care.
+ * The superblock default suffices.
+ */
+ break;
+ case TMPFS_MAGIC:
+ /*
+ * Device labels should come from the filesystem,
+ * but watch out, because they're volatile,
+ * getting recreated on every reboot.
+ */
+ final = smack_known_star.smk_known;
+ /*
+ * No break.
+ *
+ * If a smack value has been set we want to use it,
+ * but since tmpfs isn't giving us the opportunity
+ * to set mount options simulate setting the
+ * superblock default.
+ */
+ default:
+ /*
+ * This isn't an understood special case.
+ * Get the value from the xattr.
+ *
+ * No xattr support means, alas, no SMACK label.
+ * Use the default applied above.
+ * It would be curious if the label of the task
+ * does not match that assigned.
+ */
+ if (inode->i_op->getxattr == NULL)
+ break;
+ /*
+ * Get the dentry for xattr.
+ */
+ if (opt_dentry == NULL) {
+ dp = d_find_alias(inode);
+ if (dp == NULL)
+ break;
+ } else {
+ dp = dget(opt_dentry);
+ if (dp == NULL)
+ break;
+ }
+
+ fetched = smk_fetch(inode, dp);
+ if (fetched != NULL)
+ final = fetched;
+
+ dput(dp);
+ break;
+ }
+
+ if (final == NULL)
+ isp->smk_inode = csp;
+ else
+ isp->smk_inode = final;
+
+ isp->smk_flags |= SMK_INODE_INSTANT;
+
+unlockandout:
+ mutex_unlock(&isp->smk_lock);
+ return;
+}
+
+/**
+ * smack_getprocattr - Smack process attribute access
+ * @p: the object task
+ * @name: the name of the attribute in /proc/.../attr
+ * @value: where to put the result
+ *
+ * Places a copy of the task Smack into value
+ *
+ * Returns the length of the smack label or an error code
+ */
+static int smack_getprocattr(struct task_struct *p, char *name, char **value)
+{
+ char *cp;
+ int slen;
+
+ if (strcmp(name, "current") != 0)
+ return -EINVAL;
+
+ cp = kstrdup(p->security, GFP_KERNEL);
+ if (cp == NULL)
+ return -ENOMEM;
+
+ slen = strlen(cp);
+ *value = cp;
+ return slen;
+}
+
+/**
+ * smack_setprocattr - Smack process attribute setting
+ * @p: the object task
+ * @name: the name of the attribute in /proc/.../attr
+ * @value: the value to set
+ * @size: the size of the value
+ *
+ * Sets the Smack value of the task. Only setting self
+ * is permitted and only with privilege
+ *
+ * Returns the length of the smack label or an error code
+ */
+static int smack_setprocattr(struct task_struct *p, char *name,
+ void *value, size_t size)
+{
+ char *newsmack;
+
+ if (!__capable(p, CAP_MAC_ADMIN))
+ return -EPERM;
+
+ /*
+ * Changing another process' Smack value is too dangerous
+ * and supports no sane use case.
+ */
+ if (p != current)
+ return -EPERM;
+
+ if (value == NULL || size == 0 || size >= SMK_LABELLEN)
+ return -EINVAL;
+
+ if (strcmp(name, "current") != 0)
+ return -EINVAL;
+
+ newsmack = smk_import(value, size);
+ if (newsmack == NULL)
+ return -EINVAL;
+
+ p->security = newsmack;
+ return size;
+}
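
A minimal userspace sketch of how this hook is reached, for orientation only: the
path below assumes the conventional /proc/<pid>/attr layout, "current" being the
only name smack_setprocattr() accepts, and the caller needs CAP_MAC_ADMIN as
checked above. The label string is illustrative.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *label = "Rubble";	/* any label shorter than SMK_LABELLEN */
		int fd = open("/proc/self/attr/current", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* smack_setprocattr() imports the string and relabels current */
		if (write(fd, label, strlen(label)) < 0)
			perror("write");
		close(fd);
		return 0;
	}
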
+
+/**
+ * smack_unix_stream_connect - Smack access on UDS
+ * @sock: one socket
+ * @other: the other socket
+ * @newsk: unused
+ *
+ * Return 0 if a subject with the smack of sock could access
+ * an object with the smack of other, otherwise an error code
+ */
+static int smack_unix_stream_connect(struct socket *sock,
+ struct socket *other, struct sock *newsk)
+{
+ struct inode *sp = SOCK_INODE(sock);
+ struct inode *op = SOCK_INODE(other);
+
+ return smk_access(smk_of_inode(sp), smk_of_inode(op), MAY_READWRITE);
+}
+
+/**
+ * smack_unix_may_send - Smack access on UDS
+ * @sock: one socket
+ * @other: the other socket
+ *
+ * Return 0 if a subject with the smack of sock could access
+ * an object with the smack of other, otherwise an error code
+ */
+static int smack_unix_may_send(struct socket *sock, struct socket *other)
+{
+ struct inode *sp = SOCK_INODE(sock);
+ struct inode *op = SOCK_INODE(other);
+
+ return smk_access(smk_of_inode(sp), smk_of_inode(op), MAY_WRITE);
+}
+
+/**
+ * smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat
+ * pair to smack
+ * @sap: netlabel secattr
+ * @sip: where to put the result
+ *
+ * Copies a smack label into sip
+ */
+static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
+{
+ char smack[SMK_LABELLEN];
+ int pcat;
+
+ if ((sap->flags & NETLBL_SECATTR_MLS_LVL) == 0) {
+ /*
+ * If there are flags but no level, netlabel isn't
+ * behaving the way we expect it to.
+ *
+ * Without guidance regarding the smack value
+ * for the packet fall back on the network
+ * ambient value.
+ */
+ strncpy(sip, smack_net_ambient, SMK_MAXLEN);
+ return;
+ }
+ /*
+ * Get the categories, if any
+ */
+ memset(smack, '\0', SMK_LABELLEN);
+ if ((sap->flags & NETLBL_SECATTR_MLS_CAT) != 0)
+ for (pcat = -1;;) {
+ pcat = netlbl_secattr_catmap_walk(sap->attr.mls.cat,
+ pcat + 1);
+ if (pcat < 0)
+ break;
+ smack_catset_bit(pcat, smack);
+ }
+ /*
+ * If it is CIPSO using smack direct mapping
+ * we are already done. WeeHee.
+ */
+ if (sap->attr.mls.lvl == smack_cipso_direct) {
+ memcpy(sip, smack, SMK_MAXLEN);
+ return;
+ }
+ /*
+ * Look it up in the supplied table if it is not a direct mapping.
+ */
+ smack_from_cipso(sap->attr.mls.lvl, smack, sip);
+ return;
+}
+
+/**
+ * smack_socket_sock_rcv_skb - Smack packet delivery access check
+ * @sk: socket
+ * @skb: packet
+ *
+ * Returns 0 if the packet should be delivered, an error code otherwise
+ */
+static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ struct netlbl_lsm_secattr secattr;
+ struct socket_smack *ssp = sk->sk_security;
+ char smack[SMK_LABELLEN];
+ int rc;
+
+ if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
+ return 0;
+
+ /*
+ * Translate what netlabel gave us.
+ */
+ memset(smack, '\0', SMK_LABELLEN);
+ netlbl_secattr_init(&secattr);
+ rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
+ if (rc == 0)
+ smack_from_secattr(&secattr, smack);
+ else
+ strncpy(smack, smack_net_ambient, SMK_MAXLEN);
+ netlbl_secattr_destroy(&secattr);
+ /*
+ * Receiving a packet requires that the other end
+ * be able to write here. Read access is not required.
+ * This is the simplest possible security model
+ * for networking.
+ */
+ return smk_access(smack, ssp->smk_in, MAY_WRITE);
+}
+
+/**
+ * smack_socket_getpeersec_stream - pull in packet label
+ * @sock: the socket
+ * @optval: user's destination
+ * @optlen: size thereof
+ * @len: max thereof
+ *
+ * Returns zero on success, an error code otherwise
+ */
+static int smack_socket_getpeersec_stream(struct socket *sock,
+ char __user *optval,
+ int __user *optlen, unsigned len)
+{
+ struct socket_smack *ssp;
+ int slen;
+ int rc = 0;
+
+ ssp = sock->sk->sk_security;
+ slen = strlen(ssp->smk_packet) + 1;
+
+ if (slen > len)
+ rc = -ERANGE;
+ else if (copy_to_user(optval, ssp->smk_packet, slen) != 0)
+ rc = -EFAULT;
+
+ if (put_user(slen, optlen) != 0)
+ rc = -EFAULT;
+
+ return rc;
+}
+
+
+/**
+ * smack_socket_getpeersec_dgram - pull in packet label
+ * @sock: the socket
+ * @skb: packet data
+ * @secid: pointer to where to put the secid of the packet
+ *
+ * Returns 0 on success with @secid set from the packet's label, or an error code
+ */
+static int smack_socket_getpeersec_dgram(struct socket *sock,
+ struct sk_buff *skb, u32 *secid)
+
+{
+ struct netlbl_lsm_secattr secattr;
+ struct sock *sk;
+ char smack[SMK_LABELLEN];
+ int family = PF_INET;
+ u32 s;
+ int rc;
+
+ /*
+ * Only works for families with packets.
+ */
+ if (sock != NULL) {
+ sk = sock->sk;
+ if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
+ return 0;
+ family = sk->sk_family;
+ }
+ /*
+ * Translate what netlabel gave us.
+ */
+ memset(smack, '\0', SMK_LABELLEN);
+ netlbl_secattr_init(&secattr);
+ rc = netlbl_skbuff_getattr(skb, family, &secattr);
+ if (rc == 0)
+ smack_from_secattr(&secattr, smack);
+ netlbl_secattr_destroy(&secattr);
+
+ /*
+ * Give up if we couldn't get anything
+ */
+ if (rc != 0)
+ return rc;
+
+ s = smack_to_secid(smack);
+ if (s == 0)
+ return -EINVAL;
+
+ *secid = s;
+ return 0;
+}
+
+/**
+ * smack_sock_graft - graft access state between two sockets
+ * @sk: fresh sock
+ * @parent: donor socket
+ *
+ * Sets the netlabel socket state on sk from parent
+ */
+static void smack_sock_graft(struct sock *sk, struct socket *parent)
+{
+ struct socket_smack *ssp;
+ int rc;
+
+ if (sk == NULL)
+ return;
+
+ if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
+ return;
+
+ ssp = sk->sk_security;
+ ssp->smk_in = current->security;
+ ssp->smk_out = current->security;
+ ssp->smk_packet[0] = '\0';
+
+ rc = smack_netlabel(sk);
+}
+
+/**
+ * smack_inet_conn_request - Smack access check on connect
+ * @sk: socket involved
+ * @skb: packet
+ * @req: unused
+ *
+ * Returns 0 if a task with the packet label could write to
+ * the socket, otherwise an error code
+ */
+static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req)
+{
+ struct netlbl_lsm_secattr skb_secattr;
+ struct socket_smack *ssp = sk->sk_security;
+ char smack[SMK_LABELLEN];
+ int rc;
+
+ if (skb == NULL)
+ return -EACCES;
+
+ memset(smack, '\0', SMK_LABELLEN);
+ netlbl_secattr_init(&skb_secattr);
+ rc = netlbl_skbuff_getattr(skb, sk->sk_family, &skb_secattr);
+ if (rc == 0)
+ smack_from_secattr(&skb_secattr, smack);
+ else
+ strncpy(smack, smack_known_huh.smk_known, SMK_MAXLEN);
+ netlbl_secattr_destroy(&skb_secattr);
+ /*
+ * Receiving a packet requires that the other end
+ * be able to write here. Read access is not required.
+ *
+ * If the request is successful save the peer's label
+ * so that SO_PEERSEC can report it.
+ */
+ rc = smk_access(smack, ssp->smk_in, MAY_WRITE);
+ if (rc == 0)
+ strncpy(ssp->smk_packet, smack, SMK_MAXLEN);
+
+ return rc;
+}
+
+/*
+ * Key management security hooks
+ *
+ * Casey has not tested key support very heavily.
+ * The permission check is most likely too restrictive.
+ * If you care about keys please have a look.
+ */
+#ifdef CONFIG_KEYS
+
+/**
+ * smack_key_alloc - Set the key security blob
+ * @key: object
+ * @tsk: the task associated with the key
+ * @flags: unused
+ *
+ * No allocation required
+ *
+ * Returns 0
+ */
+static int smack_key_alloc(struct key *key, struct task_struct *tsk,
+ unsigned long flags)
+{
+ key->security = tsk->security;
+ return 0;
+}
+
+/**
+ * smack_key_free - Clear the key security blob
+ * @key: the object
+ *
+ * Clear the blob pointer
+ */
+static void smack_key_free(struct key *key)
+{
+ key->security = NULL;
+}
+
+/*
+ * smack_key_permission - Smack access on a key
+ * @key_ref: gets to the object
+ * @context: task involved
+ * @perm: unused
+ *
+ * Return 0 if the task has read and write access to the object,
+ * an error code otherwise
+ */
+static int smack_key_permission(key_ref_t key_ref,
+ struct task_struct *context, key_perm_t perm)
+{
+ struct key *keyp;
+
+ keyp = key_ref_to_ptr(key_ref);
+ if (keyp == NULL)
+ return -EINVAL;
+ /*
+ * If the key hasn't been initialized yet, allow the access
+ * so that it can be set up.
+ */
+ if (keyp->security == NULL)
+ return 0;
+ /*
+ * This should not occur
+ */
+ if (context->security == NULL)
+ return -EACCES;
+
+ return smk_access(context->security, keyp->security, MAY_READWRITE);
+}
+#endif /* CONFIG_KEYS */
+
+/*
+ * smack_secid_to_secctx - return the smack label for a secid
+ * @secid: incoming integer
+ * @secdata: destination
+ * @seclen: how long it is
+ *
+ * Exists for networking code.
+ */
+static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+ char *sp = smack_from_secid(secid);
+
+ *secdata = sp;
+ *seclen = strlen(sp);
+ return 0;
+}
+
+/*
+ * smack_release_secctx - don't do anything.
+ * @secdata: unused
+ * @seclen: unused
+ *
+ * Exists so the hook is defined; there is nothing to release.
+ */
+static void smack_release_secctx(char *secdata, u32 seclen)
+{
+}
+
+static struct security_operations smack_ops = {
+ .ptrace = smack_ptrace,
+ .capget = cap_capget,
+ .capset_check = cap_capset_check,
+ .capset_set = cap_capset_set,
+ .capable = cap_capable,
+ .syslog = smack_syslog,
+ .settime = cap_settime,
+ .vm_enough_memory = cap_vm_enough_memory,
+
+ .bprm_apply_creds = cap_bprm_apply_creds,
+ .bprm_set_security = cap_bprm_set_security,
+ .bprm_secureexec = cap_bprm_secureexec,
+
+ .sb_alloc_security = smack_sb_alloc_security,
+ .sb_free_security = smack_sb_free_security,
+ .sb_copy_data = smack_sb_copy_data,
+ .sb_kern_mount = smack_sb_kern_mount,
+ .sb_statfs = smack_sb_statfs,
+ .sb_mount = smack_sb_mount,
+ .sb_umount = smack_sb_umount,
+
+ .inode_alloc_security = smack_inode_alloc_security,
+ .inode_free_security = smack_inode_free_security,
+ .inode_init_security = smack_inode_init_security,
+ .inode_link = smack_inode_link,
+ .inode_unlink = smack_inode_unlink,
+ .inode_rmdir = smack_inode_rmdir,
+ .inode_rename = smack_inode_rename,
+ .inode_permission = smack_inode_permission,
+ .inode_setattr = smack_inode_setattr,
+ .inode_getattr = smack_inode_getattr,
+ .inode_setxattr = smack_inode_setxattr,
+ .inode_post_setxattr = smack_inode_post_setxattr,
+ .inode_getxattr = smack_inode_getxattr,
+ .inode_removexattr = smack_inode_removexattr,
+ .inode_getsecurity = smack_inode_getsecurity,
+ .inode_setsecurity = smack_inode_setsecurity,
+ .inode_listsecurity = smack_inode_listsecurity,
+
+ .file_permission = smack_file_permission,
+ .file_alloc_security = smack_file_alloc_security,
+ .file_free_security = smack_file_free_security,
+ .file_ioctl = smack_file_ioctl,
+ .file_lock = smack_file_lock,
+ .file_fcntl = smack_file_fcntl,
+ .file_set_fowner = smack_file_set_fowner,
+ .file_send_sigiotask = smack_file_send_sigiotask,
+ .file_receive = smack_file_receive,
+
+ .task_alloc_security = smack_task_alloc_security,
+ .task_free_security = smack_task_free_security,
+ .task_post_setuid = cap_task_post_setuid,
+ .task_setpgid = smack_task_setpgid,
+ .task_getpgid = smack_task_getpgid,
+ .task_getsid = smack_task_getsid,
+ .task_getsecid = smack_task_getsecid,
+ .task_setnice = smack_task_setnice,
+ .task_setioprio = smack_task_setioprio,
+ .task_getioprio = smack_task_getioprio,
+ .task_setscheduler = smack_task_setscheduler,
+ .task_getscheduler = smack_task_getscheduler,
+ .task_movememory = smack_task_movememory,
+ .task_kill = smack_task_kill,
+ .task_wait = smack_task_wait,
+ .task_reparent_to_init = cap_task_reparent_to_init,
+ .task_to_inode = smack_task_to_inode,
+
+ .ipc_permission = smack_ipc_permission,
+
+ .msg_msg_alloc_security = smack_msg_msg_alloc_security,
+ .msg_msg_free_security = smack_msg_msg_free_security,
+
+ .msg_queue_alloc_security = smack_msg_queue_alloc_security,
+ .msg_queue_free_security = smack_msg_queue_free_security,
+ .msg_queue_associate = smack_msg_queue_associate,
+ .msg_queue_msgctl = smack_msg_queue_msgctl,
+ .msg_queue_msgsnd = smack_msg_queue_msgsnd,
+ .msg_queue_msgrcv = smack_msg_queue_msgrcv,
+
+ .shm_alloc_security = smack_shm_alloc_security,
+ .shm_free_security = smack_shm_free_security,
+ .shm_associate = smack_shm_associate,
+ .shm_shmctl = smack_shm_shmctl,
+ .shm_shmat = smack_shm_shmat,
+
+ .sem_alloc_security = smack_sem_alloc_security,
+ .sem_free_security = smack_sem_free_security,
+ .sem_associate = smack_sem_associate,
+ .sem_semctl = smack_sem_semctl,
+ .sem_semop = smack_sem_semop,
+
+ .netlink_send = cap_netlink_send,
+ .netlink_recv = cap_netlink_recv,
+
+ .d_instantiate = smack_d_instantiate,
+
+ .getprocattr = smack_getprocattr,
+ .setprocattr = smack_setprocattr,
+
+ .unix_stream_connect = smack_unix_stream_connect,
+ .unix_may_send = smack_unix_may_send,
+
+ .socket_post_create = smack_socket_post_create,
+ .socket_sock_rcv_skb = smack_socket_sock_rcv_skb,
+ .socket_getpeersec_stream = smack_socket_getpeersec_stream,
+ .socket_getpeersec_dgram = smack_socket_getpeersec_dgram,
+ .sk_alloc_security = smack_sk_alloc_security,
+ .sk_free_security = smack_sk_free_security,
+ .sock_graft = smack_sock_graft,
+ .inet_conn_request = smack_inet_conn_request,
+ /* key management security hooks */
+#ifdef CONFIG_KEYS
+ .key_alloc = smack_key_alloc,
+ .key_free = smack_key_free,
+ .key_permission = smack_key_permission,
+#endif /* CONFIG_KEYS */
+ .secid_to_secctx = smack_secid_to_secctx,
+ .release_secctx = smack_release_secctx,
+};
+
+/**
+ * smack_init - initialize the smack system
+ *
+ * Returns 0
+ */
+static __init int smack_init(void)
+{
+ printk(KERN_INFO "Smack: Initializing.\n");
+
+ /*
+ * Set the security state for the initial task.
+ */
+ current->security = &smack_known_floor.smk_known;
+
+ /*
+ * Initialize locks
+ */
+ spin_lock_init(&smack_known_unset.smk_cipsolock);
+ spin_lock_init(&smack_known_huh.smk_cipsolock);
+ spin_lock_init(&smack_known_hat.smk_cipsolock);
+ spin_lock_init(&smack_known_star.smk_cipsolock);
+ spin_lock_init(&smack_known_floor.smk_cipsolock);
+ spin_lock_init(&smack_known_invalid.smk_cipsolock);
+
+ /*
+ * Register with LSM
+ */
+ if (register_security(&smack_ops))
+ panic("smack: Unable to register with kernel.\n");
+
+ return 0;
+}
+
+/*
+ * Smack requires early initialization in order to label
+ * all processes and objects when they are created.
+ */
+security_initcall(smack_init);
+
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
new file mode 100644
index 000000000000..15aa37f65b39
--- /dev/null
+++ b/security/smack/smackfs.c
@@ -0,0 +1,981 @@
+/*
+ * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2.
+ *
+ * Authors:
+ * Casey Schaufler <casey@schaufler-ca.com>
+ * Ahmed S. Darwish <darwish.07@gmail.com>
+ *
+ * Special thanks to the authors of selinuxfs.
+ *
+ * Karl MacMillan <kmacmillan@tresys.com>
+ * James Morris <jmorris@redhat.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <linux/security.h>
+#include <linux/mutex.h>
+#include <net/netlabel.h>
+#include <net/cipso_ipv4.h>
+#include <linux/seq_file.h>
+#include <linux/ctype.h>
+#include "smack.h"
+
+/*
+ * smackfs pseudo filesystem.
+ */
+
+enum smk_inos {
+ SMK_ROOT_INO = 2,
+ SMK_LOAD = 3, /* load policy */
+ SMK_CIPSO = 4, /* load label -> CIPSO mapping */
+ SMK_DOI = 5, /* CIPSO DOI */
+ SMK_DIRECT = 6, /* CIPSO level indicating direct label */
+ SMK_AMBIENT = 7, /* internet ambient label */
+ SMK_NLTYPE = 8, /* label scheme to use by default */
+};
+
+/*
+ * List locks
+ */
+static DEFINE_MUTEX(smack_list_lock);
+static DEFINE_MUTEX(smack_cipso_lock);
+
+/*
+ * This is the "ambient" label for network traffic.
+ * If it isn't somehow marked, use this.
+ * It can be reset via smackfs/ambient
+ */
+char *smack_net_ambient = smack_known_floor.smk_known;
+
+/*
+ * This is the default packet marking scheme for network traffic.
+ * It can be reset via smackfs/nltype
+ */
+int smack_net_nltype = NETLBL_NLTYPE_CIPSOV4;
+
+/*
+ * This is the level in a CIPSO header that indicates a
+ * smack label is contained directly in the category set.
+ * It can be reset via smackfs/direct
+ */
+int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT;
+
+static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
+struct smk_list_entry *smack_list;
+
+#define SEQ_READ_FINISHED 1
+
+/*
+ * Disable concurrent writing open() operations
+ */
+static struct semaphore smack_write_sem;
+
+/*
+ * Values for parsing cipso rules
+ * SMK_DIGITLEN: Length of a digit field in a rule.
+ * SMK_CIPSOMIN: Minimum possible cipso rule length.
+ */
+#define SMK_DIGITLEN 4
+#define SMK_CIPSOMIN (SMK_MAXLEN + 2 * SMK_DIGITLEN)
+
+/*
+ * Seq_file read operations for /smack/load
+ */
+
+static void *load_seq_start(struct seq_file *s, loff_t *pos)
+{
+ if (*pos == SEQ_READ_FINISHED)
+ return NULL;
+
+ return smack_list;
+}
+
+static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct smk_list_entry *skp = ((struct smk_list_entry *) v)->smk_next;
+
+ if (skp == NULL)
+ *pos = SEQ_READ_FINISHED;
+
+ return skp;
+}
+
+static int load_seq_show(struct seq_file *s, void *v)
+{
+ struct smk_list_entry *slp = (struct smk_list_entry *) v;
+ struct smack_rule *srp = &slp->smk_rule;
+
+ seq_printf(s, "%s %s", (char *)srp->smk_subject,
+ (char *)srp->smk_object);
+
+ seq_putc(s, ' ');
+
+ if (srp->smk_access & MAY_READ)
+ seq_putc(s, 'r');
+ if (srp->smk_access & MAY_WRITE)
+ seq_putc(s, 'w');
+ if (srp->smk_access & MAY_EXEC)
+ seq_putc(s, 'x');
+ if (srp->smk_access & MAY_APPEND)
+ seq_putc(s, 'a');
+ if (srp->smk_access == 0)
+ seq_putc(s, '-');
+
+ seq_putc(s, '\n');
+
+ return 0;
+}
+
+static void load_seq_stop(struct seq_file *s, void *v)
+{
+ /* No-op */
+}
+
+static struct seq_operations load_seq_ops = {
+ .start = load_seq_start,
+ .next = load_seq_next,
+ .show = load_seq_show,
+ .stop = load_seq_stop,
+};
+
+/**
+ * smk_open_load - open() for /smack/load
+ * @inode: inode structure representing file
+ * @file: "load" file pointer
+ *
+ * For reading, use load_seq_* seq_file reading operations.
+ */
+static int smk_open_load(struct inode *inode, struct file *file)
+{
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return seq_open(file, &load_seq_ops);
+
+ if (down_interruptible(&smack_write_sem))
+ return -ERESTARTSYS;
+
+ return 0;
+}
+
+/**
+ * smk_release_load - release() for /smack/load
+ * @inode: inode structure representing file
+ * @file: "load" file pointer
+ *
+ * For a reading session, use the seq_file release
+ * implementation.
+ * Otherwise, we are at the end of a writing session so
+ * clean everything up.
+ */
+static int smk_release_load(struct inode *inode, struct file *file)
+{
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return seq_release(inode, file);
+
+ up(&smack_write_sem);
+ return 0;
+}
+
+/**
+ * smk_set_access - add a rule to the rule list
+ * @srp: the new rule to add
+ *
+ * Looks through the current subject/object/access list for
+ * the subject/object pair and replaces the access that was
+ * there. If the pair isn't found add it with the specified
+ * access.
+ */
+static void smk_set_access(struct smack_rule *srp)
+{
+ struct smk_list_entry *sp;
+ struct smk_list_entry *newp;
+
+ mutex_lock(&smack_list_lock);
+
+ for (sp = smack_list; sp != NULL; sp = sp->smk_next)
+ if (sp->smk_rule.smk_subject == srp->smk_subject &&
+ sp->smk_rule.smk_object == srp->smk_object) {
+ sp->smk_rule.smk_access = srp->smk_access;
+ break;
+ }
+
+ if (sp == NULL) {
+ newp = kzalloc(sizeof(struct smk_list_entry), GFP_KERNEL);
+ newp->smk_rule = *srp;
+ newp->smk_next = smack_list;
+ smack_list = newp;
+ }
+
+ mutex_unlock(&smack_list_lock);
+
+ return;
+}
+
+/**
+ * smk_write_load - write() for /smack/load
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ * Get one smack access rule from above.
+ * The format is exactly:
+ * char subject[SMK_LABELLEN]
+ * char object[SMK_LABELLEN]
+ * char access[SMK_ACCESSKINDS]
+ *
+ * Anything following is commentary and ignored.
+ *
+ * Writes must be at least SMK_LABELLEN+SMK_LABELLEN+SMK_ACCESSKINDS bytes.
+ */
+#define MINIMUM_LOAD (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSKINDS)
+
+static ssize_t smk_write_load(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smack_rule rule;
+ char *data;
+ int rc = -EINVAL;
+
+ /*
+ * Must have privilege.
+ * No partial writes.
+ * Enough data must be present.
+ */
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+ if (*ppos != 0)
+ return -EINVAL;
+ if (count < MINIMUM_LOAD)
+ return -EINVAL;
+
+ data = kzalloc(count, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, buf, count) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rule.smk_subject = smk_import(data, 0);
+ if (rule.smk_subject == NULL)
+ goto out;
+
+ rule.smk_object = smk_import(data + SMK_LABELLEN, 0);
+ if (rule.smk_object == NULL)
+ goto out;
+
+ rule.smk_access = 0;
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN]) {
+ case '-':
+ break;
+ case 'r':
+ case 'R':
+ rule.smk_access |= MAY_READ;
+ break;
+ default:
+ goto out;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) {
+ case '-':
+ break;
+ case 'w':
+ case 'W':
+ rule.smk_access |= MAY_WRITE;
+ break;
+ default:
+ goto out;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) {
+ case '-':
+ break;
+ case 'x':
+ case 'X':
+ rule.smk_access |= MAY_EXEC;
+ break;
+ default:
+ goto out;
+ }
+
+ switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) {
+ case '-':
+ break;
+ case 'a':
+ case 'A':
+ rule.smk_access |= MAY_APPEND;
+ break;
+ default:
+ goto out;
+ }
+
+ smk_set_access(&rule);
+ rc = count;
+
+out:
+ kfree(data);
+ return rc;
+}
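
A hedged userspace sketch of composing the fixed-width record described above:
subject and object each occupy SMK_LABELLEN bytes, followed by the four access
characters in r/w/x/a order. The SMK_LABELLEN value, the /smack mount point, and
the assumption that smk_import() stops at the space padding are taken from the
patch's smack.h and customary usage, not guaranteed by this hunk.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	#define LABELLEN 24	/* assumed SMK_LABELLEN; check security/smack/smack.h */

	int main(void)
	{
		char rule[LABELLEN + LABELLEN + 4];
		int fd = open("/smack/load", O_WRONLY);	/* assumes smackfs on /smack */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(rule, ' ', sizeof(rule));		/* space padding */
		memcpy(rule, "Pebble", 6);			/* subject */
		memcpy(rule + LABELLEN, "Quarry", 6);		/* object */
		memcpy(rule + 2 * LABELLEN, "rwx-", 4);		/* access: r w x a slots */
		if (write(fd, rule, sizeof(rule)) < 0)
			perror("write");
		close(fd);
		return 0;
	}
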
+
+static const struct file_operations smk_load_ops = {
+ .open = smk_open_load,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_load,
+ .release = smk_release_load,
+};
+
+/**
+ * smk_cipso_doi - initialize the CIPSO domain
+ */
+void smk_cipso_doi(void)
+{
+ int rc;
+ struct cipso_v4_doi *doip;
+ struct netlbl_audit audit_info;
+
+ rc = netlbl_cfg_map_del(NULL, &audit_info);
+ if (rc != 0)
+ printk(KERN_WARNING "%s:%d remove rc = %d\n",
+ __func__, __LINE__, rc);
+
+ doip = kmalloc(sizeof(struct cipso_v4_doi), GFP_KERNEL);
+ if (doip == NULL)
+ panic("smack: Failed to initialize cipso DOI.\n");
+ doip->map.std = NULL;
+ doip->doi = smk_cipso_doi_value;
+ doip->type = CIPSO_V4_MAP_PASS;
+ doip->tags[0] = CIPSO_V4_TAG_RBITMAP;
+ for (rc = 1; rc < CIPSO_V4_TAG_MAXCNT; rc++)
+ doip->tags[rc] = CIPSO_V4_TAG_INVALID;
+
+ rc = netlbl_cfg_cipsov4_add_map(doip, NULL, &audit_info);
+ if (rc != 0)
+ printk(KERN_WARNING "%s:%d add rc = %d\n",
+ __func__, __LINE__, rc);
+}
+
+/*
+ * Seq_file read operations for /smack/cipso
+ */
+
+static void *cipso_seq_start(struct seq_file *s, loff_t *pos)
+{
+ if (*pos == SEQ_READ_FINISHED)
+ return NULL;
+
+ return smack_known;
+}
+
+static void *cipso_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct smack_known *skp = ((struct smack_known *) v)->smk_next;
+
+ /*
+ * Omit labels with no associated cipso value
+ */
+ while (skp != NULL && !skp->smk_cipso)
+ skp = skp->smk_next;
+
+ if (skp == NULL)
+ *pos = SEQ_READ_FINISHED;
+
+ return skp;
+}
+
+/*
+ * Print cipso labels in format:
+ * label level[/cat[,cat]]
+ */
+static int cipso_seq_show(struct seq_file *s, void *v)
+{
+ struct smack_known *skp = (struct smack_known *) v;
+ struct smack_cipso *scp = skp->smk_cipso;
+ char *cbp;
+ char sep = '/';
+ int cat = 1;
+ int i;
+ unsigned char m;
+
+ if (scp == NULL)
+ return 0;
+
+ seq_printf(s, "%s %3d", (char *)&skp->smk_known, scp->smk_level);
+
+ cbp = scp->smk_catset;
+ for (i = 0; i < SMK_LABELLEN; i++)
+ for (m = 0x80; m != 0; m >>= 1) {
+ if (m & cbp[i]) {
+ seq_printf(s, "%c%d", sep, cat);
+ sep = ',';
+ }
+ cat++;
+ }
+
+ seq_putc(s, '\n');
+
+ return 0;
+}
+
+static void cipso_seq_stop(struct seq_file *s, void *v)
+{
+ /* No-op */
+}
+
+static struct seq_operations cipso_seq_ops = {
+ .start = cipso_seq_start,
+ .stop = cipso_seq_stop,
+ .next = cipso_seq_next,
+ .show = cipso_seq_show,
+};
+
+/**
+ * smk_open_cipso - open() for /smack/cipso
+ * @inode: inode structure representing file
+ * @file: "cipso" file pointer
+ *
+ * Connect our cipso_seq_* operations with /smack/cipso
+ * file_operations
+ */
+static int smk_open_cipso(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &cipso_seq_ops);
+}
+
+/**
+ * smk_write_cipso - write() for /smack/cipso
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one cipso rule per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smack_known *skp;
+ struct smack_cipso *scp = NULL;
+ char mapcatset[SMK_LABELLEN];
+ int maplevel;
+ int cat;
+ int catlen;
+ ssize_t rc = -EINVAL;
+ char *data = NULL;
+ char *rule;
+ int ret;
+ int i;
+
+ /*
+ * Must have privilege.
+ * No partial writes.
+ * Enough data must be present.
+ */
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+ if (*ppos != 0)
+ return -EINVAL;
+ if (count <= SMK_CIPSOMIN)
+ return -EINVAL;
+
+ data = kzalloc(count + 1, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, buf, count) != 0) {
+ rc = -EFAULT;
+ goto unlockedout;
+ }
+
+ data[count] = '\0';
+ rule = data;
+ /*
+ * Only allow one writer at a time. Writes should be
+ * quite rare and small in any case.
+ */
+ mutex_lock(&smack_cipso_lock);
+
+ skp = smk_import_entry(rule, 0);
+ if (skp == NULL)
+ goto out;
+
+ rule += SMK_LABELLEN;
+ ret = sscanf(rule, "%d", &maplevel);
+ if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
+ goto out;
+
+ rule += SMK_DIGITLEN;
+ ret = sscanf(rule, "%d", &catlen);
+ if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM)
+ goto out;
+
+ if (count <= (SMK_CIPSOMIN + catlen * SMK_DIGITLEN))
+ goto out;
+
+ memset(mapcatset, 0, sizeof(mapcatset));
+
+ for (i = 0; i < catlen; i++) {
+ rule += SMK_DIGITLEN;
+ ret = sscanf(rule, "%d", &cat);
+ if (ret != 1 || cat > SMACK_CIPSO_MAXCATVAL)
+ goto out;
+
+ smack_catset_bit(cat, mapcatset);
+ }
+
+ if (skp->smk_cipso == NULL) {
+ scp = kzalloc(sizeof(struct smack_cipso), GFP_KERNEL);
+ if (scp == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ }
+
+ spin_lock_bh(&skp->smk_cipsolock);
+
+ if (scp == NULL)
+ scp = skp->smk_cipso;
+ else
+ skp->smk_cipso = scp;
+
+ scp->smk_level = maplevel;
+ memcpy(scp->smk_catset, mapcatset, sizeof(mapcatset));
+
+ spin_unlock_bh(&skp->smk_cipsolock);
+
+ rc = count;
+out:
+ mutex_unlock(&smack_cipso_lock);
+unlockedout:
+ kfree(data);
+ return rc;
+}
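
The record parsed above is likewise fixed-width: a label padded to SMK_LABELLEN,
then SMK_DIGITLEN-wide decimal fields for the level, the category count, and each
category. A sketch of laying one out follows; the SMK_LABELLEN value is an
assumption (only SMK_DIGITLEN is visible in this hunk) and the helper names are
hypothetical.

	#include <stdio.h>
	#include <string.h>

	#define LABELLEN 24	/* assumed SMK_LABELLEN */
	#define DIGITLEN 4	/* SMK_DIGITLEN above */

	/* Write one value into a DIGITLEN-wide, space-padded field. */
	static void put_field(char *dst, int value)
	{
		char tmp[DIGITLEN + 1];

		snprintf(tmp, sizeof(tmp), "%-*d", DIGITLEN, value);
		memcpy(dst, tmp, DIGITLEN);
	}

	/*
	 * Lay out "label level catlen cat..." the way smk_write_cipso()
	 * walks it with rule += SMK_LABELLEN / SMK_DIGITLEN.
	 * Returns the record length, or -1 if buf is too small.
	 */
	static int build_cipso_rule(char *buf, size_t len, const char *label,
				    int level, const int *cats, int ncats)
	{
		size_t need = LABELLEN + DIGITLEN * (2 + (size_t)ncats);
		int i;

		if (len < need || strlen(label) >= LABELLEN)
			return -1;
		memset(buf, ' ', need);
		memcpy(buf, label, strlen(label));
		put_field(buf + LABELLEN, level);
		put_field(buf + LABELLEN + DIGITLEN, ncats);
		for (i = 0; i < ncats; i++)
			put_field(buf + LABELLEN + DIGITLEN * (2 + i), cats[i]);
		return (int)need;
	}
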
+
+static const struct file_operations smk_cipso_ops = {
+ .open = smk_open_cipso,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_cipso,
+ .release = seq_release,
+};
+
+/**
+ * smk_read_doi - read() for /smack/doi
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_doi(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", smk_cipso_doi_value);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * smk_write_doi - write() for /smack/doi
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_doi(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ int i;
+
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if (sscanf(temp, "%d", &i) != 1)
+ return -EINVAL;
+
+ smk_cipso_doi_value = i;
+
+ smk_cipso_doi();
+
+ return count;
+}
+
+static const struct file_operations smk_doi_ops = {
+ .read = smk_read_doi,
+ .write = smk_write_doi,
+};
+
+/**
+ * smk_read_direct - read() for /smack/direct
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_direct(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", smack_cipso_direct);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * smk_write_direct - write() for /smack/direct
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_direct(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ int i;
+
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if (sscanf(temp, "%d", &i) != 1)
+ return -EINVAL;
+
+ smack_cipso_direct = i;
+
+ return count;
+}
+
+static const struct file_operations smk_direct_ops = {
+ .read = smk_read_direct,
+ .write = smk_write_direct,
+};
+
+/**
+ * smk_read_ambient - read() for /smack/ambient
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @cn: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_ambient(struct file *filp, char __user *buf,
+ size_t cn, loff_t *ppos)
+{
+ ssize_t rc;
+ char out[SMK_LABELLEN];
+ int asize;
+
+ if (*ppos != 0)
+ return 0;
+ /*
+ * Being careful to avoid a problem in the case where
+ * smack_net_ambient gets changed in midstream.
+ * Since smack_net_ambient is always set with a value
+ * from the label list, including initially, and those
+ * never get freed, the worst case is that the pointer
+ * gets changed just after this strncpy, in which case
+ * the value passed up is incorrect. Locking around
+ * smack_net_ambient wouldn't be any better than this
+ * copy scheme as by the time the caller got to look
+ * at the ambient value it would have cleared the lock
+ * and been changed.
+ */
+ strncpy(out, smack_net_ambient, SMK_LABELLEN);
+ asize = strlen(out) + 1;
+
+ if (cn < asize)
+ return -EINVAL;
+
+ rc = simple_read_from_buffer(buf, cn, ppos, out, asize);
+
+ return rc;
+}
+
+/**
+ * smk_write_ambient - write() for /smack/ambient
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_ambient(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char in[SMK_LABELLEN];
+ char *smack;
+
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count >= SMK_LABELLEN)
+ return -EINVAL;
+
+ if (copy_from_user(in, buf, count) != 0)
+ return -EFAULT;
+
+ smack = smk_import(in, count);
+ if (smack == NULL)
+ return -EINVAL;
+
+ smack_net_ambient = smack;
+
+ return count;
+}
+
+static const struct file_operations smk_ambient_ops = {
+ .read = smk_read_ambient,
+ .write = smk_write_ambient,
+};
+
+struct option_names {
+ int o_number;
+ char *o_name;
+ char *o_alias;
+};
+
+static struct option_names netlbl_choices[] = {
+ { NETLBL_NLTYPE_RIPSO,
+ NETLBL_NLTYPE_RIPSO_NAME, "ripso" },
+ { NETLBL_NLTYPE_CIPSOV4,
+ NETLBL_NLTYPE_CIPSOV4_NAME, "cipsov4" },
+ { NETLBL_NLTYPE_CIPSOV4,
+ NETLBL_NLTYPE_CIPSOV4_NAME, "cipso" },
+ { NETLBL_NLTYPE_CIPSOV6,
+ NETLBL_NLTYPE_CIPSOV6_NAME, "cipsov6" },
+ { NETLBL_NLTYPE_UNLABELED,
+ NETLBL_NLTYPE_UNLABELED_NAME, "unlabeled" },
+};
+
+/**
+ * smk_read_nltype - read() for /smack/nltype
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_nltype(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char bound[40];
+ ssize_t rc;
+ int i;
+
+ if (count < SMK_LABELLEN)
+ return -EINVAL;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(bound, "unknown");
+
+ for (i = 0; i < ARRAY_SIZE(netlbl_choices); i++)
+ if (smack_net_nltype == netlbl_choices[i].o_number) {
+ sprintf(bound, "%s", netlbl_choices[i].o_name);
+ break;
+ }
+
+ rc = simple_read_from_buffer(buf, count, ppos, bound, strlen(bound));
+
+ return rc;
+}
+
+/**
+ * smk_write_nltype - write() for /smack/nltype
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_nltype(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char bound[40];
+ char *cp;
+ int i;
+
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count >= 40)
+ return -EINVAL;
+
+ if (copy_from_user(bound, buf, count) != 0)
+ return -EFAULT;
+
+ bound[count] = '\0';
+ cp = strchr(bound, ' ');
+ if (cp != NULL)
+ *cp = '\0';
+ cp = strchr(bound, '\n');
+ if (cp != NULL)
+ *cp = '\0';
+
+ for (i = 0; i < ARRAY_SIZE(netlbl_choices); i++)
+ if (strcmp(bound, netlbl_choices[i].o_name) == 0 ||
+ strcmp(bound, netlbl_choices[i].o_alias) == 0) {
+ smack_net_nltype = netlbl_choices[i].o_number;
+ return count;
+ }
+ /*
+ * Not a valid choice.
+ */
+ return -EINVAL;
+}
+
+static const struct file_operations smk_nltype_ops = {
+ .read = smk_read_nltype,
+ .write = smk_write_nltype,
+};
+
+/**
+ * smk_fill_super - fill the /smackfs superblock
+ * @sb: the empty superblock
+ * @data: unused
+ * @silent: unused
+ *
+ * Fill in the well known entries for /smack
+ *
+ * Returns 0 on success, an error code on failure
+ */
+static int smk_fill_super(struct super_block *sb, void *data, int silent)
+{
+ int rc;
+ struct inode *root_inode;
+
+ static struct tree_descr smack_files[] = {
+ [SMK_LOAD] =
+ {"load", &smk_load_ops, S_IRUGO|S_IWUSR},
+ [SMK_CIPSO] =
+ {"cipso", &smk_cipso_ops, S_IRUGO|S_IWUSR},
+ [SMK_DOI] =
+ {"doi", &smk_doi_ops, S_IRUGO|S_IWUSR},
+ [SMK_DIRECT] =
+ {"direct", &smk_direct_ops, S_IRUGO|S_IWUSR},
+ [SMK_AMBIENT] =
+ {"ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR},
+ [SMK_NLTYPE] =
+ {"nltype", &smk_nltype_ops, S_IRUGO|S_IWUSR},
+ /* last one */ {""}
+ };
+
+ rc = simple_fill_super(sb, SMACK_MAGIC, smack_files);
+ if (rc != 0) {
+ printk(KERN_ERR "%s failed %d while creating inodes\n",
+ __func__, rc);
+ return rc;
+ }
+
+ root_inode = sb->s_root->d_inode;
+ root_inode->i_security = new_inode_smack(smack_known_floor.smk_known);
+
+ return 0;
+}
+
+/**
+ * smk_get_sb - get the smackfs superblock
+ * @fs_type: passed along without comment
+ * @flags: passed along without comment
+ * @dev_name: passed along without comment
+ * @data: passed along without comment
+ * @mnt: passed along without comment
+ *
+ * Just passes everything along.
+ *
+ * Returns what the lower level code does.
+ */
+static int smk_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_single(fs_type, flags, data, smk_fill_super, mnt);
+}
+
+static struct file_system_type smk_fs_type = {
+ .name = "smackfs",
+ .get_sb = smk_get_sb,
+ .kill_sb = kill_litter_super,
+};
+
+static struct vfsmount *smackfs_mount;
+
+/**
+ * init_smk_fs - register and mount the smackfs filesystem
+ *
+ * Registers smackfs and creates the internal kernel mount.
+ *
+ * Returns 0 on success, or an error code if registration or mounting fails.
+ */
+static int __init init_smk_fs(void)
+{
+ int err;
+
+ err = register_filesystem(&smk_fs_type);
+ if (!err) {
+ smackfs_mount = kern_mount(&smk_fs_type);
+ if (IS_ERR(smackfs_mount)) {
+ printk(KERN_ERR "smackfs: could not mount!\n");
+ err = PTR_ERR(smackfs_mount);
+ smackfs_mount = NULL;
+ }
+ }
+
+ sema_init(&smack_write_sem, 1);
+ smk_cipso_doi();
+
+ return err;
+}
+
+__initcall(init_smk_fs);
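
A rough administrative sketch, not mandated by the patch: smackfs is meant to be
mounted and its control files driven with ordinary writes. The /smack mount point
and the example values are assumptions; the filesystem name, file names, and
decimal formats come from the handlers above.

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/mount.h>
	#include <unistd.h>

	/* Write a short string to one smackfs control file. */
	static int write_str(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0 || write(fd, val, strlen(val)) < 0) {
			perror(path);
			if (fd >= 0)
				close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		/* /smack as the mount point is a convention, not a requirement */
		if (mount("smackfs", "/smack", "smackfs", 0, NULL) < 0 &&
		    errno != EBUSY)
			perror("mount smackfs");

		write_str("/smack/doi", "3");		/* smk_write_doi() rebuilds the CIPSO DOI */
		write_str("/smack/direct", "250");	/* level meaning the label rides in the catset */
		return 0;
	}
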
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 62449117ee14..61f5d425b630 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -23,7 +23,7 @@
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <linux/latency.h>
+#include <linux/pm_qos_params.h>
#include <linux/uio.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -443,9 +443,11 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
snd_pcm_timer_resolution_change(substream);
runtime->status->state = SNDRV_PCM_STATE_SETUP;
- remove_acceptable_latency(substream->latency_id);
+ pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
+ substream->latency_id);
if ((usecs = period_to_usecs(runtime)) >= 0)
- set_acceptable_latency(substream->latency_id, usecs);
+ pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
+ substream->latency_id, usecs);
return 0;
_error:
/* hardware might be unuseable from this time,
@@ -505,7 +507,8 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
if (substream->ops->hw_free)
result = substream->ops->hw_free(substream);
runtime->status->state = SNDRV_PCM_STATE_OPEN;
- remove_acceptable_latency(substream->latency_id);
+ pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
+ substream->latency_id);
return result;
}
diff --git a/sound/oss/Makefile b/sound/oss/Makefile
index f883c4b676ab..1f86299fae40 100644
--- a/sound/oss/Makefile
+++ b/sound/oss/Makefile
@@ -6,7 +6,6 @@
# Each configuration option enables a list of files.
obj-$(CONFIG_SOUND_OSS) += sound.o
-obj-$(CONFIG_SOUND_CS4232) += cs4232.o ad1848.o
# Please leave it as is, cause the link order is significant !
@@ -16,7 +15,6 @@ obj-$(CONFIG_SOUND_AEDSP16) += aedsp16.o
obj-$(CONFIG_SOUND_PSS) += pss.o ad1848.o mpu401.o
obj-$(CONFIG_SOUND_TRIX) += trix.o ad1848.o sb_lib.o uart401.o
obj-$(CONFIG_SOUND_SSCAPE) += sscape.o ad1848.o mpu401.o
-obj-$(CONFIG_SOUND_CS4232) += cs4232.o uart401.o
obj-$(CONFIG_SOUND_MSS) += ad1848.o
obj-$(CONFIG_SOUND_PAS) += pas2.o sb.o sb_lib.o uart401.o
obj-$(CONFIG_SOUND_SB) += sb.o sb_lib.o uart401.o
@@ -27,19 +25,12 @@ obj-$(CONFIG_SOUND_YM3812) += opl3.o
obj-$(CONFIG_SOUND_VMIDI) += v_midi.o
obj-$(CONFIG_SOUND_VIDC) += vidc_mod.o
obj-$(CONFIG_SOUND_WAVEARTIST) += waveartist.o
-
-obj-$(CONFIG_SOUND_VIA82CXXX) += via82cxxx_audio.o ac97_codec.o
-ifeq ($(CONFIG_MIDI_VIA82CXXX),y)
- obj-$(CONFIG_SOUND_VIA82CXXX) += sound.o uart401.o
-endif
obj-$(CONFIG_SOUND_MSNDCLAS) += msnd.o msnd_classic.o
obj-$(CONFIG_SOUND_MSNDPIN) += msnd.o msnd_pinnacle.o
obj-$(CONFIG_SOUND_VWSND) += vwsnd.o
-obj-$(CONFIG_SOUND_ICH) += i810_audio.o ac97_codec.o
obj-$(CONFIG_SOUND_AU1550_AC97) += au1550_ac97.o ac97_codec.o
obj-$(CONFIG_SOUND_TRIDENT) += trident.o ac97_codec.o
obj-$(CONFIG_SOUND_BCM_CS4297A) += swarm_cs4297a.o
-obj-$(CONFIG_SOUND_BT878) += btaudio.o
obj-$(CONFIG_SOUND_WM97XX) += ac97_plugin_wm97xx.o
diff --git a/sound/oss/ac97_codec.c b/sound/oss/ac97_codec.c
index fef56cac06c8..87a672680761 100644
--- a/sound/oss/ac97_codec.c
+++ b/sound/oss/ac97_codec.c
@@ -189,42 +189,6 @@ static const struct {
{0x57454301, "Winbond 83971D", &null_ops},
};
-static const char *ac97_stereo_enhancements[] =
-{
- /* 0 */ "No 3D Stereo Enhancement",
- /* 1 */ "Analog Devices Phat Stereo",
- /* 2 */ "Creative Stereo Enhancement",
- /* 3 */ "National Semi 3D Stereo Enhancement",
- /* 4 */ "YAMAHA Ymersion",
- /* 5 */ "BBE 3D Stereo Enhancement",
- /* 6 */ "Crystal Semi 3D Stereo Enhancement",
- /* 7 */ "Qsound QXpander",
- /* 8 */ "Spatializer 3D Stereo Enhancement",
- /* 9 */ "SRS 3D Stereo Enhancement",
- /* 10 */ "Platform Tech 3D Stereo Enhancement",
- /* 11 */ "AKM 3D Audio",
- /* 12 */ "Aureal Stereo Enhancement",
- /* 13 */ "Aztech 3D Enhancement",
- /* 14 */ "Binaura 3D Audio Enhancement",
- /* 15 */ "ESS Technology Stereo Enhancement",
- /* 16 */ "Harman International VMAx",
- /* 17 */ "Nvidea 3D Stereo Enhancement",
- /* 18 */ "Philips Incredible Sound",
- /* 19 */ "Texas Instruments 3D Stereo Enhancement",
- /* 20 */ "VLSI Technology 3D Stereo Enhancement",
- /* 21 */ "TriTech 3D Stereo Enhancement",
- /* 22 */ "Realtek 3D Stereo Enhancement",
- /* 23 */ "Samsung 3D Stereo Enhancement",
- /* 24 */ "Wolfson Microelectronics 3D Enhancement",
- /* 25 */ "Delta Integration 3D Enhancement",
- /* 26 */ "SigmaTel 3D Enhancement",
- /* 27 */ "Winbond 3D Stereo Enhancement",
- /* 28 */ "Rockwell 3D Stereo Enhancement",
- /* 29 */ "Reserved 29",
- /* 30 */ "Reserved 30",
- /* 31 */ "Reserved 31"
-};
-
/* this table has default mixer values for all OSS mixers. */
static struct mixer_defaults {
int mixer;
@@ -614,83 +578,6 @@ static int ac97_mixer_ioctl(struct ac97_codec *codec, unsigned int cmd, unsigned
return -EINVAL;
}
-/* entry point for /proc/driver/controller_vendor/ac97/%d */
-int ac97_read_proc (char *page, char **start, off_t off,
- int count, int *eof, void *data)
-{
- int len = 0, cap, extid, val, id1, id2;
- struct ac97_codec *codec;
- int is_ac97_20 = 0;
-
- if ((codec = data) == NULL)
- return -ENODEV;
-
- id1 = codec->codec_read(codec, AC97_VENDOR_ID1);
- id2 = codec->codec_read(codec, AC97_VENDOR_ID2);
- len += sprintf (page+len, "Vendor name : %s\n", codec->name);
- len += sprintf (page+len, "Vendor id : %04X %04X\n", id1, id2);
-
- extid = codec->codec_read(codec, AC97_EXTENDED_ID);
- extid &= ~((1<<2)|(1<<4)|(1<<5)|(1<<10)|(1<<11)|(1<<12)|(1<<13));
- len += sprintf (page+len, "AC97 Version : %s\n",
- extid ? "2.0 or later" : "1.0");
- if (extid) is_ac97_20 = 1;
-
- cap = codec->codec_read(codec, AC97_RESET);
- len += sprintf (page+len, "Capabilities :%s%s%s%s%s%s\n",
- cap & 0x0001 ? " -dedicated MIC PCM IN channel-" : "",
- cap & 0x0002 ? " -reserved1-" : "",
- cap & 0x0004 ? " -bass & treble-" : "",
- cap & 0x0008 ? " -simulated stereo-" : "",
- cap & 0x0010 ? " -headphone out-" : "",
- cap & 0x0020 ? " -loudness-" : "");
- val = cap & 0x00c0;
- len += sprintf (page+len, "DAC resolutions :%s%s%s\n",
- " -16-bit-",
- val & 0x0040 ? " -18-bit-" : "",
- val & 0x0080 ? " -20-bit-" : "");
- val = cap & 0x0300;
- len += sprintf (page+len, "ADC resolutions :%s%s%s\n",
- " -16-bit-",
- val & 0x0100 ? " -18-bit-" : "",
- val & 0x0200 ? " -20-bit-" : "");
- len += sprintf (page+len, "3D enhancement : %s\n",
- ac97_stereo_enhancements[(cap >> 10) & 0x1f]);
-
- val = codec->codec_read(codec, AC97_GENERAL_PURPOSE);
- len += sprintf (page+len, "POP path : %s 3D\n"
- "Sim. stereo : %s\n"
- "3D enhancement : %s\n"
- "Loudness : %s\n"
- "Mono output : %s\n"
- "MIC select : %s\n"
- "ADC/DAC loopback : %s\n",
- val & 0x8000 ? "post" : "pre",
- val & 0x4000 ? "on" : "off",
- val & 0x2000 ? "on" : "off",
- val & 0x1000 ? "on" : "off",
- val & 0x0200 ? "MIC" : "MIX",
- val & 0x0100 ? "MIC2" : "MIC1",
- val & 0x0080 ? "on" : "off");
-
- extid = codec->codec_read(codec, AC97_EXTENDED_ID);
- cap = extid;
- len += sprintf (page+len, "Ext Capabilities :%s%s%s%s%s%s%s\n",
- cap & 0x0001 ? " -var rate PCM audio-" : "",
- cap & 0x0002 ? " -2x PCM audio out-" : "",
- cap & 0x0008 ? " -var rate MIC in-" : "",
- cap & 0x0040 ? " -PCM center DAC-" : "",
- cap & 0x0080 ? " -PCM surround DAC-" : "",
- cap & 0x0100 ? " -PCM LFE DAC-" : "",
- cap & 0x0200 ? " -slot/DAC mappings-" : "");
- if (is_ac97_20) {
- len += sprintf (page+len, "Front DAC rate : %d\n",
- codec->codec_read(codec, AC97_PCM_FRONT_DAC_RATE));
- }
-
- return len;
-}
-
/**
* codec_id - Turn id1/id2 into a PnP string
* @id1: Vendor ID1
@@ -1313,176 +1200,5 @@ static int pt101_init(struct ac97_codec * codec)
#endif
-EXPORT_SYMBOL(ac97_read_proc);
EXPORT_SYMBOL(ac97_probe_codec);
-/*
- * AC97 library support routines
- */
-
-/**
- * ac97_set_dac_rate - set codec rate adaption
- * @codec: ac97 codec
- * @rate: rate in hertz
- *
- * Set the DAC rate. Assumes the codec supports VRA. The caller is
- * expected to have checked this little detail.
- */
-
-unsigned int ac97_set_dac_rate(struct ac97_codec *codec, unsigned int rate)
-{
- unsigned int new_rate = rate;
- u32 dacp;
- u32 mast_vol, phone_vol, mono_vol, pcm_vol;
- u32 mute_vol = 0x8000; /* The mute volume? */
-
- if(rate != codec->codec_read(codec, AC97_PCM_FRONT_DAC_RATE))
- {
- /* Mute several registers */
- mast_vol = codec->codec_read(codec, AC97_MASTER_VOL_STEREO);
- mono_vol = codec->codec_read(codec, AC97_MASTER_VOL_MONO);
- phone_vol = codec->codec_read(codec, AC97_HEADPHONE_VOL);
- pcm_vol = codec->codec_read(codec, AC97_PCMOUT_VOL);
- codec->codec_write(codec, AC97_MASTER_VOL_STEREO, mute_vol);
- codec->codec_write(codec, AC97_MASTER_VOL_MONO, mute_vol);
- codec->codec_write(codec, AC97_HEADPHONE_VOL, mute_vol);
- codec->codec_write(codec, AC97_PCMOUT_VOL, mute_vol);
-
- /* Power down the DAC */
- dacp=codec->codec_read(codec, AC97_POWER_CONTROL);
- codec->codec_write(codec, AC97_POWER_CONTROL, dacp|0x0200);
- /* Load the rate and read the effective rate */
- codec->codec_write(codec, AC97_PCM_FRONT_DAC_RATE, rate);
- new_rate=codec->codec_read(codec, AC97_PCM_FRONT_DAC_RATE);
- /* Power it back up */
- codec->codec_write(codec, AC97_POWER_CONTROL, dacp);
-
- /* Restore volumes */
- codec->codec_write(codec, AC97_MASTER_VOL_STEREO, mast_vol);
- codec->codec_write(codec, AC97_MASTER_VOL_MONO, mono_vol);
- codec->codec_write(codec, AC97_HEADPHONE_VOL, phone_vol);
- codec->codec_write(codec, AC97_PCMOUT_VOL, pcm_vol);
- }
- return new_rate;
-}
-
-EXPORT_SYMBOL(ac97_set_dac_rate);
-
-/**
- * ac97_set_adc_rate - set codec rate adaption
- * @codec: ac97 codec
- * @rate: rate in hertz
- *
- * Set the ADC rate. Assumes the codec supports VRA. The caller is
- * expected to have checked this little detail.
- */
-
-unsigned int ac97_set_adc_rate(struct ac97_codec *codec, unsigned int rate)
-{
- unsigned int new_rate = rate;
- u32 dacp;
-
- if(rate != codec->codec_read(codec, AC97_PCM_LR_ADC_RATE))
- {
- /* Power down the ADC */
- dacp=codec->codec_read(codec, AC97_POWER_CONTROL);
- codec->codec_write(codec, AC97_POWER_CONTROL, dacp|0x0100);
- /* Load the rate and read the effective rate */
- codec->codec_write(codec, AC97_PCM_LR_ADC_RATE, rate);
- new_rate=codec->codec_read(codec, AC97_PCM_LR_ADC_RATE);
- /* Power it back up */
- codec->codec_write(codec, AC97_POWER_CONTROL, dacp);
- }
- return new_rate;
-}
-
-EXPORT_SYMBOL(ac97_set_adc_rate);
-
-static int swap_headphone(int remove_master)
-{
- struct list_head *l;
- struct ac97_codec *c;
-
- if (remove_master) {
- mutex_lock(&codec_mutex);
- list_for_each(l, &codecs)
- {
- c = list_entry(l, struct ac97_codec, list);
- if (supported_mixer(c, SOUND_MIXER_PHONEOUT))
- c->supported_mixers &= ~SOUND_MASK_PHONEOUT;
- }
- mutex_unlock(&codec_mutex);
- } else
- ac97_hw[SOUND_MIXER_PHONEOUT].offset = AC97_MASTER_VOL_STEREO;
-
- /* Scale values already match */
- ac97_hw[SOUND_MIXER_VOLUME].offset = AC97_MASTER_VOL_MONO;
- return 0;
-}
-
-static int apply_quirk(int quirk)
-{
- switch (quirk) {
- case AC97_TUNE_NONE:
- return 0;
- case AC97_TUNE_HP_ONLY:
- return swap_headphone(1);
- case AC97_TUNE_SWAP_HP:
- return swap_headphone(0);
- case AC97_TUNE_SWAP_SURROUND:
- return -ENOSYS; /* not yet implemented */
- case AC97_TUNE_AD_SHARING:
- return -ENOSYS; /* not yet implemented */
- case AC97_TUNE_ALC_JACK:
- return -ENOSYS; /* not yet implemented */
- }
- return -EINVAL;
-}
-
-/**
- * ac97_tune_hardware - tune up the hardware
- * @pdev: pci_dev pointer
- * @quirk: quirk list
- * @override: explicit quirk value (overrides if not AC97_TUNE_DEFAULT)
- *
- * Do some workaround for each pci device, such as renaming of the
- * headphone (true line-out) control as "Master".
- * The quirk-list must be terminated with a zero-filled entry.
- *
- * Returns zero if successful, or a negative error code on failure.
- */
-
-int ac97_tune_hardware(struct pci_dev *pdev, struct ac97_quirk *quirk, int override)
-{
- int result;
-
- if (!quirk)
- return -EINVAL;
-
- if (override != AC97_TUNE_DEFAULT) {
- result = apply_quirk(override);
- if (result < 0)
- printk(KERN_ERR "applying quirk type %d failed (%d)\n", override, result);
- return result;
- }
-
- for (; quirk->vendor; quirk++) {
- if (quirk->vendor != pdev->subsystem_vendor)
- continue;
- if ((! quirk->mask && quirk->device == pdev->subsystem_device) ||
- quirk->device == (quirk->mask & pdev->subsystem_device)) {
-#ifdef DEBUG
-			printk("ac97 quirk for %s (%04x:%04x)\n", quirk->name, pdev->subsystem_vendor, pdev->subsystem_device);
-#endif
- result = apply_quirk(quirk->type);
- if (result < 0)
- printk(KERN_ERR "applying quirk type %d for %s failed (%d)\n", quirk->type, quirk->name, result);
- return result;
- }
- }
- return 0;
-}
-
-EXPORT_SYMBOL_GPL(ac97_tune_hardware);
-
-MODULE_LICENSE("GPL");
diff --git a/sound/oss/btaudio.c b/sound/oss/btaudio.c
deleted file mode 100644
index 4d5cf05b8922..000000000000
--- a/sound/oss/btaudio.c
+++ /dev/null
@@ -1,1139 +0,0 @@
-/*
- btaudio - bt878 audio dma driver for linux 2.4.x
-
- (c) 2000-2002 Gerd Knorr <kraxel@bytesex.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-*/
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/sound.h>
-#include <linux/soundcard.h>
-#include <linux/slab.h>
-#include <linux/kdev_t.h>
-#include <linux/mutex.h>
-
-#include <asm/uaccess.h>
-#include <asm/io.h>
-
-
-/* mmio access */
-#define btwrite(dat,adr) writel((dat), (bta->mmio+(adr)))
-#define btread(adr) readl(bta->mmio+(adr))
-
-#define btand(dat,adr) btwrite((dat) & btread(adr), adr)
-#define btor(dat,adr) btwrite((dat) | btread(adr), adr)
-#define btaor(dat,mask,adr) btwrite((dat) | ((mask) & btread(adr)), adr)
-
-/* registers (shifted because bta->mmio is long) */
-#define REG_INT_STAT (0x100 >> 2)
-#define REG_INT_MASK (0x104 >> 2)
-#define REG_GPIO_DMA_CTL (0x10c >> 2)
-#define REG_PACKET_LEN (0x110 >> 2)
-#define REG_RISC_STRT_ADD (0x114 >> 2)
-#define REG_RISC_COUNT (0x120 >> 2)
-
-/* IRQ bits - REG_INT_(STAT|MASK) */
-#define IRQ_SCERR (1 << 19)
-#define IRQ_OCERR (1 << 18)
-#define IRQ_PABORT (1 << 17)
-#define IRQ_RIPERR (1 << 16)
-#define IRQ_PPERR (1 << 15)
-#define IRQ_FDSR (1 << 14)
-#define IRQ_FTRGT (1 << 13)
-#define IRQ_FBUS (1 << 12)
-#define IRQ_RISCI (1 << 11)
-#define IRQ_OFLOW (1 << 3)
-
-#define IRQ_BTAUDIO (IRQ_SCERR | IRQ_OCERR | IRQ_PABORT | IRQ_RIPERR |\
- IRQ_PPERR | IRQ_FDSR | IRQ_FTRGT | IRQ_FBUS |\
- IRQ_RISCI)
-
-/* REG_GPIO_DMA_CTL bits */
-#define DMA_CTL_A_PWRDN (1 << 26)
-#define DMA_CTL_DA_SBR (1 << 14)
-#define DMA_CTL_DA_ES2 (1 << 13)
-#define DMA_CTL_ACAP_EN (1 << 4)
-#define DMA_CTL_RISC_EN (1 << 1)
-#define DMA_CTL_FIFO_EN (1 << 0)
-
-/* RISC instructions */
-#define RISC_WRITE (0x01 << 28)
-#define RISC_JUMP (0x07 << 28)
-#define RISC_SYNC (0x08 << 28)
-
-/* RISC bits */
-#define RISC_WR_SOL (1 << 27)
-#define RISC_WR_EOL (1 << 26)
-#define RISC_IRQ (1 << 24)
-#define RISC_SYNC_RESYNC (1 << 15)
-#define RISC_SYNC_FM1 0x06
-#define RISC_SYNC_VRO 0x0c
-
-#define HWBASE_AD (448000)
-
-/* -------------------------------------------------------------- */
-
-struct btaudio {
- /* linked list */
- struct btaudio *next;
-
- /* device info */
- int dsp_digital;
- int dsp_analog;
- int mixer_dev;
- struct pci_dev *pci;
- unsigned int irq;
- unsigned long mem;
- unsigned long __iomem *mmio;
-
- /* locking */
- int users;
- struct mutex lock;
-
- /* risc instructions */
- unsigned int risc_size;
- unsigned long *risc_cpu;
- dma_addr_t risc_dma;
-
- /* audio data */
- unsigned int buf_size;
- unsigned char *buf_cpu;
- dma_addr_t buf_dma;
-
- /* buffer setup */
- int line_bytes;
- int line_count;
- int block_bytes;
- int block_count;
-
- /* read fifo management */
- int recording;
- int dma_block;
- int read_offset;
- int read_count;
- wait_queue_head_t readq;
-
- /* settings */
- int gain[3];
- int source;
- int bits;
- int decimation;
- int mixcount;
- int sampleshift;
- int channels;
- int analog;
- int rate;
-};
-
-struct cardinfo {
- char *name;
- int rate;
-};
-
-static struct btaudio *btaudios;
-static unsigned int debug;
-static unsigned int irq_debug;
-
-/* -------------------------------------------------------------- */
-
-#define BUF_DEFAULT 128*1024
-#define BUF_MIN 8192
-
-static int alloc_buffer(struct btaudio *bta)
-{
- if (NULL == bta->buf_cpu) {
- for (bta->buf_size = BUF_DEFAULT; bta->buf_size >= BUF_MIN;
- bta->buf_size = bta->buf_size >> 1) {
- bta->buf_cpu = pci_alloc_consistent
- (bta->pci, bta->buf_size, &bta->buf_dma);
- if (NULL != bta->buf_cpu)
- break;
- }
- if (NULL == bta->buf_cpu)
- return -ENOMEM;
- memset(bta->buf_cpu,0,bta->buf_size);
- }
- if (NULL == bta->risc_cpu) {
- bta->risc_size = PAGE_SIZE;
- bta->risc_cpu = pci_alloc_consistent
- (bta->pci, bta->risc_size, &bta->risc_dma);
- if (NULL == bta->risc_cpu) {
- pci_free_consistent(bta->pci, bta->buf_size, bta->buf_cpu, bta->buf_dma);
- bta->buf_cpu = NULL;
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-static void free_buffer(struct btaudio *bta)
-{
- if (NULL != bta->buf_cpu) {
- pci_free_consistent(bta->pci, bta->buf_size,
- bta->buf_cpu, bta->buf_dma);
- bta->buf_cpu = NULL;
- }
- if (NULL != bta->risc_cpu) {
- pci_free_consistent(bta->pci, bta->risc_size,
- bta->risc_cpu, bta->risc_dma);
- bta->risc_cpu = NULL;
- }
-}
-
-static int make_risc(struct btaudio *bta)
-{
- int rp, bp, line, block;
- unsigned long risc;
-
- bta->block_bytes = bta->buf_size >> 4;
- bta->block_count = 1 << 4;
- bta->line_bytes = bta->block_bytes;
- bta->line_count = bta->block_count;
- while (bta->line_bytes > 4095) {
- bta->line_bytes >>= 1;
- bta->line_count <<= 1;
- }
- if (bta->line_count > 255)
- return -EINVAL;
- if (debug)
- printk(KERN_DEBUG
- "btaudio: bufsize=%d - bs=%d bc=%d - ls=%d, lc=%d\n",
- bta->buf_size,bta->block_bytes,bta->block_count,
- bta->line_bytes,bta->line_count);
- rp = 0; bp = 0;
- block = 0;
- bta->risc_cpu[rp++] = cpu_to_le32(RISC_SYNC|RISC_SYNC_FM1);
- bta->risc_cpu[rp++] = cpu_to_le32(0);
- for (line = 0; line < bta->line_count; line++) {
- risc = RISC_WRITE | RISC_WR_SOL | RISC_WR_EOL;
- risc |= bta->line_bytes;
- if (0 == (bp & (bta->block_bytes-1))) {
- risc |= RISC_IRQ;
- risc |= (block & 0x0f) << 16;
- risc |= (~block & 0x0f) << 20;
- block++;
- }
- bta->risc_cpu[rp++] = cpu_to_le32(risc);
- bta->risc_cpu[rp++] = cpu_to_le32(bta->buf_dma + bp);
- bp += bta->line_bytes;
- }
- bta->risc_cpu[rp++] = cpu_to_le32(RISC_SYNC|RISC_SYNC_VRO);
- bta->risc_cpu[rp++] = cpu_to_le32(0);
- bta->risc_cpu[rp++] = cpu_to_le32(RISC_JUMP);
- bta->risc_cpu[rp++] = cpu_to_le32(bta->risc_dma);
- return 0;
-}
-
-static int start_recording(struct btaudio *bta)
-{
- int ret;
-
- if (0 != (ret = alloc_buffer(bta)))
- return ret;
- if (0 != (ret = make_risc(bta)))
- return ret;
-
- btwrite(bta->risc_dma, REG_RISC_STRT_ADD);
- btwrite((bta->line_count << 16) | bta->line_bytes,
- REG_PACKET_LEN);
- btwrite(IRQ_BTAUDIO, REG_INT_MASK);
- if (bta->analog) {
- btwrite(DMA_CTL_ACAP_EN |
- DMA_CTL_RISC_EN |
- DMA_CTL_FIFO_EN |
- DMA_CTL_DA_ES2 |
- ((bta->bits == 8) ? DMA_CTL_DA_SBR : 0) |
- (bta->gain[bta->source] << 28) |
- (bta->source << 24) |
- (bta->decimation << 8),
- REG_GPIO_DMA_CTL);
- } else {
- btwrite(DMA_CTL_ACAP_EN |
- DMA_CTL_RISC_EN |
- DMA_CTL_FIFO_EN |
- DMA_CTL_DA_ES2 |
- DMA_CTL_A_PWRDN |
- (1 << 6) |
- ((bta->bits == 8) ? DMA_CTL_DA_SBR : 0) |
- (bta->gain[bta->source] << 28) |
- (bta->source << 24) |
- (bta->decimation << 8),
- REG_GPIO_DMA_CTL);
- }
- bta->dma_block = 0;
- bta->read_offset = 0;
- bta->read_count = 0;
- bta->recording = 1;
- if (debug)
- printk(KERN_DEBUG "btaudio: recording started\n");
- return 0;
-}
-
-static void stop_recording(struct btaudio *bta)
-{
- btand(~15, REG_GPIO_DMA_CTL);
- bta->recording = 0;
- if (debug)
- printk(KERN_DEBUG "btaudio: recording stopped\n");
-}
-
-
-/* -------------------------------------------------------------- */
-
-static int btaudio_mixer_open(struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- struct btaudio *bta;
-
- for (bta = btaudios; bta != NULL; bta = bta->next)
- if (bta->mixer_dev == minor)
- break;
- if (NULL == bta)
- return -ENODEV;
-
- if (debug)
- printk("btaudio: open mixer [%d]\n",minor);
- file->private_data = bta;
- return 0;
-}
-
-static int btaudio_mixer_release(struct inode *inode, struct file *file)
-{
- return 0;
-}
-
-static int btaudio_mixer_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct btaudio *bta = file->private_data;
- int ret,val=0,i=0;
- void __user *argp = (void __user *)arg;
-
- if (cmd == SOUND_MIXER_INFO) {
- mixer_info info;
- memset(&info,0,sizeof(info));
- strlcpy(info.id,"bt878",sizeof(info.id));
- strlcpy(info.name,"Brooktree Bt878 audio",sizeof(info.name));
- info.modify_counter = bta->mixcount;
- if (copy_to_user(argp, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
- if (cmd == SOUND_OLD_MIXER_INFO) {
- _old_mixer_info info;
- memset(&info,0,sizeof(info));
- strlcpy(info.id, "bt878", sizeof(info.id));
- strlcpy(info.name,"Brooktree Bt878 audio",sizeof(info.name));
- if (copy_to_user(argp, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
- if (cmd == OSS_GETVERSION)
- return put_user(SOUND_VERSION, (int __user *)argp);
-
- /* read */
- if (_SIOC_DIR(cmd) & _SIOC_WRITE)
- if (get_user(val, (int __user *)argp))
- return -EFAULT;
-
- switch (cmd) {
- case MIXER_READ(SOUND_MIXER_CAPS):
- ret = SOUND_CAP_EXCL_INPUT;
- break;
- case MIXER_READ(SOUND_MIXER_STEREODEVS):
- ret = 0;
- break;
- case MIXER_READ(SOUND_MIXER_RECMASK):
- case MIXER_READ(SOUND_MIXER_DEVMASK):
- ret = SOUND_MASK_LINE1|SOUND_MASK_LINE2|SOUND_MASK_LINE3;
- break;
-
- case MIXER_WRITE(SOUND_MIXER_RECSRC):
- if (val & SOUND_MASK_LINE1 && bta->source != 0)
- bta->source = 0;
- else if (val & SOUND_MASK_LINE2 && bta->source != 1)
- bta->source = 1;
- else if (val & SOUND_MASK_LINE3 && bta->source != 2)
- bta->source = 2;
- btaor((bta->gain[bta->source] << 28) |
- (bta->source << 24),
- 0x0cffffff, REG_GPIO_DMA_CTL);
- case MIXER_READ(SOUND_MIXER_RECSRC):
- switch (bta->source) {
- case 0: ret = SOUND_MASK_LINE1; break;
- case 1: ret = SOUND_MASK_LINE2; break;
- case 2: ret = SOUND_MASK_LINE3; break;
- default: ret = 0;
- }
- break;
-
- case MIXER_WRITE(SOUND_MIXER_LINE1):
- case MIXER_WRITE(SOUND_MIXER_LINE2):
- case MIXER_WRITE(SOUND_MIXER_LINE3):
- if (MIXER_WRITE(SOUND_MIXER_LINE1) == cmd)
- i = 0;
- if (MIXER_WRITE(SOUND_MIXER_LINE2) == cmd)
- i = 1;
- if (MIXER_WRITE(SOUND_MIXER_LINE3) == cmd)
- i = 2;
- bta->gain[i] = (val & 0xff) * 15 / 100;
- if (bta->gain[i] > 15) bta->gain[i] = 15;
- if (bta->gain[i] < 0) bta->gain[i] = 0;
- if (i == bta->source)
- btaor((bta->gain[bta->source]<<28),
- 0x0fffffff, REG_GPIO_DMA_CTL);
- ret = bta->gain[i] * 100 / 15;
- ret |= ret << 8;
- break;
-
- case MIXER_READ(SOUND_MIXER_LINE1):
- case MIXER_READ(SOUND_MIXER_LINE2):
- case MIXER_READ(SOUND_MIXER_LINE3):
- if (MIXER_READ(SOUND_MIXER_LINE1) == cmd)
- i = 0;
- if (MIXER_READ(SOUND_MIXER_LINE2) == cmd)
- i = 1;
- if (MIXER_READ(SOUND_MIXER_LINE3) == cmd)
- i = 2;
- ret = bta->gain[i] * 100 / 15;
- ret |= ret << 8;
- break;
-
- default:
- return -EINVAL;
- }
- if (put_user(ret, (int __user *)argp))
- return -EFAULT;
- return 0;
-}
-
-static const struct file_operations btaudio_mixer_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = btaudio_mixer_open,
- .release = btaudio_mixer_release,
- .ioctl = btaudio_mixer_ioctl,
-};
-
-/* -------------------------------------------------------------- */
-
-static int btaudio_dsp_open(struct inode *inode, struct file *file,
- struct btaudio *bta, int analog)
-{
- mutex_lock(&bta->lock);
- if (bta->users)
- goto busy;
- bta->users++;
- file->private_data = bta;
-
- bta->analog = analog;
- bta->dma_block = 0;
- bta->read_offset = 0;
- bta->read_count = 0;
- bta->sampleshift = 0;
-
- mutex_unlock(&bta->lock);
- return 0;
-
- busy:
- mutex_unlock(&bta->lock);
- return -EBUSY;
-}
-
-static int btaudio_dsp_open_digital(struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- struct btaudio *bta;
-
- for (bta = btaudios; bta != NULL; bta = bta->next)
- if (bta->dsp_digital == minor)
- break;
- if (NULL == bta)
- return -ENODEV;
-
- if (debug)
- printk("btaudio: open digital dsp [%d]\n",minor);
- return btaudio_dsp_open(inode,file,bta,0);
-}
-
-static int btaudio_dsp_open_analog(struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- struct btaudio *bta;
-
- for (bta = btaudios; bta != NULL; bta = bta->next)
- if (bta->dsp_analog == minor)
- break;
- if (NULL == bta)
- return -ENODEV;
-
- if (debug)
- printk("btaudio: open analog dsp [%d]\n",minor);
- return btaudio_dsp_open(inode,file,bta,1);
-}
-
-static int btaudio_dsp_release(struct inode *inode, struct file *file)
-{
- struct btaudio *bta = file->private_data;
-
- mutex_lock(&bta->lock);
- if (bta->recording)
- stop_recording(bta);
- bta->users--;
- mutex_unlock(&bta->lock);
- return 0;
-}
-
-static ssize_t btaudio_dsp_read(struct file *file, char __user *buffer,
- size_t swcount, loff_t *ppos)
-{
- struct btaudio *bta = file->private_data;
- int hwcount = swcount << bta->sampleshift;
- int nsrc, ndst, err, ret = 0;
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&bta->readq, &wait);
- mutex_lock(&bta->lock);
- while (swcount > 0) {
- if (0 == bta->read_count) {
- if (!bta->recording) {
- if (0 != (err = start_recording(bta))) {
- if (0 == ret)
- ret = err;
- break;
- }
- }
- if (file->f_flags & O_NONBLOCK) {
- if (0 == ret)
- ret = -EAGAIN;
- break;
- }
- mutex_unlock(&bta->lock);
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- mutex_lock(&bta->lock);
- if(signal_pending(current)) {
- if (0 == ret)
- ret = -EINTR;
- break;
- }
- }
- nsrc = (bta->read_count < hwcount) ? bta->read_count : hwcount;
- if (nsrc > bta->buf_size - bta->read_offset)
- nsrc = bta->buf_size - bta->read_offset;
- ndst = nsrc >> bta->sampleshift;
-
- if ((bta->analog && 0 == bta->sampleshift) ||
- (!bta->analog && 2 == bta->channels)) {
- /* just copy */
- if (copy_to_user(buffer + ret, bta->buf_cpu + bta->read_offset, nsrc)) {
- if (0 == ret)
- ret = -EFAULT;
- break;
- }
-
- } else if (!bta->analog) {
- /* stereo => mono (digital audio) */
- __s16 *src = (__s16*)(bta->buf_cpu + bta->read_offset);
- __s16 __user *dst = (__s16 __user *)(buffer + ret);
- __s16 avg;
- int n = ndst>>1;
- if (!access_ok(VERIFY_WRITE, dst, ndst)) {
- if (0 == ret)
- ret = -EFAULT;
- break;
- }
- for (; n; n--, dst++) {
- avg = (__s16)le16_to_cpu(*src) / 2; src++;
- avg += (__s16)le16_to_cpu(*src) / 2; src++;
- __put_user(cpu_to_le16(avg),dst);
- }
-
- } else if (8 == bta->bits) {
- /* copy + byte downsampling (audio A/D) */
- __u8 *src = bta->buf_cpu + bta->read_offset;
- __u8 __user *dst = buffer + ret;
- int n = ndst;
- if (!access_ok(VERIFY_WRITE, dst, ndst)) {
- if (0 == ret)
- ret = -EFAULT;
- break;
- }
- for (; n; n--, src += (1 << bta->sampleshift), dst++)
- __put_user(*src, dst);
-
- } else {
- /* copy + word downsampling (audio A/D) */
- __u16 *src = (__u16*)(bta->buf_cpu + bta->read_offset);
- __u16 __user *dst = (__u16 __user *)(buffer + ret);
- int n = ndst>>1;
- if (!access_ok(VERIFY_WRITE,dst,ndst)) {
- if (0 == ret)
- ret = -EFAULT;
- break;
- }
- for (; n; n--, src += (1 << bta->sampleshift), dst++)
- __put_user(*src, dst);
- }
-
- ret += ndst;
- swcount -= ndst;
- hwcount -= nsrc;
- bta->read_count -= nsrc;
- bta->read_offset += nsrc;
- if (bta->read_offset == bta->buf_size)
- bta->read_offset = 0;
- }
- mutex_unlock(&bta->lock);
- remove_wait_queue(&bta->readq, &wait);
- current->state = TASK_RUNNING;
- return ret;
-}
-
-static ssize_t btaudio_dsp_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- return -EINVAL;
-}
-
-static int btaudio_dsp_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- struct btaudio *bta = file->private_data;
- int s, i, ret, val = 0;
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
-
- switch (cmd) {
- case OSS_GETVERSION:
- return put_user(SOUND_VERSION, p);
- case SNDCTL_DSP_GETCAPS:
- return 0;
-
- case SNDCTL_DSP_SPEED:
- if (get_user(val, p))
- return -EFAULT;
- if (bta->analog) {
- for (s = 0; s < 16; s++)
- if (val << s >= HWBASE_AD*4/15)
- break;
- for (i = 15; i >= 5; i--)
- if (val << s <= HWBASE_AD*4/i)
- break;
- bta->sampleshift = s;
- bta->decimation = i;
- if (debug)
- printk(KERN_DEBUG "btaudio: rate: req=%d "
- "dec=%d shift=%d hwrate=%d swrate=%d\n",
- val,i,s,(HWBASE_AD*4/i),(HWBASE_AD*4/i)>>s);
- } else {
- bta->sampleshift = (bta->channels == 2) ? 0 : 1;
- bta->decimation = 0;
- }
- if (bta->recording) {
- mutex_lock(&bta->lock);
- stop_recording(bta);
- start_recording(bta);
- mutex_unlock(&bta->lock);
- }
- /* fall through */
- case SOUND_PCM_READ_RATE:
- if (bta->analog) {
- return put_user(HWBASE_AD*4/bta->decimation>>bta->sampleshift, p);
- } else {
- return put_user(bta->rate, p);
- }
-
- case SNDCTL_DSP_STEREO:
- if (!bta->analog) {
- if (get_user(val, p))
- return -EFAULT;
- bta->channels = (val > 0) ? 2 : 1;
- bta->sampleshift = (bta->channels == 2) ? 0 : 1;
- if (debug)
- printk(KERN_INFO
- "btaudio: stereo=%d channels=%d\n",
- val,bta->channels);
- } else {
- if (val == 1)
- return -EFAULT;
- else {
- bta->channels = 1;
- if (debug)
- printk(KERN_INFO
- "btaudio: stereo=0 channels=1\n");
- }
- }
- return put_user((bta->channels)-1, p);
-
- case SNDCTL_DSP_CHANNELS:
- if (!bta->analog) {
- if (get_user(val, p))
- return -EFAULT;
- bta->channels = (val > 1) ? 2 : 1;
- bta->sampleshift = (bta->channels == 2) ? 0 : 1;
- if (debug)
- printk(KERN_DEBUG
- "btaudio: val=%d channels=%d\n",
- val,bta->channels);
- }
- /* fall through */
- case SOUND_PCM_READ_CHANNELS:
- return put_user(bta->channels, p);
-
- case SNDCTL_DSP_GETFMTS: /* Returns a mask */
- if (bta->analog)
- return put_user(AFMT_S16_LE|AFMT_S8, p);
- else
- return put_user(AFMT_S16_LE, p);
-
- case SNDCTL_DSP_SETFMT: /* Selects ONE fmt*/
- if (get_user(val, p))
- return -EFAULT;
- if (val != AFMT_QUERY) {
- if (bta->analog)
- bta->bits = (val == AFMT_S8) ? 8 : 16;
- else
- bta->bits = 16;
- if (bta->recording) {
- mutex_lock(&bta->lock);
- stop_recording(bta);
- start_recording(bta);
- mutex_unlock(&bta->lock);
- }
- }
- if (debug)
- printk(KERN_DEBUG "btaudio: fmt: bits=%d\n",bta->bits);
- return put_user((bta->bits==16) ? AFMT_S16_LE : AFMT_S8,
- p);
- break;
- case SOUND_PCM_READ_BITS:
- return put_user(bta->bits, p);
-
- case SNDCTL_DSP_NONBLOCK:
- file->f_flags |= O_NONBLOCK;
- return 0;
-
- case SNDCTL_DSP_RESET:
- if (bta->recording) {
- mutex_lock(&bta->lock);
- stop_recording(bta);
- mutex_unlock(&bta->lock);
- }
- return 0;
- case SNDCTL_DSP_GETBLKSIZE:
- if (!bta->recording) {
- if (0 != (ret = alloc_buffer(bta)))
- return ret;
- if (0 != (ret = make_risc(bta)))
- return ret;
- }
- return put_user(bta->block_bytes>>bta->sampleshift,p);
-
- case SNDCTL_DSP_SYNC:
- /* NOP */
- return 0;
- case SNDCTL_DSP_GETISPACE:
- {
- audio_buf_info info;
- if (!bta->recording)
- return -EINVAL;
- info.fragsize = bta->block_bytes>>bta->sampleshift;
- info.fragstotal = bta->block_count;
- info.bytes = bta->read_count;
- info.fragments = info.bytes / info.fragsize;
- if (debug)
- printk(KERN_DEBUG "btaudio: SNDCTL_DSP_GETISPACE "
- "returns %d/%d/%d/%d\n",
- info.fragsize, info.fragstotal,
- info.bytes, info.fragments);
- if (copy_to_user(argp, &info, sizeof(info)))
- return -EFAULT;
- return 0;
- }
-#if 0 /* TODO */
- case SNDCTL_DSP_GETTRIGGER:
- case SNDCTL_DSP_SETTRIGGER:
- case SNDCTL_DSP_SETFRAGMENT:
-#endif
- default:
- return -EINVAL;
- }
-}
-
-static unsigned int btaudio_dsp_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct btaudio *bta = file->private_data;
- unsigned int mask = 0;
-
- poll_wait(file, &bta->readq, wait);
-
- if (0 != bta->read_count)
- mask |= (POLLIN | POLLRDNORM);
-
- return mask;
-}
-
-static const struct file_operations btaudio_digital_dsp_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = btaudio_dsp_open_digital,
- .release = btaudio_dsp_release,
- .read = btaudio_dsp_read,
- .write = btaudio_dsp_write,
- .ioctl = btaudio_dsp_ioctl,
- .poll = btaudio_dsp_poll,
-};
-
-static const struct file_operations btaudio_analog_dsp_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = btaudio_dsp_open_analog,
- .release = btaudio_dsp_release,
- .read = btaudio_dsp_read,
- .write = btaudio_dsp_write,
- .ioctl = btaudio_dsp_ioctl,
- .poll = btaudio_dsp_poll,
-};
-
-/* -------------------------------------------------------------- */
-
-static char *irq_name[] = { "", "", "", "OFLOW", "", "", "", "", "", "", "",
- "RISCI", "FBUS", "FTRGT", "FDSR", "PPERR",
- "RIPERR", "PABORT", "OCERR", "SCERR" };
-
-static irqreturn_t btaudio_irq(int irq, void *dev_id)
-{
- int count = 0;
- u32 stat,astat;
- struct btaudio *bta = dev_id;
- int handled = 0;
-
- for (;;) {
- count++;
- stat = btread(REG_INT_STAT);
- astat = stat & btread(REG_INT_MASK);
- if (!astat)
- return IRQ_RETVAL(handled);
- handled = 1;
- btwrite(astat,REG_INT_STAT);
-
- if (irq_debug) {
- int i;
- printk(KERN_DEBUG "btaudio: irq loop=%d risc=%x, bits:",
- count, stat>>28);
- for (i = 0; i < (sizeof(irq_name)/sizeof(char*)); i++) {
- if (stat & (1 << i))
- printk(" %s",irq_name[i]);
- if (astat & (1 << i))
- printk("*");
- }
- printk("\n");
- }
- if (stat & IRQ_RISCI) {
- int blocks;
- blocks = (stat >> 28) - bta->dma_block;
- if (blocks < 0)
- blocks += bta->block_count;
- bta->dma_block = stat >> 28;
- if (bta->read_count + 2*bta->block_bytes > bta->buf_size) {
- stop_recording(bta);
- printk(KERN_INFO "btaudio: buffer overrun\n");
- }
- if (blocks > 0) {
- bta->read_count += blocks * bta->block_bytes;
- wake_up_interruptible(&bta->readq);
- }
- }
- if (count > 10) {
- printk(KERN_WARNING
- "btaudio: Oops - irq mask cleared\n");
- btwrite(0, REG_INT_MASK);
- }
- }
- return IRQ_NONE;
-}
-
-/* -------------------------------------------------------------- */
-
-static unsigned int dsp1 = -1;
-static unsigned int dsp2 = -1;
-static unsigned int mixer = -1;
-static int latency = -1;
-static int digital = 1;
-static int analog = 1;
-static int rate;
-
-#define BTA_OSPREY200 1
-
-static struct cardinfo cards[] = {
- [0] = {
- .name = "default",
- .rate = 32000,
- },
- [BTA_OSPREY200] = {
- .name = "Osprey 200",
- .rate = 44100,
- },
-};
-
-static int __devinit btaudio_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *pci_id)
-{
- struct btaudio *bta;
- struct cardinfo *card = &cards[pci_id->driver_data];
- unsigned char revision,lat;
- int rc = -EBUSY;
-
- if (pci_enable_device(pci_dev))
- return -EIO;
- if (!request_mem_region(pci_resource_start(pci_dev,0),
- pci_resource_len(pci_dev,0),
- "btaudio")) {
- return -EBUSY;
- }
-
- bta = kzalloc(sizeof(*bta),GFP_ATOMIC);
- if (!bta) {
- rc = -ENOMEM;
- goto fail0;
- }
-
- bta->pci = pci_dev;
- bta->irq = pci_dev->irq;
- bta->mem = pci_resource_start(pci_dev,0);
- bta->mmio = ioremap(pci_resource_start(pci_dev,0),
- pci_resource_len(pci_dev,0));
-
- bta->source = 1;
- bta->bits = 8;
- bta->channels = 1;
- if (bta->analog) {
- bta->decimation = 15;
- } else {
- bta->decimation = 0;
- bta->sampleshift = 1;
- }
-
- /* sample rate */
- bta->rate = card->rate;
- if (rate)
- bta->rate = rate;
-
- mutex_init(&bta->lock);
- init_waitqueue_head(&bta->readq);
-
- if (-1 != latency) {
- printk(KERN_INFO "btaudio: setting pci latency timer to %d\n",
- latency);
- pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
- }
- pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &revision);
- pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &lat);
- printk(KERN_INFO "btaudio: Bt%x (rev %d) at %02x:%02x.%x, ",
- pci_dev->device,revision,pci_dev->bus->number,
- PCI_SLOT(pci_dev->devfn),PCI_FUNC(pci_dev->devfn));
- printk("irq: %d, latency: %d, mmio: 0x%lx\n",
- bta->irq, lat, bta->mem);
- printk("btaudio: using card config \"%s\"\n", card->name);
-
- /* init hw */
- btwrite(0, REG_GPIO_DMA_CTL);
- btwrite(0, REG_INT_MASK);
- btwrite(~0U, REG_INT_STAT);
- pci_set_master(pci_dev);
-
- if ((rc = request_irq(bta->irq, btaudio_irq, IRQF_SHARED|IRQF_DISABLED,
- "btaudio",(void *)bta)) < 0) {
- printk(KERN_WARNING
- "btaudio: can't request irq (rc=%d)\n",rc);
- goto fail1;
- }
-
- /* register devices */
- if (digital) {
- rc = bta->dsp_digital =
- register_sound_dsp(&btaudio_digital_dsp_fops,dsp1);
- if (rc < 0) {
- printk(KERN_WARNING
- "btaudio: can't register digital dsp (rc=%d)\n",rc);
- goto fail2;
- }
- printk(KERN_INFO "btaudio: registered device dsp%d [digital]\n",
- bta->dsp_digital >> 4);
- }
- if (analog) {
- rc = bta->dsp_analog =
- register_sound_dsp(&btaudio_analog_dsp_fops,dsp2);
- if (rc < 0) {
- printk(KERN_WARNING
- "btaudio: can't register analog dsp (rc=%d)\n",rc);
- goto fail3;
- }
- printk(KERN_INFO "btaudio: registered device dsp%d [analog]\n",
- bta->dsp_analog >> 4);
- rc = bta->mixer_dev = register_sound_mixer(&btaudio_mixer_fops,mixer);
- if (rc < 0) {
- printk(KERN_WARNING
- "btaudio: can't register mixer (rc=%d)\n",rc);
- goto fail4;
- }
- printk(KERN_INFO "btaudio: registered device mixer%d\n",
- bta->mixer_dev >> 4);
- }
-
- /* hook into linked list */
- bta->next = btaudios;
- btaudios = bta;
-
- pci_set_drvdata(pci_dev,bta);
- return 0;
-
- fail4:
- unregister_sound_dsp(bta->dsp_analog);
- fail3:
- if (digital)
- unregister_sound_dsp(bta->dsp_digital);
- fail2:
- free_irq(bta->irq,bta);
- fail1:
- iounmap(bta->mmio);
- kfree(bta);
- fail0:
- release_mem_region(pci_resource_start(pci_dev,0),
- pci_resource_len(pci_dev,0));
- return rc;
-}
-
-static void __devexit btaudio_remove(struct pci_dev *pci_dev)
-{
- struct btaudio *bta = pci_get_drvdata(pci_dev);
- struct btaudio *walk;
-
- /* turn off all DMA / IRQs */
- btand(~15, REG_GPIO_DMA_CTL);
- btwrite(0, REG_INT_MASK);
- btwrite(~0U, REG_INT_STAT);
-
- /* unregister devices */
- if (digital) {
- unregister_sound_dsp(bta->dsp_digital);
- }
- if (analog) {
- unregister_sound_dsp(bta->dsp_analog);
- unregister_sound_mixer(bta->mixer_dev);
- }
-
- /* free resources */
- free_buffer(bta);
- free_irq(bta->irq,bta);
- release_mem_region(pci_resource_start(pci_dev,0),
- pci_resource_len(pci_dev,0));
- iounmap(bta->mmio);
-
- /* remove from linked list */
- if (bta == btaudios) {
- btaudios = NULL;
- } else {
- for (walk = btaudios; walk->next != bta; walk = walk->next)
- ; /* if (NULL == walk->next) BUG(); */
- walk->next = bta->next;
- }
-
- pci_set_drvdata(pci_dev, NULL);
- kfree(bta);
- return;
-}
-
-/* -------------------------------------------------------------- */
-
-static struct pci_device_id btaudio_pci_tbl[] = {
- {
- .vendor = PCI_VENDOR_ID_BROOKTREE,
- .device = 0x0878,
- .subvendor = 0x0070,
- .subdevice = 0xff01,
- .driver_data = BTA_OSPREY200,
- },{
- .vendor = PCI_VENDOR_ID_BROOKTREE,
- .device = 0x0878,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },{
- .vendor = PCI_VENDOR_ID_BROOKTREE,
- .device = 0x0878,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },{
- /* --- end of list --- */
- }
-};
-
-static struct pci_driver btaudio_pci_driver = {
- .name = "btaudio",
- .id_table = btaudio_pci_tbl,
- .probe = btaudio_probe,
- .remove = __devexit_p(btaudio_remove),
-};
-
-static int btaudio_init_module(void)
-{
- printk(KERN_INFO "btaudio: driver version 0.7 loaded [%s%s%s]\n",
- digital ? "digital" : "",
- analog && digital ? "+" : "",
- analog ? "analog" : "");
- return pci_register_driver(&btaudio_pci_driver);
-}
-
-static void btaudio_cleanup_module(void)
-{
- pci_unregister_driver(&btaudio_pci_driver);
- return;
-}
-
-module_init(btaudio_init_module);
-module_exit(btaudio_cleanup_module);
-
-module_param(dsp1, int, S_IRUGO);
-module_param(dsp2, int, S_IRUGO);
-module_param(mixer, int, S_IRUGO);
-module_param(debug, int, S_IRUGO | S_IWUSR);
-module_param(irq_debug, int, S_IRUGO | S_IWUSR);
-module_param(digital, int, S_IRUGO);
-module_param(analog, int, S_IRUGO);
-module_param(rate, int, S_IRUGO);
-module_param(latency, int, S_IRUGO);
-MODULE_PARM_DESC(latency,"pci latency timer");
-
-MODULE_DEVICE_TABLE(pci, btaudio_pci_tbl);
-MODULE_DESCRIPTION("bt878 audio dma driver");
-MODULE_AUTHOR("Gerd Knorr");
-MODULE_LICENSE("GPL");
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
diff --git a/sound/oss/cs4232.c b/sound/oss/cs4232.c
deleted file mode 100644
index de40e21bf279..000000000000
--- a/sound/oss/cs4232.c
+++ /dev/null
@@ -1,526 +0,0 @@
-/*
- * Copyright (C) by Hannu Savolainen 1993-1997
- *
- * cs4232.c
- *
- * The low level driver for Crystal CS4232 based cards. The CS4232 is
- * a PnP compatible chip which contains a CS4231A codec, SB emulation,
- * a MPU401 compatible MIDI port, joystick and synthesizer and IDE CD-ROM
- * interfaces. This is just a temporary driver until full PnP support
- * gets implemented. Just the WSS codec, FM synth and the MIDI ports are
- * supported. Other interfaces are left uninitialized.
- *
- * ifdef ...WAVEFRONT...
- *
- * Support is provided for initializing the WaveFront synth
- * interface as well, which is logical device #4. Note that if
- * you have a Tropez+ card, you probably don't need to setup
- * the CS4232-supported MIDI interface, since it corresponds to
- * the internal 26-pin header that's hard to access. Using this
- * requires an additional IRQ, a resource none too plentiful in
- * this environment. Just don't set module parameters mpuio and
- * mpuirq, and the MIDI port will be left uninitialized. You can
- * still use the ICS2115 hosted MIDI interface which corresponds
- * to the 9-pin D connector on the back of the card.
- *
- * endif ...WAVEFRONT...
- *
- * Supported chips are:
- * CS4232
- * CS4236
- * CS4236B
- *
- * Note: You will need a PnP config setup to initialise some CS4232 boards
- * anyway.
- *
- * Changes
- * John Rood Added Bose Sound System Support.
- * Toshio Spoor
- * Alan Cox Modularisation, Basic cleanups.
- * Paul Barton-Davis Separated MPU configuration, added
- * Tropez+ (WaveFront) support
- * Christoph Hellwig Adapted to module_init/module_exit,
- * simple cleanups
- * Arnaldo C. de Melo got rid of attach_uart401
- * Bartlomiej Zolnierkiewicz
- * Added some __init/__initdata/__exit
- * Marcus Meissner Added ISA PnP support.
- */
-
-#include <linux/pnp.h>
-#include <linux/module.h>
-#include <linux/init.h>
-
-#include "sound_config.h"
-
-#include "ad1848.h"
-#include "mpu401.h"
-
-#define KEY_PORT 0x279 /* Same as LPT1 status port */
-#define CSN_NUM 0x99 /* Just a random number */
-#define INDEX_ADDRESS 0x00 /* (R0) Index Address Register */
-#define INDEX_DATA 0x01 /* (R1) Indexed Data Register */
-#define PIN_CONTROL 0x0a /* (I10) Pin Control */
-#define ENABLE_PINS 0xc0 /* XCTRL0/XCTRL1 enable */
-
-static void CS_OUT(unsigned char a)
-{
- outb(a, KEY_PORT);
-}
-
-#define CS_OUT2(a, b) {CS_OUT(a);CS_OUT(b);}
-#define CS_OUT3(a, b, c) {CS_OUT(a);CS_OUT(b);CS_OUT(c);}
-
-static int __initdata bss = 0;
-static int mpu_base, mpu_irq;
-static int synth_base, synth_irq;
-static int mpu_detected;
-
-static int probe_cs4232_mpu(struct address_info *hw_config)
-{
- /*
- * Just write down the config values.
- */
-
- mpu_base = hw_config->io_base;
- mpu_irq = hw_config->irq;
-
- return 1;
-}
-
-static unsigned char crystal_key[] = /* A 32 byte magic key sequence */
-{
- 0x96, 0x35, 0x9a, 0xcd, 0xe6, 0xf3, 0x79, 0xbc,
- 0x5e, 0xaf, 0x57, 0x2b, 0x15, 0x8a, 0xc5, 0xe2,
- 0xf1, 0xf8, 0x7c, 0x3e, 0x9f, 0x4f, 0x27, 0x13,
- 0x09, 0x84, 0x42, 0xa1, 0xd0, 0x68, 0x34, 0x1a
-};
-
-static void sleep(unsigned howlong)
-{
- current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(howlong);
-}
-
-static void enable_xctrl(int baseio)
-{
- unsigned char regd;
-
- /*
- * Some IBM Aptiva's have the Bose Sound System. By default
- * the Bose Amplifier is disabled. The amplifier will be
- * activated, by setting the XCTRL0 and XCTRL1 bits.
- * Volume of the monitor bose speakers/woofer, can then
- * be set by changing the PCM volume.
- *
- */
-
- printk("cs4232: enabling Bose Sound System Amplifier.\n");
-
- /* Switch to Pin Control Address */
- regd = inb(baseio + INDEX_ADDRESS) & 0xe0;
- outb(((unsigned char) (PIN_CONTROL | regd)), baseio + INDEX_ADDRESS );
-
- /* Activate the XCTRL0 and XCTRL1 Pins */
- regd = inb(baseio + INDEX_DATA);
- outb(((unsigned char) (ENABLE_PINS | regd)), baseio + INDEX_DATA );
-}
-
-static int __init probe_cs4232(struct address_info *hw_config, int isapnp_configured)
-{
- int i, n;
- int base = hw_config->io_base, irq = hw_config->irq;
- int dma1 = hw_config->dma, dma2 = hw_config->dma2;
- struct resource *ports;
-
- if (base == -1 || irq == -1 || dma1 == -1) {
- printk(KERN_ERR "cs4232: dma, irq and io must be set.\n");
- return 0;
- }
-
- /*
- * Verify that the I/O port range is free.
- */
-
- ports = request_region(base, 4, "ad1848");
- if (!ports) {
- printk(KERN_ERR "cs4232.c: I/O port 0x%03x not free\n", base);
- return 0;
- }
- if (ad1848_detect(ports, NULL, hw_config->osp)) {
- goto got_it; /* The card is already active */
- }
- if (isapnp_configured) {
- printk(KERN_ERR "cs4232.c: ISA PnP configured, but not detected?\n");
- goto fail;
- }
-
- /*
- * This version of the driver doesn't use the PnP method when configuring
- * the card but a simplified method defined by Crystal. This means that
- * just one CS4232 compatible device can exist on the system. Also this
- * method conflicts with possible PnP support in the OS. For this reason
- * driver is just a temporary kludge.
- *
- * Also the Cirrus/Crystal method doesn't always work. Try ISA PnP first ;)
- */
-
- /*
- * Repeat initialization few times since it doesn't always succeed in
- * first time.
- */
-
- for (n = 0; n < 4; n++)
- {
- /*
- * Wake up the card by sending a 32 byte Crystal key to the key port.
- */
-
- for (i = 0; i < 32; i++)
- CS_OUT(crystal_key[i]);
-
- sleep(HZ / 10);
-
- /*
- * Now set the CSN (Card Select Number).
- */
-
- CS_OUT2(0x06, CSN_NUM);
-
- /*
- * Then set some config bytes. First logical device 0
- */
-
- CS_OUT2(0x15, 0x00); /* Select logical device 0 (WSS/SB/FM) */
- CS_OUT3(0x47, (base >> 8) & 0xff, base & 0xff); /* WSS base */
-
- if (!request_region(0x388, 4, "FM")) /* Not free */
- CS_OUT3(0x48, 0x00, 0x00) /* FM base off */
- else {
- release_region(0x388, 4);
- CS_OUT3(0x48, 0x03, 0x88); /* FM base 0x388 */
- }
-
- CS_OUT3(0x42, 0x00, 0x00); /* SB base off */
- CS_OUT2(0x22, irq); /* SB+WSS IRQ */
- CS_OUT2(0x2a, dma1); /* SB+WSS DMA */
-
- if (dma2 != -1)
- CS_OUT2(0x25, dma2) /* WSS DMA2 */
- else
- CS_OUT2(0x25, 4); /* No WSS DMA2 */
-
- CS_OUT2(0x33, 0x01); /* Activate logical dev 0 */
-
- sleep(HZ / 10);
-
- /*
- * Initialize logical device 3 (MPU)
- */
-
- if (mpu_base != 0 && mpu_irq != 0)
- {
- CS_OUT2(0x15, 0x03); /* Select logical device 3 (MPU) */
- CS_OUT3(0x47, (mpu_base >> 8) & 0xff, mpu_base & 0xff); /* MPU base */
- CS_OUT2(0x22, mpu_irq); /* MPU IRQ */
- CS_OUT2(0x33, 0x01); /* Activate logical dev 3 */
- }
-
- if(synth_base != 0)
- {
- CS_OUT2 (0x15, 0x04); /* logical device 4 (WaveFront) */
- CS_OUT3 (0x47, (synth_base >> 8) & 0xff,
- synth_base & 0xff); /* base */
- CS_OUT2 (0x22, synth_irq); /* IRQ */
- CS_OUT2 (0x33, 0x01); /* Activate logical dev 4 */
- }
-
- /*
- * Finally activate the chip
- */
-
- CS_OUT(0x79);
-
- sleep(HZ / 5);
-
- /*
- * Then try to detect the codec part of the chip
- */
-
- if (ad1848_detect(ports, NULL, hw_config->osp))
- goto got_it;
-
- sleep(HZ);
- }
-fail:
- release_region(base, 4);
- return 0;
-
-got_it:
- if (dma2 == -1)
- dma2 = dma1;
-
- hw_config->slots[0] = ad1848_init("Crystal audio controller", ports,
- irq,
- dma1, /* Playback DMA */
- dma2, /* Capture DMA */
- 0,
- hw_config->osp,
- THIS_MODULE);
-
- if (hw_config->slots[0] != -1 &&
- audio_devs[hw_config->slots[0]]->mixer_dev!=-1)
- {
- /* Assume the mixer map is as suggested in the CS4232 databook */
- AD1848_REROUTE(SOUND_MIXER_LINE1, SOUND_MIXER_LINE);
- AD1848_REROUTE(SOUND_MIXER_LINE2, SOUND_MIXER_CD);
- AD1848_REROUTE(SOUND_MIXER_LINE3, SOUND_MIXER_SYNTH); /* FM synth */
- }
- if (mpu_base != 0 && mpu_irq != 0)
- {
- static struct address_info hw_config2 = {
- 0
- }; /* Ensure it's initialized */
-
- hw_config2.io_base = mpu_base;
- hw_config2.irq = mpu_irq;
- hw_config2.dma = -1;
- hw_config2.dma2 = -1;
- hw_config2.always_detect = 0;
- hw_config2.name = NULL;
- hw_config2.driver_use_1 = 0;
- hw_config2.driver_use_2 = 0;
- hw_config2.card_subtype = 0;
-
- if (probe_uart401(&hw_config2, THIS_MODULE))
- {
- mpu_detected = 1;
- }
- else
- {
- mpu_base = mpu_irq = 0;
- }
- hw_config->slots[1] = hw_config2.slots[1];
- }
-
- if (bss)
- enable_xctrl(base);
-
- return 1;
-}
-
-static void __devexit unload_cs4232(struct address_info *hw_config)
-{
- int base = hw_config->io_base, irq = hw_config->irq;
- int dma1 = hw_config->dma, dma2 = hw_config->dma2;
-
- if (dma2 == -1)
- dma2 = dma1;
-
- ad1848_unload(base,
- irq,
- dma1, /* Playback DMA */
- dma2, /* Capture DMA */
- 0);
-
- sound_unload_audiodev(hw_config->slots[0]);
- if (mpu_base != 0 && mpu_irq != 0 && mpu_detected)
- {
- static struct address_info hw_config2 =
- {
- 0
- }; /* Ensure it's initialized */
-
- hw_config2.io_base = mpu_base;
- hw_config2.irq = mpu_irq;
- hw_config2.dma = -1;
- hw_config2.dma2 = -1;
- hw_config2.always_detect = 0;
- hw_config2.name = NULL;
- hw_config2.driver_use_1 = 0;
- hw_config2.driver_use_2 = 0;
- hw_config2.card_subtype = 0;
- hw_config2.slots[1] = hw_config->slots[1];
-
- unload_uart401(&hw_config2);
- }
-}
-
-static struct address_info cfg;
-static struct address_info cfg_mpu;
-
-static int __initdata io = -1;
-static int __initdata irq = -1;
-static int __initdata dma = -1;
-static int __initdata dma2 = -1;
-static int __initdata mpuio = -1;
-static int __initdata mpuirq = -1;
-static int __initdata synthio = -1;
-static int __initdata synthirq = -1;
-static int __initdata isapnp = 1;
-
-static unsigned int cs4232_devices;
-
-MODULE_DESCRIPTION("CS4232 based soundcard driver");
-MODULE_AUTHOR("Hannu Savolainen, Paul Barton-Davis");
-MODULE_LICENSE("GPL");
-
-module_param(io, int, 0);
-MODULE_PARM_DESC(io,"base I/O port for AD1848");
-module_param(irq, int, 0);
-MODULE_PARM_DESC(irq,"IRQ for AD1848 chip");
-module_param(dma, int, 0);
-MODULE_PARM_DESC(dma,"8 bit DMA for AD1848 chip");
-module_param(dma2, int, 0);
-MODULE_PARM_DESC(dma2,"16 bit DMA for AD1848 chip");
-module_param(mpuio, int, 0);
-MODULE_PARM_DESC(mpuio,"MPU 401 base address");
-module_param(mpuirq, int, 0);
-MODULE_PARM_DESC(mpuirq,"MPU 401 IRQ");
-module_param(synthio, int, 0);
-MODULE_PARM_DESC(synthio,"Maui WaveTable base I/O port");
-module_param(synthirq, int, 0);
-MODULE_PARM_DESC(synthirq,"Maui WaveTable IRQ");
-module_param(isapnp, bool, 0);
-MODULE_PARM_DESC(isapnp,"Enable ISAPnP probing (default 1)");
-module_param(bss, bool, 0);
-MODULE_PARM_DESC(bss,"Enable Bose Sound System Support (default 0)");
-
-/*
- * Install a CS4232 based card. Need to have ad1848 and mpu401
- * loaded ready.
- */
-
-/* All cs4232 based cards have the main ad1848 card either as CSC0000 or
- * CSC0100. */
-static const struct pnp_device_id cs4232_pnp_table[] = {
- { .id = "CSC0100", .driver_data = 0 },
- { .id = "CSC0000", .driver_data = 0 },
- /* Guillemot Turtlebeach something appears to be cs4232 compatible
- * (untested) */
- { .id = "GIM0100", .driver_data = 0 },
- { .id = ""}
-};
-
-MODULE_DEVICE_TABLE(pnp, cs4232_pnp_table);
-
-static int __init cs4232_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
-{
- struct address_info *isapnpcfg;
-
- isapnpcfg = kmalloc(sizeof(*isapnpcfg),GFP_KERNEL);
- if (!isapnpcfg)
- return -ENOMEM;
-
- isapnpcfg->irq = pnp_irq(dev, 0);
- isapnpcfg->dma = pnp_dma(dev, 0);
- isapnpcfg->dma2 = pnp_dma(dev, 1);
- isapnpcfg->io_base = pnp_port_start(dev, 0);
- if (probe_cs4232(isapnpcfg,TRUE) == 0) {
- printk(KERN_ERR "cs4232: ISA PnP card found, but not detected?\n");
- kfree(isapnpcfg);
- return -ENODEV;
- }
- pnp_set_drvdata(dev,isapnpcfg);
- cs4232_devices++;
- return 0;
-}
-
-static void __devexit cs4232_pnp_remove(struct pnp_dev *dev)
-{
- struct address_info *cfg = pnp_get_drvdata(dev);
- if (cfg) {
- unload_cs4232(cfg);
- kfree(cfg);
- }
-}
-
-static struct pnp_driver cs4232_driver = {
- .name = "cs4232",
- .id_table = cs4232_pnp_table,
- .probe = cs4232_pnp_probe,
- .remove = __devexit_p(cs4232_pnp_remove),
-};
-
-static int __init init_cs4232(void)
-{
-#ifdef CONFIG_SOUND_WAVEFRONT_MODULE
- if(synthio == -1)
- printk(KERN_INFO "cs4232: set synthio and synthirq to use the wavefront facilities.\n");
- else {
- synth_base = synthio;
- synth_irq = synthirq;
- }
-#else
- if(synthio != -1)
- printk(KERN_WARNING "cs4232: wavefront support not enabled in this driver.\n");
-#endif
- cfg.irq = -1;
-
- if (isapnp) {
- pnp_register_driver(&cs4232_driver);
- if (cs4232_devices)
- return 0;
- }
-
- if(io==-1||irq==-1||dma==-1)
- {
- printk(KERN_ERR "cs4232: Must set io, irq and dma.\n");
- return -ENODEV;
- }
-
- cfg.io_base = io;
- cfg.irq = irq;
- cfg.dma = dma;
- cfg.dma2 = dma2;
-
- cfg_mpu.io_base = -1;
- cfg_mpu.irq = -1;
-
- if (mpuio != -1 && mpuirq != -1) {
- cfg_mpu.io_base = mpuio;
- cfg_mpu.irq = mpuirq;
- probe_cs4232_mpu(&cfg_mpu); /* Bug always returns 0 not OK -- AC */
- }
-
- if (probe_cs4232(&cfg,FALSE) == 0)
- return -ENODEV;
-
- return 0;
-}
-
-static void __exit cleanup_cs4232(void)
-{
- pnp_unregister_driver(&cs4232_driver);
- if (cfg.irq != -1)
- unload_cs4232(&cfg); /* Unloads global MPU as well, if needed */
-}
-
-module_init(init_cs4232);
-module_exit(cleanup_cs4232);
-
-#ifndef MODULE
-static int __init setup_cs4232(char *str)
-{
- /* io, irq, dma, dma2 mpuio, mpuirq*/
- int ints[7];
-
- /* If we have isapnp cards, no need for options */
- pnp_register_driver(&cs4232_driver);
- if (cs4232_devices)
- return 1;
-
- str = get_options(str, ARRAY_SIZE(ints), ints);
-
- io = ints[1];
- irq = ints[2];
- dma = ints[3];
- dma2 = ints[4];
- mpuio = ints[5];
- mpuirq = ints[6];
-
- return 1;
-}
-
-__setup("cs4232=", setup_cs4232);
-#endif
diff --git a/sound/oss/dmasound/Kconfig b/sound/oss/dmasound/Kconfig
index 71b313479f83..3eb782720e58 100644
--- a/sound/oss/dmasound/Kconfig
+++ b/sound/oss/dmasound/Kconfig
@@ -14,7 +14,7 @@ config DMASOUND_ATARI
config DMASOUND_PAULA
tristate "Amiga DMA sound support"
- depends on (AMIGA || APUS) && SOUND
+ depends on AMIGA && SOUND
select DMASOUND
help
If you want to use the internal audio of your Amiga in Linux, answer
diff --git a/sound/oss/dmasound/dmasound_paula.c b/sound/oss/dmasound/dmasound_paula.c
index 90fc058e1159..202e8103dc4d 100644
--- a/sound/oss/dmasound/dmasound_paula.c
+++ b/sound/oss/dmasound/dmasound_paula.c
@@ -91,10 +91,6 @@ static irqreturn_t AmiInterrupt(int irq, void *dummy);
* power LED are controlled by the same line.
*/
-#ifdef CONFIG_APUS
-#define mach_heartbeat ppc_md.heartbeat
-#endif
-
static void (*saved_heartbeat)(int) = NULL;
static inline void disable_heartbeat(void)
diff --git a/sound/oss/i810_audio.c b/sound/oss/i810_audio.c
deleted file mode 100644
index f5e31f11973d..000000000000
--- a/sound/oss/i810_audio.c
+++ /dev/null
@@ -1,3656 +0,0 @@
-/*
- * Intel i810 and friends ICH driver for Linux
- * Alan Cox <alan@redhat.com>
- *
- * Built from:
- * Low level code: Zach Brown (original nonworking i810 OSS driver)
- * Jaroslav Kysela <perex@suse.cz> (working ALSA driver)
- *
- * Framework: Thomas Sailer <sailer@ife.ee.ethz.ch>
- * Extended by: Zach Brown <zab@redhat.com>
- * and others..
- *
- * Hardware Provided By:
- * Analog Devices (A major AC97 codec maker)
- * Intel Corp (you've probably heard of them already)
- *
- * AC97 clues and assistance provided by
- * Analog Devices
- * Zach 'Fufu' Brown
- * Jeff Garzik
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- *
- * Intel 810 theory of operation
- *
- * The chipset provides three DMA channels that talk to an AC97
- * CODEC (AC97 is a digital/analog mixer standard). At its simplest
- * you get 48Khz audio with basic volume and mixer controls. At the
- * best you get rate adaption in the codec. We set the card up so
- * that we never take completion interrupts but instead keep the card
- * chasing its tail around a ring buffer. This is needed for mmap
- * mode audio and happens to work rather well for non-mmap modes too.
- *
- * The board has one output channel for PCM audio (supported) and
- * a stereo line in and mono microphone input. Again these are normally
- * locked to 48Khz only. Right now recording is not finished.
- *
- * There is no midi support, no synth support. Use timidity. To get
- * esd working you need to use esd -r 48000 as it won't probe 48KHz
- * by default. mpg123 can't handle 48Khz only audio so use xmms.
- *
- * Fix The Sound On Dell
- *
- * Not everyone uses 48KHz. We know of no way to detect this reliably
- * and certainly not to get the right data. If your i810 audio sounds
- * stupid you may need to investigate other speeds. According to Analog
- * they tend to use a 14.318MHz clock which gives you a base rate of
- * 41194Hz.
- *
- * This is available via the 'ftsodell=1' option.
- *
- * If you need to force a specific rate set the clocking= option
- *
- * This driver is cursed. (Ben LaHaise)
- *
- * ICH 3 caveats
- * Intel errata #7 for ICH3 IO. We need to disable SMI stuff
- * when codec probing. [Not Yet Done]
- *
- * ICH 4 caveats
- *
- * The ICH4 has the feature that the codec ID doesn't have to be
- * congruent with the IO connection.
- *
- * Therefore, from driver version 0.23 on, there is a "codec ID" <->
- * "IO register base offset" mapping (card->ac97_id_map) field.
- *
- * Juergen "George" Sawinski (jsaw)
- */
-
-#include <linux/module.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/sound.h>
-#include <linux/slab.h>
-#include <linux/soundcard.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <linux/spinlock.h>
-#include <linux/smp_lock.h>
-#include <linux/ac97_codec.h>
-#include <linux/bitops.h>
-#include <linux/mutex.h>
-#include <linux/mm.h>
-
-#include <asm/uaccess.h>
-
-#define DRIVER_VERSION "1.01"
-
-#define MODULOP2(a, b) ((a) & ((b) - 1))
-#define MASKP2(a, b) ((a) & ~((b) - 1))
-
-static int ftsodell;
-static int strict_clocking;
-static unsigned int clocking;
-static int spdif_locked;
-static int ac97_quirk = AC97_TUNE_DEFAULT;
-
-//#define DEBUG
-//#define DEBUG2
-//#define DEBUG_INTERRUPTS
-//#define DEBUG_MMAP
-//#define DEBUG_MMIO
-
-#define ADC_RUNNING 1
-#define DAC_RUNNING 2
-
-#define I810_FMT_16BIT 1
-#define I810_FMT_STEREO 2
-#define I810_FMT_MASK 3
-
-#define SPDIF_ON 0x0004
-#define SURR_ON 0x0010
-#define CENTER_LFE_ON 0x0020
-#define VOL_MUTED 0x8000
-
-/* the 810's array of pointers to data buffers */
-
-struct sg_item {
-#define BUSADDR_MASK 0xFFFFFFFE
- u32 busaddr;
-#define CON_IOC 0x80000000 /* interrupt on completion */
-#define CON_BUFPAD 0x40000000 /* pad underrun with last sample, else 0 */
-#define CON_BUFLEN_MASK 0x0000ffff /* buffer length in samples */
- u32 control;
-};
-
-/* an instance of the i810 channel */
-#define SG_LEN 32
-struct i810_channel
-{
- /* these sg guys should probably be allocated
- separately as nocache. Must be 8 byte aligned */
- struct sg_item sg[SG_LEN]; /* 32*8 */
- u32 offset; /* 4 */
- u32 port; /* 4 */
- u32 used;
- u32 num;
-};
-
-/*
- * we have 3 separate dma engines. pcm in, pcm out, and mic.
- * each dma engine has controlling registers. These goofy
- * names are from the datasheet, but make it easy to write
- * code while leafing through it.
- *
- * ICH4 has 6 dma engines, pcm in, pcm out, mic, pcm in 2,
- * mic in 2, s/pdif. Of special interest is the fact that
- * the upper 3 DMA engines on the ICH4 *must* be accessed
- * via mmio access instead of pio access.
- */
-
-#define ENUM_ENGINE(PRE,DIG) \
-enum { \
- PRE##_BASE = 0x##DIG##0, /* Base Address */ \
- PRE##_BDBAR = 0x##DIG##0, /* Buffer Descriptor list Base Address */ \
- PRE##_CIV = 0x##DIG##4, /* Current Index Value */ \
- PRE##_LVI = 0x##DIG##5, /* Last Valid Index */ \
- PRE##_SR = 0x##DIG##6, /* Status Register */ \
- PRE##_PICB = 0x##DIG##8, /* Position In Current Buffer */ \
- PRE##_PIV = 0x##DIG##a, /* Prefetched Index Value */ \
- PRE##_CR = 0x##DIG##b /* Control Register */ \
-}
-
-ENUM_ENGINE(OFF,0); /* Offsets */
-ENUM_ENGINE(PI,0); /* PCM In */
-ENUM_ENGINE(PO,1); /* PCM Out */
-ENUM_ENGINE(MC,2); /* Mic In */
-
-enum {
- GLOB_CNT = 0x2c, /* Global Control */
- GLOB_STA = 0x30, /* Global Status */
- CAS = 0x34 /* Codec Write Semaphore Register */
-};
-
-ENUM_ENGINE(MC2,4); /* Mic In 2 */
-ENUM_ENGINE(PI2,5); /* PCM In 2 */
-ENUM_ENGINE(SP,6); /* S/PDIF */
-
-enum {
- SDM = 0x80 /* SDATA_IN Map Register */
-};
-
-/* interrupts for a dma engine */
-#define DMA_INT_FIFO (1<<4) /* fifo under/over flow */
-#define DMA_INT_COMPLETE (1<<3) /* buffer read/write complete and ioc set */
-#define DMA_INT_LVI (1<<2) /* last valid done */
-#define DMA_INT_CELV (1<<1) /* last valid is current */
-#define DMA_INT_DCH (1) /* DMA Controller Halted (happens on LVI interrupts) */
-#define DMA_INT_MASK (DMA_INT_FIFO|DMA_INT_COMPLETE|DMA_INT_LVI)
-
-/* interrupts for the whole chip */
-#define INT_SEC (1<<11)
-#define INT_PRI (1<<10)
-#define INT_MC (1<<7)
-#define INT_PO (1<<6)
-#define INT_PI (1<<5)
-#define INT_MO (1<<2)
-#define INT_NI (1<<1)
-#define INT_GPI (1<<0)
-#define INT_MASK (INT_SEC|INT_PRI|INT_MC|INT_PO|INT_PI|INT_MO|INT_NI|INT_GPI)
-
-/* magic numbers to protect our data structures */
-#define I810_CARD_MAGIC 0x5072696E /* "Prin" */
-#define I810_STATE_MAGIC 0x63657373 /* "cess" */
-#define I810_DMA_MASK 0xffffffff /* DMA buffer mask for pci_alloc_consist */
-#define NR_HW_CH 3
-
-/* maximum number of AC97 codecs connected, AC97 2.0 defined 4 */
-#define NR_AC97 4
-
-/* Please note that an 8bit mono stream is not valid on this card, you must have a 16bit */
-/* stream at a minimum for this card to be happy */
-static const unsigned sample_size[] = { 1, 2, 2, 4 };
-/* Samples are 16bit values, so we are shifting to a word, not to a byte, hence shift */
-/* values are one less than might be expected */
-static const unsigned sample_shift[] = { -1, 0, 0, 1 };
-
-enum {
- ICH82801AA = 0,
- ICH82901AB,
- INTEL440MX,
- INTELICH2,
- INTELICH3,
- INTELICH4,
- INTELICH5,
- SI7012,
- NVIDIA_NFORCE,
- AMD768,
- AMD8111
-};
-
-static char * card_names[] = {
- "Intel ICH 82801AA",
- "Intel ICH 82901AB",
- "Intel 440MX",
- "Intel ICH2",
- "Intel ICH3",
- "Intel ICH4",
- "Intel ICH5",
- "SiS 7012",
- "NVIDIA nForce Audio",
- "AMD 768",
- "AMD-8111 IOHub"
-};
-
-/* These are capabilities (and bugs) the chipsets _can_ have */
-static struct {
- int16_t nr_ac97;
-#define CAP_MMIO 0x0001
-#define CAP_20BIT_AUDIO_SUPPORT 0x0002
- u_int16_t flags;
-} card_cap[] = {
- { 1, 0x0000 }, /* ICH82801AA */
- { 1, 0x0000 }, /* ICH82901AB */
- { 1, 0x0000 }, /* INTEL440MX */
- { 1, 0x0000 }, /* INTELICH2 */
- { 2, 0x0000 }, /* INTELICH3 */
- { 3, 0x0003 }, /* INTELICH4 */
- { 3, 0x0003 }, /* INTELICH5 */
- /*@FIXME to be verified*/ { 2, 0x0000 }, /* SI7012 */
- /*@FIXME to be verified*/ { 2, 0x0000 }, /* NVIDIA_NFORCE */
- /*@FIXME to be verified*/ { 2, 0x0000 }, /* AMD768 */
- /*@FIXME to be verified*/ { 3, 0x0001 }, /* AMD8111 */
-};
-
-static struct pci_device_id i810_pci_tbl [] = {
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_5,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, ICH82801AA},
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_5,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, ICH82901AB},
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_440MX,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTEL440MX},
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH2},
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_5,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH3},
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_5,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH4},
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_5,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH5},
- {PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7012,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, SI7012},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, NVIDIA_NFORCE},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, NVIDIA_NFORCE},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, NVIDIA_NFORCE},
- {PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_OPUS_7445,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, AMD768},
- {PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_AUDIO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, AMD8111},
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_5,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH4},
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_18,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, INTELICH4},
- {PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_AUDIO,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, NVIDIA_NFORCE},
- {0,}
-};
-
-MODULE_DEVICE_TABLE (pci, i810_pci_tbl);
-
-#ifdef CONFIG_PM
-#define PM_SUSPENDED(card) (card->pm_suspended)
-#else
-#define PM_SUSPENDED(card) (0)
-#endif
-
-/* "software" or virtual channel, an instance of opened /dev/dsp */
-struct i810_state {
- unsigned int magic;
- struct i810_card *card; /* Card info */
-
- /* single open lock mechanism, only used for recording */
- struct mutex open_mutex;
- wait_queue_head_t open_wait;
-
- /* file mode */
- mode_t open_mode;
-
- /* virtual channel number */
- int virt;
-
-#ifdef CONFIG_PM
- unsigned int pm_saved_dac_rate,pm_saved_adc_rate;
-#endif
- struct dmabuf {
- /* wave sample stuff */
- unsigned int rate;
- unsigned char fmt, enable, trigger;
-
- /* hardware channel */
- struct i810_channel *read_channel;
- struct i810_channel *write_channel;
-
- /* OSS buffer management stuff */
- void *rawbuf;
- dma_addr_t dma_handle;
- unsigned buforder;
- unsigned numfrag;
- unsigned fragshift;
-
- /* our buffer acts like a circular ring */
- unsigned hwptr; /* where dma last started, updated by update_ptr */
- unsigned swptr; /* where driver last clear/filled, updated by read/write */
- int count; /* bytes to be consumed or generated by the dma machine */
- unsigned total_bytes; /* total bytes dmaed by hardware */
-
- unsigned error; /* number of over/underruns */
- wait_queue_head_t wait; /* put process on wait queue when no more space in buffer */
-
- /* redundant, but makes calculations easier */
- /* what the hardware uses */
- unsigned dmasize;
- unsigned fragsize;
- unsigned fragsamples;
-
- /* what we tell the user to expect */
- unsigned userfrags;
- unsigned userfragsize;
-
- /* OSS stuff */
- unsigned mapped:1;
- unsigned ready:1;
- unsigned update_flag;
- unsigned ossfragsize;
- unsigned ossmaxfrags;
- unsigned subdivision;
- } dmabuf;
-};
-
-
-struct i810_card {
- unsigned int magic;
-
- /* We keep i810 cards in a linked list */
- struct i810_card *next;
-
- /* The i810 has a certain amount of cross channel interaction
- so we use a single per card lock */
- spinlock_t lock;
-
- /* Control AC97 access serialization */
- spinlock_t ac97_lock;
-
- /* PCI device stuff */
- struct pci_dev * pci_dev;
- u16 pci_id;
- u16 pci_id_internal; /* used to access card_cap[] */
-#ifdef CONFIG_PM
- u16 pm_suspended;
- int pm_saved_mixer_settings[SOUND_MIXER_NRDEVICES][NR_AC97];
-#endif
- /* soundcore stuff */
- int dev_audio;
-
- /* structures for abstraction of hardware facilities, codecs, banks and channels*/
- u16 ac97_id_map[NR_AC97];
- struct ac97_codec *ac97_codec[NR_AC97];
- struct i810_state *states[NR_HW_CH];
- struct i810_channel *channel; /* 1:1 to states[] but diff. lifetime */
- dma_addr_t chandma;
-
- u16 ac97_features;
- u16 ac97_status;
- u16 channels;
-
- /* hardware resources */
- unsigned long ac97base;
- unsigned long iobase;
- u32 irq;
-
- unsigned long ac97base_mmio_phys;
- unsigned long iobase_mmio_phys;
- u_int8_t __iomem *ac97base_mmio;
- u_int8_t __iomem *iobase_mmio;
-
- int use_mmio;
-
- /* Function support */
- struct i810_channel *(*alloc_pcm_channel)(struct i810_card *);
- struct i810_channel *(*alloc_rec_pcm_channel)(struct i810_card *);
- struct i810_channel *(*alloc_rec_mic_channel)(struct i810_card *);
- void (*free_pcm_channel)(struct i810_card *, int chan);
-
- /* Initialization can possibly take a *very* long time, so use this to block */
- /* attempts to open our devices before we are ready (prevents oopses) */
- int initializing;
-};
-
-/* extract register offset from codec struct */
-#define IO_REG_OFF(codec) (((struct i810_card *) codec->private_data)->ac97_id_map[codec->id])
-
-#define I810_IOREAD(size, type, card, off) \
-({ \
- type val; \
- if (card->use_mmio) \
- val=read##size(card->iobase_mmio+off); \
- else \
- val=in##size(card->iobase+off); \
- val; \
-})
-
-#define I810_IOREADL(card, off) I810_IOREAD(l, u32, card, off)
-#define I810_IOREADW(card, off) I810_IOREAD(w, u16, card, off)
-#define I810_IOREADB(card, off) I810_IOREAD(b, u8, card, off)
-
-#define I810_IOWRITE(size, val, card, off) \
-({ \
- if (card->use_mmio) \
- write##size(val, card->iobase_mmio+off); \
- else \
- out##size(val, card->iobase+off); \
-})
-
-#define I810_IOWRITEL(val, card, off) I810_IOWRITE(l, val, card, off)
-#define I810_IOWRITEW(val, card, off) I810_IOWRITE(w, val, card, off)
-#define I810_IOWRITEB(val, card, off) I810_IOWRITE(b, val, card, off)
-
-#define GET_CIV(card, port) MODULOP2(I810_IOREADB((card), (port) + OFF_CIV), SG_LEN)
-#define GET_LVI(card, port) MODULOP2(I810_IOREADB((card), (port) + OFF_LVI), SG_LEN)
-
-/* set LVI from CIV */
-#define CIV_TO_LVI(card, port, off) \
- I810_IOWRITEB(MODULOP2(GET_CIV((card), (port)) + (off), SG_LEN), (card), (port) + OFF_LVI)
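-
-/*
- * Editor's note -- an illustrative reading of the helpers above, assuming
- * SG_LEN is the per-engine number of scatter-gather descriptors and
- * MODULOP2(x, n) is a power-of-two modulo (x & (n - 1)):
- * CIV_TO_LVI(card, port, 1) reads the Current Index Value, adds 1 modulo
- * SG_LEN, and writes the result into the Last Valid Index register.  For
- * example, with SG_LEN = 32 and CIV = 31 the LVI written is 0, i.e. the
- * engine wraps back to the first descriptor.
- */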
-
-static struct ac97_quirk ac97_quirks[] __devinitdata = {
- {
- .vendor = 0x0e11,
- .device = 0x00b8,
- .name = "Compaq Evo D510C",
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x1028,
- .device = 0x00d8,
- .name = "Dell Precision 530", /* AD1885 */
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x1028,
- .device = 0x0126,
- .name = "Dell Optiplex GX260", /* AD1981A */
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x1028,
- .device = 0x012d,
- .name = "Dell Precision 450", /* AD1981B*/
- .type = AC97_TUNE_HP_ONLY
- },
- { /* FIXME: which codec? */
- .vendor = 0x103c,
- .device = 0x00c3,
- .name = "Hewlett-Packard onboard",
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x103c,
- .device = 0x12f1,
- .name = "HP xw8200", /* AD1981B*/
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x103c,
- .device = 0x3008,
- .name = "HP xw4200", /* AD1981B*/
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x10f1,
- .device = 0x2665,
- .name = "Fujitsu-Siemens Celsius", /* AD1981? */
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x10f1,
- .device = 0x2885,
- .name = "AMD64 Mobo", /* ALC650 */
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x110a,
- .device = 0x0056,
- .name = "Fujitsu-Siemens Scenic", /* AD1981? */
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x11d4,
- .device = 0x5375,
- .name = "ADI AD1985 (discrete)",
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x1462,
- .device = 0x5470,
- .name = "MSI P4 ATX 645 Ultra",
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x1734,
- .device = 0x0088,
- .name = "Fujitsu-Siemens D1522", /* AD1981 */
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x8086,
- .device = 0x4856,
- .name = "Intel D845WN (82801BA)",
- .type = AC97_TUNE_SWAP_HP
- },
- {
- .vendor = 0x8086,
- .device = 0x4d44,
- .name = "Intel D850EMV2", /* AD1885 */
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x8086,
- .device = 0x4d56,
- .name = "Intel ICH/AD1885",
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x1028,
- .device = 0x012d,
- .name = "Dell Precision 450", /* AD1981B*/
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x103c,
- .device = 0x3008,
- .name = "HP xw4200", /* AD1981B*/
- .type = AC97_TUNE_HP_ONLY
- },
- {
- .vendor = 0x103c,
- .device = 0x12f1,
- .name = "HP xw8200", /* AD1981B*/
- .type = AC97_TUNE_HP_ONLY
- },
- { } /* terminator */
-};
-
-static struct i810_card *devs = NULL;
-
-static int i810_open_mixdev(struct inode *inode, struct file *file);
-static int i810_ioctl_mixdev(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg);
-static u16 i810_ac97_get(struct ac97_codec *dev, u8 reg);
-static void i810_ac97_set(struct ac97_codec *dev, u8 reg, u16 data);
-static u16 i810_ac97_get_mmio(struct ac97_codec *dev, u8 reg);
-static void i810_ac97_set_mmio(struct ac97_codec *dev, u8 reg, u16 data);
-static u16 i810_ac97_get_io(struct ac97_codec *dev, u8 reg);
-static void i810_ac97_set_io(struct ac97_codec *dev, u8 reg, u16 data);
-
-static struct i810_channel *i810_alloc_pcm_channel(struct i810_card *card)
-{
- if(card->channel[1].used==1)
- return NULL;
- card->channel[1].used=1;
- return &card->channel[1];
-}
-
-static struct i810_channel *i810_alloc_rec_pcm_channel(struct i810_card *card)
-{
- if(card->channel[0].used==1)
- return NULL;
- card->channel[0].used=1;
- return &card->channel[0];
-}
-
-static struct i810_channel *i810_alloc_rec_mic_channel(struct i810_card *card)
-{
- if(card->channel[2].used==1)
- return NULL;
- card->channel[2].used=1;
- return &card->channel[2];
-}
-
-static void i810_free_pcm_channel(struct i810_card *card, int channel)
-{
- card->channel[channel].used=0;
-}
-
-static int i810_valid_spdif_rate ( struct ac97_codec *codec, int rate )
-{
- unsigned long id = 0L;
-
- id = (i810_ac97_get(codec, AC97_VENDOR_ID1) << 16);
- id |= i810_ac97_get(codec, AC97_VENDOR_ID2) & 0xffff;
-#ifdef DEBUG
- printk ( "i810_audio: codec = %s, codec_id = 0x%08lx\n", codec->name, id);
-#endif
- switch ( id ) {
- case 0x41445361: /* AD1886 */
- if (rate == 48000) {
- return 1;
- }
- break;
- default: /* all other codecs, until we know otherwise */
- if (rate == 48000 || rate == 44100 || rate == 32000) {
- return 1;
- }
- break;
- }
- return (0);
-}
-
-/* i810_set_spdif_output
- *
- * Configure the S/PDIF output transmitter. When we turn on
- * S/PDIF, we turn off the analog output. This may not be
- * the right thing to do.
- *
- * Assumptions:
- * The DSP sample rate must already be set to a supported
- * S/PDIF rate (32kHz, 44.1kHz, or 48kHz) or we abort.
- */
-static int i810_set_spdif_output(struct i810_state *state, int slots, int rate)
-{
- int vol;
- int aud_reg;
- int r = 0;
- struct ac97_codec *codec = state->card->ac97_codec[0];
-
- if(!codec->codec_ops->digital) {
- state->card->ac97_status &= ~SPDIF_ON;
- } else {
- if ( slots == -1 ) { /* Turn off S/PDIF */
- codec->codec_ops->digital(codec, 0, 0, 0);
- /* If the volume wasn't muted before we turned on S/PDIF, unmute it */
- if ( !(state->card->ac97_status & VOL_MUTED) ) {
- aud_reg = i810_ac97_get(codec, AC97_MASTER_VOL_STEREO);
- i810_ac97_set(codec, AC97_MASTER_VOL_STEREO, (aud_reg & ~VOL_MUTED));
- }
- state->card->ac97_status &= ~(VOL_MUTED | SPDIF_ON);
- return 0;
- }
-
- vol = i810_ac97_get(codec, AC97_MASTER_VOL_STEREO);
- state->card->ac97_status = vol & VOL_MUTED;
-
- r = codec->codec_ops->digital(codec, slots, rate, 0);
-
- if(r)
- state->card->ac97_status |= SPDIF_ON;
- else
- state->card->ac97_status &= ~SPDIF_ON;
-
- /* Mute the analog output */
- /* Should this only mute the PCM volume??? */
- i810_ac97_set(codec, AC97_MASTER_VOL_STEREO, (vol | VOL_MUTED));
- }
- return r;
-}
-
-/* i810_set_dac_channels
- *
- * Configure the codec's multi-channel DACs
- *
- * The logic is backwards. Setting the bit to 1 turns off the DAC.
- *
- * What about the ICH? We currently configure it using the
- * SNDCTL_DSP_CHANNELS ioctl. If we're turning on the DAC,
- * does that imply that we want the ICH set to support
- * these channels?
- *
- * TODO:
- * validate that the codec really supports these DACs
- * before turning them on.
- */
-static void i810_set_dac_channels(struct i810_state *state, int channel)
-{
- int aud_reg;
- struct ac97_codec *codec = state->card->ac97_codec[0];
-
- /* No codec, no setup */
-
- if(codec == NULL)
- return;
-
- aud_reg = i810_ac97_get(codec, AC97_EXTENDED_STATUS);
- aud_reg |= AC97_EA_PRI | AC97_EA_PRJ | AC97_EA_PRK;
- state->card->ac97_status &= ~(SURR_ON | CENTER_LFE_ON);
-
- switch ( channel ) {
- case 2: /* always enabled */
- break;
- case 4:
- aud_reg &= ~AC97_EA_PRJ;
- state->card->ac97_status |= SURR_ON;
- break;
- case 6:
- aud_reg &= ~(AC97_EA_PRJ | AC97_EA_PRI | AC97_EA_PRK);
- state->card->ac97_status |= SURR_ON | CENTER_LFE_ON;
- break;
- default:
- break;
- }
- i810_ac97_set(codec, AC97_EXTENDED_STATUS, aud_reg);
-
-}
-
-
-/* set playback sample rate */
-static unsigned int i810_set_dac_rate(struct i810_state * state, unsigned int rate)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- u32 new_rate;
- struct ac97_codec *codec=state->card->ac97_codec[0];
-
- if(!(state->card->ac97_features&0x0001))
- {
- dmabuf->rate = clocking;
-#ifdef DEBUG
- printk("Asked for %d Hz, but ac97_features says we only do %dHz. Sorry!\n",
- rate,clocking);
-#endif
- return clocking;
- }
-
- if (rate > 48000)
- rate = 48000;
- if (rate < 8000)
- rate = 8000;
- dmabuf->rate = rate;
-
- /*
- * Adjust for misclocked crap
- */
- rate = ( rate * clocking)/48000;
- if(strict_clocking && rate < 8000) {
- rate = 8000;
- dmabuf->rate = (rate * 48000)/clocking;
- }
-
- new_rate=ac97_set_dac_rate(codec, rate);
- if(new_rate != rate) {
- dmabuf->rate = (new_rate * 48000)/clocking;
- }
-#ifdef DEBUG
- printk("i810_audio: called i810_set_dac_rate : asked for %d, got %d\n", rate, dmabuf->rate);
-#endif
- rate = new_rate;
- return dmabuf->rate;
-}
-
-/* set recording sample rate */
-static unsigned int i810_set_adc_rate(struct i810_state * state, unsigned int rate)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- u32 new_rate;
- struct ac97_codec *codec=state->card->ac97_codec[0];
-
- if(!(state->card->ac97_features&0x0001))
- {
- dmabuf->rate = clocking;
- return clocking;
- }
-
- if (rate > 48000)
- rate = 48000;
- if (rate < 8000)
- rate = 8000;
- dmabuf->rate = rate;
-
- /*
- * Adjust for misclocked crap
- */
-
- rate = ( rate * clocking)/48000;
- if(strict_clocking && rate < 8000) {
- rate = 8000;
- dmabuf->rate = (rate * 48000)/clocking;
- }
-
- new_rate = ac97_set_adc_rate(codec, rate);
-
- if(new_rate != rate) {
- dmabuf->rate = (new_rate * 48000)/clocking;
- rate = new_rate;
- }
-#ifdef DEBUG
- printk("i810_audio: called i810_set_adc_rate : rate = %d/%d\n", dmabuf->rate, rate);
-#endif
- return dmabuf->rate;
-}
-
-/* get current playback/recording dma buffer pointer (byte offset from LBA),
- called with spinlock held! */
-
-static inline unsigned i810_get_dma_addr(struct i810_state *state, int rec)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- unsigned int civ, offset, port, port_picb, bytes = 2;
-
- if (!dmabuf->enable)
- return 0;
-
- if (rec)
- port = dmabuf->read_channel->port;
- else
- port = dmabuf->write_channel->port;
-
- if(state->card->pci_id == PCI_DEVICE_ID_SI_7012) {
- port_picb = port + OFF_SR;
- bytes = 1;
- } else
- port_picb = port + OFF_PICB;
-
- do {
- civ = GET_CIV(state->card, port);
- offset = I810_IOREADW(state->card, port_picb);
- /* Must have a delay here! */
- if(offset == 0)
- udelay(1);
- /* Reread both registers and make sure that the total
- * offset from the first reading to the second is 0.
- * There is an issue with SiS hardware where it will count
- * picb down to 0, then update civ to the next value,
- * then set the new picb to fragsize bytes. We can catch
- * it between the civ update and the picb update, making
- * it look as though we are 1 fragsize ahead of where we
- * are. The next time we get the address, though, it will
- * be back in the right place, and we will suddenly think
- * we just went forward dmasize - fragsize bytes, causing
- * totally stupid *huge* dma overrun messages. We are
- * assuming that the 1us delay is more than long enough
- * that we won't have to worry about the chip still being
- * out of sync with reality ;-)
- */
- } while (civ != GET_CIV(state->card, port) || offset != I810_IOREADW(state->card, port_picb));
-
- return (((civ + 1) * dmabuf->fragsize - (bytes * offset))
- % dmabuf->dmasize);
-}
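-
-/*
- * Editor's illustration (not from the original driver): a worked example of
- * the position formula above.  Assume a 64 KiB DMA buffer split into 32
- * fragments of 2048 bytes, CIV = 3, and the engine reporting offset = 512
- * samples still to go in the current fragment; with bytes = 2 the returned
- * byte offset is
- *
- *	((3 + 1) * 2048 - 2 * 512) % 65536 = 7168
- *
- * i.e. 1024 bytes short of the end of fragment 3.  For the SiS 7012 the
- * count register is read with bytes = 1, presumably because it already
- * counts bytes rather than 16-bit samples.
- */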
-
-/* Stop recording (lock held) */
-static inline void __stop_adc(struct i810_state *state)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- struct i810_card *card = state->card;
-
- dmabuf->enable &= ~ADC_RUNNING;
- I810_IOWRITEB(0, card, PI_CR);
- // wait for the card to acknowledge shutdown
- while( I810_IOREADB(card, PI_CR) != 0 ) ;
- // now clear any latent interrupt bits (like the halt bit)
- if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- I810_IOWRITEB( I810_IOREADB(card, PI_PICB), card, PI_PICB );
- else
- I810_IOWRITEB( I810_IOREADB(card, PI_SR), card, PI_SR );
- I810_IOWRITEL( I810_IOREADL(card, GLOB_STA) & INT_PI, card, GLOB_STA);
-}
-
-static void stop_adc(struct i810_state *state)
-{
- struct i810_card *card = state->card;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- __stop_adc(state);
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-static inline void __start_adc(struct i810_state *state)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
-
- if (dmabuf->count < dmabuf->dmasize && dmabuf->ready && !dmabuf->enable &&
- (dmabuf->trigger & PCM_ENABLE_INPUT)) {
- dmabuf->enable |= ADC_RUNNING;
- // Interrupt enable, LVI enable, DMA enable
- I810_IOWRITEB(0x10 | 0x04 | 0x01, state->card, PI_CR);
- }
-}
-
-static void start_adc(struct i810_state *state)
-{
- struct i810_card *card = state->card;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- __start_adc(state);
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-/* stop playback (lock held) */
-static inline void __stop_dac(struct i810_state *state)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- struct i810_card *card = state->card;
-
- dmabuf->enable &= ~DAC_RUNNING;
- I810_IOWRITEB(0, card, PO_CR);
- // wait for the card to acknowledge shutdown
- while( I810_IOREADB(card, PO_CR) != 0 ) ;
- // now clear any latent interrupt bits (like the halt bit)
- if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- I810_IOWRITEB( I810_IOREADB(card, PO_PICB), card, PO_PICB );
- else
- I810_IOWRITEB( I810_IOREADB(card, PO_SR), card, PO_SR );
- I810_IOWRITEL( I810_IOREADL(card, GLOB_STA) & INT_PO, card, GLOB_STA);
-}
-
-static void stop_dac(struct i810_state *state)
-{
- struct i810_card *card = state->card;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- __stop_dac(state);
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-static inline void __start_dac(struct i810_state *state)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
-
- if (dmabuf->count > 0 && dmabuf->ready && !dmabuf->enable &&
- (dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
- dmabuf->enable |= DAC_RUNNING;
- // Interrupt enable, LVI enable, DMA enable
- I810_IOWRITEB(0x10 | 0x04 | 0x01, state->card, PO_CR);
- }
-}
-static void start_dac(struct i810_state *state)
-{
- struct i810_card *card = state->card;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- __start_dac(state);
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
-#define DMABUF_DEFAULTORDER (16-PAGE_SHIFT)
-#define DMABUF_MINORDER 1
-
-/* allocate DMA buffer; playback and recording buffers should be allocated separately */
-static int alloc_dmabuf(struct i810_state *state)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- void *rawbuf= NULL;
- int order, size;
- struct page *page, *pend;
-
- /* If we don't have any oss frag params, then use our default ones */
- if(dmabuf->ossmaxfrags == 0)
- dmabuf->ossmaxfrags = 4;
- if(dmabuf->ossfragsize == 0)
- dmabuf->ossfragsize = (PAGE_SIZE<<DMABUF_DEFAULTORDER)/dmabuf->ossmaxfrags;
- size = dmabuf->ossfragsize * dmabuf->ossmaxfrags;
-
- if(dmabuf->rawbuf && (PAGE_SIZE << dmabuf->buforder) == size)
- return 0;
- /* alloc enough to satisfy the oss params */
- for (order = DMABUF_DEFAULTORDER; order >= DMABUF_MINORDER; order--) {
- if ( (PAGE_SIZE<<order) > size )
- continue;
- if ((rawbuf = pci_alloc_consistent(state->card->pci_dev,
- PAGE_SIZE << order,
- &dmabuf->dma_handle)))
- break;
- }
- if (!rawbuf)
- return -ENOMEM;
-
-
-#ifdef DEBUG
- printk("i810_audio: allocated %ld (order = %d) bytes at %p\n",
- PAGE_SIZE << order, order, rawbuf);
-#endif
-
- dmabuf->ready = dmabuf->mapped = 0;
- dmabuf->rawbuf = rawbuf;
- dmabuf->buforder = order;
-
- /* now mark the pages as reserved; otherwise remap_pfn_range doesn't do what we want */
- pend = virt_to_page(rawbuf + (PAGE_SIZE << order) - 1);
- for (page = virt_to_page(rawbuf); page <= pend; page++)
- SetPageReserved(page);
-
- return 0;
-}
-
-/* free DMA buffer */
-static void dealloc_dmabuf(struct i810_state *state)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- struct page *page, *pend;
-
- if (dmabuf->rawbuf) {
- /* undo marking the pages as reserved */
- pend = virt_to_page(dmabuf->rawbuf + (PAGE_SIZE << dmabuf->buforder) - 1);
- for (page = virt_to_page(dmabuf->rawbuf); page <= pend; page++)
- ClearPageReserved(page);
- pci_free_consistent(state->card->pci_dev, PAGE_SIZE << dmabuf->buforder,
- dmabuf->rawbuf, dmabuf->dma_handle);
- }
- dmabuf->rawbuf = NULL;
- dmabuf->mapped = dmabuf->ready = 0;
-}
-
-static int prog_dmabuf(struct i810_state *state, unsigned rec)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- struct i810_channel *c;
- struct sg_item *sg;
- unsigned long flags;
- int ret;
- unsigned fragint;
- int i;
-
- spin_lock_irqsave(&state->card->lock, flags);
- if(dmabuf->enable & DAC_RUNNING)
- __stop_dac(state);
- if(dmabuf->enable & ADC_RUNNING)
- __stop_adc(state);
- dmabuf->total_bytes = 0;
- dmabuf->count = dmabuf->error = 0;
- dmabuf->swptr = dmabuf->hwptr = 0;
- spin_unlock_irqrestore(&state->card->lock, flags);
-
- /* allocate DMA buffer, let alloc_dmabuf determine if we are already
- * allocated well enough or if we should replace the current buffer
- * (assuming one is already allocated; if it isn't, then allocate it).
- */
- if ((ret = alloc_dmabuf(state)))
- return ret;
-
- /* FIXME: figure out all this OSS fragment stuff */
- /* I did, it now does what it should according to the OSS API. DL */
- /* We may not have realloced our dmabuf, but the fragment size to
- * fragment number ratio may have changed, so go ahead and reprogram
- * things
- */
- dmabuf->dmasize = PAGE_SIZE << dmabuf->buforder;
- dmabuf->numfrag = SG_LEN;
- dmabuf->fragsize = dmabuf->dmasize/dmabuf->numfrag;
- dmabuf->fragsamples = dmabuf->fragsize >> 1;
- dmabuf->fragshift = ffs(dmabuf->fragsize) - 1;
- dmabuf->userfragsize = dmabuf->ossfragsize;
- dmabuf->userfrags = dmabuf->dmasize/dmabuf->ossfragsize;
-
- memset(dmabuf->rawbuf, 0, dmabuf->dmasize);
-
- if(dmabuf->ossmaxfrags == 4) {
- fragint = 8;
- } else if (dmabuf->ossmaxfrags == 8) {
- fragint = 4;
- } else if (dmabuf->ossmaxfrags == 16) {
- fragint = 2;
- } else {
- fragint = 1;
- }
- /*
- * Now set up the ring
- */
- if(dmabuf->read_channel)
- c = dmabuf->read_channel;
- else
- c = dmabuf->write_channel;
- while(c != NULL) {
- sg=&c->sg[0];
- /*
- * Load up 32 sg entries and take an interrupt at half
- * way (we might want more interrupts later..)
- */
-
- for(i=0;i<dmabuf->numfrag;i++)
- {
- sg->busaddr=(u32)dmabuf->dma_handle+dmabuf->fragsize*i;
- // the card will always be doing 16bit stereo
- sg->control=dmabuf->fragsamples;
- if(state->card->pci_id == PCI_DEVICE_ID_SI_7012)
- sg->control <<= 1;
- sg->control|=CON_BUFPAD;
- // set us up to get IOC interrupts as often as needed to
- // satisfy numfrag requirements, no more
- if( ((i+1) % fragint) == 0) {
- sg->control|=CON_IOC;
- }
- sg++;
- }
- spin_lock_irqsave(&state->card->lock, flags);
- I810_IOWRITEB(2, state->card, c->port+OFF_CR); /* reset DMA machine */
- while( I810_IOREADB(state->card, c->port+OFF_CR) & 0x02 ) ;
- I810_IOWRITEL((u32)state->card->chandma +
- c->num*sizeof(struct i810_channel),
- state->card, c->port+OFF_BDBAR);
- CIV_TO_LVI(state->card, c->port, 0);
-
- spin_unlock_irqrestore(&state->card->lock, flags);
-
- if(c != dmabuf->write_channel)
- c = dmabuf->write_channel;
- else
- c = NULL;
- }
-
- /* set the ready flag for the dma buffer */
- dmabuf->ready = 1;
-
-#ifdef DEBUG
- printk("i810_audio: prog_dmabuf, sample rate = %d, format = %d,\n\tnumfrag = %d, "
- "fragsize = %d dmasize = %d\n",
- dmabuf->rate, dmabuf->fmt, dmabuf->numfrag,
- dmabuf->fragsize, dmabuf->dmasize);
-#endif
-
- return 0;
-}
-
-static void __i810_update_lvi(struct i810_state *state, int rec)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- int x, port;
- int trigger;
- int count, fragsize;
- void (*start)(struct i810_state *);
-
- count = dmabuf->count;
- if (rec) {
- port = dmabuf->read_channel->port;
- trigger = PCM_ENABLE_INPUT;
- start = __start_adc;
- count = dmabuf->dmasize - count;
- } else {
- port = dmabuf->write_channel->port;
- trigger = PCM_ENABLE_OUTPUT;
- start = __start_dac;
- }
-
- /* Do not process partial fragments. */
- fragsize = dmabuf->fragsize;
- if (count < fragsize)
- return;
-
- /* if we are currently stopped, then our CIV is actually set to our
- * *last* sg segment and we are ready to wrap to the next. However,
- * if we set our LVI to the last sg segment, then it won't wrap to
- * the next sg segment; it won't even start. So, instead, when
- * we are stopped, we both set the LVI value and increment
- * the CIV value to the next sg segment to be played so that when
- * we call start, things will operate properly. Since the CIV can't
- * be written to directly for this purpose, we set the LVI to CIV + 1
- * temporarily. Once the engine has started we set the LVI to its
- * final value.
- */
- if (!dmabuf->enable && dmabuf->ready) {
- if (!(dmabuf->trigger & trigger))
- return;
-
- CIV_TO_LVI(state->card, port, 1);
-
- start(state);
- while (!(I810_IOREADB(state->card, port + OFF_CR) & ((1<<4) | (1<<2))))
- ;
- }
-
- /* MASKP2(swptr, fragsize) - 1 is the tail of our transfer */
- x = MODULOP2(MASKP2(dmabuf->swptr, fragsize) - 1, dmabuf->dmasize);
- x >>= dmabuf->fragshift;
- I810_IOWRITEB(x, state->card, port + OFF_LVI);
-}
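-
-/*
- * Editor's illustration (not from the original driver): assuming
- * MASKP2(x, n) rounds x down to a multiple of the power-of-two n and
- * MODULOP2(x, n) is x modulo n, the LVI computation above with
- * fragsize = 2048 (fragshift = 11), dmasize = 65536 and swptr = 5000
- * gives MASKP2(5000, 2048) - 1 = 4095, and 4095 >> 11 = 1: the LVI is
- * set to fragment 1, the last fragment that is completely filled, since
- * fragment 2 only holds part of the pending data.
- */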
-
-static void i810_update_lvi(struct i810_state *state, int rec)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- unsigned long flags;
-
- if(!dmabuf->ready)
- return;
- spin_lock_irqsave(&state->card->lock, flags);
- __i810_update_lvi(state, rec);
- spin_unlock_irqrestore(&state->card->lock, flags);
-}
-
-/* update buffer management pointers, especially dmabuf->count and dmabuf->hwptr */
-static void i810_update_ptr(struct i810_state *state)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- unsigned hwptr;
- unsigned fragmask, dmamask;
- int diff;
-
- fragmask = MASKP2(~0, dmabuf->fragsize);
- dmamask = MODULOP2(~0, dmabuf->dmasize);
-
- /* error handling and process wake up for ADC */
- if (dmabuf->enable == ADC_RUNNING) {
- /* update hardware pointer */
- hwptr = i810_get_dma_addr(state, 1) & fragmask;
- diff = (hwptr - dmabuf->hwptr) & dmamask;
-#if defined(DEBUG_INTERRUPTS) || defined(DEBUG_MMAP)
- printk("ADC HWP %d,%d,%d\n", hwptr, dmabuf->hwptr, diff);
-#endif
- dmabuf->hwptr = hwptr;
- dmabuf->total_bytes += diff;
- dmabuf->count += diff;
- if (dmabuf->count > dmabuf->dmasize) {
- /* buffer underrun or buffer overrun */
- /* this is normal for the end of a read */
- /* only give an error if we went past the */
- /* last valid sg entry */
- if (GET_CIV(state->card, PI_BASE) !=
- GET_LVI(state->card, PI_BASE)) {
- printk(KERN_WARNING "i810_audio: DMA overrun on read\n");
- dmabuf->error++;
- }
- }
- if (diff)
- wake_up(&dmabuf->wait);
- }
- /* error handling and process wake up for DAC */
- if (dmabuf->enable == DAC_RUNNING) {
- /* update hardware pointer */
- hwptr = i810_get_dma_addr(state, 0) & fragmask;
- diff = (hwptr - dmabuf->hwptr) & dmamask;
-#if defined(DEBUG_INTERRUPTS) || defined(DEBUG_MMAP)
- printk("DAC HWP %d,%d,%d\n", hwptr, dmabuf->hwptr, diff);
-#endif
- dmabuf->hwptr = hwptr;
- dmabuf->total_bytes += diff;
- dmabuf->count -= diff;
- if (dmabuf->count < 0) {
- /* buffer underrun or buffer overrun */
- /* this is normal for the end of a write */
- /* only give an error if we went past the */
- /* last valid sg entry */
- if (GET_CIV(state->card, PO_BASE) !=
- GET_LVI(state->card, PO_BASE)) {
- printk(KERN_WARNING "i810_audio: DMA overrun on write\n");
- printk("i810_audio: CIV %d, LVI %d, hwptr %x, "
- "count %d\n",
- GET_CIV(state->card, PO_BASE),
- GET_LVI(state->card, PO_BASE),
- dmabuf->hwptr, dmabuf->count);
- dmabuf->error++;
- }
- }
- if (diff)
- wake_up(&dmabuf->wait);
- }
-}
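-
-/*
- * Editor's illustration (not from the original driver): the
- * "(hwptr - dmabuf->hwptr) & dmamask" arithmetic above handles buffer
- * wrap-around, assuming dmamask is dmasize - 1 for a power-of-two dmasize.
- * For example, with dmasize = 65536, an old hwptr of 63488 and a new hwptr
- * of 2048 yield
- *
- *	(2048 - 63488) & 65535 = 4096
- *
- * i.e. the hardware advanced two 2048-byte fragments across the end of
- * the ring.
- */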
-
-static inline int i810_get_free_write_space(struct i810_state *state)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- int free;
-
- i810_update_ptr(state);
- // catch underruns during playback
- if (dmabuf->count < 0) {
- dmabuf->count = 0;
- dmabuf->swptr = dmabuf->hwptr;
- }
- free = dmabuf->dmasize - dmabuf->count;
- if(free < 0)
- return(0);
- return(free);
-}
-
-static inline int i810_get_available_read_data(struct i810_state *state)
-{
- struct dmabuf *dmabuf = &state->dmabuf;
- int avail;
-
- i810_update_ptr(state);
- // catch overruns during record
- if (dmabuf->count > dmabuf->dmasize) {
- dmabuf->count = dmabuf->dmasize;
- dmabuf->swptr = dmabuf->hwptr;
- }
- avail = dmabuf->count;
- if(avail < 0)
- return(0);
- return(avail);
-}
-
-static inline void fill_partial_frag(struct dmabuf *dmabuf)
-{
- unsigned fragsize;
- unsigned swptr, len;
-
- fragsize = dmabuf->fragsize;
- swptr = dmabuf->swptr;
- len = fragsize - MODULOP2(dmabuf->swptr, fragsize);
- if (len == fragsize)
- return;
-
- memset(dmabuf->rawbuf + swptr, '\0', len);
- dmabuf->swptr = MODULOP2(swptr + len, dmabuf->dmasize);
- dmabuf->count += len;
-}
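-
-/*
- * Editor's illustration (not from the original driver): assuming
- * MODULOP2(x, n) is x modulo a power-of-two n, fill_partial_frag() with
- * fragsize = 4096 and swptr = 5000 computes len = 4096 - (5000 % 4096)
- * = 4096 - 904 = 3192, zero-fills those 3192 bytes, advances swptr to the
- * fragment boundary at 8192, and grows dmabuf->count by the same 3192
- * bytes, padding the final partial fragment with silence before the DAC
- * is drained.
- */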
-
-static int drain_dac(struct i810_state *state, int signals_allowed)
-{
- DECLARE_WAITQUEUE(wait, current);
- struct dmabuf *dmabuf = &state->dmabuf;
- unsigned long flags;
- unsigned long tmo;
- int count;
-
- if (!dmabuf->ready)
- return 0;
- if(dmabuf->mapped) {
- stop_dac(state);
- return 0;
- }
-
- spin_lock_irqsave(&state->card->lock, flags);
-
- fill_partial_frag(dmabuf);
-
- /*
- * This will make sure that our LVI is correct, that our
- * pointer is updated, and that the DAC is running. We
- * have to force the setting of dmabuf->trigger to avoid
- * any possible deadlocks.
- */
- dmabuf->trigger = PCM_ENABLE_OUTPUT;
- __i810_update_lvi(state, 0);
-
- spin_unlock_irqrestore(&state->card->lock, flags);
-
- add_wait_queue(&dmabuf->wait, &wait);
- for (;;) {
-
- spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
- count = dmabuf->count;
-
- /* It seems that we have to set the current state to
- * TASK_INTERRUPTIBLE every time to make the process
- * really go to sleep. This also has to be *after* the
- * update_ptr() call because update_ptr is likely to
- * do a wake_up() which will unset this before we ever
- * try to sleep, resulting in a tight loop in this code
- * instead of actually sleeping and waiting for an
- * interrupt to wake us up!
- */
- __set_current_state(signals_allowed ?
- TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
- spin_unlock_irqrestore(&state->card->lock, flags);
-
- if (count <= 0)
- break;
-
- if (signal_pending(current) && signals_allowed) {
- break;
- }
-
- /*
- * set the timeout to significantly longer than it *should*
- * take for the DAC to drain the DMA buffer
- */
- tmo = (count * HZ) / (dmabuf->rate);
- if (!schedule_timeout(tmo >= 2 ? tmo : 2)){
- printk(KERN_ERR "i810_audio: drain_dac, dma timeout?\n");
- count = 0;
- break;
- }
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&dmabuf->wait, &wait);
- if(count > 0 && signal_pending(current) && signals_allowed)
- return -ERESTARTSYS;
- stop_dac(state);
- return 0;
-}
-
-static void i810_channel_interrupt(struct i810_card *card)
-{
- int i, count;
-
-#ifdef DEBUG_INTERRUPTS
- printk("CHANNEL ");
-#endif
- for(i=0;i<NR_HW_CH;i++)
- {
- struct i810_state *state = card->states[i];
- struct i810_channel *c;
- struct dmabuf *dmabuf;
- unsigned long port;
- u16 status;
-
- if(!state)
- continue;
- if(!state->dmabuf.ready)
- continue;
- dmabuf = &state->dmabuf;
- if(dmabuf->enable & DAC_RUNNING) {
- c=dmabuf->write_channel;
- } else if(dmabuf->enable & ADC_RUNNING) {
- c=dmabuf->read_channel;
- } else /* This can occur going from R/W to close */
- continue;
-
- port = c->port;
-
- if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- status = I810_IOREADW(card, port + OFF_PICB);
- else
- status = I810_IOREADW(card, port + OFF_SR);
-
-#ifdef DEBUG_INTERRUPTS
- printk("NUM %d PORT %X IRQ ( ST%d ", c->num, c->port, status);
-#endif
- if(status & DMA_INT_COMPLETE)
- {
- /* only wake_up() waiters if this interrupt signals that
- * we are beyond a userfragsize of data open or
- * available, and i810_update_ptr() does that for
- * us
- */
- i810_update_ptr(state);
-#ifdef DEBUG_INTERRUPTS
- printk("COMP %d ", dmabuf->hwptr /
- dmabuf->fragsize);
-#endif
- }
- if(status & (DMA_INT_LVI | DMA_INT_DCH))
- {
- /* wake_up() unconditionally on LVI and DCH */
- i810_update_ptr(state);
- wake_up(&dmabuf->wait);
-#ifdef DEBUG_INTERRUPTS
- if(status & DMA_INT_LVI)
- printk("LVI ");
- if(status & DMA_INT_DCH)
- printk("DCH -");
-#endif
- count = dmabuf->count;
- if(dmabuf->enable & ADC_RUNNING)
- count = dmabuf->dmasize - count;
- if (count >= (int)dmabuf->fragsize) {
- I810_IOWRITEB(I810_IOREADB(card, port+OFF_CR) | 1, card, port+OFF_CR);
-#ifdef DEBUG_INTERRUPTS
- printk(" CONTINUE ");
-#endif
- } else {
- if (dmabuf->enable & DAC_RUNNING)
- __stop_dac(state);
- if (dmabuf->enable & ADC_RUNNING)
- __stop_adc(state);
- dmabuf->enable = 0;
-#ifdef DEBUG_INTERRUPTS
- printk(" STOP ");
-#endif
- }
- }
- if(card->pci_id == PCI_DEVICE_ID_SI_7012)
- I810_IOWRITEW(status & DMA_INT_MASK, card, port + OFF_PICB);
- else
- I810_IOWRITEW(status & DMA_INT_MASK, card, port + OFF_SR);
- }
-#ifdef DEBUG_INTERRUPTS
- printk(")\n");
-#endif
-}
-
-static irqreturn_t i810_interrupt(int irq, void *dev_id)
-{
- struct i810_card *card = dev_id;
- u32 status;
-
- spin_lock(&card->lock);
-
- status = I810_IOREADL(card, GLOB_STA);
-
- if(!(status & INT_MASK))
- {
- spin_unlock(&card->lock);
- return IRQ_NONE; /* not for us */
- }
-
- if(status & (INT_PO|INT_PI|INT_MC))
- i810_channel_interrupt(card);
-
- /* clear 'em */
- I810_IOWRITEL(status & INT_MASK, card, GLOB_STA);
- spin_unlock(&card->lock);
- return IRQ_HANDLED;
-}
-
-/* in this loop, dmabuf.count signifies the amount of data that is
- waiting to be copied to the user's buffer. It is filled by the dma
- machine and drained by this loop. */
-
-static ssize_t i810_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
-{
- struct i810_state *state = (struct i810_state *)file->private_data;
- struct i810_card *card=state ? state->card : NULL;
- struct dmabuf *dmabuf = &state->dmabuf;
- ssize_t ret;
- unsigned long flags;
- unsigned int swptr;
- int cnt;
- int pending;
- DECLARE_WAITQUEUE(waita, current);
-
-#ifdef DEBUG2
- printk("i810_audio: i810_read called, count = %d\n", count);
-#endif
-
- if (dmabuf->mapped)
- return -ENXIO;
- if (dmabuf->enable & DAC_RUNNING)
- return -ENODEV;
- if (!dmabuf->read_channel) {
- dmabuf->ready = 0;
- dmabuf->read_channel = card->alloc_rec_pcm_channel(card);
- if (!dmabuf->read_channel) {
- return -EBUSY;
- }
- }
- if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
- return ret;
- if (!access_ok(VERIFY_WRITE, buffer, count))
- return -EFAULT;
- ret = 0;
-
- pending = 0;
-
- add_wait_queue(&dmabuf->wait, &waita);
- while (count > 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&card->lock, flags);
- if (PM_SUSPENDED(card)) {
- spin_unlock_irqrestore(&card->lock, flags);
- schedule();
- if (signal_pending(current)) {
- if (!ret) ret = -EAGAIN;
- break;
- }
- continue;
- }
- cnt = i810_get_available_read_data(state);
- swptr = dmabuf->swptr;
- // this is to make the copy_to_user simpler below
- if(cnt > (dmabuf->dmasize - swptr))
- cnt = dmabuf->dmasize - swptr;
- spin_unlock_irqrestore(&card->lock, flags);
-
- if (cnt > count)
- cnt = count;
- if (cnt <= 0) {
- unsigned long tmo;
- /*
- * Don't let us deadlock. The ADC won't start if
- * dmabuf->trigger isn't set. A call to SETTRIGGER
- * could have turned it off after we set it to on
- * previously.
- */
- dmabuf->trigger = PCM_ENABLE_INPUT;
- /*
- * This does three things. Updates LVI to be correct,
- * makes sure the ADC is running, and updates the
- * hwptr.
- */
- i810_update_lvi(state,1);
- if (file->f_flags & O_NONBLOCK) {
- if (!ret) ret = -EAGAIN;
- goto done;
- }
- /* Set the timeout to how long it would take to fill
- * two of our buffers. If we haven't been woken up
- * by then, we know something is wrong.
- */
- tmo = (dmabuf->dmasize * HZ * 2) / (dmabuf->rate * 4);
- /* There are two situations when schedule_timeout returns: one is when
- the interrupt is serviced correctly and the process is woken up by the
- ISR ON TIME. The other is when the timeout expires, which means that
- either the interrupt is NOT serviced correctly (pending interrupt) or it
- is TOO LATE for the process to be scheduled to run (scheduler latency),
- which results in a (potential) buffer overrun. And worse, there is
- NOTHING we can do to prevent it. */
- if (!schedule_timeout(tmo >= 2 ? tmo : 2)) {
-#ifdef DEBUG
- printk(KERN_ERR "i810_audio: recording schedule timeout, "
- "dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
- dmabuf->dmasize, dmabuf->fragsize, dmabuf->count,
- dmabuf->hwptr, dmabuf->swptr);
-#endif
- /* a buffer overrun; we delay the recovery until the next time the
- while loop begins and we REALLY have space to record */
- }
- if (signal_pending(current)) {
- ret = ret ? ret : -ERESTARTSYS;
- goto done;
- }
- continue;
- }
-
- if (copy_to_user(buffer, dmabuf->rawbuf + swptr, cnt)) {
- if (!ret) ret = -EFAULT;
- goto done;
- }
-
- swptr = MODULOP2(swptr + cnt, dmabuf->dmasize);
-
- spin_lock_irqsave(&card->lock, flags);
-
- if (PM_SUSPENDED(card)) {
- spin_unlock_irqrestore(&card->lock, flags);
- continue;
- }
- dmabuf->swptr = swptr;
- pending = dmabuf->count -= cnt;
- spin_unlock_irqrestore(&card->lock, flags);
-
- count -= cnt;
- buffer += cnt;
- ret += cnt;
- }
- done:
- pending = dmabuf->dmasize - pending;
- if (dmabuf->enable || pending >= dmabuf->userfragsize)
- i810_update_lvi(state, 1);
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&dmabuf->wait, &waita);
-
- return ret;
-}
-
-/* in this loop, dmabuf.count signifies the amount of data that is waiting to be DMAed to
- the soundcard. It is drained by the dma machine and filled by this loop. */
-static ssize_t i810_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
-{
- struct i810_state *state = (struct i810_state *)file->private_data;
- struct i810_card *card=state ? state->card : NULL;
- struct dmabuf *dmabuf = &state->dmabuf;
- ssize_t ret;
- unsigned long flags;
- unsigned int swptr = 0;
- int pending;
- int cnt;
- DECLARE_WAITQUEUE(waita, current);
-
-#ifdef DEBUG2
- printk("i810_audio: i810_write called, count = %d\n", count);
-#endif
-
- if (dmabuf->mapped)
- return -ENXIO;
- if (dmabuf->enable & ADC_RUNNING)
- return -ENODEV;
- if (!dmabuf->write_channel) {
- dmabuf->ready = 0;
- dmabuf->write_channel = card->alloc_pcm_channel(card);
- if(!dmabuf->write_channel)
- return -EBUSY;
- }
- if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
- return ret;
- if (!access_ok(VERIFY_READ, buffer, count))
- return -EFAULT;
- ret = 0;
-
- pending = 0;
-
- add_wait_queue(&dmabuf->wait, &waita);
- while (count > 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&state->card->lock, flags);
- if (PM_SUSPENDED(card)) {
- spin_unlock_irqrestore(&card->lock, flags);
- schedule();
- if (signal_pending(current)) {
- if (!ret) ret = -EAGAIN;
- break;
- }
- continue;
- }
-
- cnt = i810_get_free_write_space(state);
- swptr = dmabuf->swptr;
- /* Bound the maximum size to how much we can copy to the
- * dma buffer before we hit the end. If we have more to
- * copy then it will get done in a second pass of this
- * loop starting from the beginning of the buffer.
- */
- if(cnt > (dmabuf->dmasize - swptr))
- cnt = dmabuf->dmasize - swptr;
- spin_unlock_irqrestore(&state->card->lock, flags);
-
-#ifdef DEBUG2
- printk(KERN_INFO "i810_audio: i810_write: %d bytes available space\n", cnt);
-#endif
- if (cnt > count)
- cnt = count;
- if (cnt <= 0) {
- unsigned long tmo;
- // There is data waiting to be played
- /*
- * Force the trigger setting since we would
- * deadlock with it set any other way
- */
- dmabuf->trigger = PCM_ENABLE_OUTPUT;
- i810_update_lvi(state,0);
- if (file->f_flags & O_NONBLOCK) {
- if (!ret) ret = -EAGAIN;
- goto ret;
- }
- /* Not strictly correct but works */
- tmo = (dmabuf->dmasize * HZ * 2) / (dmabuf->rate * 4);
- /* There are two situations when schedule_timeout returns: one is when
- the interrupt is serviced correctly and the process is woken up by the
- ISR ON TIME. The other is when the timeout expires, which means that
- either the interrupt is NOT serviced correctly (pending interrupt) or it
- is TOO LATE for the process to be scheduled to run (scheduler latency),
- which results in a (potential) buffer underrun. And worse, there is
- NOTHING we can do to prevent it. */
- if (!schedule_timeout(tmo >= 2 ? tmo : 2)) {
-#ifdef DEBUG
- printk(KERN_ERR "i810_audio: playback schedule timeout, "
- "dmasz %u fragsz %u count %i hwptr %u swptr %u\n",
- dmabuf->dmasize, dmabuf->fragsize, dmabuf->count,
- dmabuf->hwptr, dmabuf->swptr);
-#endif
- /* a buffer underrun; we delay the recovery until the next time the
- while loop begins and we REALLY have data to play */
- //return ret;
- }
- if (signal_pending(current)) {
- if (!ret) ret = -ERESTARTSYS;
- goto ret;
- }
- continue;
- }
- if (copy_from_user(dmabuf->rawbuf+swptr,buffer,cnt)) {
- if (!ret) ret = -EFAULT;
- goto ret;
- }
-
- swptr = MODULOP2(swptr + cnt, dmabuf->dmasize);
-
- spin_lock_irqsave(&state->card->lock, flags);
- if (PM_SUSPENDED(card)) {
- spin_unlock_irqrestore(&card->lock, flags);
- continue;
- }
-
- dmabuf->swptr = swptr;
- pending = dmabuf->count += cnt;
-
- count -= cnt;
- buffer += cnt;
- ret += cnt;
- spin_unlock_irqrestore(&state->card->lock, flags);
- }
-ret:
- if (dmabuf->enable || pending >= dmabuf->userfragsize)
- i810_update_lvi(state, 0);
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&dmabuf->wait, &waita);
-
- return ret;
-}
-
-/* No kernel lock - we have our own spinlock */
-static unsigned int i810_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct i810_state *state = (struct i810_state *)file->private_data;
- struct dmabuf *dmabuf = &state->dmabuf;
- unsigned long flags;
- unsigned int mask = 0;
-
- if(!dmabuf->ready)
- return 0;
- poll_wait(file, &dmabuf->wait, wait);
- spin_lock_irqsave(&state->card->lock, flags);
- if (dmabuf->enable & ADC_RUNNING ||
- dmabuf->trigger & PCM_ENABLE_INPUT) {
- if (i810_get_available_read_data(state) >=
- (signed)dmabuf->userfragsize)
- mask |= POLLIN | POLLRDNORM;
- }
- if (dmabuf->enable & DAC_RUNNING ||
- dmabuf->trigger & PCM_ENABLE_OUTPUT) {
- if (i810_get_free_write_space(state) >=
- (signed)dmabuf->userfragsize)
- mask |= POLLOUT | POLLWRNORM;
- }
- spin_unlock_irqrestore(&state->card->lock, flags);
- return mask;
-}
-
-static int i810_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct i810_state *state = (struct i810_state *)file->private_data;
- struct dmabuf *dmabuf = &state->dmabuf;
- int ret = -EINVAL;
- unsigned long size;
-
- lock_kernel();
- if (vma->vm_flags & VM_WRITE) {
- if (!dmabuf->write_channel &&
- (dmabuf->write_channel =
- state->card->alloc_pcm_channel(state->card)) == NULL) {
- ret = -EBUSY;
- goto out;
- }
- }
- if (vma->vm_flags & VM_READ) {
- if (!dmabuf->read_channel &&
- (dmabuf->read_channel =
- state->card->alloc_rec_pcm_channel(state->card)) == NULL) {
- ret = -EBUSY;
- goto out;
- }
- }
- if ((ret = prog_dmabuf(state, 0)) != 0)
- goto out;
-
- ret = -EINVAL;
- if (vma->vm_pgoff != 0)
- goto out;
- size = vma->vm_end - vma->vm_start;
- if (size > (PAGE_SIZE << dmabuf->buforder))
- goto out;
- ret = -EAGAIN;
- if (remap_pfn_range(vma, vma->vm_start,
- virt_to_phys(dmabuf->rawbuf) >> PAGE_SHIFT,
- size, vma->vm_page_prot))
- goto out;
- dmabuf->mapped = 1;
- dmabuf->trigger = 0;
- ret = 0;
-#ifdef DEBUG_MMAP
- printk("i810_audio: mmap'ed %ld bytes of data space\n", size);
-#endif
-out:
- unlock_kernel();
- return ret;
-}
-
-static int i810_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct i810_state *state = (struct i810_state *)file->private_data;
- struct i810_channel *c = NULL;
- struct dmabuf *dmabuf = &state->dmabuf;
- unsigned long flags;
- audio_buf_info abinfo;
- count_info cinfo;
- unsigned int i_glob_cnt;
- int val = 0, ret;
- struct ac97_codec *codec = state->card->ac97_codec[0];
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
-
-#ifdef DEBUG
- printk("i810_audio: i810_ioctl, arg=0x%x, cmd=", arg ? *p : 0);
-#endif
-
- switch (cmd)
- {
- case OSS_GETVERSION:
-#ifdef DEBUG
- printk("OSS_GETVERSION\n");
-#endif
- return put_user(SOUND_VERSION, p);
-
- case SNDCTL_DSP_RESET:
-#ifdef DEBUG
- printk("SNDCTL_DSP_RESET\n");
-#endif
- spin_lock_irqsave(&state->card->lock, flags);
- if (dmabuf->enable == DAC_RUNNING) {
- c = dmabuf->write_channel;
- __stop_dac(state);
- }
- if (dmabuf->enable == ADC_RUNNING) {
- c = dmabuf->read_channel;
- __stop_adc(state);
- }
- if (c != NULL) {
- I810_IOWRITEB(2, state->card, c->port+OFF_CR); /* reset DMA machine */
- while ( I810_IOREADB(state->card, c->port+OFF_CR) & 2 )
- cpu_relax();
- I810_IOWRITEL((u32)state->card->chandma +
- c->num*sizeof(struct i810_channel),
- state->card, c->port+OFF_BDBAR);
- CIV_TO_LVI(state->card, c->port, 0);
- }
-
- spin_unlock_irqrestore(&state->card->lock, flags);
- synchronize_irq(state->card->pci_dev->irq);
- dmabuf->ready = 0;
- dmabuf->swptr = dmabuf->hwptr = 0;
- dmabuf->count = dmabuf->total_bytes = 0;
- return 0;
-
- case SNDCTL_DSP_SYNC:
-#ifdef DEBUG
- printk("SNDCTL_DSP_SYNC\n");
-#endif
- if (dmabuf->enable != DAC_RUNNING || file->f_flags & O_NONBLOCK)
- return 0;
- if((val = drain_dac(state, 1)))
- return val;
- dmabuf->total_bytes = 0;
- return 0;
-
- case SNDCTL_DSP_SPEED: /* set sample rate */
-#ifdef DEBUG
- printk("SNDCTL_DSP_SPEED\n");
-#endif
- if (get_user(val, p))
- return -EFAULT;
- if (val >= 0) {
- if (file->f_mode & FMODE_WRITE) {
- if ( (state->card->ac97_status & SPDIF_ON) ) { /* S/PDIF Enabled */
- /* AD1886 only supports 48000, need to check that */
- if ( i810_valid_spdif_rate ( codec, val ) ) {
- /* Set DAC rate */
- i810_set_spdif_output ( state, -1, 0 );
- stop_dac(state);
- dmabuf->ready = 0;
- spin_lock_irqsave(&state->card->lock, flags);
- i810_set_dac_rate(state, val);
- spin_unlock_irqrestore(&state->card->lock, flags);
- /* Set S/PDIF transmitter rate. */
- i810_set_spdif_output ( state, AC97_EA_SPSA_3_4, val );
- if ( ! (state->card->ac97_status & SPDIF_ON) ) {
- val = dmabuf->rate;
- }
- } else { /* Not a valid rate for S/PDIF, ignore it */
- val = dmabuf->rate;
- }
- } else {
- stop_dac(state);
- dmabuf->ready = 0;
- spin_lock_irqsave(&state->card->lock, flags);
- i810_set_dac_rate(state, val);
- spin_unlock_irqrestore(&state->card->lock, flags);
- }
- }
- if (file->f_mode & FMODE_READ) {
- stop_adc(state);
- dmabuf->ready = 0;
- spin_lock_irqsave(&state->card->lock, flags);
- i810_set_adc_rate(state, val);
- spin_unlock_irqrestore(&state->card->lock, flags);
- }
- }
- return put_user(dmabuf->rate, p);
-
- case SNDCTL_DSP_STEREO: /* set stereo or mono channel */
-#ifdef DEBUG
- printk("SNDCTL_DSP_STEREO\n");
-#endif
- if (dmabuf->enable & DAC_RUNNING) {
- stop_dac(state);
- }
- if (dmabuf->enable & ADC_RUNNING) {
- stop_adc(state);
- }
- return put_user(1, p);
-
- case SNDCTL_DSP_GETBLKSIZE:
- if (file->f_mode & FMODE_WRITE) {
- if (!dmabuf->ready && (val = prog_dmabuf(state, 0)))
- return val;
- }
- if (file->f_mode & FMODE_READ) {
- if (!dmabuf->ready && (val = prog_dmabuf(state, 1)))
- return val;
- }
-#ifdef DEBUG
- printk("SNDCTL_DSP_GETBLKSIZE %d\n", dmabuf->userfragsize);
-#endif
- return put_user(dmabuf->userfragsize, p);
-
- case SNDCTL_DSP_GETFMTS: /* Returns a mask of supported sample formats */
-#ifdef DEBUG
- printk("SNDCTL_DSP_GETFMTS\n");
-#endif
- return put_user(AFMT_S16_LE, p);
-
- case SNDCTL_DSP_SETFMT: /* Select sample format */
-#ifdef DEBUG
- printk("SNDCTL_DSP_SETFMT\n");
-#endif
- return put_user(AFMT_S16_LE, p);
-
- case SNDCTL_DSP_CHANNELS:
-#ifdef DEBUG
- printk("SNDCTL_DSP_CHANNELS\n");
-#endif
- if (get_user(val, p))
- return -EFAULT;
-
- if (val > 0) {
- if (dmabuf->enable & DAC_RUNNING) {
- stop_dac(state);
- }
- if (dmabuf->enable & ADC_RUNNING) {
- stop_adc(state);
- }
- } else {
- return put_user(state->card->channels, p);
- }
-
- /* ICH and ICH0 only support 2 channels */
- if ( state->card->pci_id == PCI_DEVICE_ID_INTEL_82801AA_5
- || state->card->pci_id == PCI_DEVICE_ID_INTEL_82801AB_5)
- return put_user(2, p);
-
- /* Multi-channel support was added with ICH2. Bits in */
- /* Global Status and Global Control register are now */
- /* used to indicate this. */
-
- i_glob_cnt = I810_IOREADL(state->card, GLOB_CNT);
-
- /* Current # of channels enabled */
- if ( i_glob_cnt & 0x0100000 )
- ret = 4;
- else if ( i_glob_cnt & 0x0200000 )
- ret = 6;
- else
- ret = 2;
-
- switch ( val ) {
- case 2: /* 2 channels is always supported */
- I810_IOWRITEL(i_glob_cnt & 0xffcfffff,
- state->card, GLOB_CNT);
- /* Do we need to change mixer settings???? */
- break;
- case 4: /* Supported on some chipsets, better check first */
- if ( state->card->channels >= 4 ) {
- I810_IOWRITEL((i_glob_cnt & 0xffcfffff) | 0x100000,
- state->card, GLOB_CNT);
- /* Do we need to change mixer settings??? */
- } else {
- val = ret;
- }
- break;
- case 6: /* Supported on some chipsets, better check first */
- if ( state->card->channels >= 6 ) {
- I810_IOWRITEL((i_glob_cnt & 0xffcfffff) | 0x200000,
- state->card, GLOB_CNT);
- /* Do we need to change mixer settings??? */
- } else {
- val = ret;
- }
- break;
- default: /* nothing else is ever supported by the chipset */
- val = ret;
- break;
- }
-
- return put_user(val, p);
-
- case SNDCTL_DSP_POST: /* the user has sent all data and is notifying us */
- /* we update the swptr to the end of the last sg segment then return */
-#ifdef DEBUG
- printk("SNDCTL_DSP_POST\n");
-#endif
- if(!dmabuf->ready || (dmabuf->enable != DAC_RUNNING))
- return 0;
- if((dmabuf->swptr % dmabuf->fragsize) != 0) {
- val = dmabuf->fragsize - (dmabuf->swptr % dmabuf->fragsize);
- dmabuf->swptr += val;
- dmabuf->count += val;
- }
- return 0;
-
- case SNDCTL_DSP_SUBDIVIDE:
- if (dmabuf->subdivision)
- return -EINVAL;
- if (get_user(val, p))
- return -EFAULT;
- if (val != 1 && val != 2 && val != 4)
- return -EINVAL;
-#ifdef DEBUG
- printk("SNDCTL_DSP_SUBDIVIDE %d\n", val);
-#endif
- dmabuf->subdivision = val;
- dmabuf->ready = 0;
- return 0;
-
- case SNDCTL_DSP_SETFRAGMENT:
- if (get_user(val, p))
- return -EFAULT;
-
- dmabuf->ossfragsize = 1<<(val & 0xffff);
- dmabuf->ossmaxfrags = (val >> 16) & 0xffff;
- if (!dmabuf->ossfragsize || !dmabuf->ossmaxfrags)
- return -EINVAL;
- /*
- * Bound the frag size into our allowed range of 256 - 4096
- */
- if (dmabuf->ossfragsize < 256)
- dmabuf->ossfragsize = 256;
- else if (dmabuf->ossfragsize > 4096)
- dmabuf->ossfragsize = 4096;
- /*
- * The numfrags could be something reasonable, or it could
- * be 0xffff meaning "Give me as much as possible". So,
- * we check the numfrags * fragsize doesn't exceed our
- * 64k buffer limit, nor is it less than our 8k minimum.
- * If it fails either one of these checks, then adjust the
- * number of fragments, not the size of them. It's OK if
- * our number of fragments doesn't equal 32 or anything
- * like our hardware based number now since we are using
- * a different frag count for the hardware. Before we get
- * into this though, bound the maxfrags to avoid overflow
- * issues. A reasonable bound would be 64k / 256 since our
- * maximum buffer size is 64k and our minimum frag size is
- * 256. On the other end, our minimum buffer size is 8k and
- * our maximum frag size is 4k, so the lower bound should
- * be 2.
- */
-
- if(dmabuf->ossmaxfrags > 256)
- dmabuf->ossmaxfrags = 256;
- else if (dmabuf->ossmaxfrags < 2)
- dmabuf->ossmaxfrags = 2;
-
- val = dmabuf->ossfragsize * dmabuf->ossmaxfrags;
- while (val < 8192) {
- val <<= 1;
- dmabuf->ossmaxfrags <<= 1;
- }
- while (val > 65536) {
- val >>= 1;
- dmabuf->ossmaxfrags >>= 1;
- }
- dmabuf->ready = 0;
-#ifdef DEBUG
- printk("SNDCTL_DSP_SETFRAGMENT 0x%x, %d, %d\n", val,
- dmabuf->ossfragsize, dmabuf->ossmaxfrags);
-#endif
-
- return 0;
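-
- /*
- * Editor's illustration (not from the original driver): a worked
- * example of the bounding above.  A SETFRAGMENT argument of 0x0004000b
- * asks for 4 fragments of 1 << 0xb = 2048 bytes; 2048 lies inside the
- * 256-4096 range and 4 * 2048 = 8192 bytes is within the 8k-64k
- * buffer window, so the request is kept as-is.  An argument of
- * 0xffff0008 ("as many 256-byte fragments as possible") is first
- * clipped to 256 fragments, giving exactly the 64k upper limit.
- */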
-
- case SNDCTL_DSP_GETOSPACE:
- if (!(file->f_mode & FMODE_WRITE))
- return -EINVAL;
- if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
- return val;
- spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
- abinfo.fragsize = dmabuf->userfragsize;
- abinfo.fragstotal = dmabuf->userfrags;
- if (dmabuf->mapped)
- abinfo.bytes = dmabuf->dmasize;
- else
- abinfo.bytes = i810_get_free_write_space(state);
- abinfo.fragments = abinfo.bytes / dmabuf->userfragsize;
- spin_unlock_irqrestore(&state->card->lock, flags);
-#if defined(DEBUG) || defined(DEBUG_MMAP)
- printk("SNDCTL_DSP_GETOSPACE %d, %d, %d, %d\n", abinfo.bytes,
- abinfo.fragsize, abinfo.fragments, abinfo.fragstotal);
-#endif
- return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
-
- case SNDCTL_DSP_GETOPTR:
- if (!(file->f_mode & FMODE_WRITE))
- return -EINVAL;
- if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
- return val;
- spin_lock_irqsave(&state->card->lock, flags);
- val = i810_get_free_write_space(state);
- cinfo.bytes = dmabuf->total_bytes;
- cinfo.ptr = dmabuf->hwptr;
- cinfo.blocks = val/dmabuf->userfragsize;
- if (dmabuf->mapped && (dmabuf->trigger & PCM_ENABLE_OUTPUT)) {
- dmabuf->count += val;
- dmabuf->swptr = (dmabuf->swptr + val) % dmabuf->dmasize;
- __i810_update_lvi(state, 0);
- }
- spin_unlock_irqrestore(&state->card->lock, flags);
-#if defined(DEBUG) || defined(DEBUG_MMAP)
- printk("SNDCTL_DSP_GETOPTR %d, %d, %d, %d\n", cinfo.bytes,
- cinfo.blocks, cinfo.ptr, dmabuf->count);
-#endif
- return copy_to_user(argp, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;
-
- case SNDCTL_DSP_GETISPACE:
- if (!(file->f_mode & FMODE_READ))
- return -EINVAL;
- if (!dmabuf->ready && (val = prog_dmabuf(state, 1)) != 0)
- return val;
- spin_lock_irqsave(&state->card->lock, flags);
- abinfo.bytes = i810_get_available_read_data(state);
- abinfo.fragsize = dmabuf->userfragsize;
- abinfo.fragstotal = dmabuf->userfrags;
- abinfo.fragments = abinfo.bytes / dmabuf->userfragsize;
- spin_unlock_irqrestore(&state->card->lock, flags);
-#if defined(DEBUG) || defined(DEBUG_MMAP)
- printk("SNDCTL_DSP_GETISPACE %d, %d, %d, %d\n", abinfo.bytes,
- abinfo.fragsize, abinfo.fragments, abinfo.fragstotal);
-#endif
- return copy_to_user(argp, &abinfo, sizeof(abinfo)) ? -EFAULT : 0;
-
- case SNDCTL_DSP_GETIPTR:
- if (!(file->f_mode & FMODE_READ))
- return -EINVAL;
- if (!dmabuf->ready && (val = prog_dmabuf(state, 0)) != 0)
- return val;
- spin_lock_irqsave(&state->card->lock, flags);
- val = i810_get_available_read_data(state);
- cinfo.bytes = dmabuf->total_bytes;
- cinfo.blocks = val/dmabuf->userfragsize;
- cinfo.ptr = dmabuf->hwptr;
- if (dmabuf->mapped && (dmabuf->trigger & PCM_ENABLE_INPUT)) {
- dmabuf->count -= val;
- dmabuf->swptr = (dmabuf->swptr + val) % dmabuf->dmasize;
- __i810_update_lvi(state, 1);
- }
- spin_unlock_irqrestore(&state->card->lock, flags);
-#if defined(DEBUG) || defined(DEBUG_MMAP)
- printk("SNDCTL_DSP_GETIPTR %d, %d, %d, %d\n", cinfo.bytes,
- cinfo.blocks, cinfo.ptr, dmabuf->count);
-#endif
- return copy_to_user(argp, &cinfo, sizeof(cinfo)) ? -EFAULT : 0;
-
- case SNDCTL_DSP_NONBLOCK:
-#ifdef DEBUG
- printk("SNDCTL_DSP_NONBLOCK\n");
-#endif
- file->f_flags |= O_NONBLOCK;
- return 0;
-
- case SNDCTL_DSP_GETCAPS:
-#ifdef DEBUG
- printk("SNDCTL_DSP_GETCAPS\n");
-#endif
- return put_user(DSP_CAP_REALTIME|DSP_CAP_TRIGGER|DSP_CAP_MMAP|DSP_CAP_BIND,
- p);
-
- case SNDCTL_DSP_GETTRIGGER:
- val = 0;
-#ifdef DEBUG
- printk("SNDCTL_DSP_GETTRIGGER 0x%x\n", dmabuf->trigger);
-#endif
- return put_user(dmabuf->trigger, p);
-
- case SNDCTL_DSP_SETTRIGGER:
- if (get_user(val, p))
- return -EFAULT;
-#if defined(DEBUG) || defined(DEBUG_MMAP)
- printk("SNDCTL_DSP_SETTRIGGER 0x%x\n", val);
-#endif
- /* silently ignore invalid PCM_ENABLE_xxx bits,
- * like the other drivers do
- */
- if (!(file->f_mode & FMODE_READ ))
- val &= ~PCM_ENABLE_INPUT;
- if (!(file->f_mode & FMODE_WRITE ))
- val &= ~PCM_ENABLE_OUTPUT;
- if((file->f_mode & FMODE_READ) && !(val & PCM_ENABLE_INPUT) && dmabuf->enable == ADC_RUNNING) {
- stop_adc(state);
- }
- if((file->f_mode & FMODE_WRITE) && !(val & PCM_ENABLE_OUTPUT) && dmabuf->enable == DAC_RUNNING) {
- stop_dac(state);
- }
- dmabuf->trigger = val;
- if((val & PCM_ENABLE_OUTPUT) && !(dmabuf->enable & DAC_RUNNING)) {
- if (!dmabuf->write_channel) {
- dmabuf->ready = 0;
- dmabuf->write_channel = state->card->alloc_pcm_channel(state->card);
- if (!dmabuf->write_channel)
- return -EBUSY;
- }
- if (!dmabuf->ready && (ret = prog_dmabuf(state, 0)))
- return ret;
- if (dmabuf->mapped) {
- spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
- dmabuf->count = 0;
- dmabuf->swptr = dmabuf->hwptr;
- dmabuf->count = i810_get_free_write_space(state);
- dmabuf->swptr = (dmabuf->swptr + dmabuf->count) % dmabuf->dmasize;
- spin_unlock_irqrestore(&state->card->lock, flags);
- }
- i810_update_lvi(state, 0);
- start_dac(state);
- }
- if((val & PCM_ENABLE_INPUT) && !(dmabuf->enable & ADC_RUNNING)) {
- if (!dmabuf->read_channel) {
- dmabuf->ready = 0;
- dmabuf->read_channel = state->card->alloc_rec_pcm_channel(state->card);
- if (!dmabuf->read_channel)
- return -EBUSY;
- }
- if (!dmabuf->ready && (ret = prog_dmabuf(state, 1)))
- return ret;
- if (dmabuf->mapped) {
- spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
- dmabuf->swptr = dmabuf->hwptr;
- dmabuf->count = 0;
- spin_unlock_irqrestore(&state->card->lock, flags);
- }
- i810_update_lvi(state, 1);
- start_adc(state);
- }
- return 0;
-
- case SNDCTL_DSP_SETDUPLEX:
-#ifdef DEBUG
- printk("SNDCTL_DSP_SETDUPLEX\n");
-#endif
- return -EINVAL;
-
- case SNDCTL_DSP_GETODELAY:
- if (!(file->f_mode & FMODE_WRITE))
- return -EINVAL;
- spin_lock_irqsave(&state->card->lock, flags);
- i810_update_ptr(state);
- val = dmabuf->count;
- spin_unlock_irqrestore(&state->card->lock, flags);
-#ifdef DEBUG
- printk("SNDCTL_DSP_GETODELAY %d\n", dmabuf->count);
-#endif
- return put_user(val, p);
-
- case SOUND_PCM_READ_RATE:
-#ifdef DEBUG
- printk("SOUND_PCM_READ_RATE %d\n", dmabuf->rate);
-#endif
- return put_user(dmabuf->rate, p);
-
- case SOUND_PCM_READ_CHANNELS:
-#ifdef DEBUG
- printk("SOUND_PCM_READ_CHANNELS\n");
-#endif
- return put_user(2, p);
-
- case SOUND_PCM_READ_BITS:
-#ifdef DEBUG
- printk("SOUND_PCM_READ_BITS\n");
-#endif
- return put_user(AFMT_S16_LE, p);
-
- case SNDCTL_DSP_SETSPDIF: /* Set S/PDIF Control register */
-#ifdef DEBUG
- printk("SNDCTL_DSP_SETSPDIF\n");
-#endif
- if (get_user(val, p))
- return -EFAULT;
-
- /* Check to make sure the codec supports S/PDIF transmitter */
-
- if((state->card->ac97_features & 4)) {
- /* mask out the transmitter speed bits so the user can't set them */
- val &= ~0x3000;
-
- /* Add the current transmitter speed bits to the passed value */
- ret = i810_ac97_get(codec, AC97_SPDIF_CONTROL);
- val |= (ret & 0x3000);
-
- i810_ac97_set(codec, AC97_SPDIF_CONTROL, val);
- if(i810_ac97_get(codec, AC97_SPDIF_CONTROL) != val ) {
- printk(KERN_ERR "i810_audio: Unable to set S/PDIF configuration to 0x%04x.\n", val);
- return -EFAULT;
- }
- }
-#ifdef DEBUG
- else
- printk(KERN_WARNING "i810_audio: S/PDIF transmitter not avalible.\n");
-#endif
- return put_user(val, p);
-
- case SNDCTL_DSP_GETSPDIF: /* Get S/PDIF Control register */
-#ifdef DEBUG
- printk("SNDCTL_DSP_GETSPDIF\n");
-#endif
- if (get_user(val, p))
- return -EFAULT;
-
- /* Check to make sure the codec supports S/PDIF transmitter */
-
- if(!(state->card->ac97_features & 4)) {
-#ifdef DEBUG
- printk(KERN_WARNING "i810_audio: S/PDIF transmitter not avalible.\n");
-#endif
- val = 0;
- } else {
- val = i810_ac97_get(codec, AC97_SPDIF_CONTROL);
- }
- //return put_user((val & 0xcfff), p);
- return put_user(val, p);
-
- case SNDCTL_DSP_GETCHANNELMASK:
-#ifdef DEBUG
- printk("SNDCTL_DSP_GETCHANNELMASK\n");
-#endif
- if (get_user(val, p))
- return -EFAULT;
-
- /* Based on AC'97 DAC support, not ICH hardware */
- val = DSP_BIND_FRONT;
- if ( state->card->ac97_features & 0x0004 )
- val |= DSP_BIND_SPDIF;
-
- if ( state->card->ac97_features & 0x0080 )
- val |= DSP_BIND_SURR;
- if ( state->card->ac97_features & 0x0140 )
- val |= DSP_BIND_CENTER_LFE;
-
- return put_user(val, p);
-
- case SNDCTL_DSP_BIND_CHANNEL:
-#ifdef DEBUG
- printk("SNDCTL_DSP_BIND_CHANNEL\n");
-#endif
- if (get_user(val, p))
- return -EFAULT;
- if ( val == DSP_BIND_QUERY ) {
- val = DSP_BIND_FRONT; /* Always report this as being enabled */
- if ( state->card->ac97_status & SPDIF_ON )
- val |= DSP_BIND_SPDIF;
- else {
- if ( state->card->ac97_status & SURR_ON )
- val |= DSP_BIND_SURR;
- if ( state->card->ac97_status & CENTER_LFE_ON )
- val |= DSP_BIND_CENTER_LFE;
- }
- } else { /* Not a query, set it */
- if (!(file->f_mode & FMODE_WRITE))
- return -EINVAL;
- if ( dmabuf->enable == DAC_RUNNING ) {
- stop_dac(state);
- }
- if ( val & DSP_BIND_SPDIF ) { /* Turn on SPDIF */
- /* Ok, this should probably define what slots
- * to use. For now, we'll only set it to the
- * defaults:
- *
- * non multichannel codec maps to slots 3&4
- * 2 channel codec maps to slots 7&8
- * 4 channel codec maps to slots 6&9
- * 6 channel codec maps to slots 10&11
- *
- * there should be some way for the app to
- * select the slot assignment.
- */
-
- i810_set_spdif_output ( state, AC97_EA_SPSA_3_4, dmabuf->rate );
- if ( !(state->card->ac97_status & SPDIF_ON) )
- val &= ~DSP_BIND_SPDIF;
- } else {
- int mask;
- int channels;
-
- /* Turn off S/PDIF if it was on */
- if ( state->card->ac97_status & SPDIF_ON )
- i810_set_spdif_output ( state, -1, 0 );
-
- mask = val & (DSP_BIND_FRONT | DSP_BIND_SURR | DSP_BIND_CENTER_LFE);
- switch (mask) {
- case DSP_BIND_FRONT:
- channels = 2;
- break;
- case DSP_BIND_FRONT|DSP_BIND_SURR:
- channels = 4;
- break;
- case DSP_BIND_FRONT|DSP_BIND_SURR|DSP_BIND_CENTER_LFE:
- channels = 6;
- break;
- default:
- val = DSP_BIND_FRONT;
- channels = 2;
- break;
- }
- i810_set_dac_channels ( state, channels );
-
- /* check that they really got turned on */
- if (!(state->card->ac97_status & SURR_ON))
- val &= ~DSP_BIND_SURR;
- if (!(state->card->ac97_status & CENTER_LFE_ON))
- val &= ~DSP_BIND_CENTER_LFE;
- }
- }
- return put_user(val, p);
-
- case SNDCTL_DSP_MAPINBUF:
- case SNDCTL_DSP_MAPOUTBUF:
- case SNDCTL_DSP_SETSYNCRO:
- case SOUND_PCM_WRITE_FILTER:
- case SOUND_PCM_READ_FILTER:
-#ifdef DEBUG
- printk("SNDCTL_* -EINVAL\n");
-#endif
- return -EINVAL;
- }
- return -EINVAL;
-}
-
-static int i810_open(struct inode *inode, struct file *file)
-{
- int i = 0;
- struct i810_card *card = devs;
- struct i810_state *state = NULL;
- struct dmabuf *dmabuf = NULL;
-
- /* find an available virtual channel (instance of /dev/dsp) */
- while (card != NULL) {
- /*
- * If we are initializing and then fail, card could go
- * away unexpectedly while we are in the for() loop.
- * So, check for card on each iteration before we check
- * for card->initializing to avoid a possible oops.
- * This usually only matters for times when the driver is
- * autoloaded by kmod.
- */
- for (i = 0; i < 50 && card && card->initializing; i++) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/20);
- }
- for (i = 0; i < NR_HW_CH && card && !card->initializing; i++) {
- if (card->states[i] == NULL) {
- state = card->states[i] = (struct i810_state *)
- kzalloc(sizeof(struct i810_state), GFP_KERNEL);
- if (state == NULL)
- return -ENOMEM;
- dmabuf = &state->dmabuf;
- goto found_virt;
- }
- }
- card = card->next;
- }
- /* no more virtual channels available */
- if (!state)
- return -ENODEV;
-
-found_virt:
- /* initialize the virtual channel */
- state->virt = i;
- state->card = card;
- state->magic = I810_STATE_MAGIC;
- init_waitqueue_head(&dmabuf->wait);
- mutex_init(&state->open_mutex);
- file->private_data = state;
- dmabuf->trigger = 0;
-
- /* allocate hardware channels */
- if(file->f_mode & FMODE_READ) {
- if((dmabuf->read_channel = card->alloc_rec_pcm_channel(card)) == NULL) {
- kfree (card->states[i]);
- card->states[i] = NULL;
- return -EBUSY;
- }
- dmabuf->trigger |= PCM_ENABLE_INPUT;
- i810_set_adc_rate(state, 8000);
- }
- if(file->f_mode & FMODE_WRITE) {
- if((dmabuf->write_channel = card->alloc_pcm_channel(card)) == NULL) {
- /* make sure we free the record channel allocated above */
- if(file->f_mode & FMODE_READ)
- card->free_pcm_channel(card,dmabuf->read_channel->num);
- kfree (card->states[i]);
- card->states[i] = NULL;
- return -EBUSY;
- }
- /* Initialize to 8kHz? What if we don't support 8kHz? */
- /* Let's change this to check for S/PDIF stuff */
-
- dmabuf->trigger |= PCM_ENABLE_OUTPUT;
- if ( spdif_locked ) {
- i810_set_dac_rate(state, spdif_locked);
- i810_set_spdif_output(state, AC97_EA_SPSA_3_4, spdif_locked);
- } else {
- i810_set_dac_rate(state, 8000);
- /* Put the ACLink in 2 channel mode by default */
- i = I810_IOREADL(card, GLOB_CNT);
- I810_IOWRITEL(i & 0xffcfffff, card, GLOB_CNT);
- }
- }
-
- /* set the default sample format. According to the OSS Programmer's Guide, /dev/dsp
- should default to unsigned 8-bit, mono, with a sample rate of 8kHz, and
- /dev/dspW will accept 16-bit samples, but we don't support those, so we
- set it immediately to stereo and 16-bit, which is all we do support */
- dmabuf->fmt |= I810_FMT_16BIT | I810_FMT_STEREO;
- dmabuf->ossfragsize = 0;
- dmabuf->ossmaxfrags = 0;
- dmabuf->subdivision = 0;
-
- state->open_mode |= file->f_mode & (FMODE_READ | FMODE_WRITE);
-
- return nonseekable_open(inode, file);
-}
-
-static int i810_release(struct inode *inode, struct file *file)
-{
- struct i810_state *state = (struct i810_state *)file->private_data;
- struct i810_card *card = state->card;
- struct dmabuf *dmabuf = &state->dmabuf;
- unsigned long flags;
-
- lock_kernel();
-
- /* stop DMA state machine and free DMA buffers/channels */
- if(dmabuf->trigger & PCM_ENABLE_OUTPUT) {
- drain_dac(state, 0);
- }
- if(dmabuf->trigger & PCM_ENABLE_INPUT) {
- stop_adc(state);
- }
- spin_lock_irqsave(&card->lock, flags);
- dealloc_dmabuf(state);
- if (file->f_mode & FMODE_WRITE) {
- state->card->free_pcm_channel(state->card, dmabuf->write_channel->num);
- }
- if (file->f_mode & FMODE_READ) {
- state->card->free_pcm_channel(state->card, dmabuf->read_channel->num);
- }
-
- state->card->states[state->virt] = NULL;
- kfree(state);
- spin_unlock_irqrestore(&card->lock, flags);
- unlock_kernel();
-
- return 0;
-}
-
-static /*const*/ struct file_operations i810_audio_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = i810_read,
- .write = i810_write,
- .poll = i810_poll,
- .ioctl = i810_ioctl,
- .mmap = i810_mmap,
- .open = i810_open,
- .release = i810_release,
-};
-
-/* Write AC97 codec registers */
-
-static u16 i810_ac97_get_mmio(struct ac97_codec *dev, u8 reg)
-{
- struct i810_card *card = dev->private_data;
- int count = 100;
- u16 reg_set = IO_REG_OFF(dev) | (reg&0x7f);
-
- while(count-- && (readb(card->iobase_mmio + CAS) & 1))
- udelay(1);
-
-#ifdef DEBUG_MMIO
- {
- u16 ans = readw(card->ac97base_mmio + reg_set);
- printk(KERN_DEBUG "i810_audio: ac97_get_mmio(%d) -> 0x%04X\n", ((int) reg_set) & 0xffff, (u32) ans);
- return ans;
- }
-#else
- return readw(card->ac97base_mmio + reg_set);
-#endif
-}
-
-static u16 i810_ac97_get_io(struct ac97_codec *dev, u8 reg)
-{
- struct i810_card *card = dev->private_data;
- int count = 100;
- u16 reg_set = IO_REG_OFF(dev) | (reg&0x7f);
-
- while(count-- && (I810_IOREADB(card, CAS) & 1))
- udelay(1);
-
- return inw(card->ac97base + reg_set);
-}
-
-static void i810_ac97_set_mmio(struct ac97_codec *dev, u8 reg, u16 data)
-{
- struct i810_card *card = dev->private_data;
- int count = 100;
- u16 reg_set = IO_REG_OFF(dev) | (reg&0x7f);
-
- while(count-- && (readb(card->iobase_mmio + CAS) & 1))
- udelay(1);
-
- writew(data, card->ac97base_mmio + reg_set);
-
-#ifdef DEBUG_MMIO
- printk(KERN_DEBUG "i810_audio: ac97_set_mmio(0x%04X, %d)\n", (u32) data, ((int) reg_set) & 0xffff);
-#endif
-}
-
-static void i810_ac97_set_io(struct ac97_codec *dev, u8 reg, u16 data)
-{
- struct i810_card *card = dev->private_data;
- int count = 100;
- u16 reg_set = IO_REG_OFF(dev) | (reg&0x7f);
-
- while(count-- && (I810_IOREADB(card, CAS) & 1))
- udelay(1);
-
- outw(data, card->ac97base + reg_set);
-}
-
-static u16 i810_ac97_get(struct ac97_codec *dev, u8 reg)
-{
- struct i810_card *card = dev->private_data;
- u16 ret;
-
- spin_lock(&card->ac97_lock);
- if (card->use_mmio) {
- ret = i810_ac97_get_mmio(dev, reg);
- }
- else {
- ret = i810_ac97_get_io(dev, reg);
- }
- spin_unlock(&card->ac97_lock);
-
- return ret;
-}
-
-static void i810_ac97_set(struct ac97_codec *dev, u8 reg, u16 data)
-{
- struct i810_card *card = dev->private_data;
-
- spin_lock(&card->ac97_lock);
- if (card->use_mmio) {
- i810_ac97_set_mmio(dev, reg, data);
- }
- else {
- i810_ac97_set_io(dev, reg, data);
- }
- spin_unlock(&card->ac97_lock);
-}
-
-
-/* OSS /dev/mixer file operation methods */
-
-static int i810_open_mixdev(struct inode *inode, struct file *file)
-{
- int i;
- int minor = iminor(inode);
- struct i810_card *card = devs;
-
- for (card = devs; card != NULL; card = card->next) {
- /*
- * If we are initializing and then fail, card could go
- * away unexpectedly while we are in the for() loop.
- * So, check for card on each iteration before we check
- * for card->initializing to avoid a possible oops.
- * This usually only matters for times when the driver is
- * autoloaded by kmod.
- */
- for (i = 0; i < 50 && card && card->initializing; i++) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/20);
- }
- for (i = 0; i < NR_AC97 && card && !card->initializing; i++)
- if (card->ac97_codec[i] != NULL &&
- card->ac97_codec[i]->dev_mixer == minor) {
- file->private_data = card->ac97_codec[i];
- return nonseekable_open(inode, file);
- }
- }
- return -ENODEV;
-}
-
-static int i810_ioctl_mixdev(struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct ac97_codec *codec = (struct ac97_codec *)file->private_data;
-
- return codec->mixer_ioctl(codec, cmd, arg);
-}
-
-static /*const*/ struct file_operations i810_mixer_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .ioctl = i810_ioctl_mixdev,
- .open = i810_open_mixdev,
-};
-
-/* AC97 codec initialisation. These small functions exist so we don't
- duplicate code between module init and apm resume */
-
-static inline int i810_ac97_exists(struct i810_card *card, int ac97_number)
-{
- u32 reg = I810_IOREADL(card, GLOB_STA);
- switch (ac97_number) {
- case 0:
- return reg & (1<<8);
- case 1:
- return reg & (1<<9);
- case 2:
- return reg & (1<<28);
- }
- return 0;
-}
-
-static inline int i810_ac97_enable_variable_rate(struct ac97_codec *codec)
-{
- i810_ac97_set(codec, AC97_EXTENDED_STATUS, 9);
- i810_ac97_set(codec,AC97_EXTENDED_STATUS,
- i810_ac97_get(codec, AC97_EXTENDED_STATUS)|0xE800);
-
- return (i810_ac97_get(codec, AC97_EXTENDED_STATUS)&1);
-}
-
-
-static int i810_ac97_probe_and_powerup(struct i810_card *card,struct ac97_codec *codec)
-{
- /* Returns 0 on failure */
- int i;
-
- if (ac97_probe_codec(codec) == 0) return 0;
-
- /* power it all up */
- i810_ac97_set(codec, AC97_POWER_CONTROL,
- i810_ac97_get(codec, AC97_POWER_CONTROL) & ~0x7f00);
-
- /* wait for analog ready */
- for (i=100; i && ((i810_ac97_get(codec, AC97_POWER_CONTROL) & 0xf) != 0xf); i--)
- {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/20);
- }
- return i;
-}
-
-static int is_new_ich(u16 pci_id)
-{
- switch (pci_id) {
- case PCI_DEVICE_ID_INTEL_82801DB_5:
- case PCI_DEVICE_ID_INTEL_82801EB_5:
- case PCI_DEVICE_ID_INTEL_ESB_5:
- case PCI_DEVICE_ID_INTEL_ICH6_18:
- return 1;
- default:
- break;
- }
-
- return 0;
-}
-
-static inline int ich_use_mmio(struct i810_card *card)
-{
- return is_new_ich(card->pci_id) && card->use_mmio;
-}
-
-/**
- * i810_ac97_power_up_bus - bring up AC97 link
- * @card : ICH audio device to power up
- *
- * Bring up the ACLink AC97 codec bus
- */
-
-static int i810_ac97_power_up_bus(struct i810_card *card)
-{
- u32 reg = I810_IOREADL(card, GLOB_CNT);
- int i;
- int primary_codec_id = 0;
-
- if((reg&2)==0) /* Cold required */
- reg|=2;
- else
- reg|=4; /* Warm */
-
- reg&=~8; /* ACLink on */
-
- /* At this point we deassert AC_RESET # */
- I810_IOWRITEL(reg , card, GLOB_CNT);
-
- /* We must now allow time for the Codec initialisation.
- 600 ms is the specified time */
-
- for(i=0;i<10;i++)
- {
- if((I810_IOREADL(card, GLOB_CNT)&4)==0)
- break;
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/20);
- }
- if(i==10)
- {
- printk(KERN_ERR "i810_audio: AC'97 reset failed.\n");
- return 0;
- }
-
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ/2);
-
- /*
- * See if the primary codec comes ready. This must happen
- * before we start doing DMA stuff
- */
- /* see i810_ac97_init for the next 10 lines (jsaw) */
- if (card->use_mmio)
- readw(card->ac97base_mmio);
- else
- inw(card->ac97base);
- if (ich_use_mmio(card)) {
- primary_codec_id = (int) readl(card->iobase_mmio + SDM) & 0x3;
- printk(KERN_INFO "i810_audio: Primary codec has ID %d\n",
- primary_codec_id);
- }
-
- if(! i810_ac97_exists(card, primary_codec_id))
- {
- printk(KERN_INFO "i810_audio: Codec not ready.. wait.. ");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(HZ); /* actually 600 ms by the spec */
-
- if(i810_ac97_exists(card, primary_codec_id))
- printk("OK\n");
- else
- printk("no response.\n");
- }
- if (card->use_mmio)
- readw(card->ac97base_mmio);
- else
- inw(card->ac97base);
- return 1;
-}
-
-static int __devinit i810_ac97_init(struct i810_card *card)
-{
- int num_ac97 = 0;
- int ac97_id;
- int total_channels = 0;
- int nr_ac97_max = card_cap[card->pci_id_internal].nr_ac97;
- struct ac97_codec *codec;
- u16 eid;
- u32 reg;
-
- if(!i810_ac97_power_up_bus(card)) return 0;
-
- /* Number of channels supported */
- /* What about the codec? Just because the ICH supports */
- /* multiple channels doesn't mean the codec does. */
- /* we'll have to modify this in the codec section below */
- /* to reflect what the codec has. */
- /* ICH and ICH0 only support 2 channels so don't bother */
- /* to check.... */
-
- card->channels = 2;
- reg = I810_IOREADL(card, GLOB_STA);
- if ( reg & 0x0200000 )
- card->channels = 6;
- else if ( reg & 0x0100000 )
- card->channels = 4;
- printk(KERN_INFO "i810_audio: Audio Controller supports %d channels.\n", card->channels);
- printk(KERN_INFO "i810_audio: Defaulting to base 2 channel mode.\n");
- reg = I810_IOREADL(card, GLOB_CNT);
- I810_IOWRITEL(reg & 0xffcfffff, card, GLOB_CNT);
-
- for (num_ac97 = 0; num_ac97 < NR_AC97; num_ac97++)
- card->ac97_codec[num_ac97] = NULL;
-
- /*@FIXME I don't know if I'm playing it too safe here... (jsaw) */
- if ((nr_ac97_max > 2) && !card->use_mmio) nr_ac97_max = 2;
-
- for (num_ac97 = 0; num_ac97 < nr_ac97_max; num_ac97++) {
- /* codec reset */
- printk(KERN_INFO "i810_audio: Resetting connection %d\n", num_ac97);
- if (card->use_mmio)
- readw(card->ac97base_mmio + 0x80*num_ac97);
- else
- inw(card->ac97base + 0x80*num_ac97);
-
- /* If we have the SDATA_IN Map Register, as on ICH4, we
- do not loop through all possible codec IDs but through all
- possible IO channels. Bits 0:1 of SDM then hold the
- last codec ID spoken to.
- */
- if (ich_use_mmio(card)) {
- ac97_id = (int) readl(card->iobase_mmio + SDM) & 0x3;
- printk(KERN_INFO "i810_audio: Connection %d with codec id %d\n",
- num_ac97, ac97_id);
- }
- else {
- ac97_id = num_ac97;
- }
-
- /* The ICH programmer's reference says you should */
- /* check the ready status before probing, so we check. */
- /* What do we do if it's not ready? Wait and try */
- /* again, or abort? */
- if (!i810_ac97_exists(card, ac97_id)) {
- if(num_ac97 == 0)
- printk(KERN_ERR "i810_audio: Primary codec not ready.\n");
- }
-
- if ((codec = ac97_alloc_codec()) == NULL)
- return -ENOMEM;
-
- /* initialize some basic codec information, other fields will be filled
- in ac97_probe_codec */
- codec->private_data = card;
- codec->id = ac97_id;
- card->ac97_id_map[ac97_id] = num_ac97 * 0x80;
-
- if (card->use_mmio) {
- codec->codec_read = i810_ac97_get_mmio;
- codec->codec_write = i810_ac97_set_mmio;
- }
- else {
- codec->codec_read = i810_ac97_get_io;
- codec->codec_write = i810_ac97_set_io;
- }
-
- if(!i810_ac97_probe_and_powerup(card,codec)) {
- printk(KERN_ERR "i810_audio: timed out waiting for codec %d analog ready.\n", ac97_id);
- ac97_release_codec(codec);
- break; /* it didn't work */
- }
- /* Store state information about S/PDIF transmitter */
- card->ac97_status = 0;
-
- /* Don't attempt to get eid until powerup is complete */
- eid = i810_ac97_get(codec, AC97_EXTENDED_ID);
-
- if(eid==0xFFFF)
- {
- printk(KERN_WARNING "i810_audio: no codec attached ?\n");
- ac97_release_codec(codec);
- break;
- }
-
- /* Check for an AC97 1.0 soft modem (ID1) */
-
- if(codec->modem)
- {
- printk(KERN_WARNING "i810_audio: codec %d is a softmodem - skipping.\n", ac97_id);
- ac97_release_codec(codec);
- continue;
- }
-
- card->ac97_features = eid;
-
- /* Now check the codec for useful features to make up for
- the dumbness of the 810 hardware engine */
-
- if(!(eid&0x0001))
- printk(KERN_WARNING "i810_audio: only 48Khz playback available.\n");
- else
- {
- if(!i810_ac97_enable_variable_rate(codec)) {
- printk(KERN_WARNING "i810_audio: Codec refused to allow VRA, using 48Khz only.\n");
- card->ac97_features&=~1;
- }
- }
-
- /* Turn on the amplifier */
-
- codec->codec_write(codec, AC97_POWER_CONTROL,
- codec->codec_read(codec, AC97_POWER_CONTROL) & ~0x8000);
-
- /* Determine how many channels the codec(s) support */
- /* - The primary codec always supports 2 */
- /* - If the codec supports AMAP, surround DACs will */
- /* automatically get assigned to slots. */
- /* * Check for surround DACs and increment if */
- /* found. */
- /* - Else check if the codec is revision 2.2 */
- /* * If surround DACs exist, assign them to slots */
- /* and increment channel count. */
-
- /* All of this only applies to ICH2 and above. ICH */
- /* and ICH0 only support 2 channels. ICH2 will only */
- /* support multiple codecs in a "split audio" config. */
- /* as described above. */
-
- /* TODO: Remove all the debugging messages! */
-
- if((eid & 0xc000) == 0) /* primary codec */
- total_channels += 2;
-
- if(eid & 0x200) { /* GOOD, AMAP support */
- if (eid & 0x0080) /* L/R Surround channels */
- total_channels += 2;
- if (eid & 0x0140) /* LFE and Center channels */
- total_channels += 2;
- printk("i810_audio: AC'97 codec %d supports AMAP, total channels = %d\n", ac97_id, total_channels);
- } else if (eid & 0x0400) { /* this only works on 2.2 compliant codecs */
- eid &= 0xffcf;
- if((eid & 0xc000) != 0) {
- switch ( total_channels ) {
- case 2:
- /* Set dsa1, dsa0 to 01 */
- eid |= 0x0010;
- break;
- case 4:
- /* Set dsa1, dsa0 to 10 */
- eid |= 0x0020;
- break;
- case 6:
- /* Set dsa1, dsa0 to 11 */
- eid |= 0x0030;
- break;
- }
- total_channels += 2;
- }
- i810_ac97_set(codec, AC97_EXTENDED_ID, eid);
- eid = i810_ac97_get(codec, AC97_EXTENDED_ID);
- printk("i810_audio: AC'97 codec %d, new EID value = 0x%04x\n", ac97_id, eid);
- if (eid & 0x0080) /* L/R Surround channels */
- total_channels += 2;
- if (eid & 0x0140) /* LFE and Center channels */
- total_channels += 2;
- printk("i810_audio: AC'97 codec %d, DAC map configured, total channels = %d\n", ac97_id, total_channels);
- } else {
- printk("i810_audio: AC'97 codec %d Unable to map surround DAC's (or DAC's not present), total channels = %d\n", ac97_id, total_channels);
- }
-
- if ((codec->dev_mixer = register_sound_mixer(&i810_mixer_fops, -1)) < 0) {
- printk(KERN_ERR "i810_audio: couldn't register mixer!\n");
- ac97_release_codec(codec);
- break;
- }
-
- card->ac97_codec[num_ac97] = codec;
- }
-
- /* tune up the primary codec */
- ac97_tune_hardware(card->pci_dev, ac97_quirks, ac97_quirk);
-
- /* pick the minimum of channels supported by ICHx or codec(s) */
- card->channels = (card->channels > total_channels)?total_channels:card->channels;
-
- return num_ac97;
-}
-
-static void __devinit i810_configure_clocking (void)
-{
- struct i810_card *card;
- struct i810_state *state;
- struct dmabuf *dmabuf;
- unsigned int i, offset, new_offset;
- unsigned long flags;
-
- card = devs;
- /* We could try to set the clocking for multiple cards, but can you even have
- * more than one i810 in a machine? Besides, clocking is global, so unless
- * someone actually thinks more than one i810 in a machine is possible and
- * decides to rewrite that little bit, setting the rate for more than one card
- * is a waste of time.
- */
- if(card != NULL) {
- state = card->states[0] = (struct i810_state *)
- kzalloc(sizeof(struct i810_state), GFP_KERNEL);
- if (state == NULL)
- return;
- dmabuf = &state->dmabuf;
-
- dmabuf->write_channel = card->alloc_pcm_channel(card);
- state->virt = 0;
- state->card = card;
- state->magic = I810_STATE_MAGIC;
- init_waitqueue_head(&dmabuf->wait);
- mutex_init(&state->open_mutex);
- dmabuf->fmt = I810_FMT_STEREO | I810_FMT_16BIT;
- dmabuf->trigger = PCM_ENABLE_OUTPUT;
- i810_set_spdif_output(state, -1, 0);
- i810_set_dac_channels(state, 2);
- i810_set_dac_rate(state, 48000);
- if(prog_dmabuf(state, 0) != 0) {
- goto config_out_nodmabuf;
- }
- if(dmabuf->dmasize < 16384) {
- goto config_out;
- }
- dmabuf->count = dmabuf->dmasize;
- CIV_TO_LVI(card, dmabuf->write_channel->port, -1);
- local_irq_save(flags);
- start_dac(state);
- offset = i810_get_dma_addr(state, 0);
- mdelay(50);
- new_offset = i810_get_dma_addr(state, 0);
- stop_dac(state);
- local_irq_restore(flags);
- i = new_offset - offset;
-#ifdef DEBUG_INTERRUPTS
- printk("i810_audio: %d bytes in 50 milliseconds\n", i);
-#endif
- if(i == 0)
- goto config_out;
- i = i / 4 * 20;
- if (i > 48500 || i < 47500) {
- clocking = clocking * clocking / i;
- printk("i810_audio: setting clocking to %d\n", clocking);
- }
-config_out:
- dealloc_dmabuf(state);
-config_out_nodmabuf:
- state->card->free_pcm_channel(state->card,state->dmabuf.write_channel->num);
- kfree(state);
- card->states[0] = NULL;
- }
-}
-
-/* install the driver; we do not allocate hardware channels or DMA buffers now, they are deferred
- until "ACCESS" time (in prog_dmabuf called by open/read/write/ioctl/mmap) */
-
-static int __devinit i810_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_id)
-{
- struct i810_card *card;
-
- if (pci_enable_device(pci_dev))
- return -EIO;
-
- if (pci_set_dma_mask(pci_dev, I810_DMA_MASK)) {
- printk(KERN_ERR "i810_audio: architecture does not support"
- " 32bit PCI busmaster DMA\n");
- return -ENODEV;
- }
-
- if ((card = kzalloc(sizeof(struct i810_card), GFP_KERNEL)) == NULL) {
- printk(KERN_ERR "i810_audio: out of memory\n");
- return -ENOMEM;
- }
-
- card->initializing = 1;
- card->pci_dev = pci_dev;
- card->pci_id = pci_id->device;
- card->ac97base = pci_resource_start (pci_dev, 0);
- card->iobase = pci_resource_start (pci_dev, 1);
-
- if (!(card->ac97base) || !(card->iobase)) {
- card->ac97base = 0;
- card->iobase = 0;
- }
-
- /* if chipset could have mmio capability, check it */
- if (card_cap[pci_id->driver_data].flags & CAP_MMIO) {
- card->ac97base_mmio_phys = pci_resource_start (pci_dev, 2);
- card->iobase_mmio_phys = pci_resource_start (pci_dev, 3);
-
- if ((card->ac97base_mmio_phys) && (card->iobase_mmio_phys)) {
- card->use_mmio = 1;
- }
- else {
- card->ac97base_mmio_phys = 0;
- card->iobase_mmio_phys = 0;
- }
- }
-
- if (!(card->use_mmio) && (!(card->iobase) || !(card->ac97base))) {
- printk(KERN_ERR "i810_audio: No I/O resources available.\n");
- goto out_mem;
- }
-
- card->irq = pci_dev->irq;
- card->next = devs;
- card->magic = I810_CARD_MAGIC;
-#ifdef CONFIG_PM
- card->pm_suspended=0;
-#endif
- spin_lock_init(&card->lock);
- spin_lock_init(&card->ac97_lock);
- devs = card;
-
- pci_set_master(pci_dev);
-
- printk(KERN_INFO "i810: %s found at IO 0x%04lx and 0x%04lx, "
- "MEM 0x%04lx and 0x%04lx, IRQ %d\n",
- card_names[pci_id->driver_data],
- card->iobase, card->ac97base,
- card->ac97base_mmio_phys, card->iobase_mmio_phys,
- card->irq);
-
- card->alloc_pcm_channel = i810_alloc_pcm_channel;
- card->alloc_rec_pcm_channel = i810_alloc_rec_pcm_channel;
- card->alloc_rec_mic_channel = i810_alloc_rec_mic_channel;
- card->free_pcm_channel = i810_free_pcm_channel;
-
- if ((card->channel = pci_alloc_consistent(pci_dev,
- sizeof(struct i810_channel)*NR_HW_CH, &card->chandma)) == NULL) {
- printk(KERN_ERR "i810: cannot allocate channel DMA memory\n");
- goto out_mem;
- }
-
- { /* We may dispose of this altogether some time soon, so... */
- struct i810_channel *cp = card->channel;
-
- cp[0].offset = 0;
- cp[0].port = 0x00;
- cp[0].num = 0;
- cp[1].offset = 0;
- cp[1].port = 0x10;
- cp[1].num = 1;
- cp[2].offset = 0;
- cp[2].port = 0x20;
- cp[2].num = 2;
- }
-
- /* claim our iospace and irq */
- if (!request_region(card->iobase, 64, card_names[pci_id->driver_data])) {
- printk(KERN_ERR "i810_audio: unable to allocate region %lx\n", card->iobase);
- goto out_region1;
- }
- if (!request_region(card->ac97base, 256, card_names[pci_id->driver_data])) {
- printk(KERN_ERR "i810_audio: unable to allocate region %lx\n", card->ac97base);
- goto out_region2;
- }
-
- if (card->use_mmio) {
- if (request_mem_region(card->ac97base_mmio_phys, 512, "ich_audio MMBAR")) {
- if ((card->ac97base_mmio = ioremap(card->ac97base_mmio_phys, 512))) { /*@FIXME can ioremap fail? don't know (jsaw) */
- if (request_mem_region(card->iobase_mmio_phys, 256, "ich_audio MBBAR")) {
- if ((card->iobase_mmio = ioremap(card->iobase_mmio_phys, 256))) {
- printk(KERN_INFO "i810: %s mmio at 0x%04lx and 0x%04lx\n",
- card_names[pci_id->driver_data],
- (unsigned long) card->ac97base_mmio,
- (unsigned long) card->iobase_mmio);
- }
- else {
- iounmap(card->ac97base_mmio);
- release_mem_region(card->ac97base_mmio_phys, 512);
- release_mem_region(card->iobase_mmio_phys, 512);
- card->use_mmio = 0;
- }
- }
- else {
- iounmap(card->ac97base_mmio);
- release_mem_region(card->ac97base_mmio_phys, 512);
- card->use_mmio = 0;
- }
- }
- }
- else {
- card->use_mmio = 0;
- }
- }
-
- /* initialize AC97 codec and register /dev/mixer */
- if (i810_ac97_init(card) <= 0)
- goto out_iospace;
- pci_set_drvdata(pci_dev, card);
-
- if(clocking == 0) {
- clocking = 48000;
- i810_configure_clocking();
- }
-
- /* register /dev/dsp */
- if ((card->dev_audio = register_sound_dsp(&i810_audio_fops, -1)) < 0) {
- int i;
- printk(KERN_ERR "i810_audio: couldn't register DSP device!\n");
- for (i = 0; i < NR_AC97; i++)
- if (card->ac97_codec[i] != NULL) {
- unregister_sound_mixer(card->ac97_codec[i]->dev_mixer);
- ac97_release_codec(card->ac97_codec[i]);
- }
- goto out_iospace;
- }
-
- if (request_irq(card->irq, &i810_interrupt, IRQF_SHARED,
- card_names[pci_id->driver_data], card)) {
- printk(KERN_ERR "i810_audio: unable to allocate irq %d\n", card->irq);
- goto out_iospace;
- }
-
-
- card->initializing = 0;
- return 0;
-
-out_iospace:
- if (card->use_mmio) {
- iounmap(card->ac97base_mmio);
- iounmap(card->iobase_mmio);
- release_mem_region(card->ac97base_mmio_phys, 512);
- release_mem_region(card->iobase_mmio_phys, 256);
- }
- release_region(card->ac97base, 256);
-out_region2:
- release_region(card->iobase, 64);
-out_region1:
- pci_free_consistent(pci_dev, sizeof(struct i810_channel)*NR_HW_CH,
- card->channel, card->chandma);
-out_mem:
- kfree(card);
- return -ENODEV;
-}
-
-static void __devexit i810_remove(struct pci_dev *pci_dev)
-{
- int i;
- struct i810_card *card = pci_get_drvdata(pci_dev);
- /* free hardware resources */
- free_irq(card->irq, devs);
- release_region(card->iobase, 64);
- release_region(card->ac97base, 256);
- pci_free_consistent(pci_dev, sizeof(struct i810_channel)*NR_HW_CH,
- card->channel, card->chandma);
- if (card->use_mmio) {
- iounmap(card->ac97base_mmio);
- iounmap(card->iobase_mmio);
- release_mem_region(card->ac97base_mmio_phys, 512);
- release_mem_region(card->iobase_mmio_phys, 256);
- }
-
- /* unregister audio devices */
- for (i = 0; i < NR_AC97; i++)
- if (card->ac97_codec[i] != NULL) {
- unregister_sound_mixer(card->ac97_codec[i]->dev_mixer);
- ac97_release_codec(card->ac97_codec[i]);
- card->ac97_codec[i] = NULL;
- }
- unregister_sound_dsp(card->dev_audio);
- kfree(card);
-}
-
-#ifdef CONFIG_PM
-static int i810_pm_suspend(struct pci_dev *dev, pm_message_t pm_state)
-{
- struct i810_card *card = pci_get_drvdata(dev);
- struct i810_state *state;
- unsigned long flags;
- struct dmabuf *dmabuf;
- int i,num_ac97;
-#ifdef DEBUG
- printk("i810_audio: i810_pm_suspend called\n");
-#endif
- if(!card) return 0;
- spin_lock_irqsave(&card->lock, flags);
- card->pm_suspended=1;
- for(i=0;i<NR_HW_CH;i++) {
- state = card->states[i];
- if(!state) continue;
- /* this happens only if there are open files */
- dmabuf = &state->dmabuf;
- if(dmabuf->enable & DAC_RUNNING ||
- (dmabuf->count && (dmabuf->trigger & PCM_ENABLE_OUTPUT))) {
- state->pm_saved_dac_rate=dmabuf->rate;
- stop_dac(state);
- } else {
- state->pm_saved_dac_rate=0;
- }
- if(dmabuf->enable & ADC_RUNNING) {
- state->pm_saved_adc_rate=dmabuf->rate;
- stop_adc(state);
- } else {
- state->pm_saved_adc_rate=0;
- }
- dmabuf->ready = 0;
- dmabuf->swptr = dmabuf->hwptr = 0;
- dmabuf->count = dmabuf->total_bytes = 0;
- }
-
- spin_unlock_irqrestore(&card->lock, flags);
-
- /* save mixer settings */
- for (num_ac97 = 0; num_ac97 < NR_AC97; num_ac97++) {
- struct ac97_codec *codec = card->ac97_codec[num_ac97];
- if(!codec) continue;
- for(i=0;i< SOUND_MIXER_NRDEVICES ;i++) {
- if((supported_mixer(codec,i)) &&
- (codec->read_mixer)) {
- card->pm_saved_mixer_settings[i][num_ac97]=
- codec->read_mixer(codec,i);
- }
- }
- }
- pci_save_state(dev); /* XXX do we need this? */
- pci_disable_device(dev); /* disable busmastering */
- pci_set_power_state(dev,3); /* Zzz. */
-
- return 0;
-}
-
-
-static int i810_pm_resume(struct pci_dev *dev)
-{
- int num_ac97,i=0;
- struct i810_card *card=pci_get_drvdata(dev);
- pci_enable_device(dev);
- pci_restore_state (dev);
-
- /* observation of a toshiba portege 3440ct suggests that the
- hardware has to be more or less completely reinitialized from
- scratch after an apm suspend. Works For Me. -dan */
-
- i810_ac97_power_up_bus(card);
-
- for (num_ac97 = 0; num_ac97 < NR_AC97; num_ac97++) {
- struct ac97_codec *codec = card->ac97_codec[num_ac97];
- /* check they haven't stolen the hardware while we were
- away */
- if(!codec || !i810_ac97_exists(card,num_ac97)) {
- if(num_ac97) continue;
- else BUG();
- }
- if(!i810_ac97_probe_and_powerup(card,codec)) BUG();
-
- if((card->ac97_features&0x0001)) {
- /* at probe time we found we could do variable
- rates, but APM suspend has made it forget
- its magical powers */
- if(!i810_ac97_enable_variable_rate(codec)) BUG();
- }
- /* we lost our mixer settings, so restore them */
- for(i=0;i< SOUND_MIXER_NRDEVICES ;i++) {
- if(supported_mixer(codec,i)){
- int val=card->
- pm_saved_mixer_settings[i][num_ac97];
- codec->mixer_state[i]=val;
- codec->write_mixer(codec,i,
- (val & 0xff) ,
- ((val >> 8) & 0xff) );
- }
- }
- }
-
- /* we need to restore the sample rate from whatever it was */
- for(i=0;i<NR_HW_CH;i++) {
- struct i810_state * state=card->states[i];
- if(state) {
- if(state->pm_saved_adc_rate)
- i810_set_adc_rate(state,state->pm_saved_adc_rate);
- if(state->pm_saved_dac_rate)
- i810_set_dac_rate(state,state->pm_saved_dac_rate);
- }
- }
-
-
- card->pm_suspended = 0;
-
- /* any processes that were reading/writing during the suspend
- probably ended up here */
- for(i=0;i<NR_HW_CH;i++) {
- struct i810_state *state = card->states[i];
- if(state) wake_up(&state->dmabuf.wait);
- }
-
- return 0;
-}
-#endif /* CONFIG_PM */
-
-MODULE_AUTHOR("The Linux kernel team");
-MODULE_DESCRIPTION("Intel 810 audio support");
-MODULE_LICENSE("GPL");
-module_param(ftsodell, int, 0444);
-module_param(clocking, uint, 0444);
-module_param(strict_clocking, int, 0444);
-module_param(spdif_locked, int, 0444);
-
-#define I810_MODULE_NAME "i810_audio"
-
-static struct pci_driver i810_pci_driver = {
- .name = I810_MODULE_NAME,
- .id_table = i810_pci_tbl,
- .probe = i810_probe,
- .remove = __devexit_p(i810_remove),
-#ifdef CONFIG_PM
- .suspend = i810_pm_suspend,
- .resume = i810_pm_resume,
-#endif /* CONFIG_PM */
-};
-
-
-static int __init i810_init_module (void)
-{
- int retval;
-
- printk(KERN_INFO "Intel 810 + AC97 Audio, version "
- DRIVER_VERSION ", " __TIME__ " " __DATE__ "\n");
-
- retval = pci_register_driver(&i810_pci_driver);
- if (retval)
- return retval;
-
- if(ftsodell != 0) {
- printk("i810_audio: ftsodell is now a deprecated option.\n");
- }
- if(spdif_locked > 0 ) {
- if(spdif_locked == 32000 || spdif_locked == 44100 || spdif_locked == 48000) {
- printk("i810_audio: Enabling S/PDIF at sample rate %dHz.\n", spdif_locked);
- } else {
- printk("i810_audio: S/PDIF can only be locked to 32000, 44100, or 48000Hz.\n");
- spdif_locked = 0;
- }
- }
-
- return 0;
-}
-
-static void __exit i810_cleanup_module (void)
-{
- pci_unregister_driver(&i810_pci_driver);
-}
-
-module_init(i810_init_module);
-module_exit(i810_cleanup_module);
-
-/*
-Local Variables:
-c-basic-offset: 8
-End:
-*/
diff --git a/sound/oss/pss.c b/sound/oss/pss.c
index ece428b2ba9f..16ed06950dc1 100644
--- a/sound/oss/pss.c
+++ b/sound/oss/pss.c
@@ -232,14 +232,12 @@ static int set_irq(pss_confdata * devc, int dev, int irq)
return 1;
}
-static int set_io_base(pss_confdata * devc, int dev, int base)
+static void set_io_base(pss_confdata * devc, int dev, int base)
{
unsigned short tmp = inw(REG(dev)) & 0x003f;
unsigned short bits = (base & 0x0ffc) << 4;
outw(bits | tmp, REG(dev));
-
- return 1;
}
static int set_dma(pss_confdata * devc, int dev, int dma)
@@ -673,20 +671,12 @@ static void configure_nonsound_components(void)
/* Configure CDROM port */
- if(pss_cdrom_port == -1) /* If CDROM port enabling wasn't requested */
- {
+ if (pss_cdrom_port == -1) { /* If CDROM port enabling wasn't requested */
printk(KERN_INFO "PSS: CDROM port not enabled.\n");
- }
- else if(check_region(pss_cdrom_port, 2))
- {
+ } else if (check_region(pss_cdrom_port, 2)) {
printk(KERN_ERR "PSS: CDROM I/O port conflict.\n");
- }
- else if(!set_io_base(devc, CONF_CDROM, pss_cdrom_port))
- {
- printk(KERN_ERR "PSS: CDROM I/O port could not be set.\n");
- }
- else /* CDROM port successfully configured */
- {
+ } else {
+ set_io_base(devc, CONF_CDROM, pss_cdrom_port);
printk(KERN_INFO "PSS: CDROM I/O port set to 0x%x.\n", pss_cdrom_port);
}
}
@@ -758,10 +748,7 @@ static int __init probe_pss_mpu(struct address_info *hw_config)
printk(KERN_ERR "PSS: MPU I/O port conflict\n");
return 0;
}
- if (!set_io_base(devc, CONF_MIDI, hw_config->io_base)) {
- printk(KERN_ERR "PSS: MIDI base could not be set.\n");
- goto fail;
- }
+ set_io_base(devc, CONF_MIDI, hw_config->io_base);
if (!set_irq(devc, CONF_MIDI, hw_config->irq)) {
printk(KERN_ERR "PSS: MIDI IRQ allocation error.\n");
goto fail;
@@ -1057,10 +1044,7 @@ static int __init probe_pss_mss(struct address_info *hw_config)
release_region(hw_config->io_base, 4);
return 0;
}
- if (!set_io_base(devc, CONF_WSS, hw_config->io_base)) {
- printk("PSS: WSS base not settable.\n");
- goto fail;
- }
+ set_io_base(devc, CONF_WSS, hw_config->io_base);
if (!set_irq(devc, CONF_WSS, hw_config->irq)) {
printk("PSS: WSS IRQ allocation error.\n");
goto fail;
diff --git a/sound/oss/sb_common.c b/sound/oss/sb_common.c
index 07cbacf63824..77d0e5efda76 100644
--- a/sound/oss/sb_common.c
+++ b/sound/oss/sb_common.c
@@ -1228,7 +1228,8 @@ int probe_sbmpu(struct address_info *hw_config, struct module *owner)
}
attach_mpu401(hw_config, owner);
if (last_sb->irq == -hw_config->irq)
- last_sb->midi_irq_cookie=(void *)hw_config->slots[1];
+ last_sb->midi_irq_cookie =
+ (void *)(long) hw_config->slots[1];
return 1;
}
#endif
diff --git a/sound/oss/trident.c b/sound/oss/trident.c
index 96adc47917aa..6959ee1bd17f 100644
--- a/sound/oss/trident.c
+++ b/sound/oss/trident.c
@@ -2935,7 +2935,7 @@ trident_ac97_set(struct ac97_codec *codec, u8 reg, u16 val)
do {
if ((inw(TRID_REG(card, address)) & busy) == 0)
break;
- } while (count--);
+ } while (--count);
data |= (mask | (reg & AC97_REG_ADDR));
@@ -2996,7 +2996,7 @@ trident_ac97_get(struct ac97_codec *codec, u8 reg)
data = inl(TRID_REG(card, address));
if ((data & busy) == 0)
break;
- } while (count--);
+ } while (--count);
spin_unlock_irqrestore(&card->lock, flags);
if (count == 0) {
diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c
deleted file mode 100644
index f95aa0946758..000000000000
--- a/sound/oss/via82cxxx_audio.c
+++ /dev/null
@@ -1,3616 +0,0 @@
-/*
- * Support for VIA 82Cxxx Audio Codecs
- * Copyright 1999,2000 Jeff Garzik
- *
- * Updated to support the VIA 8233/8235 audio subsystem
- * Alan Cox <alan@redhat.com> (C) Copyright 2002, 2003 Red Hat Inc
- *
- * Distributed under the GNU GENERAL PUBLIC LICENSE (GPL) Version 2.
- * See the "COPYING" file distributed with this software for more info.
- * NO WARRANTY
- *
- * For a list of known bugs (errata) and documentation,
- * see via-audio.pdf in Documentation/DocBook.
- * If this documentation does not exist, run "make pdfdocs".
- */
-
-
-#define VIA_VERSION "1.9.1-ac4-2.5"
-
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/poison.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/spinlock.h>
-#include <linux/sound.h>
-#include <linux/poll.h>
-#include <linux/soundcard.h>
-#include <linux/ac97_codec.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <linux/mutex.h>
-
-#include "sound_config.h"
-#include "dev_table.h"
-#include "mpu401.h"
-
-
-#undef VIA_DEBUG /* define to enable debugging output and checks */
-#ifdef VIA_DEBUG
-/* note: prints function name for you */
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
-#else
-#define DPRINTK(fmt, args...)
-#endif
-
-#undef VIA_NDEBUG /* define to disable lightweight runtime checks */
-#ifdef VIA_NDEBUG
-#define assert(expr)
-#else
-#define assert(expr) \
- if(!(expr)) { \
- printk( "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr,__FILE__,__FUNCTION__,__LINE__); \
- }
-#endif
-
-#define VIA_SUPPORT_MMAP 1 /* buggy, for now... */
-
-#define MAX_CARDS 1
-
-#define VIA_CARD_NAME "VIA 82Cxxx Audio driver " VIA_VERSION
-#define VIA_MODULE_NAME "via82cxxx"
-#define PFX VIA_MODULE_NAME ": "
-
-#define VIA_COUNTER_LIMIT 100000
-
-/* size of DMA buffers */
-#define VIA_MAX_BUFFER_DMA_PAGES 32
-
-/* buffering default values in ms */
-#define VIA_DEFAULT_FRAG_TIME 20
-#define VIA_DEFAULT_BUFFER_TIME 500
-
-/* the hardware has a 256 fragment limit */
-#define VIA_MIN_FRAG_NUMBER 2
-#define VIA_MAX_FRAG_NUMBER 128
-
-#define VIA_MAX_FRAG_SIZE PAGE_SIZE
-#define VIA_MIN_FRAG_SIZE (VIA_MAX_BUFFER_DMA_PAGES * PAGE_SIZE / VIA_MAX_FRAG_NUMBER)
-
-
-/* 82C686 function 5 (audio codec) PCI configuration registers */
-#define VIA_ACLINK_STATUS 0x40
-#define VIA_ACLINK_CTRL 0x41
-#define VIA_FUNC_ENABLE 0x42
-#define VIA_PNP_CONTROL 0x43
-#define VIA_FM_NMI_CTRL 0x48
-
-/*
- * controller base 0 (scatter-gather) registers
- *
- * NOTE: Via datasheet lists first channel as "read"
- * channel and second channel as "write" channel.
- * I changed the naming of the constants to be more
- * clear than I felt the datasheet to be.
- */
-
-#define VIA_BASE0_PCM_OUT_CHAN 0x00 /* output PCM to user */
-#define VIA_BASE0_PCM_OUT_CHAN_STATUS 0x00
-#define VIA_BASE0_PCM_OUT_CHAN_CTRL 0x01
-#define VIA_BASE0_PCM_OUT_CHAN_TYPE 0x02
-
-#define VIA_BASE0_PCM_IN_CHAN 0x10 /* input PCM from user */
-#define VIA_BASE0_PCM_IN_CHAN_STATUS 0x10
-#define VIA_BASE0_PCM_IN_CHAN_CTRL 0x11
-#define VIA_BASE0_PCM_IN_CHAN_TYPE 0x12
-
-/* offsets from base */
-#define VIA_PCM_STATUS 0x00
-#define VIA_PCM_CONTROL 0x01
-#define VIA_PCM_TYPE 0x02
-#define VIA_PCM_LEFTVOL 0x02
-#define VIA_PCM_RIGHTVOL 0x03
-#define VIA_PCM_TABLE_ADDR 0x04
-#define VIA_PCM_STOPRATE 0x08 /* 8233+ */
-#define VIA_PCM_BLOCK_COUNT 0x0C
-
-/* XXX unused DMA channel for FM PCM data */
-#define VIA_BASE0_FM_OUT_CHAN 0x20
-#define VIA_BASE0_FM_OUT_CHAN_STATUS 0x20
-#define VIA_BASE0_FM_OUT_CHAN_CTRL 0x21
-#define VIA_BASE0_FM_OUT_CHAN_TYPE 0x22
-
-/* Six channel audio output on 8233 */
-#define VIA_BASE0_MULTI_OUT_CHAN 0x40
-#define VIA_BASE0_MULTI_OUT_CHAN_STATUS 0x40
-#define VIA_BASE0_MULTI_OUT_CHAN_CTRL 0x41
-#define VIA_BASE0_MULTI_OUT_CHAN_TYPE 0x42
-
-#define VIA_BASE0_AC97_CTRL 0x80
-#define VIA_BASE0_SGD_STATUS_SHADOW 0x84
-#define VIA_BASE0_GPI_INT_ENABLE 0x8C
-#define VIA_INTR_OUT ((1<<0) | (1<<4) | (1<<8))
-#define VIA_INTR_IN ((1<<1) | (1<<5) | (1<<9))
-#define VIA_INTR_FM ((1<<2) | (1<<6) | (1<<10))
-#define VIA_INTR_MASK (VIA_INTR_OUT | VIA_INTR_IN | VIA_INTR_FM)
-
-/* On newer VIA chips we need to monitor the low 3 bits of each channel. This
- mask covers the channels we don't yet use as well.
- */
-
-#define VIA_NEW_INTR_MASK 0x77077777UL
-
-/* VIA_BASE0_AUDIO_xxx_CHAN_TYPE bits */
-#define VIA_IRQ_ON_FLAG (1<<0) /* int on each flagged scatter block */
-#define VIA_IRQ_ON_EOL (1<<1) /* int at end of scatter list */
-#define VIA_INT_SEL_PCI_LAST_LINE_READ (0) /* int at PCI read of last line */
-#define VIA_INT_SEL_LAST_SAMPLE_SENT (1<<2) /* int at last sample sent */
-#define VIA_INT_SEL_ONE_LINE_LEFT (1<<3) /* int at less than one line to send */
-#define VIA_PCM_FMT_STEREO (1<<4) /* PCM stereo format (bit clear == mono) */
-#define VIA_PCM_FMT_16BIT (1<<5) /* PCM 16-bit format (bit clear == 8-bit) */
-#define VIA_PCM_REC_FIFO (1<<6) /* PCM Recording FIFO */
-#define VIA_RESTART_SGD_ON_EOL (1<<7) /* restart scatter-gather at EOL */
-#define VIA_PCM_FMT_MASK (VIA_PCM_FMT_STEREO|VIA_PCM_FMT_16BIT)
-#define VIA_CHAN_TYPE_MASK (VIA_RESTART_SGD_ON_EOL | \
- VIA_IRQ_ON_FLAG | \
- VIA_IRQ_ON_EOL)
-#define VIA_CHAN_TYPE_INT_SELECT (VIA_INT_SEL_LAST_SAMPLE_SENT)
-
-/* PCI configuration register bits and masks */
-#define VIA_CR40_AC97_READY 0x01
-#define VIA_CR40_AC97_LOW_POWER 0x02
-#define VIA_CR40_SECONDARY_READY 0x04
-
-#define VIA_CR41_AC97_ENABLE 0x80 /* enable AC97 codec */
-#define VIA_CR41_AC97_RESET 0x40 /* clear bit to reset AC97 */
-#define VIA_CR41_AC97_WAKEUP 0x20 /* wake up from power-down mode */
-#define VIA_CR41_AC97_SDO 0x10 /* force Serial Data Out (SDO) high */
-#define VIA_CR41_VRA 0x08 /* enable variable sample rate */
-#define VIA_CR41_PCM_ENABLE 0x04 /* AC Link SGD Read Channel PCM Data Output */
-#define VIA_CR41_FM_PCM_ENABLE 0x02 /* AC Link FM Channel PCM Data Out */
-#define VIA_CR41_SB_PCM_ENABLE 0x01 /* AC Link SB PCM Data Output */
-#define VIA_CR41_BOOT_MASK (VIA_CR41_AC97_ENABLE | \
- VIA_CR41_AC97_WAKEUP | \
- VIA_CR41_AC97_SDO)
-#define VIA_CR41_RUN_MASK (VIA_CR41_AC97_ENABLE | \
- VIA_CR41_AC97_RESET | \
- VIA_CR41_VRA | \
- VIA_CR41_PCM_ENABLE)
-
-#define VIA_CR42_SB_ENABLE 0x01
-#define VIA_CR42_MIDI_ENABLE 0x02
-#define VIA_CR42_FM_ENABLE 0x04
-#define VIA_CR42_GAME_ENABLE 0x08
-#define VIA_CR42_MIDI_IRQMASK 0x40
-#define VIA_CR42_MIDI_PNP 0x80
-
-#define VIA_CR44_SECOND_CODEC_SUPPORT (1 << 6)
-#define VIA_CR44_AC_LINK_ACCESS (1 << 7)
-
-#define VIA_CR48_FM_TRAP_TO_NMI (1 << 2)
-
-/* controller base 0 register bitmasks */
-#define VIA_INT_DISABLE_MASK (~(0x01|0x02))
-#define VIA_SGD_STOPPED (1 << 2)
-#define VIA_SGD_PAUSED (1 << 6)
-#define VIA_SGD_ACTIVE (1 << 7)
-#define VIA_SGD_TERMINATE (1 << 6)
-#define VIA_SGD_FLAG (1 << 0)
-#define VIA_SGD_EOL (1 << 1)
-#define VIA_SGD_START (1 << 7)
-
-#define VIA_CR80_FIRST_CODEC 0
-#define VIA_CR80_SECOND_CODEC (1 << 30)
-#define VIA_CR80_FIRST_CODEC_VALID (1 << 25)
-#define VIA_CR80_VALID (1 << 25)
-#define VIA_CR80_SECOND_CODEC_VALID (1 << 27)
-#define VIA_CR80_BUSY (1 << 24)
-#define VIA_CR83_BUSY (1)
-#define VIA_CR83_FIRST_CODEC_VALID (1 << 1)
-#define VIA_CR80_READ (1 << 23)
-#define VIA_CR80_WRITE_MODE 0
-#define VIA_CR80_REG_IDX(idx) ((((idx) & 0xFF) >> 1) << 16)
-
-/* capabilities we announce */
-#ifdef VIA_SUPPORT_MMAP
-#define VIA_DSP_CAP (DSP_CAP_REVISION | DSP_CAP_DUPLEX | DSP_CAP_MMAP | \
- DSP_CAP_TRIGGER | DSP_CAP_REALTIME)
-#else
-#define VIA_DSP_CAP (DSP_CAP_REVISION | DSP_CAP_DUPLEX | \
- DSP_CAP_TRIGGER | DSP_CAP_REALTIME)
-#endif
-
-/* scatter-gather DMA table entry, exactly as passed to hardware */
-struct via_sgd_table {
- u32 addr;
- u32 count; /* includes additional VIA_xxx bits also */
-};
-
-#define VIA_EOL (1 << 31)
-#define VIA_FLAG (1 << 30)
-#define VIA_STOP (1 << 29)
-
-
-enum via_channel_states {
- sgd_stopped = 0,
- sgd_in_progress = 1,
-};
-
-
-struct via_buffer_pgtbl {
- dma_addr_t handle;
- void *cpuaddr;
-};
-
-
-struct via_channel {
- atomic_t n_frags;
- atomic_t hw_ptr;
- wait_queue_head_t wait;
-
- unsigned int sw_ptr;
- unsigned int slop_len;
- unsigned int n_irqs;
- int bytes;
-
- unsigned is_active : 1;
- unsigned is_record : 1;
- unsigned is_mapped : 1;
- unsigned is_enabled : 1;
- unsigned is_multi: 1; /* 8233 6 channel */
- u8 pcm_fmt; /* VIA_PCM_FMT_xxx */
- u8 channels; /* Channel count */
-
- unsigned rate; /* sample rate */
- unsigned int frag_size;
- unsigned int frag_number;
-
- unsigned char intmask;
-
- volatile struct via_sgd_table *sgtable;
- dma_addr_t sgt_handle;
-
- unsigned int page_number;
- struct via_buffer_pgtbl pgtbl[VIA_MAX_BUFFER_DMA_PAGES];
-
- long iobase;
-
- const char *name;
-};
-
-
-/* data stored for each chip */
-struct via_info {
- struct pci_dev *pdev;
- long baseaddr;
-
- struct ac97_codec *ac97;
- spinlock_t ac97_lock;
- spinlock_t lock;
- int card_num; /* unique card number, from 0 */
-
- int dev_dsp; /* /dev/dsp index from register_sound_dsp() */
-
- unsigned rev_h : 1;
- unsigned legacy: 1; /* Has legacy ports */
- unsigned intmask: 1; /* Needs int bits */
- unsigned sixchannel: 1; /* 8233/35 with 6 channel support */
- unsigned volume: 1;
-
- unsigned locked_rate : 1;
-
- int mixer_vol; /* 8233/35 volume - not yet implemented */
-
- struct mutex syscall_mutex;
- struct mutex open_mutex;
-
- /* The 8233/8235 have 4 DX audio channels, two record and
- one six channel out. We bind ch_in to DX 1, ch_out to multichannel
- and ch_fm to DX 2. DX 3 and REC0/REC1 are unused at the
- moment */
-
- struct via_channel ch_in;
- struct via_channel ch_out;
- struct via_channel ch_fm;
-
-#ifdef CONFIG_MIDI_VIA82CXXX
- void *midi_devc;
- struct address_info midi_info;
-#endif
-};
-
-
-/* number of cards, used for assigning unique numbers to cards */
-static unsigned via_num_cards;
-
-
-
-/****************************************************************
- *
- * prototypes
- *
- *
- */
-
-static int via_init_one (struct pci_dev *dev, const struct pci_device_id *id);
-static void __devexit via_remove_one (struct pci_dev *pdev);
-
-static ssize_t via_dsp_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos);
-static ssize_t via_dsp_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
-static unsigned int via_dsp_poll(struct file *file, struct poll_table_struct *wait);
-static int via_dsp_ioctl (struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
-static int via_dsp_open (struct inode *inode, struct file *file);
-static int via_dsp_release(struct inode *inode, struct file *file);
-static int via_dsp_mmap(struct file *file, struct vm_area_struct *vma);
-
-static u16 via_ac97_read_reg (struct ac97_codec *codec, u8 reg);
-static void via_ac97_write_reg (struct ac97_codec *codec, u8 reg, u16 value);
-static u8 via_ac97_wait_idle (struct via_info *card);
-
-static void via_chan_free (struct via_info *card, struct via_channel *chan);
-static void via_chan_clear (struct via_info *card, struct via_channel *chan);
-static void via_chan_pcm_fmt (struct via_channel *chan, int reset);
-static void via_chan_buffer_free (struct via_info *card, struct via_channel *chan);
-
-
-/****************************************************************
- *
- * Various data the driver needs
- *
- *
- */
-
-
-static struct pci_device_id via_pci_tbl[] = {
- { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- { PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_5,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci,via_pci_tbl);
-
-
-static struct pci_driver via_driver = {
- .name = VIA_MODULE_NAME,
- .id_table = via_pci_tbl,
- .probe = via_init_one,
- .remove = __devexit_p(via_remove_one),
-};
-
-
-/****************************************************************
- *
- * Low-level base 0 register read/write helpers
- *
- *
- */
-
-/**
- * via_chan_stop - Terminate DMA on specified PCM channel
- * @iobase: PCI base address for SGD channel registers
- *
- * Terminate scatter-gather DMA operation for given
- * channel (derived from @iobase), if DMA is active.
- *
- * Note that @iobase is not the PCI base address,
- * but the PCI base address plus an offset to
- * one of three PCM channels supported by the chip.
- *
- */
-
-static inline void via_chan_stop (long iobase)
-{
- if (inb (iobase + VIA_PCM_STATUS) & VIA_SGD_ACTIVE)
- outb (VIA_SGD_TERMINATE, iobase + VIA_PCM_CONTROL);
-}
-
-
-/**
- * via_chan_status_clear - Clear status flags on specified DMA channel
- * @iobase: PCI base address for SGD channel registers
- *
- * Clear any pending status flags for the given
- * DMA channel (derived from @iobase), if any
- * flags are asserted.
- *
- * Note that @iobase is not the PCI base address,
- * but the PCI base address plus an offset to
- * one of three PCM channels supported by the chip.
- *
- */
-
-static inline void via_chan_status_clear (long iobase)
-{
- u8 tmp = inb (iobase + VIA_PCM_STATUS);
-
- if (tmp != 0)
- outb (tmp, iobase + VIA_PCM_STATUS);
-}
-
-
-/**
- * sg_begin - Begin recording or playback on a PCM channel
- * @chan: Channel for which DMA operation shall begin
- *
- * Start scatter-gather DMA for the given channel.
- *
- */
-
-static inline void sg_begin (struct via_channel *chan)
-{
- DPRINTK("Start with intmask %d\n", chan->intmask);
- DPRINTK("About to start from %d to %d\n",
- inl(chan->iobase + VIA_PCM_BLOCK_COUNT),
- inb(chan->iobase + VIA_PCM_STOPRATE + 3));
- outb (VIA_SGD_START|chan->intmask, chan->iobase + VIA_PCM_CONTROL);
- DPRINTK("Status is now %02X\n", inb(chan->iobase + VIA_PCM_STATUS));
- DPRINTK("Control is now %02X\n", inb(chan->iobase + VIA_PCM_CONTROL));
-}
-
-
-static int sg_active (long iobase)
-{
- u8 tmp = inb (iobase + VIA_PCM_STATUS);
- if ((tmp & VIA_SGD_STOPPED) || (tmp & VIA_SGD_PAUSED)) {
- printk(KERN_WARNING "via82cxxx warning: SG stopped or paused\n");
- return 0;
- }
- if (tmp & VIA_SGD_ACTIVE)
- return 1;
- return 0;
-}
-
-static int via_sg_offset(struct via_channel *chan)
-{
- return inl (chan->iobase + VIA_PCM_BLOCK_COUNT) & 0x00FFFFFF;
-}
-
-/****************************************************************
- *
- * Miscellaneous debris
- *
- *
- */
-
-
-/**
- * via_syscall_down - down the card-specific syscall semaphore
- * @card: Private info for specified board
- * @nonblock: boolean, non-zero if O_NONBLOCK is set
- *
- * Encapsulates standard method of acquiring the syscall sem.
- *
- * Returns negative errno on error, or zero for success.
- */
-
-static inline int via_syscall_down (struct via_info *card, int nonblock)
-{
- /* Thomas Sailer:
- * EAGAIN is supposed to be used if IO is pending,
- * not if there is contention on some internal
- * synchronization primitive which should be
- * held only for a short time anyway
- */
- nonblock = 0;
-
- if (nonblock) {
- if (!mutex_trylock(&card->syscall_mutex))
- return -EAGAIN;
- } else {
- if (mutex_lock_interruptible(&card->syscall_mutex))
- return -ERESTARTSYS;
- }
-
- return 0;
-}
-
-
-/**
- * via_stop_everything - Stop all audio operations
- * @card: Private info for specified board
- *
- * Stops all DMA operations and interrupts, and clear
- * any pending status bits resulting from those operations.
- */
-
-static void via_stop_everything (struct via_info *card)
-{
- u8 tmp, new_tmp;
-
- DPRINTK ("ENTER\n");
-
- assert (card != NULL);
-
- /*
- * terminate any existing operations on audio read/write channels
- */
- via_chan_stop (card->baseaddr + VIA_BASE0_PCM_OUT_CHAN);
- via_chan_stop (card->baseaddr + VIA_BASE0_PCM_IN_CHAN);
- via_chan_stop (card->baseaddr + VIA_BASE0_FM_OUT_CHAN);
- if(card->sixchannel)
- via_chan_stop (card->baseaddr + VIA_BASE0_MULTI_OUT_CHAN);
-
- /*
- * clear any existing stops / flags (sanity check mainly)
- */
- via_chan_status_clear (card->baseaddr + VIA_BASE0_PCM_OUT_CHAN);
- via_chan_status_clear (card->baseaddr + VIA_BASE0_PCM_IN_CHAN);
- via_chan_status_clear (card->baseaddr + VIA_BASE0_FM_OUT_CHAN);
- if(card->sixchannel)
- via_chan_status_clear (card->baseaddr + VIA_BASE0_MULTI_OUT_CHAN);
-
- /*
- * clear any enabled interrupt bits
- */
- tmp = inb (card->baseaddr + VIA_BASE0_PCM_OUT_CHAN_TYPE);
- new_tmp = tmp & ~(VIA_IRQ_ON_FLAG|VIA_IRQ_ON_EOL|VIA_RESTART_SGD_ON_EOL);
- if (tmp != new_tmp)
- outb (0, card->baseaddr + VIA_BASE0_PCM_OUT_CHAN_TYPE);
-
- tmp = inb (card->baseaddr + VIA_BASE0_PCM_IN_CHAN_TYPE);
- new_tmp = tmp & ~(VIA_IRQ_ON_FLAG|VIA_IRQ_ON_EOL|VIA_RESTART_SGD_ON_EOL);
- if (tmp != new_tmp)
- outb (0, card->baseaddr + VIA_BASE0_PCM_IN_CHAN_TYPE);
-
- tmp = inb (card->baseaddr + VIA_BASE0_FM_OUT_CHAN_TYPE);
- new_tmp = tmp & ~(VIA_IRQ_ON_FLAG|VIA_IRQ_ON_EOL|VIA_RESTART_SGD_ON_EOL);
- if (tmp != new_tmp)
- outb (0, card->baseaddr + VIA_BASE0_FM_OUT_CHAN_TYPE);
-
- if(card->sixchannel)
- {
- tmp = inb (card->baseaddr + VIA_BASE0_MULTI_OUT_CHAN_TYPE);
- new_tmp = tmp & ~(VIA_IRQ_ON_FLAG|VIA_IRQ_ON_EOL|VIA_RESTART_SGD_ON_EOL);
- if (tmp != new_tmp)
- outb (0, card->baseaddr + VIA_BASE0_MULTI_OUT_CHAN_TYPE);
- }
-
- udelay(10);
-
- /*
- * clear any existing flags
- */
- via_chan_status_clear (card->baseaddr + VIA_BASE0_PCM_OUT_CHAN);
- via_chan_status_clear (card->baseaddr + VIA_BASE0_PCM_IN_CHAN);
- via_chan_status_clear (card->baseaddr + VIA_BASE0_FM_OUT_CHAN);
-
- DPRINTK ("EXIT\n");
-}
-
-
-/**
- * via_set_rate - Set PCM rate for given channel
- * @ac97: Pointer to generic codec info struct
- * @chan: Private info for specified channel
- * @rate: Desired PCM sample rate, in Hz
- *
- * Sets the PCM sample rate for a channel.
- *
- * Values for @rate are clamped to a range of 4000 Hz through 48000 Hz,
- * due to hardware constraints.
- */
-
-static int via_set_rate (struct ac97_codec *ac97,
- struct via_channel *chan, unsigned rate)
-{
- struct via_info *card = ac97->private_data;
- int rate_reg;
- u32 dacp;
- u32 mast_vol, phone_vol, mono_vol, pcm_vol;
- u32 mute_vol = 0x8000; /* The mute volume? -- Seems to work! */
-
- DPRINTK ("ENTER, rate = %d\n", rate);
-
- if (chan->rate == rate)
- goto out;
- if (card->locked_rate) {
- chan->rate = 48000;
- goto out;
- }
-
- if (rate > 48000) rate = 48000;
- if (rate < 4000) rate = 4000;
-
- rate_reg = chan->is_record ? AC97_PCM_LR_ADC_RATE :
- AC97_PCM_FRONT_DAC_RATE;
-
- /* Save current state */
- dacp=via_ac97_read_reg(ac97, AC97_POWER_CONTROL);
- mast_vol = via_ac97_read_reg(ac97, AC97_MASTER_VOL_STEREO);
- mono_vol = via_ac97_read_reg(ac97, AC97_MASTER_VOL_MONO);
- phone_vol = via_ac97_read_reg(ac97, AC97_HEADPHONE_VOL);
- pcm_vol = via_ac97_read_reg(ac97, AC97_PCMOUT_VOL);
- /* Mute - largely reduces popping */
- via_ac97_write_reg(ac97, AC97_MASTER_VOL_STEREO, mute_vol);
- via_ac97_write_reg(ac97, AC97_MASTER_VOL_MONO, mute_vol);
- via_ac97_write_reg(ac97, AC97_HEADPHONE_VOL, mute_vol);
- via_ac97_write_reg(ac97, AC97_PCMOUT_VOL, mute_vol);
- /* Power down the DAC */
- via_ac97_write_reg(ac97, AC97_POWER_CONTROL, dacp|0x0200);
-
- /* Set new rate */
- via_ac97_write_reg (ac97, rate_reg, rate);
-
- /* Power DAC back up */
- via_ac97_write_reg(ac97, AC97_POWER_CONTROL, dacp);
- udelay (200); /* reduces popping */
-
- /* Restore volumes */
- via_ac97_write_reg(ac97, AC97_MASTER_VOL_STEREO, mast_vol);
- via_ac97_write_reg(ac97, AC97_MASTER_VOL_MONO, mono_vol);
- via_ac97_write_reg(ac97, AC97_HEADPHONE_VOL, phone_vol);
- via_ac97_write_reg(ac97, AC97_PCMOUT_VOL, pcm_vol);
-
- /* the hardware might return a value different than what we
- * passed to it, so read the rate value back from hardware
- * to see what we came up with
- */
- chan->rate = via_ac97_read_reg (ac97, rate_reg);
-
- if (chan->rate == 0) {
- card->locked_rate = 1;
- chan->rate = 48000;
- printk (KERN_WARNING PFX "Codec rate locked at 48Khz\n");
- }
-
-out:
- DPRINTK ("EXIT, returning rate %d Hz\n", chan->rate);
- return chan->rate;
-}
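-
-/*
- * Illustrative examples of via_set_rate() behaviour: a request for
- * 96000 Hz is clamped to 48000 Hz before being written to the codec;
- * the value actually programmed is read back, so the caller may get a
- * nearby rate rather than the one it asked for; and a codec that reads
- * back 0 is treated as rate-locked, forcing 48000 Hz from then on.
- */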
-
-
-/****************************************************************
- *
- * Channel-specific operations
- *
- *
- */
-
-
-/**
- * via_chan_init_defaults - Initialize a struct via_channel
- * @card: Private audio chip info
- * @chan: Channel to be initialized
- *
- * Zero @chan, and then set all static defaults for the structure.
- */
-
-static void via_chan_init_defaults (struct via_info *card, struct via_channel *chan)
-{
- memset (chan, 0, sizeof (*chan));
-
- if(card->intmask)
- chan->intmask = 0x23; /* Turn on the IRQ bits */
-
- if (chan == &card->ch_out) {
- chan->name = "PCM-OUT";
- if(card->sixchannel)
- {
- chan->iobase = card->baseaddr + VIA_BASE0_MULTI_OUT_CHAN;
- chan->is_multi = 1;
- DPRINTK("Using multichannel for pcm out\n");
- }
- else
- chan->iobase = card->baseaddr + VIA_BASE0_PCM_OUT_CHAN;
- } else if (chan == &card->ch_in) {
- chan->name = "PCM-IN";
- chan->iobase = card->baseaddr + VIA_BASE0_PCM_IN_CHAN;
- chan->is_record = 1;
- } else if (chan == &card->ch_fm) {
- chan->name = "PCM-OUT-FM";
- chan->iobase = card->baseaddr + VIA_BASE0_FM_OUT_CHAN;
- } else {
- BUG();
- }
-
- init_waitqueue_head (&chan->wait);
-
- chan->pcm_fmt = VIA_PCM_FMT_MASK;
- chan->is_enabled = 1;
-
- chan->frag_number = 0;
- chan->frag_size = 0;
- atomic_set(&chan->n_frags, 0);
- atomic_set (&chan->hw_ptr, 0);
-}
-
-/**
- * via_chan_init - Initialize PCM channel
- * @card: Private audio chip info
- * @chan: Channel to be initialized
- *
- * Performs some of the preparations necessary to begin
- * using a PCM channel.
- *
- * Currently the preparations consist of
- * setting the PCM channel to a known state.
- */
-
-
-static void via_chan_init (struct via_info *card, struct via_channel *chan)
-{
-
- DPRINTK ("ENTER\n");
-
- /* bzero channel structure, and init members to defaults */
- via_chan_init_defaults (card, chan);
-
- /* stop any existing channel output */
- via_chan_clear (card, chan);
- via_chan_status_clear (chan->iobase);
- via_chan_pcm_fmt (chan, 1);
-
- DPRINTK ("EXIT\n");
-}
-
-/**
- * via_chan_buffer_init - Initialize PCM channel buffer
- * @card: Private audio chip info
- * @chan: Channel to be initialized
- *
- * Performs some of the preparations necessary to begin
- * using a PCM channel.
- *
- * Currently the preparations include allocating the
- * scatter-gather DMA table and buffers, and passing
- * the address of the DMA table to the hardware.
- *
- * Note that special care is taken when passing the
- * DMA table address to hardware, because it was found
- * during driver development that the hardware did not
- * always "take" the address.
- */
-
-static int via_chan_buffer_init (struct via_info *card, struct via_channel *chan)
-{
- int page, offset;
- int i;
-
- DPRINTK ("ENTER\n");
-
-
- chan->intmask = 0;
- if(card->intmask)
- chan->intmask = 0x23; /* Turn on the IRQ bits */
-
- if (chan->sgtable != NULL) {
- DPRINTK ("EXIT\n");
- return 0;
- }
-
- /* alloc DMA-able memory for scatter-gather table */
- chan->sgtable = pci_alloc_consistent (card->pdev,
- (sizeof (struct via_sgd_table) * chan->frag_number),
- &chan->sgt_handle);
- if (!chan->sgtable) {
- printk (KERN_ERR PFX "DMA table alloc fail, aborting\n");
- DPRINTK ("EXIT\n");
- return -ENOMEM;
- }
-
- memset ((void*)chan->sgtable, 0,
- (sizeof (struct via_sgd_table) * chan->frag_number));
-
- /* alloc DMA-able memory for scatter-gather buffers */
-
- chan->page_number = (chan->frag_number * chan->frag_size) / PAGE_SIZE +
- (((chan->frag_number * chan->frag_size) % PAGE_SIZE) ? 1 : 0);
-
- for (i = 0; i < chan->page_number; i++) {
- chan->pgtbl[i].cpuaddr = pci_alloc_consistent (card->pdev, PAGE_SIZE,
- &chan->pgtbl[i].handle);
-
- if (!chan->pgtbl[i].cpuaddr) {
- chan->page_number = i;
- goto err_out_nomem;
- }
-
-#ifndef VIA_NDEBUG
- memset (chan->pgtbl[i].cpuaddr, 0xBC, chan->frag_size);
-#endif
-
-#if 1
- DPRINTK ("dmabuf_pg #%d (h=%lx, v2p=%lx, a=%p)\n",
- i, (long)chan->pgtbl[i].handle,
- virt_to_phys(chan->pgtbl[i].cpuaddr),
- chan->pgtbl[i].cpuaddr);
-#endif
- }
-
- for (i = 0; i < chan->frag_number; i++) {
-
- page = i / (PAGE_SIZE / chan->frag_size);
- offset = (i % (PAGE_SIZE / chan->frag_size)) * chan->frag_size;
-
- chan->sgtable[i].count = cpu_to_le32 (chan->frag_size | VIA_FLAG);
- chan->sgtable[i].addr = cpu_to_le32 (chan->pgtbl[page].handle + offset);
-
-#if 1
- DPRINTK ("dmabuf #%d (32(h)=%lx)\n",
- i,
- (long)chan->sgtable[i].addr);
-#endif
- }
-
- /* overwrite the last buffer information */
- chan->sgtable[chan->frag_number - 1].count = cpu_to_le32 (chan->frag_size | VIA_EOL);
-
- /* set location of DMA-able scatter-gather info table */
- DPRINTK ("outl (0x%X, 0x%04lX)\n",
- chan->sgt_handle, chan->iobase + VIA_PCM_TABLE_ADDR);
-
- via_ac97_wait_idle (card);
- outl (chan->sgt_handle, chan->iobase + VIA_PCM_TABLE_ADDR);
- udelay (20);
- via_ac97_wait_idle (card);
- /* load no rate adaption, stereo 16bit, set up ring slots */
- if(card->sixchannel)
- {
- if(!chan->is_multi)
- {
- outl (0xFFFFF | (0x3 << 20) | (chan->frag_number << 24), chan->iobase + VIA_PCM_STOPRATE);
- udelay (20);
- via_ac97_wait_idle (card);
- }
- }
-
- DPRINTK ("inl (0x%lX) = %x\n",
- chan->iobase + VIA_PCM_TABLE_ADDR,
- inl(chan->iobase + VIA_PCM_TABLE_ADDR));
-
- DPRINTK ("EXIT\n");
- return 0;
-
-err_out_nomem:
- printk (KERN_ERR PFX "DMA buffer alloc fail, aborting\n");
- via_chan_buffer_free (card, chan);
- DPRINTK ("EXIT\n");
- return -ENOMEM;
-}
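-
-/*
- * Worked example of the layout built above (illustrative numbers,
- * assuming PAGE_SIZE == 4096): with frag_number == 16 and
- * frag_size == 4096, page_number == 16 and each fragment occupies one
- * whole DMA page; every sgtable[] entry is frag_size | VIA_FLAG, and
- * the last entry is rewritten to frag_size | VIA_EOL so the hardware
- * knows where the ring wraps.
- */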
-
-
-/**
- * via_chan_free - Release a PCM channel
- * @card: Private audio chip info
- * @chan: Channel to be released
- *
- * Performs all the functions necessary to clean up
- * an initialized channel.
- *
- * Currently these functions include disabling any
- * active DMA operations, setting the PCM channel
- * back to a known state, and releasing any allocated
- * sound buffers.
- */
-
-static void via_chan_free (struct via_info *card, struct via_channel *chan)
-{
- DPRINTK ("ENTER\n");
-
- spin_lock_irq (&card->lock);
-
- /* stop any existing channel output */
- via_chan_status_clear (chan->iobase);
- via_chan_stop (chan->iobase);
- via_chan_status_clear (chan->iobase);
-
- spin_unlock_irq (&card->lock);
-
- synchronize_irq(card->pdev->irq);
-
- DPRINTK ("EXIT\n");
-}
-
-static void via_chan_buffer_free (struct via_info *card, struct via_channel *chan)
-{
- int i;
-
- DPRINTK ("ENTER\n");
-
- /* zero location of DMA-able scatter-gather info table */
- via_ac97_wait_idle(card);
- outl (0, chan->iobase + VIA_PCM_TABLE_ADDR);
-
- for (i = 0; i < chan->page_number; i++)
- if (chan->pgtbl[i].cpuaddr) {
- pci_free_consistent (card->pdev, PAGE_SIZE,
- chan->pgtbl[i].cpuaddr,
- chan->pgtbl[i].handle);
- chan->pgtbl[i].cpuaddr = NULL;
- chan->pgtbl[i].handle = 0;
- }
-
- chan->page_number = 0;
-
- if (chan->sgtable) {
- pci_free_consistent (card->pdev,
- (sizeof (struct via_sgd_table) * chan->frag_number),
- (void*)chan->sgtable, chan->sgt_handle);
- chan->sgtable = NULL;
- }
-
- DPRINTK ("EXIT\n");
-}
-
-
-/**
- * via_chan_pcm_fmt - Update PCM channel settings
- * @chan: Channel to be updated
- * @reset: Boolean. If non-zero, channel will be reset
- * to 8-bit mono mode.
- *
- * Stores the settings of the current PCM format,
- * 8-bit or 16-bit, and mono/stereo, into the
- * hardware settings for the specified channel.
- * If @reset is non-zero, the channel is reset
- * to 8-bit mono mode. Otherwise, the channel
- * is set to the values stored in the channel
- * information struct @chan.
- */
-
-static void via_chan_pcm_fmt (struct via_channel *chan, int reset)
-{
- DPRINTK ("ENTER, pcm_fmt=0x%02X, reset=%s\n",
- chan->pcm_fmt, reset ? "yes" : "no");
-
- assert (chan != NULL);
-
- if (reset)
- {
- /* reset to 8-bit mono mode */
- chan->pcm_fmt = 0;
- chan->channels = 1;
- }
-
- /* enable interrupts on FLAG and EOL */
- chan->pcm_fmt |= VIA_CHAN_TYPE_MASK;
-
- /* if we are recording, enable recording fifo bit */
- if (chan->is_record)
- chan->pcm_fmt |= VIA_PCM_REC_FIFO;
- /* set interrupt select bits where applicable (PCM in & out channels) */
- if (!chan->is_record)
- chan->pcm_fmt |= VIA_CHAN_TYPE_INT_SELECT;
-
- DPRINTK("SET FMT - %02x %02x\n", chan->intmask , chan->is_multi);
-
- if(chan->intmask)
- {
- u32 m;
-
- /*
- * Channel 0x4 is up to 6 x 16bit and has to be
- * programmed differently
- */
-
- if(chan->is_multi)
- {
- u8 c = 0;
-
- /*
- * Load the type bit for num channels
- * and 8/16bit
- */
-
- if(chan->pcm_fmt & VIA_PCM_FMT_16BIT)
- c = 1 << 7;
- if(chan->pcm_fmt & VIA_PCM_FMT_STEREO)
- c |= (2<<4);
- else
- c |= (1<<4);
-
- outb(c, chan->iobase + VIA_PCM_TYPE);
-
- /*
- * Set the channel steering
- * Mono
- * Channel 0 to slot 3
- * Channel 0 to slot 4
- * Stereo
- * Channel 0 to slot 3
- * Channel 1 to slot 4
- */
-
- switch(chan->channels)
- {
- case 1:
- outl(0xFF000000 | (1<<0) | (1<<4) , chan->iobase + VIA_PCM_STOPRATE);
- break;
- case 2:
- outl(0xFF000000 | (1<<0) | (2<<4) , chan->iobase + VIA_PCM_STOPRATE);
- break;
- case 4:
- outl(0xFF000000 | (1<<0) | (2<<4) | (3<<8) | (4<<12), chan->iobase + VIA_PCM_STOPRATE);
- break;
- case 6:
- outl(0xFF000000 | (1<<0) | (2<<4) | (5<<8) | (6<<12) | (3<<16) | (4<<20), chan->iobase + VIA_PCM_STOPRATE);
- break;
- }
- }
- else
- {
- /*
- * New style, turn off channel volume
- * control, set bits in the right register
- */
- outb(0x0, chan->iobase + VIA_PCM_LEFTVOL);
- outb(0x0, chan->iobase + VIA_PCM_RIGHTVOL);
-
- m = inl(chan->iobase + VIA_PCM_STOPRATE);
- m &= ~(3<<20);
- if(chan->pcm_fmt & VIA_PCM_FMT_STEREO)
- m |= (1 << 20);
- if(chan->pcm_fmt & VIA_PCM_FMT_16BIT)
- m |= (1 << 21);
- outl(m, chan->iobase + VIA_PCM_STOPRATE);
- }
- }
- else
- outb (chan->pcm_fmt, chan->iobase + VIA_PCM_TYPE);
-
-
- DPRINTK ("EXIT, pcm_fmt = 0x%02X, reg = 0x%02X\n",
- chan->pcm_fmt,
- inb (chan->iobase + VIA_PCM_TYPE));
-}
-
-
-/**
- * via_chan_clear - Stop DMA channel operation, and reset pointers
- * @card: the chip to be accessed
- * @chan: Channel to be cleared
- *
- * Calls via_chan_stop to halt DMA operations, and then resets
- * all software pointers which track DMA operation.
- */
-
-static void via_chan_clear (struct via_info *card, struct via_channel *chan)
-{
- DPRINTK ("ENTER\n");
- via_chan_stop (chan->iobase);
- via_chan_buffer_free(card, chan);
- chan->is_active = 0;
- chan->is_mapped = 0;
- chan->is_enabled = 1;
- chan->slop_len = 0;
- chan->sw_ptr = 0;
- chan->n_irqs = 0;
- atomic_set (&chan->hw_ptr, 0);
- DPRINTK ("EXIT\n");
-}
-
-
-/**
- * via_chan_set_speed - Set PCM sample rate for given channel
- * @card: Private info for specified board
- * @chan: Channel whose sample rate will be adjusted
- * @val: New sample rate, in Hz
- *
- * Helper function for the %SNDCTL_DSP_SPEED ioctl. OSS semantics
- * demand that all audio operations halt (if they are not already
- * halted) when the %SNDCTL_DSP_SPEED ioctl is given.
- *
- * This function halts all audio operations for the given channel
- * @chan, and then calls via_set_rate to set the audio hardware
- * to the new rate.
- */
-
-static int via_chan_set_speed (struct via_info *card,
- struct via_channel *chan, int val)
-{
- DPRINTK ("ENTER, requested rate = %d\n", val);
-
- via_chan_clear (card, chan);
-
- val = via_set_rate (card->ac97, chan, val);
-
- DPRINTK ("EXIT, returning %d\n", val);
- return val;
-}
-
-
-/**
- * via_chan_set_fmt - Set PCM sample size for given channel
- * @card: Private info for specified board
- * @chan: Channel whose sample size will be adjusted
- * @val: New sample size, use the %AFMT_xxx constants
- *
- * Helper function for the %SNDCTL_DSP_SETFMT ioctl. OSS semantics
- * demand that all audio operations halt (if they are not already
- * halted) when the %SNDCTL_DSP_SETFMT ioctl is given.
- *
- * This function halts all audio operations for the given channel
- * @chan, and then calls via_chan_pcm_fmt to set the audio hardware
- * to the new sample size, either 8-bit or 16-bit.
- */
-
-static int via_chan_set_fmt (struct via_info *card,
- struct via_channel *chan, int val)
-{
- DPRINTK ("ENTER, val=%s\n",
- val == AFMT_U8 ? "AFMT_U8" :
- val == AFMT_S16_LE ? "AFMT_S16_LE" :
- "unknown");
-
- via_chan_clear (card, chan);
-
- assert (val != AFMT_QUERY); /* this case is handled elsewhere */
-
- switch (val) {
- case AFMT_S16_LE:
- if ((chan->pcm_fmt & VIA_PCM_FMT_16BIT) == 0) {
- chan->pcm_fmt |= VIA_PCM_FMT_16BIT;
- via_chan_pcm_fmt (chan, 0);
- }
- break;
-
- case AFMT_U8:
- if (chan->pcm_fmt & VIA_PCM_FMT_16BIT) {
- chan->pcm_fmt &= ~VIA_PCM_FMT_16BIT;
- via_chan_pcm_fmt (chan, 0);
- }
- break;
-
- default:
- DPRINTK ("unknown AFMT: 0x%X\n", val);
- val = AFMT_S16_LE;
- }
-
- DPRINTK ("EXIT\n");
- return val;
-}
-
-
-/**
- * via_chan_set_stereo - Enable or disable stereo for a DMA channel
- * @card: Private info for specified board
- * @chan: Channel whose stereo setting will be adjusted
- * @val: New number of channels: 1 (mono), 2 (stereo), 4 or 6 (multichannel only)
- *
- * Helper function for the %SNDCTL_DSP_CHANNELS and %SNDCTL_DSP_STEREO
- * ioctls. OSS semantics demand that all audio operations halt (if they
- * are not already halted) when %SNDCTL_DSP_CHANNELS or %SNDCTL_DSP_STEREO
- * is given.
- *
- * This function halts all audio operations for the given channel
- * @chan, and then calls via_chan_pcm_fmt to set the audio hardware
- * to enable or disable stereo.
- */
-
-static int via_chan_set_stereo (struct via_info *card,
- struct via_channel *chan, int val)
-{
- DPRINTK ("ENTER, channels = %d\n", val);
-
- via_chan_clear (card, chan);
-
- switch (val) {
-
- /* mono */
- case 1:
- chan->pcm_fmt &= ~VIA_PCM_FMT_STEREO;
- chan->channels = 1;
- via_chan_pcm_fmt (chan, 0);
- break;
-
- /* stereo */
- case 2:
- chan->pcm_fmt |= VIA_PCM_FMT_STEREO;
- chan->channels = 2;
- via_chan_pcm_fmt (chan, 0);
- break;
-
- case 4:
- case 6:
- if(chan->is_multi)
- {
- chan->pcm_fmt |= VIA_PCM_FMT_STEREO;
- chan->channels = val;
- break;
- }
- /* unknown */
- default:
- val = -EINVAL;
- break;
- }
-
- DPRINTK ("EXIT, returning %d\n", val);
- return val;
-}
-
-static int via_chan_set_buffering (struct via_info *card,
- struct via_channel *chan, int val)
-{
- int shift;
-
- DPRINTK ("ENTER\n");
-
- /* in both cases the buffer cannot be changed */
- if (chan->is_active || chan->is_mapped) {
- DPRINTK ("EXIT\n");
- return -EINVAL;
- }
-
- /* called outside SETFRAGMENT */
- /* set defaults or do nothing */
- if (val < 0) {
-
- if (chan->frag_size && chan->frag_number)
- goto out;
-
- DPRINTK ("\n");
-
- chan->frag_size = (VIA_DEFAULT_FRAG_TIME * chan->rate * chan->channels
- * ((chan->pcm_fmt & VIA_PCM_FMT_16BIT) ? 2 : 1)) / 1000 - 1;
-
- shift = 0;
- while (chan->frag_size) {
- chan->frag_size >>= 1;
- shift++;
- }
- chan->frag_size = 1 << shift;
-
- chan->frag_number = (VIA_DEFAULT_BUFFER_TIME / VIA_DEFAULT_FRAG_TIME);
-
- DPRINTK ("setting default values %d %d\n", chan->frag_size, chan->frag_number);
- } else {
- chan->frag_size = 1 << (val & 0xFFFF);
- chan->frag_number = (val >> 16) & 0xFFFF;
-
- DPRINTK ("using user values %d %d\n", chan->frag_size, chan->frag_number);
- }
-
- /* quake3 wants frag_number to be a power of two */
- shift = 0;
- while (chan->frag_number) {
- chan->frag_number >>= 1;
- shift++;
- }
- chan->frag_number = 1 << shift;
-
- if (chan->frag_size > VIA_MAX_FRAG_SIZE)
- chan->frag_size = VIA_MAX_FRAG_SIZE;
- else if (chan->frag_size < VIA_MIN_FRAG_SIZE)
- chan->frag_size = VIA_MIN_FRAG_SIZE;
-
- if (chan->frag_number < VIA_MIN_FRAG_NUMBER)
- chan->frag_number = VIA_MIN_FRAG_NUMBER;
- if (chan->frag_number > VIA_MAX_FRAG_NUMBER)
- chan->frag_number = VIA_MAX_FRAG_NUMBER;
-
- if ((chan->frag_number * chan->frag_size) / PAGE_SIZE > VIA_MAX_BUFFER_DMA_PAGES)
- chan->frag_number = (VIA_MAX_BUFFER_DMA_PAGES * PAGE_SIZE) / chan->frag_size;
-
-out:
- if (chan->is_record)
- atomic_set (&chan->n_frags, 0);
- else
- atomic_set (&chan->n_frags, chan->frag_number);
-
- DPRINTK ("EXIT\n");
-
- return 0;
-}
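-
-/*
- * The @val encoding above follows the usual OSS SNDCTL_DSP_SETFRAGMENT
- * convention: the low 16 bits hold log2(fragment size), the high 16
- * bits the requested fragment count. As an illustrative example, an
- * application passing (8 << 16) | 12 asks for 8 fragments of 4096
- * bytes each; the driver then rounds the fragment count to a power of
- * two and clamps both values to the VIA_{MIN,MAX}_FRAG_* limits.
- */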
-
-#ifdef VIA_CHAN_DUMP_BUFS
-/**
- * via_chan_dump_bufs - Display DMA table contents
- * @chan: Channel whose DMA table will be displayed
- *
- * Debugging function which displays the contents of the
- * scatter-gather DMA table for the given channel @chan.
- */
-
-static void via_chan_dump_bufs (struct via_channel *chan)
-{
- int i;
-
- for (i = 0; i < chan->frag_number; i++) {
- DPRINTK ("#%02d: addr=%x, count=%u, flag=%d, eol=%d\n",
- i, chan->sgtable[i].addr,
- chan->sgtable[i].count & 0x00FFFFFF,
- chan->sgtable[i].count & VIA_FLAG ? 1 : 0,
- chan->sgtable[i].count & VIA_EOL ? 1 : 0);
- }
- DPRINTK ("buf_in_use = %d, nextbuf = %d\n",
- atomic_read (&chan->buf_in_use),
- atomic_read (&chan->sw_ptr));
-}
-#endif /* VIA_CHAN_DUMP_BUFS */
-
-
-/**
- * via_chan_flush_frag - Flush partially-full playback buffer to hardware
- * @chan: Channel whose DMA table will be flushed
- *
- * Flushes partially-full playback buffer to hardware.
- */
-
-static void via_chan_flush_frag (struct via_channel *chan)
-{
- DPRINTK ("ENTER\n");
-
- assert (chan->slop_len > 0);
-
- if (chan->sw_ptr == (chan->frag_number - 1))
- chan->sw_ptr = 0;
- else
- chan->sw_ptr++;
-
- chan->slop_len = 0;
-
- assert (atomic_read (&chan->n_frags) > 0);
- atomic_dec (&chan->n_frags);
-
- DPRINTK ("EXIT\n");
-}
-
-
-
-/**
- * via_chan_maybe_start - Initiate audio hardware DMA operation
- * @chan: Channel whose DMA is to be started
- *
- * Initiate DMA operation, if the DMA engine for the given
- * channel @chan is not already active.
- */
-
-static inline void via_chan_maybe_start (struct via_channel *chan)
-{
- assert (chan->is_active == sg_active(chan->iobase));
-
- DPRINTK ("MAYBE START %s\n", chan->name);
- if (!chan->is_active && chan->is_enabled) {
- chan->is_active = 1;
- sg_begin (chan);
- DPRINTK ("starting channel %s\n", chan->name);
- }
-}
-
-
-/****************************************************************
- *
- * Interface to ac97-codec module
- *
- *
- */
-
-/**
- * via_ac97_wait_idle - Wait until AC97 codec is not busy
- * @card: Private info for specified board
- *
- * Busy-waits (with udelay) until the AC97 codec is no longer busy.
- * Returns the final value read from the SGD
- * register being polled.
- */
-
-static u8 via_ac97_wait_idle (struct via_info *card)
-{
- u8 tmp8;
- int counter = VIA_COUNTER_LIMIT;
-
- DPRINTK ("ENTER/EXIT\n");
-
- assert (card != NULL);
- assert (card->pdev != NULL);
-
- do {
- udelay (15);
-
- tmp8 = inb (card->baseaddr + 0x83);
- } while ((tmp8 & VIA_CR83_BUSY) && (counter-- > 0));
-
- if (tmp8 & VIA_CR83_BUSY)
- printk (KERN_WARNING PFX "timeout waiting on AC97 codec\n");
- return tmp8;
-}
-
-
-/**
- * via_ac97_read_reg - Read AC97 standard register
- * @codec: Pointer to generic AC97 codec info
- * @reg: Index of AC97 register to be read
- *
- * Read the value of a single AC97 codec register,
- * as defined by the Intel AC97 specification.
- *
- * Defines the standard AC97 read-register operation
- * required by the kernel's ac97_codec interface.
- *
- * Returns the 16-bit value stored in the specified
- * register.
- */
-
-static u16 via_ac97_read_reg (struct ac97_codec *codec, u8 reg)
-{
- unsigned long data;
- struct via_info *card;
- int counter;
-
- DPRINTK ("ENTER\n");
-
- assert (codec != NULL);
- assert (codec->private_data != NULL);
-
- card = codec->private_data;
-
- spin_lock(&card->ac97_lock);
-
- /* Every time we write to register 80 we cause a transaction.
- The only safe way to clear the valid bit is to write it at
- the same time as the command */
- data = (reg << 16) | VIA_CR80_READ | VIA_CR80_VALID;
-
- outl (data, card->baseaddr + VIA_BASE0_AC97_CTRL);
- udelay (20);
-
- for (counter = VIA_COUNTER_LIMIT; counter > 0; counter--) {
- udelay (1);
- if ((((data = inl(card->baseaddr + VIA_BASE0_AC97_CTRL)) &
- (VIA_CR80_VALID|VIA_CR80_BUSY)) == VIA_CR80_VALID))
- goto out;
- }
-
- printk (KERN_WARNING PFX "timeout while reading AC97 codec (0x%lX)\n", data);
- goto err_out;
-
-out:
- /* Once the valid bit has become set, we must wait a complete AC97
- frame before the data has settled. */
- udelay(25);
- data = (unsigned long) inl (card->baseaddr + VIA_BASE0_AC97_CTRL);
-
- outb (0x02, card->baseaddr + 0x83);
-
- if (((data & 0x007F0000) >> 16) == reg) {
- DPRINTK ("EXIT, success, data=0x%lx, retval=0x%lx\n",
- data, data & 0x0000FFFF);
- spin_unlock(&card->ac97_lock);
- return data & 0x0000FFFF;
- }
-
- printk (KERN_WARNING "via82cxxx_audio: not our index: reg=0x%x, newreg=0x%lx\n",
- reg, ((data & 0x007F0000) >> 16));
-
-err_out:
- spin_unlock(&card->ac97_lock);
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-}
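-
-/*
- * Layout of the 32-bit AC97 access word used above, as the code itself
- * decodes it: bits 0-15 carry the register data, bits 16-22 the codec
- * register index (the 0x007F0000 mask), and the VIA_CR80_* control
- * flags live in the remaining high bits.
- */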
-
-
-/**
- * via_ac97_write_reg - Write AC97 standard register
- * @codec: Pointer to generic AC97 codec info
- * @reg: Index of AC97 register to be written
- * @value: Value to be written to AC97 register
- *
- * Write the value of a single AC97 codec register,
- * as defined by the Intel AC97 specification.
- *
- * Defines the standard AC97 write-register operation
- * required by the kernel's ac97_codec interface.
- */
-
-static void via_ac97_write_reg (struct ac97_codec *codec, u8 reg, u16 value)
-{
- u32 data;
- struct via_info *card;
- int counter;
-
- DPRINTK ("ENTER\n");
-
- assert (codec != NULL);
- assert (codec->private_data != NULL);
-
- card = codec->private_data;
-
- spin_lock(&card->ac97_lock);
-
- data = (reg << 16) + value;
- outl (data, card->baseaddr + VIA_BASE0_AC97_CTRL);
- udelay (10);
-
- for (counter = VIA_COUNTER_LIMIT; counter > 0; counter--) {
- if ((inb (card->baseaddr + 0x83) & VIA_CR83_BUSY) == 0)
- goto out;
-
- udelay (15);
- }
-
- printk (KERN_WARNING PFX "timeout after AC97 codec write (0x%X, 0x%X)\n", reg, value);
-
-out:
- spin_unlock(&card->ac97_lock);
- DPRINTK ("EXIT\n");
-}
-
-
-static int via_mixer_open (struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- struct via_info *card;
- struct pci_dev *pdev = NULL;
- struct pci_driver *drvr;
-
- DPRINTK ("ENTER\n");
-
- while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
- drvr = pci_dev_driver (pdev);
- if (drvr == &via_driver) {
- assert (pci_get_drvdata (pdev) != NULL);
-
- card = pci_get_drvdata (pdev);
- if (card->ac97->dev_mixer == minor)
- goto match;
- }
- }
-
- DPRINTK ("EXIT, returning -ENODEV\n");
- return -ENODEV;
-
-match:
- pci_dev_put(pdev);
- file->private_data = card->ac97;
-
- DPRINTK ("EXIT, returning 0\n");
- return nonseekable_open(inode, file);
-}
-
-static int via_mixer_ioctl (struct inode *inode, struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct ac97_codec *codec = file->private_data;
- struct via_info *card;
- int nonblock = (file->f_flags & O_NONBLOCK);
- int rc;
-
- DPRINTK ("ENTER\n");
-
- assert (codec != NULL);
- card = codec->private_data;
- assert (card != NULL);
-
- rc = via_syscall_down (card, nonblock);
- if (rc) goto out;
-
-#if 0
- /*
- * Intercept volume control on 8233 and 8235
- */
- if(card->volume)
- {
- switch(cmd)
- {
- case SOUND_MIXER_READ_VOLUME:
- return card->mixer_vol;
- case SOUND_MIXER_WRITE_VOLUME:
- {
- int v;
- if(get_user(v, (int *)arg))
- {
- rc = -EFAULT;
- goto out;
- }
- card->mixer_vol = v;
- }
- }
- }
-#endif
- rc = codec->mixer_ioctl(codec, cmd, arg);
-
- mutex_unlock(&card->syscall_mutex);
-
-out:
- DPRINTK ("EXIT, returning %d\n", rc);
- return rc;
-}
-
-
-static const struct file_operations via_mixer_fops = {
- .owner = THIS_MODULE,
- .open = via_mixer_open,
- .llseek = no_llseek,
- .ioctl = via_mixer_ioctl,
-};
-
-
-static int __devinit via_ac97_reset (struct via_info *card)
-{
- struct pci_dev *pdev = card->pdev;
- u8 tmp8;
- u16 tmp16;
-
- DPRINTK ("ENTER\n");
-
- assert (pdev != NULL);
-
-#ifndef NDEBUG
- {
- u8 r40,r41,r42,r43,r44,r48;
- pci_read_config_byte (card->pdev, 0x40, &r40);
- pci_read_config_byte (card->pdev, 0x41, &r41);
- pci_read_config_byte (card->pdev, 0x42, &r42);
- pci_read_config_byte (card->pdev, 0x43, &r43);
- pci_read_config_byte (card->pdev, 0x44, &r44);
- pci_read_config_byte (card->pdev, 0x48, &r48);
- DPRINTK ("PCI config: %02X %02X %02X %02X %02X %02X\n",
- r40,r41,r42,r43,r44,r48);
-
- spin_lock_irq (&card->lock);
- DPRINTK ("regs==%02X %02X %02X %08X %08X %08X %08X\n",
- inb (card->baseaddr + 0x00),
- inb (card->baseaddr + 0x01),
- inb (card->baseaddr + 0x02),
- inl (card->baseaddr + 0x04),
- inl (card->baseaddr + 0x0C),
- inl (card->baseaddr + 0x80),
- inl (card->baseaddr + 0x84));
- spin_unlock_irq (&card->lock);
-
- }
-#endif
-
- /*
- * Reset AC97 controller: enable, disable, enable,
- * pausing after each command for good luck. Only
- * do this if the codec is not ready, because such a
- * hard codec reset causes loud pops.
- */
- pci_read_config_byte (pdev, VIA_ACLINK_STATUS, &tmp8);
- if ((tmp8 & VIA_CR40_AC97_READY) == 0) {
- pci_write_config_byte (pdev, VIA_ACLINK_CTRL,
- VIA_CR41_AC97_ENABLE |
- VIA_CR41_AC97_RESET |
- VIA_CR41_AC97_WAKEUP);
- udelay (100);
-
- pci_write_config_byte (pdev, VIA_ACLINK_CTRL, 0);
- udelay (100);
-
- pci_write_config_byte (pdev, VIA_ACLINK_CTRL,
- VIA_CR41_AC97_ENABLE |
- VIA_CR41_PCM_ENABLE |
- VIA_CR41_VRA | VIA_CR41_AC97_RESET);
- udelay (100);
- }
-
- /* Make sure VRA is enabled, in case we didn't do a
- * complete codec reset, above
- */
- pci_read_config_byte (pdev, VIA_ACLINK_CTRL, &tmp8);
- if (((tmp8 & VIA_CR41_VRA) == 0) ||
- ((tmp8 & VIA_CR41_AC97_ENABLE) == 0) ||
- ((tmp8 & VIA_CR41_PCM_ENABLE) == 0) ||
- ((tmp8 & VIA_CR41_AC97_RESET) == 0)) {
- pci_write_config_byte (pdev, VIA_ACLINK_CTRL,
- VIA_CR41_AC97_ENABLE |
- VIA_CR41_PCM_ENABLE |
- VIA_CR41_VRA | VIA_CR41_AC97_RESET);
- udelay (100);
- }
-
- if(card->legacy)
- {
-#if 0 /* this breaks on K7M */
- /* disable legacy stuff */
- pci_write_config_byte (pdev, 0x42, 0x00);
- udelay(10);
-#endif
-
- /* route FM trap to IRQ, disable FM trap */
- pci_write_config_byte (pdev, 0x48, 0x05);
- udelay(10);
- }
-
- /* disable all codec GPI interrupts */
- outl (0, pci_resource_start (pdev, 0) + 0x8C);
-
- /* WARNING: this line is magic. Remove this
- * and things break. */
- /* enable variable rate */
- tmp16 = via_ac97_read_reg (card->ac97, AC97_EXTENDED_STATUS);
- if ((tmp16 & 1) == 0)
- via_ac97_write_reg (card->ac97, AC97_EXTENDED_STATUS, tmp16 | 1);
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-}
-
-
-static void via_ac97_codec_wait (struct ac97_codec *codec)
-{
- assert (codec->private_data != NULL);
- via_ac97_wait_idle (codec->private_data);
-}
-
-
-static int __devinit via_ac97_init (struct via_info *card)
-{
- int rc;
- u16 tmp16;
-
- DPRINTK ("ENTER\n");
-
- assert (card != NULL);
-
- card->ac97 = ac97_alloc_codec();
- if(card->ac97 == NULL)
- return -ENOMEM;
-
- card->ac97->private_data = card;
- card->ac97->codec_read = via_ac97_read_reg;
- card->ac97->codec_write = via_ac97_write_reg;
- card->ac97->codec_wait = via_ac97_codec_wait;
-
- card->ac97->dev_mixer = register_sound_mixer (&via_mixer_fops, -1);
- if (card->ac97->dev_mixer < 0) {
- printk (KERN_ERR PFX "unable to register AC97 mixer, aborting\n");
- DPRINTK ("EXIT, returning -EIO\n");
- ac97_release_codec(card->ac97);
- return -EIO;
- }
-
- rc = via_ac97_reset (card);
- if (rc) {
- printk (KERN_ERR PFX "unable to reset AC97 codec, aborting\n");
- goto err_out;
- }
-
- mdelay(10);
-
- if (ac97_probe_codec (card->ac97) == 0) {
- printk (KERN_ERR PFX "unable to probe AC97 codec, aborting\n");
- rc = -EIO;
- goto err_out;
- }
-
- /* enable variable rate */
- tmp16 = via_ac97_read_reg (card->ac97, AC97_EXTENDED_STATUS);
- via_ac97_write_reg (card->ac97, AC97_EXTENDED_STATUS, tmp16 | 1);
-
- /*
- * If we cannot enable VRA, we have a locked-rate codec.
- * We try again to enable VRA before assuming so, however.
- */
- tmp16 = via_ac97_read_reg (card->ac97, AC97_EXTENDED_STATUS);
- if ((tmp16 & 1) == 0) {
- via_ac97_write_reg (card->ac97, AC97_EXTENDED_STATUS, tmp16 | 1);
- tmp16 = via_ac97_read_reg (card->ac97, AC97_EXTENDED_STATUS);
- if ((tmp16 & 1) == 0) {
- card->locked_rate = 1;
- printk (KERN_WARNING PFX "Codec rate locked at 48Khz\n");
- }
- }
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-
-err_out:
- unregister_sound_mixer (card->ac97->dev_mixer);
- DPRINTK ("EXIT, returning %d\n", rc);
- ac97_release_codec(card->ac97);
- return rc;
-}
-
-
-static void via_ac97_cleanup (struct via_info *card)
-{
- DPRINTK ("ENTER\n");
-
- assert (card != NULL);
- assert (card->ac97->dev_mixer >= 0);
-
- unregister_sound_mixer (card->ac97->dev_mixer);
- ac97_release_codec(card->ac97);
-
- DPRINTK ("EXIT\n");
-}
-
-
-
-/****************************************************************
- *
- * Interrupt-related code
- *
- */
-
-/**
- * via_intr_channel - handle an interrupt for a single channel
- * @card: unused
- * @chan: handle interrupt for this channel
- *
- * This is the "meat" of the interrupt handler,
- * containing the actions taken each time an interrupt
- * occurs. All communication and coordination with
- * userspace takes place here.
- *
- * Locking: inside card->lock
- */
-
-static void via_intr_channel (struct via_info *card, struct via_channel *chan)
-{
- u8 status;
- int n;
-
- /* check pertinent bits of status register for action bits */
- status = inb (chan->iobase) & (VIA_SGD_FLAG | VIA_SGD_EOL | VIA_SGD_STOPPED);
- if (!status)
- return;
-
- /* acknowledge any flagged bits ASAP */
- outb (status, chan->iobase);
-
- if (!chan->sgtable) /* XXX: temporary solution */
- return;
-
- /* grab current h/w ptr value */
- n = atomic_read (&chan->hw_ptr);
-
- /* sanity check: make sure our h/w ptr doesn't have a weird value */
- assert (n >= 0);
- assert (n < chan->frag_number);
-
-
- /* reset SGD data structure in memory to reflect a full buffer,
- * and advance the h/w ptr, wrapping around to zero if needed
- */
- if (n == (chan->frag_number - 1)) {
- chan->sgtable[n].count = cpu_to_le32(chan->frag_size | VIA_EOL);
- atomic_set (&chan->hw_ptr, 0);
- } else {
- chan->sgtable[n].count = cpu_to_le32(chan->frag_size | VIA_FLAG);
- atomic_inc (&chan->hw_ptr);
- }
-
- /* accounting crap for SNDCTL_DSP_GETxPTR */
- chan->n_irqs++;
- chan->bytes += chan->frag_size;
- /* FIXME - signed overflow is undefined */
- if (chan->bytes < 0) /* handle overflow of 31-bit value */
- chan->bytes = chan->frag_size;
- /* all following checks only occur when not in mmap(2) mode */
- if (!chan->is_mapped)
- {
- /* If we are recording, then n_frags represents the number
- * of fragments waiting to be handled by userspace.
- * If we are playback, then n_frags represents the number
- * of fragments remaining to be filled by userspace.
- * We increment here. If we reach max number of fragments,
- * this indicates an underrun/overrun. For this case under OSS,
- * we stop the record/playback process.
- */
- if (atomic_read (&chan->n_frags) < chan->frag_number)
- atomic_inc (&chan->n_frags);
- assert (atomic_read (&chan->n_frags) <= chan->frag_number);
- if (atomic_read (&chan->n_frags) == chan->frag_number) {
- chan->is_active = 0;
- via_chan_stop (chan->iobase);
- }
- }
- /* wake up anyone listening to see when interrupts occur */
- wake_up_all (&chan->wait);
-
- DPRINTK ("%s intr, status=0x%02X, hwptr=0x%lX, chan->hw_ptr=%d\n",
- chan->name, status, (long) inl (chan->iobase + 0x04),
- atomic_read (&chan->hw_ptr));
-
- DPRINTK ("%s intr, channel n_frags == %d, missed %d\n", chan->name,
- atomic_read (&chan->n_frags), missed);
-}
-
-
-static irqreturn_t via_interrupt(int irq, void *dev_id)
-{
- struct via_info *card = dev_id;
- u32 status32;
-
- /* to minimize interrupt sharing costs, we use the SGD status
- * shadow register to check the status of all inputs and
- * outputs with a single 32-bit bus read. If no interrupt
- * conditions are flagged, we exit immediately
- */
- status32 = inl (card->baseaddr + VIA_BASE0_SGD_STATUS_SHADOW);
- if (!(status32 & VIA_INTR_MASK))
- {
-#ifdef CONFIG_MIDI_VIA82CXXX
- if (card->midi_devc)
- uart401intr(irq, card->midi_devc);
-#endif
- return IRQ_HANDLED;
- }
- DPRINTK ("intr, status32 == 0x%08X\n", status32);
-
- /* synchronize interrupt handling under SMP. this spinlock
- * goes away completely on UP
- */
- spin_lock (&card->lock);
-
- if (status32 & VIA_INTR_OUT)
- via_intr_channel (card, &card->ch_out);
- if (status32 & VIA_INTR_IN)
- via_intr_channel (card, &card->ch_in);
- if (status32 & VIA_INTR_FM)
- via_intr_channel (card, &card->ch_fm);
-
- spin_unlock (&card->lock);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t via_new_interrupt(int irq, void *dev_id)
-{
- struct via_info *card = dev_id;
- u32 status32;
-
- /* to minimize interrupt sharing costs, we use the SGD status
- * shadow register to check the status of all inputs and
- * outputs with a single 32-bit bus read. If no interrupt
- * conditions are flagged, we exit immediately
- */
- status32 = inl (card->baseaddr + VIA_BASE0_SGD_STATUS_SHADOW);
- if (!(status32 & VIA_NEW_INTR_MASK))
- return IRQ_NONE;
- /* synchronize interrupt handling under SMP. this spinlock
- * goes away completely on UP
- */
- spin_lock (&card->lock);
-
- via_intr_channel (card, &card->ch_out);
- via_intr_channel (card, &card->ch_in);
- via_intr_channel (card, &card->ch_fm);
-
- spin_unlock (&card->lock);
- return IRQ_HANDLED;
-}
-
-
-/**
- * via_interrupt_init - Initialize interrupt handling
- * @card: Private info for specified board
- *
- * Obtain and reserve the IRQ for use in handling audio events.
- * Also, disable any IRQ-generating resources, to make sure
- * we don't get interrupts before we want them.
- */
-
-static int via_interrupt_init (struct via_info *card)
-{
- u8 tmp8;
-
- DPRINTK ("ENTER\n");
-
- assert (card != NULL);
- assert (card->pdev != NULL);
-
- /* check for sane IRQ number. can this ever happen? */
- if (card->pdev->irq < 2) {
- printk (KERN_ERR PFX "insane IRQ %d, aborting\n",
- card->pdev->irq);
- DPRINTK ("EXIT, returning -EIO\n");
- return -EIO;
- }
-
- /* VIA requires this to be done */
- pci_write_config_byte(card->pdev, PCI_INTERRUPT_LINE, card->pdev->irq);
-
- if(card->legacy)
- {
- /* make sure FM irq is not routed to us */
- pci_read_config_byte (card->pdev, VIA_FM_NMI_CTRL, &tmp8);
- if ((tmp8 & VIA_CR48_FM_TRAP_TO_NMI) == 0) {
- tmp8 |= VIA_CR48_FM_TRAP_TO_NMI;
- pci_write_config_byte (card->pdev, VIA_FM_NMI_CTRL, tmp8);
- }
- if (request_irq (card->pdev->irq, via_interrupt, IRQF_SHARED, VIA_MODULE_NAME, card)) {
- printk (KERN_ERR PFX "unable to obtain IRQ %d, aborting\n",
- card->pdev->irq);
- DPRINTK ("EXIT, returning -EBUSY\n");
- return -EBUSY;
- }
- }
- else
- {
- if (request_irq (card->pdev->irq, via_new_interrupt, IRQF_SHARED, VIA_MODULE_NAME, card)) {
- printk (KERN_ERR PFX "unable to obtain IRQ %d, aborting\n",
- card->pdev->irq);
- DPRINTK ("EXIT, returning -EBUSY\n");
- return -EBUSY;
- }
- }
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-}
-
-
-/****************************************************************
- *
- * OSS DSP device
- *
- */
-
-static const struct file_operations via_dsp_fops = {
- .owner = THIS_MODULE,
- .open = via_dsp_open,
- .release = via_dsp_release,
- .read = via_dsp_read,
- .write = via_dsp_write,
- .poll = via_dsp_poll,
- .llseek = no_llseek,
- .ioctl = via_dsp_ioctl,
- .mmap = via_dsp_mmap,
-};
-
-
-static int __devinit via_dsp_init (struct via_info *card)
-{
- u8 tmp8;
-
- DPRINTK ("ENTER\n");
-
- assert (card != NULL);
-
- if(card->legacy)
- {
- /* turn off legacy features, if not already */
- pci_read_config_byte (card->pdev, VIA_FUNC_ENABLE, &tmp8);
- if (tmp8 & (VIA_CR42_SB_ENABLE | VIA_CR42_FM_ENABLE)) {
- tmp8 &= ~(VIA_CR42_SB_ENABLE | VIA_CR42_FM_ENABLE);
- pci_write_config_byte (card->pdev, VIA_FUNC_ENABLE, tmp8);
- }
- }
-
- via_stop_everything (card);
-
- card->dev_dsp = register_sound_dsp (&via_dsp_fops, -1);
- if (card->dev_dsp < 0) {
- DPRINTK ("EXIT, returning -ENODEV\n");
- return -ENODEV;
- }
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-}
-
-
-static void via_dsp_cleanup (struct via_info *card)
-{
- DPRINTK ("ENTER\n");
-
- assert (card != NULL);
- assert (card->dev_dsp >= 0);
-
- via_stop_everything (card);
-
- unregister_sound_dsp (card->dev_dsp);
-
- DPRINTK ("EXIT\n");
-}
-
-
-static struct page * via_mm_nopage (struct vm_area_struct * vma,
- unsigned long address, int *type)
-{
- struct via_info *card = vma->vm_private_data;
- struct via_channel *chan = &card->ch_out;
- unsigned long max_bufs;
- struct page *dmapage;
- unsigned long pgoff;
- int rd, wr;
-
- DPRINTK ("ENTER, start %lXh, ofs %lXh, pgoff %ld, addr %lXh\n",
- vma->vm_start,
- address - vma->vm_start,
- (address - vma->vm_start) >> PAGE_SHIFT,
- address);
-
- if (address > vma->vm_end) {
- DPRINTK ("EXIT, returning NOPAGE_SIGBUS\n");
- return NOPAGE_SIGBUS; /* Disallow mremap */
- }
- if (!card) {
- DPRINTK ("EXIT, returning NOPAGE_SIGBUS\n");
- return NOPAGE_SIGBUS; /* Nothing allocated */
- }
-
- pgoff = vma->vm_pgoff + ((address - vma->vm_start) >> PAGE_SHIFT);
- rd = card->ch_in.is_mapped;
- wr = card->ch_out.is_mapped;
-
- max_bufs = chan->frag_number;
- if (rd && wr)
- max_bufs *= 2;
- if (pgoff >= max_bufs)
- return NOPAGE_SIGBUS;
-
- /* if full-duplex (read+write) and we have two sets of bufs,
- * then the playback buffers come first, sez soundcard.c */
- if (pgoff >= chan->page_number) {
- pgoff -= chan->page_number;
- chan = &card->ch_in;
- } else if (!wr)
- chan = &card->ch_in;
-
- assert ((((unsigned long)chan->pgtbl[pgoff].cpuaddr) % PAGE_SIZE) == 0);
-
- dmapage = virt_to_page (chan->pgtbl[pgoff].cpuaddr);
- DPRINTK ("EXIT, returning page %p for cpuaddr %lXh\n",
- dmapage, (unsigned long) chan->pgtbl[pgoff].cpuaddr);
- get_page (dmapage);
- if (type)
- *type = VM_FAULT_MINOR;
- return dmapage;
-}
-
-
-#ifndef VM_RESERVED
-static int via_mm_swapout (struct page *page, struct file *filp)
-{
- return 0;
-}
-#endif /* VM_RESERVED */
-
-
-static struct vm_operations_struct via_mm_ops = {
- .nopage = via_mm_nopage,
-
-#ifndef VM_RESERVED
- .swapout = via_mm_swapout,
-#endif
-};
-
-
-static int via_dsp_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct via_info *card;
- int nonblock = (file->f_flags & O_NONBLOCK);
- int rc = -EINVAL, rd=0, wr=0;
- unsigned long max_size, size, start, offset;
-
- assert (file != NULL);
- assert (vma != NULL);
- card = file->private_data;
- assert (card != NULL);
-
- DPRINTK ("ENTER, start %lXh, size %ld, pgoff %ld\n",
- vma->vm_start,
- vma->vm_end - vma->vm_start,
- vma->vm_pgoff);
-
- max_size = 0;
- if (vma->vm_flags & VM_READ) {
- rd = 1;
- via_chan_set_buffering(card, &card->ch_in, -1);
- via_chan_buffer_init (card, &card->ch_in);
- max_size += card->ch_in.page_number << PAGE_SHIFT;
- }
- if (vma->vm_flags & VM_WRITE) {
- wr = 1;
- via_chan_set_buffering(card, &card->ch_out, -1);
- via_chan_buffer_init (card, &card->ch_out);
- max_size += card->ch_out.page_number << PAGE_SHIFT;
- }
-
- start = vma->vm_start;
- offset = (vma->vm_pgoff << PAGE_SHIFT);
- size = vma->vm_end - vma->vm_start;
-
- /* some basic size/offset sanity checks */
- if (size > max_size)
- goto out;
- if (offset > max_size - size)
- goto out;
-
- rc = via_syscall_down (card, nonblock);
- if (rc) goto out;
-
- vma->vm_ops = &via_mm_ops;
- vma->vm_private_data = card;
-
-#ifdef VM_RESERVED
- vma->vm_flags |= VM_RESERVED;
-#endif
-
- if (rd)
- card->ch_in.is_mapped = 1;
- if (wr)
- card->ch_out.is_mapped = 1;
-
- mutex_unlock(&card->syscall_mutex);
- rc = 0;
-
-out:
- DPRINTK ("EXIT, returning %d\n", rc);
- return rc;
-}
-
-
-static ssize_t via_dsp_do_read (struct via_info *card,
- char __user *userbuf, size_t count,
- int nonblock)
-{
- DECLARE_WAITQUEUE(wait, current);
- const char __user *orig_userbuf = userbuf;
- struct via_channel *chan = &card->ch_in;
- size_t size;
- int n, tmp;
- ssize_t ret = 0;
-
- /* if SGD has not yet been started, start it */
- via_chan_maybe_start (chan);
-
-handle_one_block:
- /* just to be a nice neighbor */
- /* Thomas Sailer:
- * But also to ourselves, release semaphore if we do so */
- if (need_resched()) {
- mutex_unlock(&card->syscall_mutex);
- schedule ();
- ret = via_syscall_down (card, nonblock);
- if (ret)
- goto out;
- }
-
- /* grab current channel software pointer. In the case of
- * recording, this is pointing to the next buffer that
- * will receive data from the audio hardware.
- */
- n = chan->sw_ptr;
-
- /* n_frags represents the number of fragments waiting
- * to be copied to userland. sleep until at least
- * one buffer has been read from the audio hardware.
- */
- add_wait_queue(&chan->wait, &wait);
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- tmp = atomic_read (&chan->n_frags);
- assert (tmp >= 0);
- assert (tmp <= chan->frag_number);
- if (tmp)
- break;
- if (nonblock || !chan->is_active) {
- ret = -EAGAIN;
- break;
- }
-
- mutex_unlock(&card->syscall_mutex);
-
- DPRINTK ("Sleeping on block %d\n", n);
- schedule();
-
- ret = via_syscall_down (card, nonblock);
- if (ret)
- break;
-
- if (signal_pending (current)) {
- ret = -ERESTARTSYS;
- break;
- }
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&chan->wait, &wait);
- if (ret)
- goto out;
-
- /* Now that we have a buffer we can read from, send
- * as much as sample data possible to userspace.
- */
- while ((count > 0) && (chan->slop_len < chan->frag_size)) {
- size_t slop_left = chan->frag_size - chan->slop_len;
- void *base = chan->pgtbl[n / (PAGE_SIZE / chan->frag_size)].cpuaddr;
- unsigned ofs = (n % (PAGE_SIZE / chan->frag_size)) * chan->frag_size;
-
- size = (count < slop_left) ? count : slop_left;
- if (copy_to_user (userbuf,
- base + ofs + chan->slop_len,
- size)) {
- ret = -EFAULT;
- goto out;
- }
-
- count -= size;
- chan->slop_len += size;
- userbuf += size;
- }
-
- /* If we didn't copy the buffer completely to userspace,
- * stop now.
- */
- if (chan->slop_len < chan->frag_size)
- goto out;
-
- /*
- * If we get to this point, we copied one buffer completely
- * to userspace, give the buffer back to the hardware.
- */
-
- /* advance channel software pointer to point to
- * the next buffer from which we will copy
- */
- if (chan->sw_ptr == (chan->frag_number - 1))
- chan->sw_ptr = 0;
- else
- chan->sw_ptr++;
-
- /* mark one less buffer waiting to be processed */
- assert (atomic_read (&chan->n_frags) > 0);
- atomic_dec (&chan->n_frags);
-
- /* we are at a block boundary, there is no fragment data */
- chan->slop_len = 0;
-
- DPRINTK ("Flushed block %u, sw_ptr now %u, n_frags now %d\n",
- n, chan->sw_ptr, atomic_read (&chan->n_frags));
-
- DPRINTK ("regs==%02X %02X %02X %08X %08X %08X %08X\n",
- inb (card->baseaddr + 0x00),
- inb (card->baseaddr + 0x01),
- inb (card->baseaddr + 0x02),
- inl (card->baseaddr + 0x04),
- inl (card->baseaddr + 0x0C),
- inl (card->baseaddr + 0x80),
- inl (card->baseaddr + 0x84));
-
- if (count > 0)
- goto handle_one_block;
-
-out:
- return (userbuf != orig_userbuf) ? (userbuf - orig_userbuf) : ret;
-}
-
-
-static ssize_t via_dsp_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
-{
- struct via_info *card;
- int nonblock = (file->f_flags & O_NONBLOCK);
- int rc;
-
- DPRINTK ("ENTER, file=%p, buffer=%p, count=%u, ppos=%lu\n",
- file, buffer, count, ppos ? ((unsigned long)*ppos) : 0);
-
- assert (file != NULL);
- card = file->private_data;
- assert (card != NULL);
-
- rc = via_syscall_down (card, nonblock);
- if (rc) goto out;
-
- if (card->ch_in.is_mapped) {
- rc = -ENXIO;
- goto out_up;
- }
-
- via_chan_set_buffering(card, &card->ch_in, -1);
- rc = via_chan_buffer_init (card, &card->ch_in);
-
- if (rc)
- goto out_up;
-
- rc = via_dsp_do_read (card, buffer, count, nonblock);
-
-out_up:
- mutex_unlock(&card->syscall_mutex);
-out:
- DPRINTK ("EXIT, returning %ld\n",(long) rc);
- return rc;
-}
-
-
-static ssize_t via_dsp_do_write (struct via_info *card,
- const char __user *userbuf, size_t count,
- int nonblock)
-{
- DECLARE_WAITQUEUE(wait, current);
- const char __user *orig_userbuf = userbuf;
- struct via_channel *chan = &card->ch_out;
- volatile struct via_sgd_table *sgtable = chan->sgtable;
- size_t size;
- int n, tmp;
- ssize_t ret = 0;
-
-handle_one_block:
- /* just to be a nice neighbor */
- /* Thomas Sailer:
- * But also to ourselves, release semaphore if we do so */
- if (need_resched()) {
- mutex_unlock(&card->syscall_mutex);
- schedule ();
- ret = via_syscall_down (card, nonblock);
- if (ret)
- goto out;
- }
-
- /* grab current channel fragment pointer. In the case of
- * playback, this is pointing to the next fragment that
- * should receive data from userland.
- */
- n = chan->sw_ptr;
-
- /* n_frags represents the number of fragments remaining
- * to be filled by userspace. Sleep until
- * at least one fragment is available for our use.
- */
- add_wait_queue(&chan->wait, &wait);
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- tmp = atomic_read (&chan->n_frags);
- assert (tmp >= 0);
- assert (tmp <= chan->frag_number);
- if (tmp)
- break;
- if (nonblock || !chan->is_active) {
- ret = -EAGAIN;
- break;
- }
-
- mutex_unlock(&card->syscall_mutex);
-
- DPRINTK ("Sleeping on page %d, tmp==%d, ir==%d\n", n, tmp, chan->is_record);
- schedule();
-
- ret = via_syscall_down (card, nonblock);
- if (ret)
- break;
-
- if (signal_pending (current)) {
- ret = -ERESTARTSYS;
- break;
- }
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&chan->wait, &wait);
- if (ret)
- goto out;
-
- /* Now that we have at least one fragment we can write to, fill the buffer
- * as much as possible with data from userspace.
- */
- while ((count > 0) && (chan->slop_len < chan->frag_size)) {
- size_t slop_left = chan->frag_size - chan->slop_len;
-
- size = (count < slop_left) ? count : slop_left;
- if (copy_from_user (chan->pgtbl[n / (PAGE_SIZE / chan->frag_size)].cpuaddr + (n % (PAGE_SIZE / chan->frag_size)) * chan->frag_size + chan->slop_len,
- userbuf, size)) {
- ret = -EFAULT;
- goto out;
- }
-
- count -= size;
- chan->slop_len += size;
- userbuf += size;
- }
-
- /* If we didn't fill up the buffer with data, stop now.
- * Put a 'stop' marker in the DMA table too, to tell the
- * audio hardware to stop if it gets here.
- */
- if (chan->slop_len < chan->frag_size) {
- sgtable[n].count = cpu_to_le32 (chan->slop_len | VIA_EOL | VIA_STOP);
- goto out;
- }
-
- /*
- * If we get to this point, we have filled a buffer with
- * audio data, flush the buffer to audio hardware.
- */
-
- /* Record the true size for the audio hardware to notice */
- if (n == (chan->frag_number - 1))
- sgtable[n].count = cpu_to_le32 (chan->frag_size | VIA_EOL);
- else
- sgtable[n].count = cpu_to_le32 (chan->frag_size | VIA_FLAG);
-
- /* advance channel software pointer to point to
- * the next buffer we will fill with data
- */
- if (chan->sw_ptr == (chan->frag_number - 1))
- chan->sw_ptr = 0;
- else
- chan->sw_ptr++;
-
- /* mark one less buffer as being available for userspace consumption */
- assert (atomic_read (&chan->n_frags) > 0);
- atomic_dec (&chan->n_frags);
-
- /* we are at a block boundary, there is no fragment data */
- chan->slop_len = 0;
-
- /* if SGD has not yet been started, start it */
- via_chan_maybe_start (chan);
-
- DPRINTK ("Flushed block %u, sw_ptr now %u, n_frags now %d\n",
- n, chan->sw_ptr, atomic_read (&chan->n_frags));
-
- DPRINTK ("regs==S=%02X C=%02X TP=%02X BP=%08X RT=%08X SG=%08X CC=%08X SS=%08X\n",
- inb (card->baseaddr + 0x00),
- inb (card->baseaddr + 0x01),
- inb (card->baseaddr + 0x02),
- inl (card->baseaddr + 0x04),
- inl (card->baseaddr + 0x08),
- inl (card->baseaddr + 0x0C),
- inl (card->baseaddr + 0x80),
- inl (card->baseaddr + 0x84));
-
- if (count > 0)
- goto handle_one_block;
-
-out:
- if (userbuf - orig_userbuf)
- return userbuf - orig_userbuf;
- else
- return ret;
-}
-
-
-static ssize_t via_dsp_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
-{
- struct via_info *card;
- ssize_t rc;
- int nonblock = (file->f_flags & O_NONBLOCK);
-
- DPRINTK ("ENTER, file=%p, buffer=%p, count=%u, ppos=%lu\n",
- file, buffer, count, ppos ? ((unsigned long)*ppos) : 0);
-
- assert (file != NULL);
- card = file->private_data;
- assert (card != NULL);
-
- rc = via_syscall_down (card, nonblock);
- if (rc) goto out;
-
- if (card->ch_out.is_mapped) {
- rc = -ENXIO;
- goto out_up;
- }
-
- via_chan_set_buffering(card, &card->ch_out, -1);
- rc = via_chan_buffer_init (card, &card->ch_out);
-
- if (rc)
- goto out_up;
-
- rc = via_dsp_do_write (card, buffer, count, nonblock);
-
-out_up:
- mutex_unlock(&card->syscall_mutex);
-out:
- DPRINTK ("EXIT, returning %ld\n",(long) rc);
- return rc;
-}
-
-
-static unsigned int via_dsp_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct via_info *card;
- struct via_channel *chan;
- unsigned int mask = 0;
-
- DPRINTK ("ENTER\n");
-
- assert (file != NULL);
- card = file->private_data;
- assert (card != NULL);
-
- if (file->f_mode & FMODE_READ) {
- chan = &card->ch_in;
- if (sg_active (chan->iobase))
- poll_wait(file, &chan->wait, wait);
- if (atomic_read (&chan->n_frags) > 0)
- mask |= POLLIN | POLLRDNORM;
- }
-
- if (file->f_mode & FMODE_WRITE) {
- chan = &card->ch_out;
- if (sg_active (chan->iobase))
- poll_wait(file, &chan->wait, wait);
- if (atomic_read (&chan->n_frags) > 0)
- mask |= POLLOUT | POLLWRNORM;
- }
-
- DPRINTK ("EXIT, returning %u\n", mask);
- return mask;
-}
-
-
-/**
- * via_dsp_drain_playback - sleep until all playback samples are flushed
- * @card: Private info for specified board
- * @chan: Channel to drain
- * @nonblock: boolean, non-zero if O_NONBLOCK is set
- *
- * Sleeps until all playback has been flushed to the audio
- * hardware.
- *
- * Locking: inside card->syscall_mutex
- */
-
-static int via_dsp_drain_playback (struct via_info *card,
- struct via_channel *chan, int nonblock)
-{
- DECLARE_WAITQUEUE(wait, current);
- int ret = 0;
-
- DPRINTK ("ENTER, nonblock = %d\n", nonblock);
-
- if (chan->slop_len > 0)
- via_chan_flush_frag (chan);
-
- if (atomic_read (&chan->n_frags) == chan->frag_number)
- goto out;
-
- via_chan_maybe_start (chan);
-
- add_wait_queue(&chan->wait, &wait);
- for (;;) {
- DPRINTK ("FRAGS %d FRAGNUM %d\n", atomic_read(&chan->n_frags), chan->frag_number);
- __set_current_state(TASK_INTERRUPTIBLE);
- if (atomic_read (&chan->n_frags) >= chan->frag_number)
- break;
-
- if (nonblock) {
- DPRINTK ("EXIT, returning -EAGAIN\n");
- ret = -EAGAIN;
- break;
- }
-
-#ifdef VIA_DEBUG
- {
- u8 r40,r41,r42,r43,r44,r48;
- pci_read_config_byte (card->pdev, 0x40, &r40);
- pci_read_config_byte (card->pdev, 0x41, &r41);
- pci_read_config_byte (card->pdev, 0x42, &r42);
- pci_read_config_byte (card->pdev, 0x43, &r43);
- pci_read_config_byte (card->pdev, 0x44, &r44);
- pci_read_config_byte (card->pdev, 0x48, &r48);
- DPRINTK ("PCI config: %02X %02X %02X %02X %02X %02X\n",
- r40,r41,r42,r43,r44,r48);
-
- DPRINTK ("regs==%02X %02X %02X %08X %08X %08X %08X\n",
- inb (card->baseaddr + 0x00),
- inb (card->baseaddr + 0x01),
- inb (card->baseaddr + 0x02),
- inl (card->baseaddr + 0x04),
- inl (card->baseaddr + 0x0C),
- inl (card->baseaddr + 0x80),
- inl (card->baseaddr + 0x84));
- }
-
- if (!chan->is_active)
- printk (KERN_ERR "sleeping but not active\n");
-#endif
-
- mutex_unlock(&card->syscall_mutex);
-
- DPRINTK ("sleeping, nbufs=%d\n", atomic_read (&chan->n_frags));
- schedule();
-
- if ((ret = via_syscall_down (card, nonblock)))
- break;
-
- if (signal_pending (current)) {
- DPRINTK ("EXIT, returning -ERESTARTSYS\n");
- ret = -ERESTARTSYS;
- break;
- }
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&chan->wait, &wait);
-
-#ifdef VIA_DEBUG
- {
- u8 r40,r41,r42,r43,r44,r48;
- pci_read_config_byte (card->pdev, 0x40, &r40);
- pci_read_config_byte (card->pdev, 0x41, &r41);
- pci_read_config_byte (card->pdev, 0x42, &r42);
- pci_read_config_byte (card->pdev, 0x43, &r43);
- pci_read_config_byte (card->pdev, 0x44, &r44);
- pci_read_config_byte (card->pdev, 0x48, &r48);
- DPRINTK ("PCI config: %02X %02X %02X %02X %02X %02X\n",
- r40,r41,r42,r43,r44,r48);
-
- DPRINTK ("regs==%02X %02X %02X %08X %08X %08X %08X\n",
- inb (card->baseaddr + 0x00),
- inb (card->baseaddr + 0x01),
- inb (card->baseaddr + 0x02),
- inl (card->baseaddr + 0x04),
- inl (card->baseaddr + 0x0C),
- inl (card->baseaddr + 0x80),
- inl (card->baseaddr + 0x84));
-
- DPRINTK ("final nbufs=%d\n", atomic_read (&chan->n_frags));
- }
-#endif
-
-out:
- DPRINTK ("EXIT, returning %d\n", ret);
- return ret;
-}
-
-
-/**
- * via_dsp_ioctl_space - get information about channel buffering
- * @card: Private info for specified board
- * @chan: pointer to channel-specific info
- * @arg: user buffer for returned information
- *
- * Handles SNDCTL_DSP_GETISPACE and SNDCTL_DSP_GETOSPACE.
- *
- * Locking: inside card->syscall_mutex
- */
-
-static int via_dsp_ioctl_space (struct via_info *card,
- struct via_channel *chan,
- void __user *arg)
-{
- audio_buf_info info;
-
- via_chan_set_buffering(card, chan, -1);
-
- info.fragstotal = chan->frag_number;
- info.fragsize = chan->frag_size;
-
- /* number of full fragments we can read/write without blocking */
- info.fragments = atomic_read (&chan->n_frags);
-
- if ((chan->slop_len % chan->frag_size > 0) && (info.fragments > 0))
- info.fragments--;
-
- /* number of bytes that can be read or written immediately
- * without blocking.
- */
- info.bytes = (info.fragments * chan->frag_size);
- if (chan->slop_len % chan->frag_size > 0)
- info.bytes += chan->frag_size - (chan->slop_len % chan->frag_size);
-
- DPRINTK ("EXIT, returning fragstotal=%d, fragsize=%d, fragments=%d, bytes=%d\n",
- info.fragstotal,
- info.fragsize,
- info.fragments,
- info.bytes);
-
- return copy_to_user (arg, &info, sizeof (info))?-EFAULT:0;
-}
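
To make the fragment/byte arithmetic above concrete, here is a worked example with invented numbers (not taken from the hardware): 8 fragments of 4096 bytes, 5 of them currently free, and 1000 bytes of slop already queued in a partial fragment.

/* Hypothetical numbers, purely to illustrate the GETOSPACE math above. */
#include <stdio.h>

int main(void)
{
	int fragstotal = 8, fragsize = 4096;	/* frag_number, frag_size */
	int n_frags   = 5;			/* free fragments */
	int slop_len  = 1000;			/* bytes in a partial fragment */
	int fragments, bytes;

	fragments = n_frags;
	if (slop_len % fragsize > 0 && fragments > 0)
		fragments--;			/* 4: one frag is partly in use */

	bytes = fragments * fragsize;		/* 16384 */
	if (slop_len % fragsize > 0)
		bytes += fragsize - (slop_len % fragsize);	/* + 3096 */

	printf("fragstotal=%d fragsize=%d fragments=%d bytes=%d\n",
	       fragstotal, fragsize, fragments, bytes);	/* bytes == 19480 */
	return 0;
}
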
-
-
-/**
- * via_dsp_ioctl_ptr - get information about hardware buffer ptr
- * @card: Private info for specified board
- * @chan: pointer to channel-specific info
- * @arg: user buffer for returned information
- *
- * Handles SNDCTL_DSP_GETIPTR and SNDCTL_DSP_GETOPTR.
- *
- * Locking: inside card->syscall_mutex
- */
-
-static int via_dsp_ioctl_ptr (struct via_info *card,
- struct via_channel *chan,
- void __user *arg)
-{
- count_info info;
-
- spin_lock_irq (&card->lock);
-
- info.bytes = chan->bytes;
- info.blocks = chan->n_irqs;
- chan->n_irqs = 0;
-
- spin_unlock_irq (&card->lock);
-
- if (chan->is_active) {
- unsigned long extra;
- info.ptr = atomic_read (&chan->hw_ptr) * chan->frag_size;
- extra = chan->frag_size - via_sg_offset(chan);
- info.ptr += extra;
- info.bytes += extra;
- } else {
- info.ptr = 0;
- }
-
- DPRINTK ("EXIT, returning bytes=%d, blocks=%d, ptr=%d\n",
- info.bytes,
- info.blocks,
- info.ptr);
-
- return copy_to_user (arg, &info, sizeof (info))?-EFAULT:0;
-}
-
-
-static int via_dsp_ioctl_trigger (struct via_channel *chan, int val)
-{
- int enable, do_something;
-
- if (chan->is_record)
- enable = (val & PCM_ENABLE_INPUT);
- else
- enable = (val & PCM_ENABLE_OUTPUT);
-
- if (!chan->is_enabled && enable) {
- do_something = 1;
- } else if (chan->is_enabled && !enable) {
- do_something = -1;
- } else {
- do_something = 0;
- }
-
- DPRINTK ("enable=%d, do_something=%d\n",
- enable, do_something);
-
- if (chan->is_active && do_something)
- return -EINVAL;
-
- if (do_something == 1) {
- chan->is_enabled = 1;
- via_chan_maybe_start (chan);
- DPRINTK ("Triggering input\n");
- }
-
- else if (do_something == -1) {
- chan->is_enabled = 0;
- DPRINTK ("Setup input trigger\n");
- }
-
- return 0;
-}
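
From userspace, trigger logic like the above is normally exercised with the standard OSS two-step: clear the trigger bits, pre-fill the buffer, then set PCM_ENABLE_OUTPUT (or PCM_ENABLE_INPUT) to quick-start the channel. A generic OSS usage sketch, not taken from any particular application and with error handling omitted:

/* Generic OSS sketch; assumes an already-opened /dev/dsp descriptor in fd. */
#include <sys/ioctl.h>
#include <sys/soundcard.h>

static void quick_start_playback(int fd)
{
	int trig = 0;

	/* disable the trigger so playback does not auto-start on write() */
	ioctl(fd, SNDCTL_DSP_SETTRIGGER, &trig);

	/* ... pre-fill the DMA buffers with write() calls here ... */

	trig = PCM_ENABLE_OUTPUT;
	ioctl(fd, SNDCTL_DSP_SETTRIGGER, &trig);	/* quick-start playback */
}
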
-
-
-static int via_dsp_ioctl (struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- int rc, rd=0, wr=0, val=0;
- struct via_info *card;
- struct via_channel *chan;
- int nonblock = (file->f_flags & O_NONBLOCK);
- int __user *ip = (int __user *)arg;
- void __user *p = (void __user *)arg;
-
- assert (file != NULL);
- card = file->private_data;
- assert (card != NULL);
-
- if (file->f_mode & FMODE_WRITE)
- wr = 1;
- if (file->f_mode & FMODE_READ)
- rd = 1;
-
- rc = via_syscall_down (card, nonblock);
- if (rc)
- return rc;
- rc = -EINVAL;
-
- switch (cmd) {
-
- /* OSS API version. XXX unverified */
- case OSS_GETVERSION:
- DPRINTK ("ioctl OSS_GETVERSION, EXIT, returning SOUND_VERSION\n");
- rc = put_user (SOUND_VERSION, ip);
- break;
-
- /* list of supported PCM data formats */
- case SNDCTL_DSP_GETFMTS:
- DPRINTK ("DSP_GETFMTS, EXIT, returning AFMT U8|S16_LE\n");
- rc = put_user (AFMT_U8 | AFMT_S16_LE, ip);
- break;
-
- /* query or set current channel's PCM data format */
- case SNDCTL_DSP_SETFMT:
- if (get_user(val, ip)) {
- rc = -EFAULT;
- break;
- }
- DPRINTK ("DSP_SETFMT, val==%d\n", val);
- if (val != AFMT_QUERY) {
- rc = 0;
-
- if (rd)
- rc = via_chan_set_fmt (card, &card->ch_in, val);
-
- if (rc >= 0 && wr)
- rc = via_chan_set_fmt (card, &card->ch_out, val);
-
- if (rc < 0)
- break;
-
- val = rc;
- } else {
- if ((rd && (card->ch_in.pcm_fmt & VIA_PCM_FMT_16BIT)) ||
- (wr && (card->ch_out.pcm_fmt & VIA_PCM_FMT_16BIT)))
- val = AFMT_S16_LE;
- else
- val = AFMT_U8;
- }
- DPRINTK ("SETFMT EXIT, returning %d\n", val);
- rc = put_user (val, ip);
- break;
-
- /* query or set number of channels (1=mono, 2=stereo, 4/6 for multichannel) */
- case SNDCTL_DSP_CHANNELS:
- if (get_user(val, ip)) {
- rc = -EFAULT;
- break;
- }
- DPRINTK ("DSP_CHANNELS, val==%d\n", val);
- if (val != 0) {
- rc = 0;
-
- if (rd)
- rc = via_chan_set_stereo (card, &card->ch_in, val);
-
- if (rc >= 0 && wr)
- rc = via_chan_set_stereo (card, &card->ch_out, val);
-
- if (rc < 0)
- break;
-
- val = rc;
- } else {
- if (rd)
- val = card->ch_in.channels;
- else
- val = card->ch_out.channels;
- }
- DPRINTK ("CHANNELS EXIT, returning %d\n", val);
- rc = put_user (val, ip);
- break;
-
- /* enable (val is not zero) or disable (val == 0) stereo */
- case SNDCTL_DSP_STEREO:
- if (get_user(val, ip)) {
- rc = -EFAULT;
- break;
- }
- DPRINTK ("DSP_STEREO, val==%d\n", val);
- rc = 0;
-
- if (rd)
- rc = via_chan_set_stereo (card, &card->ch_in, val ? 2 : 1);
- if (rc >= 0 && wr)
- rc = via_chan_set_stereo (card, &card->ch_out, val ? 2 : 1);
-
- if (rc < 0)
- break;
-
- val = rc - 1;
-
- DPRINTK ("STEREO EXIT, returning %d\n", val);
- rc = put_user(val, ip);
- break;
-
- /* query or set sampling rate */
- case SNDCTL_DSP_SPEED:
- if (get_user(val, ip)) {
- rc = -EFAULT;
- break;
- }
- DPRINTK ("DSP_SPEED, val==%d\n", val);
- if (val < 0) {
- rc = -EINVAL;
- break;
- }
- if (val > 0) {
- rc = 0;
-
- if (rd)
- rc = via_chan_set_speed (card, &card->ch_in, val);
- if (rc >= 0 && wr)
- rc = via_chan_set_speed (card, &card->ch_out, val);
-
- if (rc < 0)
- break;
-
- val = rc;
- } else {
- if (rd)
- val = card->ch_in.rate;
- else if (wr)
- val = card->ch_out.rate;
- else
- val = 0;
- }
- DPRINTK ("SPEED EXIT, returning %d\n", val);
- rc = put_user (val, ip);
- break;
-
- /* wait until all buffers have been played, and then stop device */
- case SNDCTL_DSP_SYNC:
- DPRINTK ("DSP_SYNC\n");
- rc = 0;
- if (wr) {
- DPRINTK ("SYNC EXIT (after calling via_dsp_drain_playback)\n");
- rc = via_dsp_drain_playback (card, &card->ch_out, nonblock);
- }
- break;
-
- /* stop recording/playback immediately */
- case SNDCTL_DSP_RESET:
- DPRINTK ("DSP_RESET\n");
- if (rd) {
- via_chan_clear (card, &card->ch_in);
- card->ch_in.frag_number = 0;
- card->ch_in.frag_size = 0;
- atomic_set(&card->ch_in.n_frags, 0);
- }
-
- if (wr) {
- via_chan_clear (card, &card->ch_out);
- card->ch_out.frag_number = 0;
- card->ch_out.frag_size = 0;
- atomic_set(&card->ch_out.n_frags, 0);
- }
-
- rc = 0;
- break;
-
- case SNDCTL_DSP_NONBLOCK:
- file->f_flags |= O_NONBLOCK;
- rc = 0;
- break;
-
- /* obtain bitmask of device capabilities, such as mmap, full duplex, etc. */
- case SNDCTL_DSP_GETCAPS:
- DPRINTK ("DSP_GETCAPS\n");
- rc = put_user(VIA_DSP_CAP, ip);
- break;
-
- /* obtain buffer fragment size */
- case SNDCTL_DSP_GETBLKSIZE:
- DPRINTK ("DSP_GETBLKSIZE\n");
-
- if (rd) {
- via_chan_set_buffering(card, &card->ch_in, -1);
- rc = put_user(card->ch_in.frag_size, ip);
- } else if (wr) {
- via_chan_set_buffering(card, &card->ch_out, -1);
- rc = put_user(card->ch_out.frag_size, ip);
- }
- break;
-
- /* obtain information about input buffering */
- case SNDCTL_DSP_GETISPACE:
- DPRINTK ("DSP_GETISPACE\n");
- if (rd)
- rc = via_dsp_ioctl_space (card, &card->ch_in, p);
- break;
-
- /* obtain information about output buffering */
- case SNDCTL_DSP_GETOSPACE:
- DPRINTK ("DSP_GETOSPACE\n");
- if (wr)
- rc = via_dsp_ioctl_space (card, &card->ch_out, p);
- break;
-
- /* obtain information about input hardware pointer */
- case SNDCTL_DSP_GETIPTR:
- DPRINTK ("DSP_GETIPTR\n");
- if (rd)
- rc = via_dsp_ioctl_ptr (card, &card->ch_in, p);
- break;
-
- /* obtain information about output hardware pointer */
- case SNDCTL_DSP_GETOPTR:
- DPRINTK ("DSP_GETOPTR\n");
- if (wr)
- rc = via_dsp_ioctl_ptr (card, &card->ch_out, p);
- break;
-
- /* return number of bytes remaining to be played by DMA engine */
- case SNDCTL_DSP_GETODELAY:
- {
- DPRINTK ("DSP_GETODELAY\n");
-
- chan = &card->ch_out;
-
- if (!wr)
- break;
-
- if (chan->is_active) {
-
- val = chan->frag_number - atomic_read (&chan->n_frags);
-
- assert(val >= 0);
-
- if (val > 0) {
- val *= chan->frag_size;
- val -= chan->frag_size - via_sg_offset(chan);
- }
- val += chan->slop_len % chan->frag_size;
- } else
- val = 0;
-
- assert (val <= (chan->frag_size * chan->frag_number));
-
- DPRINTK ("GETODELAY EXIT, val = %d bytes\n", val);
- rc = put_user (val, ip);
- break;
- }
-
- /* handle the quick-start of a channel,
- * or the notification that a quick-start will
- * occur in the future
- */
- case SNDCTL_DSP_SETTRIGGER:
- if (get_user(val, ip)) {
- rc = -EFAULT;
- break;
- }
- DPRINTK ("DSP_SETTRIGGER, rd=%d, wr=%d, act=%d/%d, en=%d/%d\n",
- rd, wr, card->ch_in.is_active, card->ch_out.is_active,
- card->ch_in.is_enabled, card->ch_out.is_enabled);
-
- rc = 0;
-
- if (rd)
- rc = via_dsp_ioctl_trigger (&card->ch_in, val);
-
- if (!rc && wr)
- rc = via_dsp_ioctl_trigger (&card->ch_out, val);
-
- break;
-
- case SNDCTL_DSP_GETTRIGGER:
- val = 0;
- if ((file->f_mode & FMODE_READ) && card->ch_in.is_enabled)
- val |= PCM_ENABLE_INPUT;
- if ((file->f_mode & FMODE_WRITE) && card->ch_out.is_enabled)
- val |= PCM_ENABLE_OUTPUT;
- rc = put_user(val, ip);
- break;
-
- /* Enable full duplex. Since we do this as soon as we are opened
- * with O_RDWR, this is mainly a no-op that always returns success.
- */
- case SNDCTL_DSP_SETDUPLEX:
- DPRINTK ("DSP_SETDUPLEX\n");
- if (!rd || !wr)
- break;
- rc = 0;
- break;
-
- /* set fragment size hint; buffering is updated via via_chan_set_buffering(),
- * but errors are ignored and success is always reported */
- case SNDCTL_DSP_SETFRAGMENT:
- if (get_user(val, ip)) {
- rc = -EFAULT;
- break;
- }
- DPRINTK ("DSP_SETFRAGMENT, val==%d\n", val);
-
- if (rd)
- rc = via_chan_set_buffering(card, &card->ch_in, val);
-
- if (wr)
- rc = via_chan_set_buffering(card, &card->ch_out, val);
-
- DPRINTK ("SNDCTL_DSP_SETFRAGMENT (fragshift==0x%04X (%d), maxfrags==0x%04X (%d))\n",
- val & 0xFFFF,
- val & 0xFFFF,
- (val >> 16) & 0xFFFF,
- (val >> 16) & 0xFFFF);
-
- rc = 0;
- break;
-
- /* inform device of an upcoming pause in input (or output). */
- case SNDCTL_DSP_POST:
- DPRINTK ("DSP_POST\n");
- if (wr) {
- if (card->ch_out.slop_len > 0)
- via_chan_flush_frag (&card->ch_out);
- via_chan_maybe_start (&card->ch_out);
- }
-
- rc = 0;
- break;
-
- /* not implemented */
- default:
- DPRINTK ("unhandled ioctl, cmd==%u, arg==%p\n",
- cmd, p);
- break;
- }
-
- mutex_unlock(&card->syscall_mutex);
- DPRINTK ("EXIT, returning %d\n", rc);
- return rc;
-}
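
For reference, the usual userspace counterpart of the ioctl handler above sets the sample format, channel count, and rate in that order, re-reading each value because the driver writes back what it actually granted. An illustrative sketch (not from this driver's sources):

/* Illustrative OSS configuration sequence; error handling kept minimal. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>

static int open_dsp_16bit_stereo_44k(const char *dev)
{
	int fd = open(dev, O_WRONLY);
	int fmt = AFMT_S16_LE, ch = 2, rate = 44100;

	if (fd < 0)
		return -1;

	/* each ioctl writes back the value the driver actually selected */
	if (ioctl(fd, SNDCTL_DSP_SETFMT, &fmt) < 0 ||
	    ioctl(fd, SNDCTL_DSP_CHANNELS, &ch) < 0 ||
	    ioctl(fd, SNDCTL_DSP_SPEED, &rate) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* caller should check fmt/ch/rate for what was granted */
}
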
-
-
-static int via_dsp_open (struct inode *inode, struct file *file)
-{
- int minor = iminor(inode);
- struct via_info *card;
- struct pci_dev *pdev = NULL;
- struct via_channel *chan;
- struct pci_driver *drvr;
- int nonblock = (file->f_flags & O_NONBLOCK);
-
- DPRINTK ("ENTER, minor=%d, file->f_mode=0x%x\n", minor, file->f_mode);
-
- if (!(file->f_mode & (FMODE_READ | FMODE_WRITE))) {
- DPRINTK ("EXIT, returning -EINVAL\n");
- return -EINVAL;
- }
-
- card = NULL;
- while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
- drvr = pci_dev_driver (pdev);
- if (drvr == &via_driver) {
- assert (pci_get_drvdata (pdev) != NULL);
-
- card = pci_get_drvdata (pdev);
- DPRINTK ("dev_dsp = %d, minor = %d, assn = %d\n",
- card->dev_dsp, minor,
- (card->dev_dsp ^ minor) & ~0xf);
-
- if (((card->dev_dsp ^ minor) & ~0xf) == 0)
- goto match;
- }
- }
-
- DPRINTK ("no matching %s found\n", card ? "minor" : "driver");
- return -ENODEV;
-
-match:
- pci_dev_put(pdev);
- if (nonblock) {
- if (!mutex_trylock(&card->open_mutex)) {
- DPRINTK ("EXIT, returning -EAGAIN\n");
- return -EAGAIN;
- }
- } else {
- if (mutex_lock_interruptible(&card->open_mutex)) {
- DPRINTK ("EXIT, returning -ERESTARTSYS\n");
- return -ERESTARTSYS;
- }
- }
-
- file->private_data = card;
- DPRINTK ("file->f_mode == 0x%x\n", file->f_mode);
-
- /* handle input from analog source */
- if (file->f_mode & FMODE_READ) {
- chan = &card->ch_in;
-
- via_chan_init (card, chan);
-
- /* why is this forced to 16-bit stereo in all drivers? */
- chan->pcm_fmt = VIA_PCM_FMT_16BIT | VIA_PCM_FMT_STEREO;
- chan->channels = 2;
-
- // TO DO - use FIFO: via_capture_fifo(card, 1);
- via_chan_pcm_fmt (chan, 0);
- via_set_rate (card->ac97, chan, 44100);
- }
-
- /* handle output to analog source */
- if (file->f_mode & FMODE_WRITE) {
- chan = &card->ch_out;
-
- via_chan_init (card, chan);
-
- if (file->f_mode & FMODE_READ) {
- /* if in duplex mode make the recording and playback channels
- have the same settings */
- chan->pcm_fmt = VIA_PCM_FMT_16BIT | VIA_PCM_FMT_STEREO;
- chan->channels = 2;
- via_chan_pcm_fmt (chan, 0);
- via_set_rate (card->ac97, chan, 44100);
- } else {
- if ((minor & 0xf) == SND_DEV_DSP16) {
- chan->pcm_fmt = VIA_PCM_FMT_16BIT;
- via_chan_pcm_fmt (chan, 0);
- via_set_rate (card->ac97, chan, 44100);
- } else {
- via_chan_pcm_fmt (chan, 1);
- via_set_rate (card->ac97, chan, 8000);
- }
- }
- }
-
- DPRINTK ("EXIT, returning 0\n");
- return nonseekable_open(inode, file);
-}
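
The minor-number match above masks off the low four bits, so any OSS device node within a card's 16-minor block selects the same struct via_info. A tiny numeric illustration with hypothetical minors:

/* Hypothetical minors, just to show the masking used in via_dsp_open(). */
#include <stdio.h>

int main(void)
{
	int dev_dsp  = 19;			/* e.g. the dsp node of a second card */
	int minors[] = { 19, 20, 35 };		/* 19 and 20 share the block; 35 does not */
	int i;

	for (i = 0; i < 3; i++)
		printf("minor %d: %s\n", minors[i],
		       ((dev_dsp ^ minors[i]) & ~0xf) == 0 ? "match" : "no match");
	return 0;
}
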
-
-
-static int via_dsp_release(struct inode *inode, struct file *file)
-{
- struct via_info *card;
- int nonblock = (file->f_flags & O_NONBLOCK);
- int rc;
-
- DPRINTK ("ENTER\n");
-
- assert (file != NULL);
- card = file->private_data;
- assert (card != NULL);
-
- rc = via_syscall_down (card, nonblock);
- if (rc) {
- DPRINTK ("EXIT (syscall_down error), rc=%d\n", rc);
- return rc;
- }
-
- if (file->f_mode & FMODE_WRITE) {
- rc = via_dsp_drain_playback (card, &card->ch_out, nonblock);
- if (rc && rc != -ERESTARTSYS) /* Nobody needs to know about ^C */
- printk (KERN_DEBUG "via_audio: ignoring drain playback error %d\n", rc);
-
- via_chan_free (card, &card->ch_out);
- via_chan_buffer_free(card, &card->ch_out);
- }
-
- if (file->f_mode & FMODE_READ) {
- via_chan_free (card, &card->ch_in);
- via_chan_buffer_free (card, &card->ch_in);
- }
-
- mutex_unlock(&card->syscall_mutex);
- mutex_unlock(&card->open_mutex);
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-}
-
-
-/****************************************************************
- *
- * Chip setup and kernel registration
- *
- *
- */
-
-static int __devinit via_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
-{
-#ifdef CONFIG_MIDI_VIA82CXXX
- u8 r42;
-#endif
- int rc;
- struct via_info *card;
- static int printed_version;
-
- DPRINTK ("ENTER\n");
-
- if (printed_version++ == 0)
- printk (KERN_INFO "Via 686a/8233/8235 audio driver " VIA_VERSION "\n");
-
- rc = pci_enable_device (pdev);
- if (rc)
- goto err_out;
-
- rc = pci_request_regions (pdev, "via82cxxx_audio");
- if (rc)
- goto err_out_disable;
-
- rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
- if (rc)
- goto err_out_res;
- rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
- if (rc)
- goto err_out_res;
-
- card = kmalloc (sizeof (*card), GFP_KERNEL);
- if (!card) {
- printk (KERN_ERR PFX "out of memory, aborting\n");
- rc = -ENOMEM;
- goto err_out_res;
- }
-
- pci_set_drvdata (pdev, card);
-
- memset (card, 0, sizeof (*card));
- card->pdev = pdev;
- card->baseaddr = pci_resource_start (pdev, 0);
- card->card_num = via_num_cards++;
- spin_lock_init (&card->lock);
- spin_lock_init (&card->ac97_lock);
- mutex_init(&card->syscall_mutex);
- mutex_init(&card->open_mutex);
-
- /* we must init these now, in case the intr handler needs them */
- via_chan_init_defaults (card, &card->ch_out);
- via_chan_init_defaults (card, &card->ch_in);
- via_chan_init_defaults (card, &card->ch_fm);
-
- /* if BAR 2 is present, chip is Rev H or later,
- * which means it has a few extra features */
- if (pci_resource_start (pdev, 2) > 0)
- card->rev_h = 1;
-
- /* Overkill for now, but more flexible if done right */
-
- card->intmask = id->driver_data;
- card->legacy = !card->intmask;
- card->sixchannel = id->driver_data;
-
- if(card->sixchannel)
- printk(KERN_INFO PFX "Six channel audio available\n");
- if (pdev->irq < 1) {
- printk (KERN_ERR PFX "invalid PCI IRQ %d, aborting\n", pdev->irq);
- rc = -ENODEV;
- goto err_out_kfree;
- }
-
- if (!(pci_resource_flags (pdev, 0) & IORESOURCE_IO)) {
- printk (KERN_ERR PFX "unable to locate I/O resources, aborting\n");
- rc = -ENODEV;
- goto err_out_kfree;
- }
-
- pci_set_master(pdev);
-
- /*
- * init AC97 mixer and codec
- */
- rc = via_ac97_init (card);
- if (rc) {
- printk (KERN_ERR PFX "AC97 init failed, aborting\n");
- goto err_out_kfree;
- }
-
- /*
- * init DSP device
- */
- rc = via_dsp_init (card);
- if (rc) {
- printk (KERN_ERR PFX "DSP device init failed, aborting\n");
- goto err_out_have_mixer;
- }
-
- /*
- * init and turn on interrupts, as the last thing we do
- */
- rc = via_interrupt_init (card);
- if (rc) {
- printk (KERN_ERR PFX "interrupt init failed, aborting\n");
- goto err_out_have_dsp;
- }
-
- printk (KERN_INFO PFX "board #%d at 0x%04lX, IRQ %d\n",
- card->card_num + 1, card->baseaddr, pdev->irq);
-
-#ifdef CONFIG_MIDI_VIA82CXXX
- /* Disable by default */
- card->midi_info.io_base = 0;
-
- if(card->legacy)
- {
- pci_read_config_byte (pdev, 0x42, &r42);
- /* Disable MIDI interrupt */
- pci_write_config_byte (pdev, 0x42, r42 | VIA_CR42_MIDI_IRQMASK);
- if (r42 & VIA_CR42_MIDI_ENABLE)
- {
- if (r42 & VIA_CR42_MIDI_PNP) /* Address selected by iobase 2 - not tested */
- card->midi_info.io_base = pci_resource_start (pdev, 2);
- else /* Address selected by byte 0x43 */
- {
- u8 r43;
- pci_read_config_byte (pdev, 0x43, &r43);
- card->midi_info.io_base = 0x300 + ((r43 & 0x0c) << 2);
- }
-
- card->midi_info.irq = -pdev->irq;
- if (probe_uart401(& card->midi_info, THIS_MODULE))
- {
- card->midi_devc=midi_devs[card->midi_info.slots[4]]->devc;
- pci_write_config_byte(pdev, 0x42, r42 & ~VIA_CR42_MIDI_IRQMASK);
- printk("Enabled Via MIDI\n");
- }
- }
- }
-#endif
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-
-err_out_have_dsp:
- via_dsp_cleanup (card);
-
-err_out_have_mixer:
- via_ac97_cleanup (card);
-
-err_out_kfree:
-#ifndef VIA_NDEBUG
- memset (card, OSS_POISON_FREE, sizeof (*card)); /* poison memory */
-#endif
- kfree (card);
-
-err_out_res:
- pci_release_regions (pdev);
-
-err_out_disable:
- pci_disable_device (pdev);
-
-err_out:
- pci_set_drvdata (pdev, NULL);
- DPRINTK ("EXIT - returning %d\n", rc);
- return rc;
-}
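
The probe routine above follows the kernel's standard goto-based unwind: each successfully acquired resource adds one error label, and failure paths jump to the label that releases everything acquired so far, in reverse order. A stripped-down sketch of that shape; struct my_priv and the "my_driver" name are hypothetical.

/* Sketch of the goto-unwind error handling pattern used by via_init_one(). */
static int my_probe(struct pci_dev *pdev)
{
	struct my_priv *priv;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	rc = pci_request_regions(pdev, "my_driver");
	if (rc)
		goto err_out_disable;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		rc = -ENOMEM;
		goto err_out_res;
	}
	pci_set_drvdata(pdev, priv);

	return 0;			/* fully initialized */

err_out_res:
	pci_release_regions(pdev);	/* undo in reverse order of acquisition */
err_out_disable:
	pci_disable_device(pdev);
err_out:
	return rc;
}
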
-
-
-static void __devexit via_remove_one (struct pci_dev *pdev)
-{
- struct via_info *card;
-
- DPRINTK ("ENTER\n");
-
- assert (pdev != NULL);
- card = pci_get_drvdata (pdev);
- assert (card != NULL);
-
-#ifdef CONFIG_MIDI_VIA82CXXX
- if (card->midi_info.io_base)
- unload_uart401(&card->midi_info);
-#endif
-
- free_irq (card->pdev->irq, card);
- via_dsp_cleanup (card);
- via_ac97_cleanup (card);
-
-#ifndef VIA_NDEBUG
- memset (card, OSS_POISON_FREE, sizeof (*card)); /* poison memory */
-#endif
- kfree (card);
-
- pci_set_drvdata (pdev, NULL);
-
- pci_release_regions (pdev);
- pci_disable_device (pdev);
- pci_set_power_state (pdev, 3); /* ...zzzzzz */
-
- DPRINTK ("EXIT\n");
- return;
-}
-
-
-/****************************************************************
- *
- * Driver initialization and cleanup
- *
- *
- */
-
-static int __init init_via82cxxx_audio(void)
-{
- int rc;
-
- DPRINTK ("ENTER\n");
-
- rc = pci_register_driver (&via_driver);
- if (rc) {
- DPRINTK ("EXIT, returning %d\n", rc);
- return rc;
- }
-
- DPRINTK ("EXIT, returning 0\n");
- return 0;
-}
-
-
-static void __exit cleanup_via82cxxx_audio(void)
-{
- DPRINTK ("ENTER\n");
-
- pci_unregister_driver (&via_driver);
-
- DPRINTK ("EXIT\n");
-}
-
-
-module_init(init_via82cxxx_audio);
-module_exit(cleanup_via82cxxx_audio);
-
-MODULE_AUTHOR("Jeff Garzik");
-MODULE_DESCRIPTION("DSP audio and mixer driver for Via 82Cxxx audio devices");
-MODULE_LICENSE("GPL");
-