254 files changed, 13629 insertions, 7067 deletions
diff --git a/Documentation/ABI/stable/sysfs-bus-firewire b/Documentation/ABI/stable/sysfs-bus-firewire index 3d484e5dc846..41e5a0cd1e3e 100644 --- a/Documentation/ABI/stable/sysfs-bus-firewire +++ b/Documentation/ABI/stable/sysfs-bus-firewire @@ -39,6 +39,17 @@ Users: udev rules to set ownership and access permissions or ACLs of /dev/fw[0-9]+ character device files +What: /sys/bus/firewire/devices/fw[0-9]+/is_local +Date: July 2012 +KernelVersion: 3.6 +Contact: linux1394-devel@lists.sourceforge.net +Description: + IEEE 1394 node device attribute. + Read-only and immutable. +Values: 1: The sysfs entry represents a local node (a controller card). + 0: The sysfs entry represents a remote node. + + What: /sys/bus/firewire/devices/fw[0-9]+[.][0-9]+/ Date: May 2007 KernelVersion: 2.6.22 diff --git a/Documentation/ABI/testing/sysfs-devices-edac b/Documentation/ABI/testing/sysfs-devices-edac new file mode 100644 index 000000000000..30ee78aaed75 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-devices-edac @@ -0,0 +1,140 @@ +What: /sys/devices/system/edac/mc/mc*/reset_counters +Date: January 2006 +Contact: linux-edac@vger.kernel.org +Description: This write-only control file will zero all the statistical + counters for UE and CE errors on the given memory controller. + Zeroing the counters will also reset the timer indicating how + long since the last counter were reset. This is useful for + computing errors/time. Since the counters are always reset + at driver initialization time, no module/kernel parameter + is available. + +What: /sys/devices/system/edac/mc/mc*/seconds_since_reset +Date: January 2006 +Contact: linux-edac@vger.kernel.org +Description: This attribute file displays how many seconds have elapsed + since the last counter reset. This can be used with the error + counters to measure error rates. + +What: /sys/devices/system/edac/mc/mc*/mc_name +Date: January 2006 +Contact: linux-edac@vger.kernel.org +Description: This attribute file displays the type of memory controller + that is being utilized. + +What: /sys/devices/system/edac/mc/mc*/size_mb +Date: January 2006 +Contact: linux-edac@vger.kernel.org +Description: This attribute file displays, in count of megabytes, of memory + that this memory controller manages. + +What: /sys/devices/system/edac/mc/mc*/ue_count +Date: January 2006 +Contact: linux-edac@vger.kernel.org +Description: This attribute file displays the total count of uncorrectable + errors that have occurred on this memory controller. If + panic_on_ue is set, this counter will not have a chance to + increment, since EDAC will panic the system + +What: /sys/devices/system/edac/mc/mc*/ue_noinfo_count +Date: January 2006 +Contact: linux-edac@vger.kernel.org +Description: This attribute file displays the number of UEs that have + occurred on this memory controller with no information as to + which DIMM slot is having errors. + +What: /sys/devices/system/edac/mc/mc*/ce_count +Date: January 2006 +Contact: linux-edac@vger.kernel.org +Description: This attribute file displays the total count of correctable + errors that have occurred on this memory controller. This + count is very important to examine. CEs provide early + indications that a DIMM is beginning to fail. This count + field should be monitored for non-zero values and report + such information to the system administrator. 
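As a concrete illustration of the monitoring advice in the ce_count entry above, here is a minimal userspace sketch (hypothetical tooling, not part of the patch) that reads the counter of memory controller 0 and warns when it is non-zero; the mc0 instance is only an example, and real tooling would iterate over all mc* directories:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned long ce_count = 0;
            FILE *f = fopen("/sys/devices/system/edac/mc/mc0/ce_count", "r");

            if (!f) {
                    perror("ce_count");
                    return EXIT_FAILURE;
            }
            if (fscanf(f, "%lu", &ce_count) != 1)
                    ce_count = 0;
            fclose(f);

            /* CEs are an early warning that a DIMM may be failing. */
            if (ce_count)
                    fprintf(stderr, "mc0: %lu correctable errors logged\n",
                            ce_count);
            return EXIT_SUCCESS;
    }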
+ +What: /sys/devices/system/edac/mc/mc*/ce_noinfo_count +Date: January 2006 +Contact: linux-edac@vger.kernel.org +Description: This attribute file displays the number of CEs that + have occurred on this memory controller wherewith no + information as to which DIMM slot is having errors. Memory is + handicapped, but operational, yet no information is available + to indicate which slot the failing memory is in. This count + field should be also be monitored for non-zero values. + +What: /sys/devices/system/edac/mc/mc*/sdram_scrub_rate +Date: February 2007 +Contact: linux-edac@vger.kernel.org +Description: Read/Write attribute file that controls memory scrubbing. + The scrubbing rate used by the memory controller is set by + writing a minimum bandwidth in bytes/sec to the attribute file. + The rate will be translated to an internal value that gives at + least the specified rate. + Reading the file will return the actual scrubbing rate employed. + If configuration fails or memory scrubbing is not implemented, + the value of the attribute file will be -1. + +What: /sys/devices/system/edac/mc/mc*/max_location +Date: April 2012 +Contact: Mauro Carvalho Chehab <mchehab@redhat.com> + linux-edac@vger.kernel.org +Description: This attribute file displays the information about the last + available memory slot in this memory controller. It is used by + userspace tools in order to display the memory filling layout. + +What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/size +Date: April 2012 +Contact: Mauro Carvalho Chehab <mchehab@redhat.com> + linux-edac@vger.kernel.org +Description: This attribute file will display the size of dimm or rank. + For dimm*/size, this is the size, in MB of the DIMM memory + stick. For rank*/size, this is the size, in MB for one rank + of the DIMM memory stick. On single rank memories (1R), this + is also the total size of the dimm. On dual rank (2R) memories, + this is half the size of the total DIMM memories. + +What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_dev_type +Date: April 2012 +Contact: Mauro Carvalho Chehab <mchehab@redhat.com> + linux-edac@vger.kernel.org +Description: This attribute file will display what type of DRAM device is + being utilized on this DIMM (x1, x2, x4, x8, ...). + +What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_edac_mode +Date: April 2012 +Contact: Mauro Carvalho Chehab <mchehab@redhat.com> + linux-edac@vger.kernel.org +Description: This attribute file will display what type of Error detection + and correction is being utilized. For example: S4ECD4ED would + mean a Chipkill with x4 DRAM. + +What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_label +Date: April 2012 +Contact: Mauro Carvalho Chehab <mchehab@redhat.com> + linux-edac@vger.kernel.org +Description: This control file allows this DIMM to have a label assigned + to it. With this label in the module, when errors occur + the output can provide the DIMM label in the system log. + This becomes vital for panic events to isolate the + cause of the UE event. + DIMM Labels must be assigned after booting, with information + that correctly identifies the physical slot with its + silk screen label. This information is currently very + motherboard specific and determination of this information + must occur in userland at this time. 
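Since the dimm_label entry above notes that labels have to be assigned from userland after boot, a hedged sketch of such an assignment could look like this; both the dimm0 path and the silk-screen string are illustrative, and the correct mapping is motherboard specific:

    #include <stdio.h>

    int main(void)
    {
            /* Path and label are examples only; adjust per board. */
            FILE *f = fopen("/sys/devices/system/edac/mc/mc0/dimm0/dimm_label", "w");

            if (!f) {
                    perror("dimm_label");
                    return 1;
            }
            fprintf(f, "CPU_SrcA#0_DIMM#0\n");
            return fclose(f) ? 1 : 0;
    }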
+ +What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_location +Date: April 2012 +Contact: Mauro Carvalho Chehab <mchehab@redhat.com> + linux-edac@vger.kernel.org +Description: This attribute file will display the location (csrow/channel, + branch/channel/slot or channel/slot) of the dimm or rank. + +What: /sys/devices/system/edac/mc/mc*/(dimm|rank)*/dimm_mem_type +Date: April 2012 +Contact: Mauro Carvalho Chehab <mchehab@redhat.com> + linux-edac@vger.kernel.org +Description: This attribute file will display what type of memory is + currently on this csrow. Normally, either buffered or + unbuffered memory (for example, Unbuffered-DDR3). diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt index 5c72eed89563..f50309081ac7 100644 --- a/Documentation/DMA-attributes.txt +++ b/Documentation/DMA-attributes.txt @@ -49,3 +49,45 @@ DMA_ATTR_NON_CONSISTENT lets the platform to choose to return either consistent or non-consistent memory as it sees fit. By using this API, you are guaranteeing to the platform that you have all the correct and necessary sync points for this memory in the driver. + +DMA_ATTR_NO_KERNEL_MAPPING +-------------------------- + +DMA_ATTR_NO_KERNEL_MAPPING lets the platform to avoid creating a kernel +virtual mapping for the allocated buffer. On some architectures creating +such mapping is non-trivial task and consumes very limited resources +(like kernel virtual address space or dma consistent address space). +Buffers allocated with this attribute can be only passed to user space +by calling dma_mmap_attrs(). By using this API, you are guaranteeing +that you won't dereference the pointer returned by dma_alloc_attr(). You +can threat it as a cookie that must be passed to dma_mmap_attrs() and +dma_free_attrs(). Make sure that both of these also get this attribute +set on each call. + +Since it is optional for platforms to implement +DMA_ATTR_NO_KERNEL_MAPPING, those that do not will simply ignore the +attribute and exhibit default behavior. + +DMA_ATTR_SKIP_CPU_SYNC +---------------------- + +By default dma_map_{single,page,sg} functions family transfer a given +buffer from CPU domain to device domain. Some advanced use cases might +require sharing a buffer between more than one device. This requires +having a mapping created separately for each device and is usually +performed by calling dma_map_{single,page,sg} function more than once +for the given buffer with device pointer to each device taking part in +the buffer sharing. The first call transfers a buffer from 'CPU' domain +to 'device' domain, what synchronizes CPU caches for the given region +(usually it means that the cache has been flushed or invalidated +depending on the dma direction). However, next calls to +dma_map_{single,page,sg}() for other devices will perform exactly the +same sychronization operation on the CPU cache. CPU cache sychronization +might be a time consuming operation, especially if the buffers are +large, so it is highly recommended to avoid it if possible. +DMA_ATTR_SKIP_CPU_SYNC allows platform code to skip synchronization of +the CPU cache for the given buffer assuming that it has been already +transferred to 'device' domain. This attribute can be also used for +dma_unmap_{single,page,sg} functions family to force buffer to stay in +device domain after releasing a mapping for it. Use this attribute with +care! 
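To make the two new attributes above more tangible, here is a minimal kernel-side sketch (not part of the patch) of a driver allocating a buffer with DMA_ATTR_NO_KERNEL_MAPPING and mapping an already-synchronized buffer with DMA_ATTR_SKIP_CPU_SYNC; the example_* function names are hypothetical and error handling is trimmed:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /*
     * The value returned by dma_alloc_attrs() is only a cookie here: it must
     * not be dereferenced, and the same attribute has to be set again when
     * the buffer is mmap'ed or freed.
     */
    static void *example_alloc(struct device *dev, size_t size,
                               dma_addr_t *dma_handle)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
            return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, &attrs);
    }

    static void example_free(struct device *dev, size_t size, void *cookie,
                             dma_addr_t dma_handle)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
            dma_free_attrs(dev, size, cookie, dma_handle, &attrs);
    }

    /*
     * Map a buffer for a second device without paying for another cache
     * flush; the caller guarantees the buffer is already in the 'device'
     * domain.
     */
    static dma_addr_t example_map_shared(struct device *second_dev, void *buf,
                                         size_t size,
                                         enum dma_data_direction dir)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
            return dma_map_single_attrs(second_dev, buf, size, dir, &attrs);
    }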
diff --git a/Documentation/device-mapper/striped.txt b/Documentation/device-mapper/striped.txt index f34d3236b9da..45f3b91ea4c3 100644 --- a/Documentation/device-mapper/striped.txt +++ b/Documentation/device-mapper/striped.txt @@ -9,15 +9,14 @@ devices in parallel. Parameters: <num devs> <chunk size> [<dev path> <offset>]+ <num devs>: Number of underlying devices. - <chunk size>: Size of each chunk of data. Must be a power-of-2 and at - least as large as the system's PAGE_SIZE. + <chunk size>: Size of each chunk of data. Must be at least as + large as the system's PAGE_SIZE. <dev path>: Full pathname to the underlying block-device, or a "major:minor" device-number. <offset>: Starting sector within the device. One or more underlying devices can be specified. The striped device size must -be a multiple of the chunk size and a multiple of the number of underlying -devices. +be a multiple of the chunk size multiplied by the number of underlying devices. Example scripts diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt index f5cfc62b7ad3..30b8b83bd333 100644 --- a/Documentation/device-mapper/thin-provisioning.txt +++ b/Documentation/device-mapper/thin-provisioning.txt @@ -231,6 +231,9 @@ i) Constructor no_discard_passdown: Don't pass discards down to the underlying data device, but just remove the mapping. + read_only: Don't allow any changes to be made to the pool + metadata. + Data block size must be between 64KB (128 sectors) and 1GB (2097152 sectors) inclusive. @@ -239,7 +242,7 @@ ii) Status <transaction id> <used metadata blocks>/<total metadata blocks> <used data blocks>/<total data blocks> <held metadata root> - + [no_]discard_passdown ro|rw transaction id: A 64-bit number used by userspace to help synchronise with metadata @@ -257,6 +260,21 @@ ii) Status held root. This feature is not yet implemented so '-' is always returned. + discard_passdown|no_discard_passdown + Whether or not discards are actually being passed down to the + underlying device. When this is enabled when loading the table, + it can get disabled if the underlying device doesn't support it. + + ro|rw + If the pool encounters certain types of device failures it will + drop into a read-only metadata mode in which no changes to + the pool metadata (like allocating new blocks) are permitted. + + In serious cases where even a read-only mode is deemed unsafe + no further I/O will be permitted and the status will just + contain the string 'Fail'. The userspace recovery tools + should then be used. + iii) Messages create_thin <dev id> @@ -329,3 +347,7 @@ regain some space then send the 'trim' message to the pool. ii) Status <nr mapped sectors> <highest mapped sector> + + If the pool has encountered device errors and failed, the status + will just contain the string 'Fail'. The userspace recovery + tools should then be used. diff --git a/Documentation/devicetree/bindings/arm/calxeda/l2ecc.txt b/Documentation/devicetree/bindings/arm/calxeda/l2ecc.txt new file mode 100644 index 000000000000..94e642a33db0 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/calxeda/l2ecc.txt @@ -0,0 +1,15 @@ +Calxeda Highbank L2 cache ECC + +Properties: +- compatible : Should be "calxeda,hb-sregs-l2-ecc" +- reg : Address and size for ECC error interrupt clear registers. +- interrupts : Should be single bit error interrupt, then double bit error + interrupt. 
+ +Example: + + sregs@fff3c200 { + compatible = "calxeda,hb-sregs-l2-ecc"; + reg = <0xfff3c200 0x100>; + interrupts = <0 71 4 0 72 4>; + }; diff --git a/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt b/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt new file mode 100644 index 000000000000..f770ac0893d4 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/calxeda/mem-ctrlr.txt @@ -0,0 +1,14 @@ +Calxeda DDR memory controller + +Properties: +- compatible : Should be "calxeda,hb-ddr-ctrl" +- reg : Address and size for DDR controller registers. +- interrupts : Interrupt for DDR controller. + +Example: + + memory-controller@fff00000 { + compatible = "calxeda,hb-ddr-ctrl"; + reg = <0xfff00000 0x1000>; + interrupts = <0 91 4>; + }; diff --git a/Documentation/devicetree/bindings/gpio/gpio_i2c.txt b/Documentation/devicetree/bindings/i2c/gpio-i2c.txt index 4f8ec947c6bd..4f8ec947c6bd 100644 --- a/Documentation/devicetree/bindings/gpio/gpio_i2c.txt +++ b/Documentation/devicetree/bindings/i2c/gpio-i2c.txt diff --git a/Documentation/devicetree/bindings/i2c/i2c-mxs.txt b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt index 1bfc02de1b0c..30ac3a0557f7 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mxs.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt @@ -4,6 +4,8 @@ Required properties: - compatible: Should be "fsl,<chip>-i2c" - reg: Should contain registers location and length - interrupts: Should contain ERROR and DMA interrupts +- clock-frequency: Desired I2C bus clock frequency in Hz. + Only 100000Hz and 400000Hz modes are supported. Examples: @@ -13,4 +15,5 @@ i2c0: i2c@80058000 { compatible = "fsl,imx28-i2c"; reg = <0x80058000 2000>; interrupts = <111 68>; + clock-frequency = <100000>; }; diff --git a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt new file mode 100644 index 000000000000..c15781f4dc8c --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt @@ -0,0 +1,33 @@ +Device tree configuration for i2c-ocores + +Required properties: +- compatible : "opencores,i2c-ocores" +- reg : bus address start and address range size of device +- interrupts : interrupt number +- clock-frequency : frequency of bus clock in Hz +- #address-cells : should be <1> +- #size-cells : should be <0> + +Optional properties: +- reg-shift : device register offsets are shifted by this value +- reg-io-width : io register width in bytes (1, 2 or 4) +- regstep : deprecated, use reg-shift above + +Example: + + i2c0: ocores@a0000000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "opencores,i2c-ocores"; + reg = <0xa0000000 0x8>; + interrupts = <10>; + clock-frequency = <20000000>; + + reg-shift = <0>; /* 8 bit registers */ + reg-io-width = <1>; /* 8 bit read/write */ + + dummy@60 { + compatible = "dummy"; + reg = <0x60>; + }; + }; diff --git a/Documentation/devicetree/bindings/i2c/mrvl-i2c.txt b/Documentation/devicetree/bindings/i2c/mrvl-i2c.txt index b891ee218354..0f7945019f6f 100644 --- a/Documentation/devicetree/bindings/i2c/mrvl-i2c.txt +++ b/Documentation/devicetree/bindings/i2c/mrvl-i2c.txt @@ -1,4 +1,4 @@ -* I2C +* Marvell MMP I2C controller Required properties : @@ -32,3 +32,20 @@ Examples: interrupts = <58>; }; +* Marvell MV64XXX I2C controller + +Required properties : + + - reg : Offset and length of the register set for the device + - compatible : Should be "marvell,mv64xxx-i2c" + - interrupts : The interrupt number + - clock-frequency : Desired I2C bus clock frequency in Hz. 
+ +Examples: + + i2c@11000 { + compatible = "marvell,mv64xxx-i2c"; + reg = <0x11000 0x20>; + interrupts = <29>; + clock-frequency = <100000>; + }; diff --git a/Documentation/devicetree/bindings/pwm/lpc32xx-pwm.txt b/Documentation/devicetree/bindings/pwm/lpc32xx-pwm.txt new file mode 100644 index 000000000000..cfe1db3bb6e9 --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/lpc32xx-pwm.txt @@ -0,0 +1,12 @@ +LPC32XX PWM controller + +Required properties: +- compatible: should be "nxp,lpc3220-pwm" +- reg: physical base address and length of the controller's registers + +Examples: + +pwm@0x4005C000 { + compatible = "nxp,lpc3220-pwm"; + reg = <0x4005C000 0x8>; +}; diff --git a/Documentation/devicetree/bindings/pwm/mxs-pwm.txt b/Documentation/devicetree/bindings/pwm/mxs-pwm.txt new file mode 100644 index 000000000000..b16f4a57d111 --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/mxs-pwm.txt @@ -0,0 +1,17 @@ +Freescale MXS PWM controller + +Required properties: +- compatible: should be "fsl,imx23-pwm" +- reg: physical base address and length of the controller's registers +- #pwm-cells: should be 2. The first cell specifies the per-chip index + of the PWM to use and the second cell is the duty cycle in nanoseconds. +- fsl,pwm-number: the number of PWM devices + +Example: + +pwm: pwm@80064000 { + compatible = "fsl,imx28-pwm", "fsl,imx23-pwm"; + reg = <0x80064000 2000>; + #pwm-cells = <2>; + fsl,pwm-number = <8>; +}; diff --git a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt new file mode 100644 index 000000000000..bbbeedb4ec05 --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt @@ -0,0 +1,18 @@ +Tegra SoC PWFM controller + +Required properties: +- compatible: should be one of: + - "nvidia,tegra20-pwm" + - "nvidia,tegra30-pwm" +- reg: physical base address and length of the controller's registers +- #pwm-cells: On Tegra the number of cells used to specify a PWM is 2. The + first cell specifies the per-chip index of the PWM to use and the second + cell is the duty cycle in nanoseconds. + +Example: + + pwm: pwm@7000a000 { + compatible = "nvidia,tegra20-pwm"; + reg = <0x7000a000 0x100>; + #pwm-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/pwm/pwm.txt b/Documentation/devicetree/bindings/pwm/pwm.txt new file mode 100644 index 000000000000..73ec962bfe8c --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/pwm.txt @@ -0,0 +1,57 @@ +Specifying PWM information for devices +====================================== + +1) PWM user nodes +----------------- + +PWM users should specify a list of PWM devices that they want to use +with a property containing a 'pwm-list': + + pwm-list ::= <single-pwm> [pwm-list] + single-pwm ::= <pwm-phandle> <pwm-specifier> + pwm-phandle : phandle to PWM controller node + pwm-specifier : array of #pwm-cells specifying the given PWM + (controller specific) + +PWM properties should be named "pwms". The exact meaning of each pwms +property must be documented in the device tree binding for each device. +An optional property "pwm-names" may contain a list of strings to label +each of the PWM devices listed in the "pwms" property. If no "pwm-names" +property is given, the name of the user node will be used as fallback. + +Drivers for devices that use more than a single PWM device can use the +"pwm-names" property to map the name of the PWM device requested by the +pwm_get() call to an index into the list given by the "pwms" property. 
+ +The following example could be used to describe a PWM-based backlight +device: + + pwm: pwm { + #pwm-cells = <2>; + }; + + [...] + + bl: backlight { + pwms = <&pwm 0 5000000>; + pwm-names = "backlight"; + }; + +pwm-specifier typically encodes the chip-relative PWM number and the PWM +period in nanoseconds. Note that in the example above, specifying the +"pwm-names" is redundant because the name "backlight" would be used as +fallback anyway. + +2) PWM controller nodes +----------------------- + +PWM controller nodes must specify the number of cells used for the +specifier using the '#pwm-cells' property. + +An example PWM controller might look like this: + + pwm: pwm@7000a000 { + compatible = "nvidia,tegra20-pwm"; + reg = <0x7000a000 0x100>; + #pwm-cells = <2>; + }; diff --git a/Documentation/devicetree/bindings/video/backlight/pwm-backlight.txt b/Documentation/devicetree/bindings/video/backlight/pwm-backlight.txt new file mode 100644 index 000000000000..1e4fc727f3b1 --- /dev/null +++ b/Documentation/devicetree/bindings/video/backlight/pwm-backlight.txt @@ -0,0 +1,28 @@ +pwm-backlight bindings + +Required properties: + - compatible: "pwm-backlight" + - pwms: OF device-tree PWM specification (see PWM binding[0]) + - brightness-levels: Array of distinct brightness levels. Typically these + are in the range from 0 to 255, but any range starting at 0 will do. + The actual brightness level (PWM duty cycle) will be interpolated + from these values. 0 means a 0% duty cycle (darkest/off), while the + last value in the array represents a 100% duty cycle (brightest). + - default-brightness-level: the default brightness level (index into the + array defined by the "brightness-levels" property) + +Optional properties: + - pwm-names: a list of names for the PWM devices specified in the + "pwms" property (see PWM binding[0]) + +[0]: Documentation/devicetree/bindings/pwm/pwm.txt + +Example: + + backlight { + compatible = "pwm-backlight"; + pwms = <&pwm 0 5000000>; + + brightness-levels = <0 4 8 16 32 64 128 255>; + default-brightness-level = <6>; + }; diff --git a/Documentation/edac.txt b/Documentation/edac.txt index 03df2b020332..56c7e936430f 100644 --- a/Documentation/edac.txt +++ b/Documentation/edac.txt @@ -232,116 +232,20 @@ EDAC control and attribute files. In 'mcX' directories are EDAC control and attribute files for -this 'X' instance of the memory controllers: - - -Counter reset control file: - - 'reset_counters' - - This write-only control file will zero all the statistical counters - for UE and CE errors. Zeroing the counters will also reset the timer - indicating how long since the last counter zero. This is useful - for computing errors/time. Since the counters are always reset at - driver initialization time, no module/kernel parameter is available. - - RUN TIME: echo "anything" >/sys/devices/system/edac/mc/mc0/counter_reset - - This resets the counters on memory controller 0 - - -Seconds since last counter reset control file: - - 'seconds_since_reset' - - This attribute file displays how many seconds have elapsed since the - last counter reset. This can be used with the error counters to - measure error rates. - - - -Memory Controller name attribute file: - - 'mc_name' - - This attribute file displays the type of memory controller - that is being utilized. - - -Total memory managed by this memory controller attribute file: - - 'size_mb' - - This attribute file displays, in count of megabytes, of memory - that this instance of memory controller manages. 
- - -Total Uncorrectable Errors count attribute file: - - 'ue_count' - - This attribute file displays the total count of uncorrectable - errors that have occurred on this memory controller. If panic_on_ue - is set this counter will not have a chance to increment, - since EDAC will panic the system. - - -Total UE count that had no information attribute fileY: - - 'ue_noinfo_count' - - This attribute file displays the number of UEs that have occurred - with no information as to which DIMM slot is having errors. - - -Total Correctable Errors count attribute file: - - 'ce_count' - - This attribute file displays the total count of correctable - errors that have occurred on this memory controller. This - count is very important to examine. CEs provide early - indications that a DIMM is beginning to fail. This count - field should be monitored for non-zero values and report - such information to the system administrator. - - -Total Correctable Errors count attribute file: - - 'ce_noinfo_count' - - This attribute file displays the number of CEs that - have occurred wherewith no information as to which DIMM slot - is having errors. Memory is handicapped, but operational, - yet no information is available to indicate which slot - the failing memory is in. This count field should be also - be monitored for non-zero values. - -Device Symlink: - - 'device' - - Symlink to the memory controller device. - -Sdram memory scrubbing rate: - - 'sdram_scrub_rate' - - Read/Write attribute file that controls memory scrubbing. The scrubbing - rate is set by writing a minimum bandwidth in bytes/sec to the attribute - file. The rate will be translated to an internal value that gives at - least the specified rate. - - Reading the file will return the actual scrubbing rate employed. - - If configuration fails or memory scrubbing is not implemented, accessing - that attribute will fail. +this 'X' instance of the memory controllers. +For a description of the sysfs API, please see: + Documentation/ABI/testing/sysfs/devices-edac ============================================================================ 'csrowX' DIRECTORIES +When CONFIG_EDAC_LEGACY_SYSFS is enabled, the sysfs will contain the +csrowX directories. As this API doesn't work properly for Rambus, FB-DIMMs +and modern Intel Memory Controllers, this is being deprecated in favor +of dimmX directories. + In the 'csrowX' directories are EDAC control and attribute files for this 'X' instance of csrow: diff --git a/Documentation/input/edt-ft5x06.txt b/Documentation/input/edt-ft5x06.txt new file mode 100644 index 000000000000..2032f0b7a8fa --- /dev/null +++ b/Documentation/input/edt-ft5x06.txt @@ -0,0 +1,54 @@ +EDT ft5x06 based Polytouch devices +---------------------------------- + +The edt-ft5x06 driver is useful for the EDT "Polytouch" family of capacitive +touch screens. Note that it is *not* suitable for other devices based on the +focaltec ft5x06 devices, since they contain vendor-specific firmware. In +particular this driver is not suitable for the Nook tablet. + +It has been tested with the following devices: + * EP0350M06 + * EP0430M06 + * EP0570M06 + * EP0700M06 + +The driver allows configuration of the touch screen via a set of sysfs files: + +/sys/class/input/eventX/device/device/threshold: + allows setting the "click"-threshold in the range from 20 to 80. + +/sys/class/input/eventX/device/device/gain: + allows setting the sensitivity in the range from 0 to 31. Note that + lower values indicate higher sensitivity. 
+ +/sys/class/input/eventX/device/device/offset: + allows setting the edge compensation in the range from 0 to 31. + +/sys/class/input/eventX/device/device/report_rate: + allows setting the report rate in the range from 3 to 14. + + +For debugging purposes the driver provides a few files in the debug +filesystem (if available in the kernel). In /sys/kernel/debug/edt_ft5x06 +you'll find the following files: + +num_x, num_y: + (readonly) contains the number of sensor fields in X- and + Y-direction. + +mode: + allows switching the sensor between "factory mode" and "operation + mode" by writing "1" or "0" to it. In factory mode (1) it is + possible to get the raw data from the sensor. Note that in factory + mode regular events don't get delivered and the options described + above are unavailable. + +raw_data: + contains num_x * num_y big endian 16 bit values describing the raw + values for each sensor field. Note that each read() call on this + files triggers a new readout. It is recommended to provide a buffer + big enough to contain num_x * num_y * 2 bytes. + +Note that reading raw_data gives a I/O error when the device is not in factory +mode. The same happens when reading/writing to the parameter files when the +device is not in regular operation mode. diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index c2619ef44a72..ad7e2e5088c1 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -526,7 +526,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. coherent_pool=nn[KMG] [ARM,KNL] Sets the size of memory pool for coherent, atomic dma - allocations if Contiguous Memory Allocator (CMA) is used. + allocations, by default set to 256K. code_bytes [X86] How many bytes of object code to print in an oops report. diff --git a/Documentation/pwm.txt b/Documentation/pwm.txt new file mode 100644 index 000000000000..554290ebab94 --- /dev/null +++ b/Documentation/pwm.txt @@ -0,0 +1,76 @@ +Pulse Width Modulation (PWM) interface + +This provides an overview about the Linux PWM interface + +PWMs are commonly used for controlling LEDs, fans or vibrators in +cell phones. PWMs with a fixed purpose have no need implementing +the Linux PWM API (although they could). However, PWMs are often +found as discrete devices on SoCs which have no fixed purpose. It's +up to the board designer to connect them to LEDs or fans. To provide +this kind of flexibility the generic PWM API exists. + +Identifying PWMs +---------------- + +Users of the legacy PWM API use unique IDs to refer to PWM devices. + +Instead of referring to a PWM device via its unique ID, board setup code +should instead register a static mapping that can be used to match PWM +consumers to providers, as given in the following example: + + static struct pwm_lookup board_pwm_lookup[] = { + PWM_LOOKUP("tegra-pwm", 0, "pwm-backlight", NULL), + }; + + static void __init board_init(void) + { + ... + pwm_add_table(board_pwm_lookup, ARRAY_SIZE(board_pwm_lookup)); + ... + } + +Using PWMs +---------- + +Legacy users can request a PWM device using pwm_request() and free it +after usage with pwm_free(). + +New users should use the pwm_get() function and pass to it the consumer +device or a consumer name. pwm_put() is used to free the PWM device. + +After being requested a PWM has to be configured using: + +int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns); + +To start/stop toggling the PWM output use pwm_enable()/pwm_disable(). 
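Tying the "Using PWMs" section above together, a minimal consumer sketch (hypothetical driver code, not part of the patch) could look as follows; the "backlight" consumer name and the 1 kHz / 50% duty-cycle numbers are only examples:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pwm.h>

    static int example_start_backlight(struct device *dev)
    {
            struct pwm_device *pwm;
            int ret;

            /* Resolved via a pwm_lookup table or the "pwms"/"pwm-names" DT properties. */
            pwm = pwm_get(dev, "backlight");
            if (IS_ERR(pwm))
                    return PTR_ERR(pwm);

            ret = pwm_config(pwm, 500000, 1000000);   /* duty_ns, period_ns */
            if (ret < 0) {
                    pwm_put(pwm);
                    return ret;
            }

            return pwm_enable(pwm);
    }

When the consumer is done with the device, pwm_disable() followed by pwm_put() releases it again.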
+ +Implementing a PWM driver +------------------------- + +Currently there are two ways to implement pwm drivers. Traditionally +there only has been the barebone API meaning that each driver has +to implement the pwm_*() functions itself. This means that it's impossible +to have multiple PWM drivers in the system. For this reason it's mandatory +for new drivers to use the generic PWM framework. + +A new PWM controller/chip can be added using pwmchip_add() and removed +again with pwmchip_remove(). pwmchip_add() takes a filled in struct +pwm_chip as argument which provides a description of the PWM chip, the +number of PWM devices provider by the chip and the chip-specific +implementation of the supported PWM operations to the framework. + +Locking +------- + +The PWM core list manipulations are protected by a mutex, so pwm_request() +and pwm_free() may not be called from an atomic context. Currently the +PWM core does not enforce any locking to pwm_enable(), pwm_disable() and +pwm_config(), so the calling context is currently driver specific. This +is an issue derived from the former barebone API and should be fixed soon. + +Helpers +------- + +Currently a PWM can only be configured with period_ns and duty_ns. For several +use cases freq_hz and duty_percent might be better. Instead of calculating +this in your driver please consider adding appropriate helpers to the framework. diff --git a/MAINTAINERS b/MAINTAINERS index bd451649f13a..b141083b2621 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5526,6 +5526,18 @@ S: Maintained F: Documentation/video4linux/README.pvrusb2 F: drivers/media/video/pvrusb2/ +PWM SUBSYSTEM +M: Thierry Reding <thierry.reding@avionic-design.de> +L: linux-kernel@vger.kernel.org +S: Maintained +W: http://gitorious.org/linux-pwm +T: git git://gitorious.org/linux-pwm/linux-pwm.git +F: Documentation/pwm.txt +F: Documentation/devicetree/bindings/pwm/ +F: include/linux/pwm.h +F: include/linux/of_pwm.h +F: drivers/pwm/ + PXA2xx/PXA3xx SUPPORT M: Eric Miao <eric.y.miao@gmail.com> M: Russell King <linux@arm.linux.org.uk> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index fbdd8533c05d..6b86bb963a28 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1009,7 +1009,6 @@ config ARCH_VT8500 select ARCH_HAS_CPUFREQ select GENERIC_CLOCKEVENTS select ARCH_REQUIRE_GPIOLIB - select HAVE_PWM help Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip. 
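Referring back to the new "Implementing a PWM driver" section of Documentation/pwm.txt above, a bare-bones sketch of registering a chip with pwmchip_add() might look like the following; the example_* names, the npwm count of 4 and the empty hardware accessors are placeholders, not a real driver:

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pwm.h>
    #include <linux/slab.h>

    static int example_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                                  int duty_ns, int period_ns)
    {
            /* program duty_ns/period_ns for pwm->hwpwm into the hardware */
            return 0;
    }

    static int example_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
    {
            /* start toggling the output */
            return 0;
    }

    static void example_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
    {
            /* stop toggling the output */
    }

    static const struct pwm_ops example_pwm_ops = {
            .config  = example_pwm_config,
            .enable  = example_pwm_enable,
            .disable = example_pwm_disable,
            .owner   = THIS_MODULE,
    };

    static int example_pwm_probe(struct platform_device *pdev)
    {
            struct pwm_chip *chip;

            chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
            if (!chip)
                    return -ENOMEM;

            chip->dev = &pdev->dev;
            chip->ops = &example_pwm_ops;
            chip->base = -1;        /* let the framework assign global IDs */
            chip->npwm = 4;         /* PWM devices provided by this chip */

            platform_set_drvdata(pdev, chip);
            return pwmchip_add(chip);
    }

The matching remove hook would call pwmchip_remove() on the same struct pwm_chip.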
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts index 2e1cfa00c25b..9fecf1ae777b 100644 --- a/arch/arm/boot/dts/highbank.dts +++ b/arch/arm/boot/dts/highbank.dts @@ -130,6 +130,12 @@ clocks = <&eclk>; }; + memory-controller@fff00000 { + compatible = "calxeda,hb-ddr-ctrl"; + reg = <0xfff00000 0x1000>; + interrupts = <0 91 4>; + }; + ipc@fff20000 { compatible = "arm,pl320", "arm,primecell"; reg = <0xfff20000 0x1000>; @@ -275,6 +281,12 @@ }; }; + sregs@fff3c200 { + compatible = "calxeda,hb-sregs-l2-ecc"; + reg = <0xfff3c200 0x100>; + interrupts = <0 71 4 0 72 4>; + }; + dma@fff3d000 { compatible = "arm,pl330", "arm,primecell"; reg = <0xfff3d000 0x1000>; diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi index 915db89e3644..787efac68da8 100644 --- a/arch/arm/boot/dts/imx28.dtsi +++ b/arch/arm/boot/dts/imx28.dtsi @@ -660,6 +660,7 @@ compatible = "fsl,imx28-i2c"; reg = <0x80058000 2000>; interrupts = <111 68>; + clock-frequency = <100000>; status = "disabled"; }; @@ -669,6 +670,7 @@ compatible = "fsl,imx28-i2c"; reg = <0x8005a000 2000>; interrupts = <110 69>; + clock-frequency = <100000>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi new file mode 100644 index 000000000000..798fa35c0005 --- /dev/null +++ b/arch/arm/boot/dts/r8a7740.dtsi @@ -0,0 +1,21 @@ +/* + * Device Tree Source for the r8a7740 SoC + * + * Copyright (C) 2012 Renesas Solutions Corp. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +/include/ "skeleton.dtsi" + +/ { + compatible = "renesas,r8a7740"; + + cpus { + cpu@0 { + compatible = "arm,cortex-a9"; + }; + }; +}; diff --git a/arch/arm/boot/dts/sh7377.dtsi b/arch/arm/boot/dts/sh7377.dtsi new file mode 100644 index 000000000000..767ee0796daa --- /dev/null +++ b/arch/arm/boot/dts/sh7377.dtsi @@ -0,0 +1,21 @@ +/* + * Device Tree Source for the sh7377 SoC + * + * Copyright (C) 2012 Renesas Solutions Corp. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +/include/ "skeleton.dtsi" + +/ { + compatible = "renesas,sh7377"; + + cpus { + cpu@0 { + compatible = "arm,cortex-a8"; + }; + }; +}; diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index 9f1921634eb7..405d1673904e 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -123,6 +123,12 @@ status = "disabled"; }; + pwm { + compatible = "nvidia,tegra20-pwm"; + reg = <0x7000a000 0x100>; + #pwm-cells = <2>; + }; + i2c@7000c000 { compatible = "nvidia,tegra20-i2c"; reg = <0x7000c000 0x100>; diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index da740191771f..3e4334d14efb 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -117,6 +117,12 @@ status = "disabled"; }; + pwm { + compatible = "nvidia,tegra30-pwm", "nvidia,tegra20-pwm"; + reg = <0x7000a000 0x100>; + #pwm-cells = <2>; + }; + i2c@7000c000 { compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c"; reg = <0x7000c000 0x100>; diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index aa07f5938f05..1143c4d5c567 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c @@ -452,6 +452,7 @@ static struct dma_map_ops dmabounce_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, .mmap = arm_dma_mmap, + .get_sgtable = arm_dma_get_sgtable, .map_page = dmabounce_map_page, .unmap_page = dmabounce_unmap_page, .sync_single_for_cpu = dmabounce_sync_for_cpu, diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig index ddc9fe6a78ac..7d8718468e0d 100644 --- a/arch/arm/configs/armadillo800eva_defconfig +++ b/arch/arm/configs/armadillo800eva_defconfig @@ -5,10 +5,7 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=16 # CONFIG_UTS_NS is not set # CONFIG_IPC_NS is not set -# CONFIG_USER_NS is not set # CONFIG_PID_NS is not set -CONFIG_SYSFS_DEPRECATED=y -CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SLAB=y CONFIG_MODULES=y @@ -21,7 +18,7 @@ CONFIG_ARCH_SHMOBILE=y CONFIG_ARCH_R8A7740=y CONFIG_MACH_ARMADILLO800EVA=y # CONFIG_SH_TIMER_TMU is not set -# CONFIG_ARM_THUMB is not set +CONFIG_ARM_THUMB=y CONFIG_CPU_BPREDICT_DISABLE=y # CONFIG_CACHE_L2X0 is not set CONFIG_ARM_ERRATA_430973=y @@ -39,6 +36,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096" CONFIG_CMDLINE_FORCE=y CONFIG_KEXEC=y +CONFIG_VFP=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set # CONFIG_SUSPEND is not set CONFIG_NET=y @@ -89,26 +87,32 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y CONFIG_I2C=y CONFIG_I2C_SH_MOBILE=y # CONFIG_HWMON is not set +CONFIG_MEDIA_SUPPORT=y +CONFIG_VIDEO_DEV=y +# CONFIG_RC_CORE is not set +# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set +# CONFIG_V4L_USB_DRIVERS is not set +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SOC_CAMERA=y +CONFIG_SOC_CAMERA_MT9T112=y +CONFIG_VIDEO_SH_MOBILE_CEU=y +# CONFIG_RADIO_ADAPTERS is not set CONFIG_FB=y -CONFIG_FB_MODE_HELPERS=y CONFIG_FB_SH_MOBILE_LCDC=y +CONFIG_FB_SH_MOBILE_HDMI=y CONFIG_LCD_CLASS_DEVICE=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y CONFIG_LOGO=y # CONFIG_LOGO_LINUX_MONO is not set # CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_SOUND=y -CONFIG_SND=y # CONFIG_SND_SUPPORT_OLD_API is not set # CONFIG_SND_VERBOSE_PROCFS is not set # CONFIG_SND_DRIVERS is not set # CONFIG_SND_ARM is not set -CONFIG_SND_SOC=y CONFIG_SND_SOC_SH4_FSI=y # CONFIG_HID_SUPPORT is not set CONFIG_USB=y 
-# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_RENESAS_USBHS=y CONFIG_USB_GADGET=y CONFIG_USB_RENESAS_USBHS_UDC=y @@ -116,6 +120,8 @@ CONFIG_USB_ETH=m CONFIG_MMC=y CONFIG_MMC_SDHI=y CONFIG_MMC_SH_MMCIF=y +CONFIG_DMADEVICES=y +CONFIG_SH_DMAE=y CONFIG_UIO=y CONFIG_UIO_PDRV_GENIRQ=y # CONFIG_DNOTIFY is not set @@ -124,7 +130,6 @@ CONFIG_VFAT_FS=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y CONFIG_NFS_V4_1=y diff --git a/arch/arm/configs/kzm9d_defconfig b/arch/arm/configs/kzm9d_defconfig new file mode 100644 index 000000000000..26146ffea1a5 --- /dev/null +++ b/arch/arm/configs/kzm9d_defconfig @@ -0,0 +1,89 @@ +# CONFIG_ARM_PATCH_PHYS_VIRT is not set +CONFIG_EXPERIMENTAL=y +CONFIG_SYSVIPC=y +CONFIG_NO_HZ=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=16 +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARCH_SHMOBILE=y +CONFIG_ARCH_EMEV2=y +CONFIG_MACH_KZM9D=y +CONFIG_MEMORY_START=0x40000000 +CONFIG_MEMORY_SIZE=0x10000000 +# CONFIG_SH_TIMER_TMU is not set +# CONFIG_SWP_EMULATE is not set +# CONFIG_CACHE_L2X0 is not set +CONFIG_SMP=y +CONFIG_NR_CPUS=2 +CONFIG_HOTPLUG_CPU=y +# CONFIG_LOCAL_TIMERS is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_FORCE_MAX_ZONEORDER=13 +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_ARM_APPENDED_DTB=y +CONFIG_CMDLINE="console=tty0 console=ttyS1,115200n81 earlyprintk=serial8250-em.1,115200n81 mem=128M@0x40000000 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096" +CONFIG_CMDLINE_FORCE=y +CONFIG_VFP=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +# CONFIG_SUSPEND is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_BLK_DEV is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_FARADAY is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_SMSC911X=y +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_EM=y +# CONFIG_HW_RANDOM is not set +CONFIG_GPIOLIB=y +CONFIG_GPIO_EM=y +# CONFIG_HWMON is not set +# CONFIG_HID_SUPPORT is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_IOMMU_SUPPORT is not set +# CONFIG_DNOTIFY is not set +CONFIG_TMPFS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +# CONFIG_FTRACE is not set diff --git a/arch/arm/configs/kzm9g_defconfig b/arch/arm/configs/kzm9g_defconfig index e3ebc20ed0a7..2388c8610627 100644 --- 
a/arch/arm/configs/kzm9g_defconfig +++ b/arch/arm/configs/kzm9g_defconfig @@ -100,7 +100,12 @@ CONFIG_SND_SOC_SH4_FSI=y CONFIG_USB=y CONFIG_USB_DEVICEFS=y CONFIG_USB_R8A66597_HCD=y +CONFIG_USB_RENESAS_USBHS=y CONFIG_USB_STORAGE=y +CONFIG_USB_GADGET=y +CONFIG_USB_RENESAS_USBHS_UDC=y +CONFIG_USB_ETH=m +CONFIG_USB_MASS_STORAGE=m CONFIG_MMC=y # CONFIG_MMC_BLOCK_BOUNCE is not set CONFIG_MMC_SDHI=y @@ -108,12 +113,13 @@ CONFIG_MMC_SH_MMCIF=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_RS5C372=y CONFIG_DMADEVICES=y CONFIG_SH_DMAE=y CONFIG_ASYNC_TX_DMA=y CONFIG_STAGING=y # CONFIG_DNOTIFY is not set -# CONFIG_INOTIFY_USER is not set +CONFIG_INOTIFY_USER=y CONFIG_VFAT_FS=y CONFIG_TMPFS=y # CONFIG_MISC_FILESYSTEMS is not set diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index bbef15d04890..2ae842df4551 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h @@ -186,17 +186,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs); -#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) - -static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, - size_t size, struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - BUG_ON(!ops); - return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); -} - static inline void *dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { @@ -213,20 +202,12 @@ static inline void dma_free_writecombine(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); } -static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size) -{ - DEFINE_DMA_ATTRS(attrs); - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); - return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); -} - /* * This can be called during boot to increase the size of the consistent * DMA region above it's default value of 2MB. It must be called before the * memory allocator is initialised, i.e. before any core_initcall. 
*/ -extern void __init init_consistent_dma_size(unsigned long size); +static inline void init_consistent_dma_size(unsigned long size) { } /* * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" @@ -280,6 +261,9 @@ extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction); extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction); +extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + struct dma_attrs *attrs); #endif /* __KERNEL__ */ #endif diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c index 26fe9de35ecb..2f51293c1875 100644 --- a/arch/arm/mach-exynos/clock-exynos4.c +++ b/arch/arm/mach-exynos/clock-exynos4.c @@ -620,10 +620,6 @@ static struct clk exynos4_init_clocks_off[] = { .enable = exynos4_clk_ip_peril_ctrl, .ctrlbit = (1 << 27), }, { - .name = "fimg2d", - .enable = exynos4_clk_ip_image_ctrl, - .ctrlbit = (1 << 0), - }, { .name = "mfc", .devname = "s5p-mfc", .enable = exynos4_clk_ip_mfc_ctrl, @@ -819,47 +815,21 @@ static struct clk *exynos4_clkset_mout_g2d0_list[] = { [1] = &exynos4_clk_sclk_apll.clk, }; -static struct clksrc_sources exynos4_clkset_mout_g2d0 = { +struct clksrc_sources exynos4_clkset_mout_g2d0 = { .sources = exynos4_clkset_mout_g2d0_list, .nr_sources = ARRAY_SIZE(exynos4_clkset_mout_g2d0_list), }; -static struct clksrc_clk exynos4_clk_mout_g2d0 = { - .clk = { - .name = "mout_g2d0", - }, - .sources = &exynos4_clkset_mout_g2d0, - .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 0, .size = 1 }, -}; - static struct clk *exynos4_clkset_mout_g2d1_list[] = { [0] = &exynos4_clk_mout_epll.clk, [1] = &exynos4_clk_sclk_vpll.clk, }; -static struct clksrc_sources exynos4_clkset_mout_g2d1 = { +struct clksrc_sources exynos4_clkset_mout_g2d1 = { .sources = exynos4_clkset_mout_g2d1_list, .nr_sources = ARRAY_SIZE(exynos4_clkset_mout_g2d1_list), }; -static struct clksrc_clk exynos4_clk_mout_g2d1 = { - .clk = { - .name = "mout_g2d1", - }, - .sources = &exynos4_clkset_mout_g2d1, - .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 4, .size = 1 }, -}; - -static struct clk *exynos4_clkset_mout_g2d_list[] = { - [0] = &exynos4_clk_mout_g2d0.clk, - [1] = &exynos4_clk_mout_g2d1.clk, -}; - -static struct clksrc_sources exynos4_clkset_mout_g2d = { - .sources = exynos4_clkset_mout_g2d_list, - .nr_sources = ARRAY_SIZE(exynos4_clkset_mout_g2d_list), -}; - static struct clk *exynos4_clkset_mout_mfc0_list[] = { [0] = &exynos4_clk_mout_mpll.clk, [1] = &exynos4_clk_sclk_apll.clk, @@ -1126,13 +1096,6 @@ static struct clksrc_clk exynos4_clksrcs[] = { .reg_div = { .reg = EXYNOS4_CLKDIV_LCD0, .shift = 0, .size = 4 }, }, { .clk = { - .name = "sclk_fimg2d", - }, - .sources = &exynos4_clkset_mout_g2d, - .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 8, .size = 1 }, - .reg_div = { .reg = EXYNOS4_CLKDIV_IMAGE, .shift = 0, .size = 4 }, - }, { - .clk = { .name = "sclk_mfc", .devname = "s5p-mfc", }, diff --git a/arch/arm/mach-exynos/clock-exynos4.h b/arch/arm/mach-exynos/clock-exynos4.h index 28a119701182..bd12d5f8b63d 100644 --- a/arch/arm/mach-exynos/clock-exynos4.h +++ b/arch/arm/mach-exynos/clock-exynos4.h @@ -23,6 +23,9 @@ extern struct clksrc_sources exynos4_clkset_group; extern struct clk *exynos4_clkset_aclk_top_list[]; extern struct clk *exynos4_clkset_group_list[]; +extern struct clksrc_sources exynos4_clkset_mout_g2d0; +extern struct clksrc_sources 
exynos4_clkset_mout_g2d1; + extern int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable); extern int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable); extern int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable); diff --git a/arch/arm/mach-exynos/clock-exynos4210.c b/arch/arm/mach-exynos/clock-exynos4210.c index b8689ff60baf..fed4c26e9dad 100644 --- a/arch/arm/mach-exynos/clock-exynos4210.c +++ b/arch/arm/mach-exynos/clock-exynos4210.c @@ -48,6 +48,32 @@ static struct clksrc_clk *sysclks[] = { /* nothing here yet */ }; +static struct clksrc_clk exynos4210_clk_mout_g2d0 = { + .clk = { + .name = "mout_g2d0", + }, + .sources = &exynos4_clkset_mout_g2d0, + .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 0, .size = 1 }, +}; + +static struct clksrc_clk exynos4210_clk_mout_g2d1 = { + .clk = { + .name = "mout_g2d1", + }, + .sources = &exynos4_clkset_mout_g2d1, + .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 4, .size = 1 }, +}; + +static struct clk *exynos4210_clkset_mout_g2d_list[] = { + [0] = &exynos4210_clk_mout_g2d0.clk, + [1] = &exynos4210_clk_mout_g2d1.clk, +}; + +static struct clksrc_sources exynos4210_clkset_mout_g2d = { + .sources = exynos4210_clkset_mout_g2d_list, + .nr_sources = ARRAY_SIZE(exynos4210_clkset_mout_g2d_list), +}; + static int exynos4_clksrc_mask_lcd1_ctrl(struct clk *clk, int enable) { return s5p_gatectrl(EXYNOS4210_CLKSRC_MASK_LCD1, clk, enable); @@ -74,6 +100,13 @@ static struct clksrc_clk clksrcs[] = { .sources = &exynos4_clkset_group, .reg_src = { .reg = EXYNOS4210_CLKSRC_LCD1, .shift = 0, .size = 4 }, .reg_div = { .reg = EXYNOS4210_CLKDIV_LCD1, .shift = 0, .size = 4 }, + }, { + .clk = { + .name = "sclk_fimg2d", + }, + .sources = &exynos4210_clkset_mout_g2d, + .reg_src = { .reg = EXYNOS4_CLKSRC_IMAGE, .shift = 8, .size = 1 }, + .reg_div = { .reg = EXYNOS4_CLKDIV_IMAGE, .shift = 0, .size = 4 }, }, }; @@ -105,6 +138,10 @@ static struct clk init_clocks_off[] = { .devname = SYSMMU_CLOCK_DEVNAME(fimd1, 11), .enable = exynos4_clk_ip_lcd1_ctrl, .ctrlbit = (1 << 4), + }, { + .name = "fimg2d", + .enable = exynos4_clk_ip_image_ctrl, + .ctrlbit = (1 << 0), }, }; diff --git a/arch/arm/mach-exynos/clock-exynos4212.c b/arch/arm/mach-exynos/clock-exynos4212.c index da397d21bbcf..8fba0b5fb8ab 100644 --- a/arch/arm/mach-exynos/clock-exynos4212.c +++ b/arch/arm/mach-exynos/clock-exynos4212.c @@ -68,12 +68,45 @@ static struct clksrc_clk clk_mout_mpll_user = { .reg_src = { .reg = EXYNOS4_CLKSRC_CPU, .shift = 24, .size = 1 }, }; +static struct clksrc_clk exynos4x12_clk_mout_g2d0 = { + .clk = { + .name = "mout_g2d0", + }, + .sources = &exynos4_clkset_mout_g2d0, + .reg_src = { .reg = EXYNOS4_CLKSRC_DMC, .shift = 20, .size = 1 }, +}; + +static struct clksrc_clk exynos4x12_clk_mout_g2d1 = { + .clk = { + .name = "mout_g2d1", + }, + .sources = &exynos4_clkset_mout_g2d1, + .reg_src = { .reg = EXYNOS4_CLKSRC_DMC, .shift = 24, .size = 1 }, +}; + +static struct clk *exynos4x12_clkset_mout_g2d_list[] = { + [0] = &exynos4x12_clk_mout_g2d0.clk, + [1] = &exynos4x12_clk_mout_g2d1.clk, +}; + +static struct clksrc_sources exynos4x12_clkset_mout_g2d = { + .sources = exynos4x12_clkset_mout_g2d_list, + .nr_sources = ARRAY_SIZE(exynos4x12_clkset_mout_g2d_list), +}; + static struct clksrc_clk *sysclks[] = { &clk_mout_mpll_user, }; static struct clksrc_clk clksrcs[] = { - /* nothing here yet */ + { + .clk = { + .name = "sclk_fimg2d", + }, + .sources = &exynos4x12_clkset_mout_g2d, + .reg_src = { .reg = EXYNOS4_CLKSRC_DMC, .shift = 28, .size = 1 }, + .reg_div = { .reg = 
EXYNOS4_CLKDIV_DMC1, .shift = 0, .size = 4 }, + }, }; static struct clk init_clocks_off[] = { @@ -102,7 +135,11 @@ static struct clk init_clocks_off[] = { .devname = "exynos-fimc-lite.1", .enable = exynos4212_clk_ip_isp0_ctrl, .ctrlbit = (1 << 3), - } + }, { + .name = "fimg2d", + .enable = exynos4_clk_ip_dmc_ctrl, + .ctrlbit = (1 << 23), + }, }; #ifdef CONFIG_PM_SLEEP diff --git a/arch/arm/mach-pxa/eseries.h b/arch/arm/mach-pxa/eseries.h deleted file mode 100644 index b96949dd5adb..000000000000 --- a/arch/arm/mach-pxa/eseries.h +++ /dev/null @@ -1,14 +0,0 @@ -void __init eseries_fixup(struct tag *tags, char **cmdline, struct meminfo *mi); - -extern struct pxa2xx_udc_mach_info e7xx_udc_mach_info; -extern struct pxaficp_platform_data e7xx_ficp_platform_data; -extern int e7xx_irda_init(void); - -extern int eseries_tmio_enable(struct platform_device *dev); -extern int eseries_tmio_disable(struct platform_device *dev); -extern int eseries_tmio_suspend(struct platform_device *dev); -extern int eseries_tmio_resume(struct platform_device *dev); -extern void eseries_get_tmio_gpios(void); -extern struct resource eseries_tmio_resources[]; -extern struct platform_device e300_tc6387xb_device; - diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index d3de84b0dcbe..e6311988add2 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c @@ -296,27 +296,11 @@ static struct asic3_led asic3_leds[ASIC3_NUM_LEDS] = { static struct resource asic3_resources[] = { /* GPIO part */ - [0] = { - .start = ASIC3_PHYS, - .end = ASIC3_PHYS + ASIC3_MAP_SIZE_16BIT - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = PXA_GPIO_TO_IRQ(GPIO12_HX4700_ASIC3_IRQ), - .end = PXA_GPIO_TO_IRQ(GPIO12_HX4700_ASIC3_IRQ), - .flags = IORESOURCE_IRQ, - }, + [0] = DEFINE_RES_MEM(ASIC3_PHYS, ASIC3_MAP_SIZE_16BIT), + [1] = DEFINE_RES_IRQ(PXA_GPIO_TO_IRQ(GPIO12_HX4700_ASIC3_IRQ)), /* SD part */ - [2] = { - .start = ASIC3_SD_PHYS, - .end = ASIC3_SD_PHYS + ASIC3_MAP_SIZE_16BIT - 1, - .flags = IORESOURCE_MEM, - }, - [3] = { - .start = PXA_GPIO_TO_IRQ(GPIO66_HX4700_ASIC3_nSDIO_IRQ), - .end = PXA_GPIO_TO_IRQ(GPIO66_HX4700_ASIC3_nSDIO_IRQ), - .flags = IORESOURCE_IRQ, - }, + [2] = DEFINE_RES_MEM(ASIC3_SD_PHYS, ASIC3_MAP_SIZE_16BIT), + [3] = DEFINE_RES_IRQ(PXA_GPIO_TO_IRQ(GPIO66_HX4700_ASIC3_nSDIO_IRQ)), }; static struct asic3_platform_data asic3_platform_data = { @@ -343,11 +327,7 @@ static struct platform_device asic3 = { */ static struct resource egpio_resources[] = { - [0] = { - .start = PXA_CS5_PHYS, - .end = PXA_CS5_PHYS + 0x4 - 1, - .flags = IORESOURCE_MEM, - }, + [0] = DEFINE_RES_MEM(PXA_CS5_PHYS, 0x4), }; static struct htc_egpio_chip egpio_chips[] = { @@ -537,11 +517,7 @@ static struct w100fb_mach_info w3220_info = { }; static struct resource w3220_resources[] = { - [0] = { - .start = ATI_W3220_PHYS, - .end = ATI_W3220_PHYS + 0x00ffffff, - .flags = IORESOURCE_MEM, - }, + [0] = DEFINE_RES_MEM(ATI_W3220_PHYS, SZ_16M), }; static struct platform_device w3220 = { @@ -683,20 +659,12 @@ static struct pda_power_pdata power_supply_info = { }; static struct resource power_supply_resources[] = { - [0] = { - .name = "ac", - .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | - IORESOURCE_IRQ_LOWEDGE, - .start = PXA_GPIO_TO_IRQ(GPIOD9_nAC_IN), - .end = PXA_GPIO_TO_IRQ(GPIOD9_nAC_IN), - }, - [1] = { - .name = "usb", - .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE | - IORESOURCE_IRQ_LOWEDGE, - .start = PXA_GPIO_TO_IRQ(GPIOD14_nUSBC_DETECT), - .end = PXA_GPIO_TO_IRQ(GPIOD14_nUSBC_DETECT), - }, + [0] = 
DEFINE_RES_NAMED(PXA_GPIO_TO_IRQ(GPIOD9_nAC_IN), 1, "ac", + IORESOURCE_IRQ | + IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE), + [1] = DEFINE_RES_NAMED(PXA_GPIO_TO_IRQ(GPIOD14_nUSBC_DETECT), 1, "usb", + IORESOURCE_IRQ | + IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE), }; static struct platform_device power_supply = { diff --git a/arch/arm/mach-s3c64xx/include/mach/pm-core.h b/arch/arm/mach-s3c64xx/include/mach/pm-core.h index fcf3dcabb694..c0537f40a3d8 100644 --- a/arch/arm/mach-s3c64xx/include/mach/pm-core.h +++ b/arch/arm/mach-s3c64xx/include/mach/pm-core.h @@ -12,6 +12,9 @@ * published by the Free Software Foundation. */ +#ifndef __MACH_S3C64XX_PM_CORE_H +#define __MACH_S3C64XX_PM_CORE_H __FILE__ + #include <mach/regs-gpio.h> static inline void s3c_pm_debug_init_uart(void) @@ -113,3 +116,4 @@ static inline void samsung_pm_saved_gpios(void) __raw_writel(S3C64XX_SLPEN_USE_xSLP, S3C64XX_SLPEN); } +#endif /* __MACH_S3C64XX_PM_CORE_H */ diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig index df33909205e2..4cacc2d22fbe 100644 --- a/arch/arm/mach-shmobile/Kconfig +++ b/arch/arm/mach-shmobile/Kconfig @@ -19,6 +19,7 @@ config ARCH_SH7372 select CPU_V7 select SH_CLK_CPG select ARCH_WANT_OPTIONAL_GPIOLIB + select ARM_CPU_SUSPEND if PM || CPU_IDLE config ARCH_SH73A0 bool "SH-Mobile AG5 (R8A73A00)" @@ -58,6 +59,7 @@ config MACH_G4EVM bool "G4EVM board" depends on ARCH_SH7377 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR config MACH_AP4EVB bool "AP4EVB board" @@ -65,6 +67,7 @@ config MACH_AP4EVB select ARCH_REQUIRE_GPIOLIB select SH_LCD_MIPI_DSI select SND_SOC_AK4642 if SND_SIMPLE_CARD + select REGULATOR_FIXED_VOLTAGE if REGULATOR choice prompt "AP4EVB LCD panel selection" @@ -83,6 +86,7 @@ config MACH_AG5EVM bool "AG5EVM board" select ARCH_REQUIRE_GPIOLIB select SH_LCD_MIPI_DSI + select REGULATOR_FIXED_VOLTAGE if REGULATOR depends on ARCH_SH73A0 config MACH_MACKEREL @@ -90,15 +94,18 @@ config MACH_MACKEREL depends on ARCH_SH7372 select ARCH_REQUIRE_GPIOLIB select SND_SOC_AK4642 if SND_SIMPLE_CARD + select REGULATOR_FIXED_VOLTAGE if REGULATOR config MACH_KOTA2 bool "KOTA2 board" select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR depends on ARCH_SH73A0 config MACH_BONITO bool "bonito board" select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR depends on ARCH_R8A7740 config MACH_ARMADILLO800EVA @@ -106,22 +113,28 @@ config MACH_ARMADILLO800EVA depends on ARCH_R8A7740 select ARCH_REQUIRE_GPIOLIB select USE_OF + select REGULATOR_FIXED_VOLTAGE if REGULATOR + select SND_SOC_WM8978 if SND_SIMPLE_CARD config MACH_MARZEN bool "MARZEN board" depends on ARCH_R8A7779 select ARCH_REQUIRE_GPIOLIB + select REGULATOR_FIXED_VOLTAGE if REGULATOR config MACH_KZM9D bool "KZM9D board" depends on ARCH_EMEV2 select USE_OF + select REGULATOR_FIXED_VOLTAGE if REGULATOR config MACH_KZM9G bool "KZM-A9-GT board" depends on ARCH_SH73A0 select ARCH_REQUIRE_GPIOLIB select USE_OF + select SND_SOC_AK4642 if SND_SIMPLE_CARD + select REGULATOR_FIXED_VOLTAGE if REGULATOR comment "SH-Mobile System Configuration" diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile index 8aa1962c22a2..0df5ae6740c6 100644 --- a/arch/arm/mach-shmobile/Makefile +++ b/arch/arm/mach-shmobile/Makefile @@ -39,7 +39,9 @@ obj-$(CONFIG_ARCH_R8A7740) += entry-intc.o # PM objects obj-$(CONFIG_SUSPEND) += suspend.o obj-$(CONFIG_CPU_IDLE) += cpuidle.o +obj-$(CONFIG_ARCH_SHMOBILE) += pm-rmobile.o obj-$(CONFIG_ARCH_SH7372) += pm-sh7372.o 
sleep-sh7372.o +obj-$(CONFIG_ARCH_R8A7740) += pm-r8a7740.o obj-$(CONFIG_ARCH_R8A7779) += pm-r8a7779.o # Board objects diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index 5a6f22f05e99..d82c010fdfc6 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c @@ -27,6 +27,8 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/dma-mapping.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/serial_sci.h> #include <linux/smsc911x.h> #include <linux/gpio.h> @@ -52,6 +54,12 @@ #include <asm/hardware/cache-l2x0.h> #include <asm/traps.h> +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + static struct resource smsc9220_resources[] = { [0] = { .start = 0x14000000, @@ -142,6 +150,13 @@ static struct platform_device fsi_device = { .resource = fsi_resources, }; +/* Fixed 1.8V regulator to be used by MMCIF */ +static struct regulator_consumer_supply fixed1v8_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), +}; + static struct resource sh_mmcif_resources[] = { [0] = { .name = "MMCIF", @@ -364,6 +379,13 @@ static struct platform_device mipidsi0_device = { }, }; +/* Fixed 2.8V regulators to be used by SDHI0 */ +static struct regulator_consumer_supply fixed2v8_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), +}; + /* SDHI0 */ static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, @@ -408,8 +430,57 @@ static struct platform_device sdhi0_device = { }, }; -void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state) +/* Fixed 3.3V regulator to be used by SDHI1 */ +static struct regulator_consumer_supply cn4_power_consumers[] = { + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), +}; + +static struct regulator_init_data cn4_power_init_data = { + .constraints = { + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .num_consumer_supplies = ARRAY_SIZE(cn4_power_consumers), + .consumer_supplies = cn4_power_consumers, +}; + +static struct fixed_voltage_config cn4_power_info = { + .supply_name = "CN4 SD/MMC Vdd", + .microvolts = 3300000, + .gpio = GPIO_PORT114, + .enable_high = 1, + .init_data = &cn4_power_init_data, +}; + +static struct platform_device cn4_power = { + .name = "reg-fixed-voltage", + .id = 2, + .dev = { + .platform_data = &cn4_power_info, + }, +}; + +static void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state) +{ + static int power_gpio = -EINVAL; + + if (power_gpio < 0) { + int ret = gpio_request(GPIO_PORT114, "sdhi1_power"); + if (!ret) { + power_gpio = GPIO_PORT114; + gpio_direction_output(power_gpio, 0); + } + } + + /* + * If requesting the GPIO above failed, it means, that the regulator got + * probed and grabbed the GPIO, but we don't know, whether the sdhi + * driver already uses the regulator. If it doesn't, we have to toggle + * the GPIO ourselves, even though it is now owned by the fixed + * regulator driver. We have to live with the race in case the driver + * gets unloaded and the GPIO freed between these two steps. 
+ */ gpio_set_value(GPIO_PORT114, state); } @@ -455,6 +526,7 @@ static struct platform_device sdhi1_device = { }; static struct platform_device *ag5evm_devices[] __initdata = { + &cn4_power, ð_device, &keysc_device, &fsi_device, @@ -468,6 +540,12 @@ static struct platform_device *ag5evm_devices[] __initdata = { static void __init ag5evm_init(void) { + regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers, + ARRAY_SIZE(fixed1v8_power_consumers), 1800000); + regulator_register_always_on(1, "fixed-2.8V", fixed2v8_power_consumers, + ARRAY_SIZE(fixed2v8_power_consumers), 3300000); + regulator_register_fixed(3, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + sh73a0_pinmux_init(); /* enable SCIFA2 */ @@ -562,8 +640,6 @@ static void __init ag5evm_init(void) gpio_request(GPIO_FN_SDHID1_2_PU, NULL); gpio_request(GPIO_FN_SDHID1_1_PU, NULL); gpio_request(GPIO_FN_SDHID1_0_PU, NULL); - gpio_request(GPIO_PORT114, "sdhi1_power"); - gpio_direction_output(GPIO_PORT114, 0); #ifdef CONFIG_CACHE_L2X0 /* Shared attribute override enable, 64K*8way */ diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index ace60246a5df..f172ca85905c 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -34,6 +34,8 @@ #include <linux/i2c.h> #include <linux/i2c/tsc2007.h> #include <linux/io.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/sh_intc.h> #include <linux/sh_clk.h> @@ -159,6 +161,27 @@ * CN12: 3.3v */ +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply fixed1v8_power_consumers[] = +{ + /* J22 default position: 1.8V */ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), +}; + +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), +}; + +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + /* MTD */ static struct mtd_partition nor_flash_partitions[] = { { @@ -1138,21 +1161,6 @@ static void __init fsi_init_pm_clock(void) clk_put(fsia_ick); } -/* - * FIXME !! - * - * gpio_no_direction - * are quick_hack. - * - * current gpio frame work doesn't have - * the method to control only pull up/down/free. 
- * this function should be replaced by correct gpio function - */ -static void __init gpio_no_direction(u32 addr) -{ - __raw_writeb(0x00, addr); -} - /* TouchScreen */ #ifdef CONFIG_AP4EVB_QHD # define GPIO_TSC_IRQ GPIO_FN_IRQ28_123 @@ -1224,6 +1232,12 @@ static void __init ap4evb_init(void) u32 srcr4; struct clk *clk; + regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers, + ARRAY_SIZE(fixed1v8_power_consumers), 1800000); + regulator_register_always_on(1, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + regulator_register_fixed(2, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + /* External clock source */ clk_set_rate(&sh7372_dv_clki_clk, 27000000); @@ -1302,8 +1316,8 @@ static void __init ap4evb_init(void) gpio_request(GPIO_PORT9, NULL); gpio_request(GPIO_PORT10, NULL); - gpio_no_direction(GPIO_PORT9CR); /* FSIAOBT needs no direction */ - gpio_no_direction(GPIO_PORT10CR); /* FSIAOLR needs no direction */ + gpio_direction_none(GPIO_PORT9CR); /* FSIAOBT needs no direction */ + gpio_direction_none(GPIO_PORT10CR); /* FSIAOLR needs no direction */ /* card detect pin for MMC slot (CN7) */ gpio_request(GPIO_PORT41, NULL); @@ -1447,14 +1461,14 @@ static void __init ap4evb_init(void) platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices)); - sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc1_device); - sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); - sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); + rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc1_device); + rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device); + rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi1_device); - sh7372_add_device_to_domain(&sh7372_a4r, &ceu_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device); + rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device); hdmi_init_pm_clock(); fsi_init_pm_clock(); diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 9bd135531d76..cf10f92856dc 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -28,6 +28,8 @@ #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/gpio_keys.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/sh_eth.h> #include <linux/videodev2.h> #include <linux/usb/renesas_usbhs.h> @@ -37,14 +39,20 @@ #include <linux/mmc/sh_mobile_sdhi.h> #include <mach/common.h> #include <mach/irqs.h> +#include <mach/r8a7740.h> +#include <media/mt9t112.h> +#include <media/sh_mobile_ceu.h> +#include <media/soc_camera.h> #include <asm/page.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include <asm/hardware/cache-l2x0.h> -#include <mach/r8a7740.h> #include <video/sh_mobile_lcdc.h> +#include <video/sh_mobile_hdmi.h> +#include <sound/sh_fsi.h> +#include <sound/simple_card.h> /* * CON1 Camera Module @@ -108,6 +116,14 @@ */ /* + * FSI-WM8978 + * + * this command is required when playback. 
+ * + * # amixer set "Headphone" 50 + */ + +/* * USB function * * When you use USB Function, @@ -117,14 +133,8 @@ * These are a little bit complex. * see * usbhsf_power_ctrl() - * - * CAUTION - * - * It uses autonomy mode for USB hotplug at this point - * (= usbhs_private.platform_callback.get_vbus is NULL), - * since we don't know what's happen on PM control - * on this workaround. */ +#define IRQ7 evt2irq(0x02e0) #define USBCR1 0xe605810a #define USBH 0xC6700000 #define USBH_USBCTR 0x10834 @@ -204,6 +214,20 @@ static void usbhsf_power_ctrl(struct platform_device *pdev, } } +static int usbhsf_get_vbus(struct platform_device *pdev) +{ + return gpio_get_value(GPIO_PORT209); +} + +static irqreturn_t usbhsf_interrupt(int irq, void *data) +{ + struct platform_device *pdev = data; + + renesas_usbhs_call_notify_hotplug(pdev); + + return IRQ_HANDLED; +} + static void usbhsf_hardware_exit(struct platform_device *pdev) { struct usbhsf_private *priv = usbhsf_get_priv(pdev); @@ -227,11 +251,14 @@ static void usbhsf_hardware_exit(struct platform_device *pdev) priv->host = NULL; priv->func = NULL; priv->usbh_base = NULL; + + free_irq(IRQ7, pdev); } static int usbhsf_hardware_init(struct platform_device *pdev) { struct usbhsf_private *priv = usbhsf_get_priv(pdev); + int ret; priv->phy = clk_get(&pdev->dev, "phy"); priv->usb24 = clk_get(&pdev->dev, "usb24"); @@ -251,6 +278,14 @@ static int usbhsf_hardware_init(struct platform_device *pdev) return -EIO; } + ret = request_irq(IRQ7, usbhsf_interrupt, IRQF_TRIGGER_NONE, + dev_name(&pdev->dev), pdev); + if (ret) { + dev_err(&pdev->dev, "request_irq err\n"); + return ret; + } + irq_set_irq_type(IRQ7, IRQ_TYPE_EDGE_BOTH); + /* usb24 use 1/1 of parent clock (= usb24s = 24MHz) */ clk_set_rate(priv->usb24, clk_get_rate(clk_get_parent(priv->usb24))); @@ -262,6 +297,7 @@ static struct usbhsf_private usbhsf_private = { .info = { .platform_callback = { .get_id = usbhsf_get_id, + .get_vbus = usbhsf_get_vbus, .hardware_init = usbhsf_hardware_init, .hardware_exit = usbhsf_hardware_exit, .power_ctrl = usbhsf_power_ctrl, @@ -269,6 +305,8 @@ static struct usbhsf_private usbhsf_private = { .driver_param = { .buswait_bwait = 5, .detection_delay = 5, + .d0_rx_id = SHDMA_SLAVE_USBHS_RX, + .d1_tx_id = SHDMA_SLAVE_USBHS_TX, }, } }; @@ -384,6 +422,103 @@ static struct platform_device lcdc0_device = { }, }; +/* + * LCDC1/HDMI + */ +static struct sh_mobile_hdmi_info hdmi_info = { + .flags = HDMI_OUTPUT_PUSH_PULL | + HDMI_OUTPUT_POLARITY_HI | + HDMI_32BIT_REG | + HDMI_HAS_HTOP1 | + HDMI_SND_SRC_SPDIF, +}; + +static struct resource hdmi_resources[] = { + [0] = { + .name = "HDMI", + .start = 0xe6be0000, + .end = 0xe6be03ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0x1700), + .flags = IORESOURCE_IRQ, + }, + [2] = { + .name = "HDMI emma3pf", + .start = 0xe6be4000, + .end = 0xe6be43ff, + .flags = IORESOURCE_MEM, + }, +}; + +static struct platform_device hdmi_device = { + .name = "sh-mobile-hdmi", + .num_resources = ARRAY_SIZE(hdmi_resources), + .resource = hdmi_resources, + .id = -1, + .dev = { + .platform_data = &hdmi_info, + }, +}; + +static const struct fb_videomode lcdc1_mode = { + .name = "HDMI 720p", + .xres = 1280, + .yres = 720, + .pixclock = 13468, + .left_margin = 220, + .right_margin = 110, + .hsync_len = 40, + .upper_margin = 20, + .lower_margin = 5, + .vsync_len = 5, + .refresh = 60, + .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT, +}; + +static struct sh_mobile_lcdc_info hdmi_lcdc_info = { + .clock_source = LCDC_CLK_PERIPHERAL, /* HDMI clock */ + 
.ch[0] = { + .chan = LCDC_CHAN_MAINLCD, + .fourcc = V4L2_PIX_FMT_RGB565, + .interface_type = RGB24, + .clock_divider = 1, + .flags = LCDC_FLAGS_DWPOL, + .lcd_modes = &lcdc1_mode, + .num_modes = 1, + .tx_dev = &hdmi_device, + .panel_cfg = { + .width = 1280, + .height = 720, + }, + }, +}; + +static struct resource hdmi_lcdc_resources[] = { + [0] = { + .name = "LCDC1", + .start = 0xfe944000, + .end = 0xfe948000 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = intcs_evt2irq(0x1780), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device hdmi_lcdc_device = { + .name = "sh_mobile_lcdc_fb", + .num_resources = ARRAY_SIZE(hdmi_lcdc_resources), + .resource = hdmi_lcdc_resources, + .id = 1, + .dev = { + .platform_data = &hdmi_lcdc_info, + .coherent_dma_mask = ~0, + }, +}; + /* GPIO KEY */ #define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 } @@ -407,6 +542,17 @@ static struct platform_device gpio_keys_device = { }, }; +/* Fixed 3.3V regulator to be used by SDHI0, SDHI1, MMCIF */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vmmc", "sh_mmcif"), + REGULATOR_SUPPLY("vqmmc", "sh_mmcif"), +}; + /* SDHI0 */ /* * FIXME @@ -418,6 +564,8 @@ static struct platform_device gpio_keys_device = { */ #define IRQ31 evt2irq(0x33E0) static struct sh_mobile_sdhi_info sdhi0_info = { + .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, + .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |\ MMC_CAP_NEEDS_POLL, .tmio_ocr_mask = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34, @@ -458,6 +606,8 @@ static struct platform_device sdhi0_device = { /* SDHI1 */ static struct sh_mobile_sdhi_info sdhi1_info = { + .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX, + .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX, .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, .tmio_ocr_mask = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34, .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, @@ -532,12 +682,209 @@ static struct platform_device sh_mmcif_device = { .resource = sh_mmcif_resources, }; +/* Camera */ +static int mt9t111_power(struct device *dev, int mode) +{ + struct clk *mclk = clk_get(NULL, "video1"); + + if (IS_ERR(mclk)) { + dev_err(dev, "can't get video1 clock\n"); + return -EINVAL; + } + + if (mode) { + /* video1 (= CON1 camera) expect 24MHz */ + clk_set_rate(mclk, clk_round_rate(mclk, 24000000)); + clk_enable(mclk); + gpio_direction_output(GPIO_PORT158, 1); + } else { + gpio_direction_output(GPIO_PORT158, 0); + clk_disable(mclk); + } + + clk_put(mclk); + + return 0; +} + +static struct i2c_board_info i2c_camera_mt9t111 = { + I2C_BOARD_INFO("mt9t112", 0x3d), +}; + +static struct mt9t112_camera_info mt9t111_info = { + .divider = { 16, 0, 0, 7, 0, 10, 14, 7, 7 }, +}; + +static struct soc_camera_link mt9t111_link = { + .i2c_adapter_id = 0, + .bus_id = 0, + .board_info = &i2c_camera_mt9t111, + .power = mt9t111_power, + .priv = &mt9t111_info, +}; + +static struct platform_device camera_device = { + .name = "soc-camera-pdrv", + .id = 0, + .dev = { + .platform_data = &mt9t111_link, + }, +}; + +/* CEU0 */ +static struct sh_mobile_ceu_info sh_mobile_ceu0_info = { + .flags = SH_CEU_FLAG_LOWER_8BIT, +}; + +static struct resource ceu0_resources[] = { + [0] = { + .name = "CEU", + .start = 0xfe910000, + .end = 0xfe91009f, + .flags = IORESOURCE_MEM, + }, + [1] = { + 
.start = intcs_evt2irq(0x0500), + .flags = IORESOURCE_IRQ, + }, + [2] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device ceu0_device = { + .name = "sh_mobile_ceu", + .id = 0, + .num_resources = ARRAY_SIZE(ceu0_resources), + .resource = ceu0_resources, + .dev = { + .platform_data = &sh_mobile_ceu0_info, + .coherent_dma_mask = 0xffffffff, + }, +}; + +/* FSI */ +static int fsi_hdmi_set_rate(struct device *dev, int rate, int enable) +{ + struct clk *fsib; + int ret; + + /* it support 48KHz only */ + if (48000 != rate) + return -EINVAL; + + fsib = clk_get(dev, "ickb"); + if (IS_ERR(fsib)) + return -EINVAL; + + if (enable) { + ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64; + clk_enable(fsib); + } else { + ret = 0; + clk_disable(fsib); + } + + clk_put(fsib); + + return ret; +} + +static struct sh_fsi_platform_info fsi_info = { + /* FSI-WM8978 */ + .port_a = { + .tx_id = SHDMA_SLAVE_FSIA_TX, + }, + /* FSI-HDMI */ + .port_b = { + .flags = SH_FSI_FMT_SPDIF | + SH_FSI_ENABLE_STREAM_MODE, + .set_rate = fsi_hdmi_set_rate, + .tx_id = SHDMA_SLAVE_FSIB_TX, + } +}; + +static struct resource fsi_resources[] = { + [0] = { + .name = "FSI", + .start = 0xfe1f0000, + .end = 0xfe1f8400 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0x1840), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device fsi_device = { + .name = "sh_fsi2", + .id = -1, + .num_resources = ARRAY_SIZE(fsi_resources), + .resource = fsi_resources, + .dev = { + .platform_data = &fsi_info, + }, +}; + +/* FSI-WM8978 */ +static struct asoc_simple_dai_init_info fsi_wm8978_init_info = { + .fmt = SND_SOC_DAIFMT_I2S, + .codec_daifmt = SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF, + .cpu_daifmt = SND_SOC_DAIFMT_CBS_CFS, + .sysclk = 12288000, +}; + +static struct asoc_simple_card_info fsi_wm8978_info = { + .name = "wm8978", + .card = "FSI2A-WM8978", + .cpu_dai = "fsia-dai", + .codec = "wm8978.0-001a", + .platform = "sh_fsi2", + .codec_dai = "wm8978-hifi", + .init = &fsi_wm8978_init_info, +}; + +static struct platform_device fsi_wm8978_device = { + .name = "asoc-simple-card", + .id = 0, + .dev = { + .platform_data = &fsi_wm8978_info, + }, +}; + +/* FSI-HDMI */ +static struct asoc_simple_dai_init_info fsi2_hdmi_init_info = { + .cpu_daifmt = SND_SOC_DAIFMT_CBM_CFM, +}; + +static struct asoc_simple_card_info fsi2_hdmi_info = { + .name = "HDMI", + .card = "FSI2B-HDMI", + .cpu_dai = "fsib-dai", + .codec = "sh-mobile-hdmi", + .platform = "sh_fsi2", + .codec_dai = "sh_mobile_hdmi-hifi", + .init = &fsi2_hdmi_init_info, +}; + +static struct platform_device fsi_hdmi_device = { + .name = "asoc-simple-card", + .id = 1, + .dev = { + .platform_data = &fsi2_hdmi_info, + }, +}; + /* I2C */ static struct i2c_board_info i2c0_devices[] = { { I2C_BOARD_INFO("st1232-ts", 0x55), .irq = evt2irq(0x0340), }, + { + I2C_BOARD_INFO("wm8978", 0x1a), + }, }; /* @@ -549,6 +896,13 @@ static struct platform_device *eva_devices[] __initdata = { &sh_eth_device, &sdhi0_device, &sh_mmcif_device, + &hdmi_device, + &hdmi_lcdc_device, + &camera_device, + &ceu0_device, + &fsi_device, + &fsi_hdmi_device, + &fsi_wm8978_device, }; static void __init eva_clock_init(void) @@ -556,10 +910,14 @@ static void __init eva_clock_init(void) struct clk *system = clk_get(NULL, "system_clk"); struct clk *xtal1 = clk_get(NULL, "extal1"); struct clk *usb24s = clk_get(NULL, "usb24s"); + struct clk *fsibck = clk_get(NULL, "fsibck"); + struct clk *fsib = clk_get(&fsi_device.dev, "ickb"); if (IS_ERR(system) || IS_ERR(xtal1) || - IS_ERR(usb24s)) { + 
IS_ERR(usb24s) || + IS_ERR(fsibck) || + IS_ERR(fsib)) { pr_err("armadillo800eva board clock init failed\n"); goto clock_error; } @@ -570,6 +928,11 @@ static void __init eva_clock_init(void) /* usb24s use extal1 (= system) clock (= 24MHz) */ clk_set_parent(usb24s, system); + /* FSIBCK is 12.288MHz, and it is parent of FSI-B */ + clk_set_parent(fsib, fsibck); + clk_set_rate(fsibck, 12288000); + clk_set_rate(fsib, 12288000); + clock_error: if (!IS_ERR(system)) clk_put(system); @@ -577,16 +940,26 @@ clock_error: clk_put(xtal1); if (!IS_ERR(usb24s)) clk_put(usb24s); + if (!IS_ERR(fsibck)) + clk_put(fsibck); + if (!IS_ERR(fsib)) + clk_put(fsib); } /* * board init */ +#define GPIO_PORT7CR 0xe6050007 +#define GPIO_PORT8CR 0xe6050008 static void __init eva_init(void) { - eva_clock_init(); + struct platform_device *usb = NULL; + + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); r8a7740_pinmux_init(); + r8a7740_meram_workaround(); /* SCIFA1 */ gpio_request(GPIO_FN_SCIFA1_RXD, NULL); @@ -667,8 +1040,19 @@ static void __init eva_init(void) /* USB Host */ } else { /* USB Func */ - gpio_request(GPIO_FN_VBUS, NULL); + /* + * A1 chip has 2 IRQ7 pin and it was controled by MSEL register. + * OTOH, usbhs interrupt needs its value (HI/LOW) to decide + * USB connection/disconnection (usbhsf_get_vbus()). + * This means we needs to select GPIO_FN_IRQ7_PORT209 first, + * and select GPIO_PORT209 here + */ + gpio_request(GPIO_FN_IRQ7_PORT209, NULL); + gpio_request(GPIO_PORT209, NULL); + gpio_direction_input(GPIO_PORT209); + platform_device_register(&usbhsf_device); + usb = &usbhsf_device; } /* SDHI0 */ @@ -706,6 +1090,48 @@ static void __init eva_init(void) gpio_request(GPIO_FN_MMC1_D6_PORT143, NULL); gpio_request(GPIO_FN_MMC1_D7_PORT142, NULL); + /* CEU0 */ + gpio_request(GPIO_FN_VIO0_D7, NULL); + gpio_request(GPIO_FN_VIO0_D6, NULL); + gpio_request(GPIO_FN_VIO0_D5, NULL); + gpio_request(GPIO_FN_VIO0_D4, NULL); + gpio_request(GPIO_FN_VIO0_D3, NULL); + gpio_request(GPIO_FN_VIO0_D2, NULL); + gpio_request(GPIO_FN_VIO0_D1, NULL); + gpio_request(GPIO_FN_VIO0_D0, NULL); + gpio_request(GPIO_FN_VIO0_CLK, NULL); + gpio_request(GPIO_FN_VIO0_HD, NULL); + gpio_request(GPIO_FN_VIO0_VD, NULL); + gpio_request(GPIO_FN_VIO0_FIELD, NULL); + gpio_request(GPIO_FN_VIO_CKO, NULL); + + /* CON1/CON15 Camera */ + gpio_request(GPIO_PORT173, NULL); /* STANDBY */ + gpio_request(GPIO_PORT172, NULL); /* RST */ + gpio_request(GPIO_PORT158, NULL); /* CAM_PON */ + gpio_direction_output(GPIO_PORT173, 0); + gpio_direction_output(GPIO_PORT172, 1); + gpio_direction_output(GPIO_PORT158, 0); /* see mt9t111_power() */ + + /* FSI-WM8978 */ + gpio_request(GPIO_FN_FSIAIBT, NULL); + gpio_request(GPIO_FN_FSIAILR, NULL); + gpio_request(GPIO_FN_FSIAOMC, NULL); + gpio_request(GPIO_FN_FSIAOSLD, NULL); + gpio_request(GPIO_FN_FSIAISLD_PORT5, NULL); + + gpio_request(GPIO_PORT7, NULL); + gpio_request(GPIO_PORT8, NULL); + gpio_direction_none(GPIO_PORT7CR); /* FSIAOBT needs no direction */ + gpio_direction_none(GPIO_PORT8CR); /* FSIAOLR needs no direction */ + + /* FSI-HDMI */ + gpio_request(GPIO_FN_FSIBCK, NULL); + + /* HDMI */ + gpio_request(GPIO_FN_HDMI_HPD, NULL); + gpio_request(GPIO_FN_HDMI_CEC, NULL); + /* * CAUTION * @@ -752,6 +1178,13 @@ static void __init eva_init(void) platform_add_devices(eva_devices, ARRAY_SIZE(eva_devices)); + + eva_clock_init(); + + rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &lcdc0_device); + rmobile_add_device_to_domain(&r8a7740_pd_a4lc, &hdmi_lcdc_device); + 
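The board hunks now attach devices to SoC power areas through the shared R-mobile helpers (rmobile_add_device_to_domain() and the new pm-rmobile.o object) instead of the sh7372-specific ones. The pm-rmobile.h header only appears near the end of this patch, so purely as a hedged sketch, such a helper is normally a thin wrapper over the generic PM domain (genpd) API, roughly:

/*
 * Sketch only: the struct layout and helper body below are assumptions,
 * not taken from this patch.
 */
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

struct example_rmobile_pm_domain {
	struct generic_pm_domain genpd;
	/* SoC specifics: SYSC power-area bit, latency data, ... */
};

static void example_add_device_to_domain(struct example_rmobile_pm_domain *pd,
					 struct platform_device *pdev)
{
	/*
	 * Register the device with the domain so runtime PM can power the
	 * area down once every device in it is suspended; the real helper
	 * is also expected to hook up pm_clk so the module clock is gated
	 * together with the domain.
	 */
	pm_genpd_add_device(&pd->genpd, &pdev->dev);
}

Board code then calls the helper after platform_add_devices(), exactly as eva_init() here and the ap4evb/mackerel hunks do.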
if (usb) + rmobile_add_device_to_domain(&r8a7740_pd_a3sp, usb); } static void __init eva_earlytimer_init(void) diff --git a/arch/arm/mach-shmobile/board-bonito.c b/arch/arm/mach-shmobile/board-bonito.c index e9b32cfbf741..4129008eae29 100644 --- a/arch/arm/mach-shmobile/board-bonito.c +++ b/arch/arm/mach-shmobile/board-bonito.c @@ -26,6 +26,8 @@ #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/gpio.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/videodev2.h> #include <mach/common.h> @@ -75,6 +77,12 @@ * S38.2 = OFF */ +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + /* * FPGA */ @@ -360,6 +368,8 @@ static void __init bonito_init(void) { u16 val; + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + r8a7740_pinmux_init(); bonito_fpga_init(); diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c index f1257321999a..fa5dfc5c8ed6 100644 --- a/arch/arm/mach-shmobile/board-g4evm.c +++ b/arch/arm/mach-shmobile/board-g4evm.c @@ -26,6 +26,8 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/usb/r8a66597.h> #include <linux/io.h> #include <linux/input.h> @@ -196,6 +198,15 @@ static struct platform_device keysc_device = { }, }; +/* Fixed 3.3V regulator to be used by SDHI0 and SDHI1 */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), +}; + /* SDHI */ static struct sh_mobile_sdhi_info sdhi0_info = { .tmio_caps = MMC_CAP_SDIO_IRQ, @@ -271,26 +282,11 @@ static struct platform_device *g4evm_devices[] __initdata = { #define GPIO_SDHID1_D3 0xe6052106 #define GPIO_SDHICMD1 0xe6052107 -/* - * FIXME !! - * - * gpio_pull_up is quick_hack. - * - * current gpio frame work doesn't have - * the method to control only pull up/down/free. 
- * this function should be replaced by correct gpio function - */ -static void __init gpio_pull_up(u32 addr) -{ - u8 data = __raw_readb(addr); - - data &= 0x0F; - data |= 0xC0; - __raw_writeb(data, addr); -} - static void __init g4evm_init(void) { + regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + sh7377_pinmux_init(); /* Lit DS14 LED */ @@ -351,11 +347,11 @@ static void __init g4evm_init(void) gpio_request(GPIO_FN_SDHID0_3, NULL); gpio_request(GPIO_FN_SDHICMD0, NULL); gpio_request(GPIO_FN_SDHIWP0, NULL); - gpio_pull_up(GPIO_SDHID0_D0); - gpio_pull_up(GPIO_SDHID0_D1); - gpio_pull_up(GPIO_SDHID0_D2); - gpio_pull_up(GPIO_SDHID0_D3); - gpio_pull_up(GPIO_SDHICMD0); + gpio_request_pullup(GPIO_SDHID0_D0); + gpio_request_pullup(GPIO_SDHID0_D1); + gpio_request_pullup(GPIO_SDHID0_D2); + gpio_request_pullup(GPIO_SDHID0_D3); + gpio_request_pullup(GPIO_SDHICMD0); /* SDHI1 */ gpio_request(GPIO_FN_SDHICLK1, NULL); @@ -364,11 +360,11 @@ static void __init g4evm_init(void) gpio_request(GPIO_FN_SDHID1_2, NULL); gpio_request(GPIO_FN_SDHID1_3, NULL); gpio_request(GPIO_FN_SDHICMD1, NULL); - gpio_pull_up(GPIO_SDHID1_D0); - gpio_pull_up(GPIO_SDHID1_D1); - gpio_pull_up(GPIO_SDHID1_D2); - gpio_pull_up(GPIO_SDHID1_D3); - gpio_pull_up(GPIO_SDHICMD1); + gpio_request_pullup(GPIO_SDHID1_D0); + gpio_request_pullup(GPIO_SDHID1_D1); + gpio_request_pullup(GPIO_SDHID1_D2); + gpio_request_pullup(GPIO_SDHID1_D3); + gpio_request_pullup(GPIO_SDHICMD1); sh7377_add_standard_devices(); diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c index f60f1b281cc4..21dbe54304d5 100644 --- a/arch/arm/mach-shmobile/board-kota2.c +++ b/arch/arm/mach-shmobile/board-kota2.c @@ -27,6 +27,8 @@ #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/gpio.h> #include <linux/input.h> @@ -49,6 +51,12 @@ #include <asm/hardware/cache-l2x0.h> #include <asm/traps.h> +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + /* SMSC 9220 */ static struct resource smsc9220_resources[] = { [0] = { @@ -288,6 +296,13 @@ static struct platform_device leds_tpu30_device = { .resource = tpu30_resources, }; +/* Fixed 1.8V regulator to be used by MMCIF */ +static struct regulator_consumer_supply fixed1v8_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), +}; + /* MMCIF */ static struct resource mmcif_resources[] = { [0] = { @@ -321,6 +336,15 @@ static struct platform_device mmcif_device = { .resource = mmcif_resources, }; +/* Fixed 3.3V regulator to be used by SDHI0 and SDHI1 */ +static struct regulator_consumer_supply fixed3v3_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), +}; + /* SDHI0 */ static struct sh_mobile_sdhi_info sdhi0_info = { .tmio_caps = MMC_CAP_SD_HIGHSPEED, @@ -411,6 +435,12 @@ static struct platform_device *kota2_devices[] __initdata = { static void __init kota2_init(void) { + regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers, + ARRAY_SIZE(fixed1v8_power_consumers), 1800000); + 
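All of the boards touched by this series register their fixed MMC/SD supplies and the smsc911x dummy supplies through the helpers declared in <linux/regulator/fixed.h>, just as kota2_init() does here. A minimal usage sketch for a hypothetical board ("my_sdhi" is a placeholder consumer name, not something from this patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>

static struct regulator_consumer_supply example_3v3_consumers[] = {
	/* REGULATOR_SUPPLY(supply, dev_name): matched by supply + consumer */
	REGULATOR_SUPPLY("vmmc", "my_sdhi.0"),
	REGULATOR_SUPPLY("vqmmc", "my_sdhi.0"),
};

static void __init example_board_init(void)
{
	/* always-on 3.3 V rail: id 0, named "fixed-3.3V", 3300000 uV */
	regulator_register_always_on(0, "fixed-3.3V", example_3v3_consumers,
				     ARRAY_SIZE(example_3v3_consumers),
				     3300000);
}

regulator_register_fixed(id, supplies, n) is effectively the same call with a dummy name and 0 uV, which is why it is used for the smsc911x vddvario/vdd33a supplies where only the presence of a regulator matters, not its voltage.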
regulator_register_always_on(1, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + regulator_register_fixed(2, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + sh73a0_pinmux_init(); /* SCIFA2 (UART2) */ diff --git a/arch/arm/mach-shmobile/board-kzm9d.c b/arch/arm/mach-shmobile/board-kzm9d.c index 6a33cf393428..2c986eaae7b4 100644 --- a/arch/arm/mach-shmobile/board-kzm9d.c +++ b/arch/arm/mach-shmobile/board-kzm9d.c @@ -21,6 +21,8 @@ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/platform_device.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <mach/common.h> #include <mach/emev2.h> @@ -28,6 +30,12 @@ #include <asm/mach/arch.h> #include <asm/hardware/gic.h> +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + /* Ether */ static struct resource smsc911x_resources[] = { [0] = { @@ -63,6 +71,8 @@ static struct platform_device *kzm9d_devices[] __initdata = { void __init kzm9d_add_standard_devices(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + emev2_add_standard_devices(); platform_add_devices(kzm9d_devices, ARRAY_SIZE(kzm9d_devices)); diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c index c0ae815e7beb..53b7ea92c32c 100644 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ b/arch/arm/mach-shmobile/board-kzm9g.c @@ -30,9 +30,14 @@ #include <linux/mmc/sh_mobile_sdhi.h> #include <linux/mfd/tmio.h> #include <linux/platform_device.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/usb/r8a66597.h> +#include <linux/usb/renesas_usbhs.h> #include <linux/videodev2.h> +#include <sound/sh_fsi.h> +#include <sound/simple_card.h> #include <mach/irqs.h> #include <mach/sh73a0.h> #include <mach/common.h> @@ -54,6 +59,20 @@ #define GPIO_PCF8575_PORT15 (GPIO_NR + 13) #define GPIO_PCF8575_PORT16 (GPIO_NR + 14) +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + +/* + * FSI-AK4648 + * + * this command is required when playback. 
+ * + * # amixer set "LINEOUT Mixer DACL" on + */ + /* SMSC 9221 */ static struct resource smsc9221_resources[] = { [0] = { @@ -112,6 +131,151 @@ static struct platform_device usb_host_device = { .resource = usb_resources, }; +/* USB Func CN17 */ +struct usbhs_private { + unsigned int phy; + unsigned int cr2; + struct renesas_usbhs_platform_info info; +}; + +#define IRQ15 intcs_evt2irq(0x03e0) +#define USB_PHY_MODE (1 << 4) +#define USB_PHY_INT_EN ((1 << 3) | (1 << 2)) +#define USB_PHY_ON (1 << 1) +#define USB_PHY_OFF (1 << 0) +#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF) + +#define usbhs_get_priv(pdev) \ + container_of(renesas_usbhs_get_info(pdev), struct usbhs_private, info) + +static int usbhs_get_vbus(struct platform_device *pdev) +{ + struct usbhs_private *priv = usbhs_get_priv(pdev); + + return !((1 << 7) & __raw_readw(priv->cr2)); +} + +static void usbhs_phy_reset(struct platform_device *pdev) +{ + struct usbhs_private *priv = usbhs_get_priv(pdev); + + /* init phy */ + __raw_writew(0x8a0a, priv->cr2); +} + +static int usbhs_get_id(struct platform_device *pdev) +{ + return USBHS_GADGET; +} + +static irqreturn_t usbhs_interrupt(int irq, void *data) +{ + struct platform_device *pdev = data; + struct usbhs_private *priv = usbhs_get_priv(pdev); + + renesas_usbhs_call_notify_hotplug(pdev); + + /* clear status */ + __raw_writew(__raw_readw(priv->phy) | USB_PHY_INT_CLR, priv->phy); + + return IRQ_HANDLED; +} + +static int usbhs_hardware_init(struct platform_device *pdev) +{ + struct usbhs_private *priv = usbhs_get_priv(pdev); + int ret; + + /* clear interrupt status */ + __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy); + + ret = request_irq(IRQ15, usbhs_interrupt, IRQF_TRIGGER_HIGH, + dev_name(&pdev->dev), pdev); + if (ret) { + dev_err(&pdev->dev, "request_irq err\n"); + return ret; + } + + /* enable USB phy interrupt */ + __raw_writew(USB_PHY_MODE | USB_PHY_INT_EN, priv->phy); + + return 0; +} + +static void usbhs_hardware_exit(struct platform_device *pdev) +{ + struct usbhs_private *priv = usbhs_get_priv(pdev); + + /* clear interrupt status */ + __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->phy); + + free_irq(IRQ15, pdev); +} + +static u32 usbhs_pipe_cfg[] = { + USB_ENDPOINT_XFER_CONTROL, + USB_ENDPOINT_XFER_ISOC, + USB_ENDPOINT_XFER_ISOC, + USB_ENDPOINT_XFER_BULK, + USB_ENDPOINT_XFER_BULK, + USB_ENDPOINT_XFER_BULK, + USB_ENDPOINT_XFER_INT, + USB_ENDPOINT_XFER_INT, + USB_ENDPOINT_XFER_INT, + USB_ENDPOINT_XFER_BULK, + USB_ENDPOINT_XFER_BULK, + USB_ENDPOINT_XFER_BULK, + USB_ENDPOINT_XFER_BULK, + USB_ENDPOINT_XFER_BULK, + USB_ENDPOINT_XFER_BULK, + USB_ENDPOINT_XFER_BULK, +}; + +static struct usbhs_private usbhs_private = { + .phy = 0xe60781e0, /* USBPHYINT */ + .cr2 = 0xe605810c, /* USBCR2 */ + .info = { + .platform_callback = { + .hardware_init = usbhs_hardware_init, + .hardware_exit = usbhs_hardware_exit, + .get_id = usbhs_get_id, + .phy_reset = usbhs_phy_reset, + .get_vbus = usbhs_get_vbus, + }, + .driver_param = { + .buswait_bwait = 4, + .has_otg = 1, + .pipe_type = usbhs_pipe_cfg, + .pipe_size = ARRAY_SIZE(usbhs_pipe_cfg), + }, + }, +}; + +static struct resource usbhs_resources[] = { + [0] = { + .start = 0xE6890000, + .end = 0xE68900e6 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = gic_spi(62), + .end = gic_spi(62), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usbhs_device = { + .name = "renesas_usbhs", + .id = -1, + .dev = { + .dma_mask = NULL, + .coherent_dma_mask = 0xffffffff, + .platform_data = &usbhs_private.info, + }, + 
.num_resources = ARRAY_SIZE(usbhs_resources), + .resource = usbhs_resources, +}; + /* LCDC */ static struct fb_videomode kzm_lcdc_mode = { .name = "WVGA Panel", @@ -166,6 +330,13 @@ static struct platform_device lcdc_device = { }, }; +/* Fixed 1.8V regulator to be used by MMCIF */ +static struct regulator_consumer_supply fixed1v8_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), +}; + /* MMCIF */ static struct resource sh_mmcif_resources[] = { [0] = { @@ -187,6 +358,8 @@ static struct resource sh_mmcif_resources[] = { static struct sh_mmcif_plat_data sh_mmcif_platdata = { .ocr = MMC_VDD_165_195, .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, + .slave_id_tx = SHDMA_SLAVE_MMCIF_TX, + .slave_id_rx = SHDMA_SLAVE_MMCIF_RX, }; static struct platform_device mmc_device = { @@ -200,6 +373,15 @@ static struct platform_device mmc_device = { .resource = sh_mmcif_resources, }; +/* Fixed 2.8V regulators to be used by SDHI0 and SDHI2 */ +static struct regulator_consumer_supply fixed2v8_power_consumers[] = +{ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.2"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.2"), +}; + /* SDHI */ static struct sh_mobile_sdhi_info sdhi0_info = { .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, @@ -240,6 +422,50 @@ static struct platform_device sdhi0_device = { }, }; +/* Micro SD */ +static struct sh_mobile_sdhi_info sdhi2_info = { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | + TMIO_MMC_USE_GPIO_CD | + TMIO_MMC_WRPROTECT_DISABLE, + .tmio_caps = MMC_CAP_SD_HIGHSPEED, + .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, + .cd_gpio = GPIO_PORT13, +}; + +static struct resource sdhi2_resources[] = { + [0] = { + .name = "SDHI2", + .start = 0xee140000, + .end = 0xee1400ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .name = SH_MOBILE_SDHI_IRQ_CARD_DETECT, + .start = gic_spi(103), + .flags = IORESOURCE_IRQ, + }, + [2] = { + .name = SH_MOBILE_SDHI_IRQ_SDCARD, + .start = gic_spi(104), + .flags = IORESOURCE_IRQ, + }, + [3] = { + .name = SH_MOBILE_SDHI_IRQ_SDIO, + .start = gic_spi(105), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device sdhi2_device = { + .name = "sh_mobile_sdhi", + .id = 2, + .num_resources = ARRAY_SIZE(sdhi2_resources), + .resource = sdhi2_resources, + .dev = { + .platform_data = &sdhi2_info, + }, +}; + /* KEY */ #define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 } @@ -267,11 +493,74 @@ static struct platform_device gpio_keys_device = { }, }; +/* FSI-AK4648 */ +static struct sh_fsi_platform_info fsi_info = { + .port_a = { + .tx_id = SHDMA_SLAVE_FSI2A_TX, + }, +}; + +static struct resource fsi_resources[] = { + [0] = { + .name = "FSI", + .start = 0xEC230000, + .end = 0xEC230400 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = gic_spi(146), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device fsi_device = { + .name = "sh_fsi2", + .id = -1, + .num_resources = ARRAY_SIZE(fsi_resources), + .resource = fsi_resources, + .dev = { + .platform_data = &fsi_info, + }, +}; + +static struct asoc_simple_dai_init_info fsi2_ak4648_init_info = { + .fmt = SND_SOC_DAIFMT_LEFT_J, + .codec_daifmt = SND_SOC_DAIFMT_CBM_CFM, + .cpu_daifmt = SND_SOC_DAIFMT_CBS_CFS, + .sysclk = 11289600, +}; + +static struct asoc_simple_card_info fsi2_ak4648_info = { + .name = "AK4648", + .card = "FSI2A-AK4648", + .cpu_dai = "fsia-dai", + .codec = "ak4642-codec.0-0012", + .platform = "sh_fsi2", + .codec_dai = 
"ak4642-hifi", + .init = &fsi2_ak4648_init_info, +}; + +static struct platform_device fsi_ak4648_device = { + .name = "asoc-simple-card", + .dev = { + .platform_data = &fsi2_ak4648_info, + }, +}; + /* I2C */ static struct pcf857x_platform_data pcf8575_pdata = { .gpio_base = GPIO_PCF8575_BASE, }; +static struct i2c_board_info i2c0_devices[] = { + { + I2C_BOARD_INFO("ak4648", 0x12), + }, + { + I2C_BOARD_INFO("r2025sd", 0x32), + } +}; + static struct i2c_board_info i2c1_devices[] = { { I2C_BOARD_INFO("st1232-ts", 0x55), @@ -289,10 +578,14 @@ static struct i2c_board_info i2c3_devices[] = { static struct platform_device *kzm_devices[] __initdata = { &smsc_device, &usb_host_device, + &usbhs_device, &lcdc_device, &mmc_device, &sdhi0_device, + &sdhi2_device, &gpio_keys_device, + &fsi_device, + &fsi_ak4648_device, }; /* @@ -350,6 +643,12 @@ device_initcall(as3711_enable_lcdc_backlight); static void __init kzm_init(void) { + regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers, + ARRAY_SIZE(fixed1v8_power_consumers), 1800000); + regulator_register_always_on(1, "fixed-2.8V", fixed2v8_power_consumers, + ARRAY_SIZE(fixed2v8_power_consumers), 2800000); + regulator_register_fixed(2, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + sh73a0_pinmux_init(); /* enable SCIFA4 */ @@ -427,15 +726,36 @@ static void __init kzm_init(void) gpio_request(GPIO_PORT15, NULL); gpio_direction_output(GPIO_PORT15, 1); /* power */ + /* enable Micro SD */ + gpio_request(GPIO_FN_SDHID2_0, NULL); + gpio_request(GPIO_FN_SDHID2_1, NULL); + gpio_request(GPIO_FN_SDHID2_2, NULL); + gpio_request(GPIO_FN_SDHID2_3, NULL); + gpio_request(GPIO_FN_SDHICMD2, NULL); + gpio_request(GPIO_FN_SDHICLK2, NULL); + gpio_request(GPIO_PORT14, NULL); + gpio_direction_output(GPIO_PORT14, 1); /* power */ + /* I2C 3 */ gpio_request(GPIO_FN_PORT27_I2C_SCL3, NULL); gpio_request(GPIO_FN_PORT28_I2C_SDA3, NULL); + /* enable FSI2 port A (ak4648) */ + gpio_request(GPIO_FN_FSIACK, NULL); + gpio_request(GPIO_FN_FSIAILR, NULL); + gpio_request(GPIO_FN_FSIAIBT, NULL); + gpio_request(GPIO_FN_FSIAISLD, NULL); + gpio_request(GPIO_FN_FSIAOSLD, NULL); + + /* enable USB */ + gpio_request(GPIO_FN_VBUS_0, NULL); + #ifdef CONFIG_CACHE_L2X0 /* Early BRESP enable, Shared attribute override enable, 64K*8way */ l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff); #endif + i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); i2c_register_board_info(1, i2c1_devices, ARRAY_SIZE(i2c1_devices)); i2c_register_board_info(3, i2c3_devices, ARRAY_SIZE(i2c3_devices)); diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 150122a44630..7ea2b31e3199 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -41,6 +41,8 @@ #include <linux/mtd/physmap.h> #include <linux/mtd/sh_flctl.h> #include <linux/pm_clock.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <linux/sh_intc.h> #include <linux/tca6416_keypad.h> @@ -203,31 +205,32 @@ * amixer set "HPOUTR Mixer DACH" on */ -/* - * FIXME !! - * - * gpio_no_direction - * gpio_pull_down - * are quick_hack. - * - * current gpio frame work doesn't have - * the method to control only pull up/down/free. 
- * this function should be replaced by correct gpio function - */ -static void __init gpio_no_direction(u32 addr) +/* Fixed 3.3V and 1.8V regulators to be used by multiple devices */ +static struct regulator_consumer_supply fixed1v8_power_consumers[] = { - __raw_writeb(0x00, addr); -} + /* + * J22 on mackerel switches mmcif.0 and sdhi.1 between 1.8V and 3.3V + * Since we cannot support both voltages, we support the default 1.8V + */ + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.1"), + REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"), +}; -static void __init gpio_pull_down(u32 addr) +static struct regulator_consumer_supply fixed3v3_power_consumers[] = { - u8 data = __raw_readb(addr); - - data &= 0x0F; - data |= 0xA0; + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"), + REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.2"), + REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.2"), +}; - __raw_writeb(data, addr); -} +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; /* MTD */ static struct mtd_partition nor_flash_partitions[] = { @@ -1409,6 +1412,12 @@ static void __init mackerel_init(void) u32 srcr4; struct clk *clk; + regulator_register_always_on(0, "fixed-1.8V", fixed1v8_power_consumers, + ARRAY_SIZE(fixed1v8_power_consumers), 1800000); + regulator_register_always_on(1, "fixed-3.3V", fixed3v3_power_consumers, + ARRAY_SIZE(fixed3v3_power_consumers), 3300000); + regulator_register_fixed(2, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + /* External clock source */ clk_set_rate(&sh7372_dv_clki_clk, 27000000); @@ -1458,11 +1467,11 @@ static void __init mackerel_init(void) /* USBHS0 */ gpio_request(GPIO_FN_VBUS0_0, NULL); - gpio_pull_down(GPIO_PORT168CR); /* VBUS0_0 pull down */ + gpio_request_pulldown(GPIO_PORT168CR); /* VBUS0_0 pull down */ /* USBHS1 */ gpio_request(GPIO_FN_VBUS0_1, NULL); - gpio_pull_down(GPIO_PORT167CR); /* VBUS0_1 pull down */ + gpio_request_pulldown(GPIO_PORT167CR); /* VBUS0_1 pull down */ gpio_request(GPIO_FN_IDIN_1_113, NULL); /* enable FSI2 port A (ak4643) */ @@ -1475,8 +1484,8 @@ static void __init mackerel_init(void) gpio_request(GPIO_PORT9, NULL); gpio_request(GPIO_PORT10, NULL); - gpio_no_direction(GPIO_PORT9CR); /* FSIAOBT needs no direction */ - gpio_no_direction(GPIO_PORT10CR); /* FSIAOLR needs no direction */ + gpio_direction_none(GPIO_PORT9CR); /* FSIAOBT needs no direction */ + gpio_direction_none(GPIO_PORT10CR); /* FSIAOLR needs no direction */ intc_set_priority(IRQ_FSI, 3); /* irq priority FSI(3) > SMSC911X(2) */ @@ -1614,20 +1623,20 @@ static void __init mackerel_init(void) platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices)); - sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device); - sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device); - sh7372_add_device_to_domain(&sh7372_a4lc, &meram_device); - sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs0_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs1_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &nand_flash_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device); + rmobile_add_device_to_domain(&sh7372_pd_a4lc, &lcdc_device); + rmobile_add_device_to_domain(&sh7372_pd_a4lc, 
&hdmi_lcdc_device); + rmobile_add_device_to_domain(&sh7372_pd_a4lc, &meram_device); + rmobile_add_device_to_domain(&sh7372_pd_a4mp, &fsi_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs0_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usbhs1_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &nand_flash_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sh_mmcif_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi0_device); #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) - sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi1_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi1_device); #endif - sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi2_device); - sh7372_add_device_to_domain(&sh7372_a4r, &ceu_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &sdhi2_device); + rmobile_add_device_to_domain(&sh7372_pd_a4r, &ceu_device); hdmi_init_pm_clock(); sh7372_pm_init(); diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c index 14de3787cafc..3a528cf4366c 100644 --- a/arch/arm/mach-shmobile/board-marzen.c +++ b/arch/arm/mach-shmobile/board-marzen.c @@ -27,6 +27,8 @@ #include <linux/io.h> #include <linux/gpio.h> #include <linux/dma-mapping.h> +#include <linux/regulator/fixed.h> +#include <linux/regulator/machine.h> #include <linux/smsc911x.h> #include <mach/hardware.h> #include <mach/r8a7779.h> @@ -37,6 +39,12 @@ #include <asm/hardware/gic.h> #include <asm/traps.h> +/* Dummy supplies, where voltage doesn't matter */ +static struct regulator_consumer_supply dummy_supplies[] = { + REGULATOR_SUPPLY("vddvario", "smsc911x"), + REGULATOR_SUPPLY("vdd33a", "smsc911x"), +}; + /* SMSC LAN89218 */ static struct resource smsc911x_resources[] = { [0] = { @@ -73,6 +81,8 @@ static struct platform_device *marzen_devices[] __initdata = { static void __init marzen_init(void) { + regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + r8a7779_pinmux_init(); /* SCIF2 (CN18: DEBUG0) */ diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c index 26eea5f21054..ad5fccc7b5e7 100644 --- a/arch/arm/mach-shmobile/clock-r8a7740.c +++ b/arch/arm/mach-shmobile/clock-r8a7740.c @@ -43,7 +43,10 @@ /* CPG registers */ #define FRQCRA 0xe6150000 #define FRQCRB 0xe6150004 +#define VCLKCR1 0xE6150008 +#define VCLKCR2 0xE615000c #define FRQCRC 0xe61500e0 +#define FSIACKCR 0xe6150018 #define PLLC01CR 0xe6150028 #define SUBCKCR 0xe6150080 @@ -54,6 +57,8 @@ #define MSTPSR2 0xe6150040 #define MSTPSR3 0xe6150048 #define MSTPSR4 0xe615004c +#define FSIBCKCR 0xe6150090 +#define HDMICKCR 0xe6150094 #define SMSTPCR0 0xe6150130 #define SMSTPCR1 0xe6150134 #define SMSTPCR2 0xe6150138 @@ -271,6 +276,13 @@ static struct clk usb24_clk = { .parent = &usb24s_clk, }; +/* External FSIACK/FSIBCK clock */ +static struct clk fsiack_clk = { +}; + +static struct clk fsibck_clk = { +}; + struct clk *main_clks[] = { &extalr_clk, &extal1_clk, @@ -288,6 +300,8 @@ struct clk *main_clks[] = { &pllc1_div2_clk, &usb24s_clk, &usb24_clk, + &fsiack_clk, + &fsibck_clk, }; static void div4_kick(struct clk *clk) @@ -313,6 +327,107 @@ static struct clk_div4_table div4_table = { .kick = div4_kick, }; +/* DIV6 reparent */ +enum { + DIV6_HDMI, + DIV6_VCLK1, DIV6_VCLK2, + DIV6_FSIA, DIV6_FSIB, + DIV6_REPARENT_NR, +}; + +static struct clk *hdmi_parent[] = { + [0] = &pllc1_div2_clk, + [1] = &system_clk, + [2] = &dv_clk +}; + +static struct clk *vclk_parents[8] = { + [0] = &pllc1_div2_clk, + [2] = 
&dv_clk, + [3] = &usb24s_clk, + [4] = &extal1_div2_clk, + [5] = &extalr_clk, +}; + +static struct clk *fsia_parents[] = { + [0] = &pllc1_div2_clk, + [1] = &fsiack_clk, /* external clock */ +}; + +static struct clk *fsib_parents[] = { + [0] = &pllc1_div2_clk, + [1] = &fsibck_clk, /* external clock */ +}; + +static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = { + [DIV6_HDMI] = SH_CLK_DIV6_EXT(HDMICKCR, 0, + hdmi_parent, ARRAY_SIZE(hdmi_parent), 6, 2), + [DIV6_VCLK1] = SH_CLK_DIV6_EXT(VCLKCR1, 0, + vclk_parents, ARRAY_SIZE(vclk_parents), 12, 3), + [DIV6_VCLK2] = SH_CLK_DIV6_EXT(VCLKCR2, 0, + vclk_parents, ARRAY_SIZE(vclk_parents), 12, 3), + [DIV6_FSIA] = SH_CLK_DIV6_EXT(FSIACKCR, 0, + fsia_parents, ARRAY_SIZE(fsia_parents), 6, 2), + [DIV6_FSIB] = SH_CLK_DIV6_EXT(FSIBCKCR, 0, + fsib_parents, ARRAY_SIZE(fsib_parents), 6, 2), +}; + +/* HDMI1/2 clock */ +static unsigned long hdmi12_recalc(struct clk *clk) +{ + u32 val = __raw_readl(HDMICKCR); + int shift = (int)clk->priv; + + val >>= shift; + val &= 0x3; + + return clk->parent->rate / (1 << val); +}; + +static int hdmi12_set_rate(struct clk *clk, unsigned long rate) +{ + u32 val, mask; + int i, shift; + + for (i = 0; i < 3; i++) + if (rate == clk->parent->rate / (1 << i)) + goto find; + return -ENODEV; + +find: + shift = (int)clk->priv; + + val = __raw_readl(HDMICKCR); + mask = ~(0x3 << shift); + val = (val & mask) | i << shift; + __raw_writel(val, HDMICKCR); + + return 0; +}; + +static struct sh_clk_ops hdmi12_clk_ops = { + .recalc = hdmi12_recalc, + .set_rate = hdmi12_set_rate, +}; + +static struct clk hdmi1_clk = { + .ops = &hdmi12_clk_ops, + .priv = (void *)9, + .parent = &div6_reparent_clks[DIV6_HDMI], /* late install */ +}; + +static struct clk hdmi2_clk = { + .ops = &hdmi12_clk_ops, + .priv = (void *)11, + .parent = &div6_reparent_clks[DIV6_HDMI], /* late install */ +}; + +static struct clk *late_main_clks[] = { + &hdmi1_clk, + &hdmi2_clk, +}; + +/* MSTP */ enum { DIV4_I, DIV4_ZG, DIV4_B, DIV4_M1, DIV4_HP, DIV4_HPP, DIV4_USBP, DIV4_S, DIV4_ZB, DIV4_M3, DIV4_CP, @@ -343,11 +458,12 @@ static struct clk div6_clks[DIV6_NR] = { }; enum { - MSTP125, + MSTP128, MSTP127, MSTP125, MSTP116, MSTP111, MSTP100, MSTP117, MSTP230, MSTP222, + MSTP218, MSTP217, MSTP216, MSTP214, MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, MSTP329, MSTP328, MSTP323, MSTP320, @@ -360,6 +476,8 @@ enum { }; static struct clk mstp_clks[MSTP_NR] = { + [MSTP128] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 28, 0), /* CEU21 */ + [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 27, 0), /* CEU20 */ [MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ [MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ [MSTP116] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ @@ -368,6 +486,10 @@ static struct clk mstp_clks[MSTP_NR] = { [MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 30, 0), /* SCIFA6 */ [MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 22, 0), /* SCIFA7 */ + [MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */ + [MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */ + [MSTP216] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */ + [MSTP214] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 14, 0), /* USBDMAC */ [MSTP207] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ [MSTP206] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ [MSTP204] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], 
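The new hdmi1/hdmi2 clocks above divide the shared DIV6_HDMI parent by 1, 2 or 4 via a 2-bit field in HDMICKCR (shift 9 for HDMI1, 11 for HDMI2, per the .priv values). A short usage sketch, with the parent rate treated as hypothetical:

#include <linux/clk.h>
#include <linux/err.h>

static void example_pick_hdmi_rate(void)
{
	struct clk *hdmi1 = clk_get(NULL, "hdmi1");	/* CLKDEV_CON_ID above */

	if (IS_ERR(hdmi1))
		return;

	/*
	 * hdmi12_set_rate() only matches rate == parent / (1 << i), i = 0..2,
	 * and returns -ENODEV otherwise; e.g. a 48 MHz parent allows exactly
	 * 48, 24 or 12 MHz, so parent/2 is always a legal request.
	 */
	clk_set_rate(hdmi1, clk_get_rate(clk_get_parent(hdmi1)) / 2);
	clk_put(hdmi1);
}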
SMSTPCR2, 4, 0), /* SCIFA0 */ @@ -408,6 +530,12 @@ static struct clk_lookup lookups[] = { CLKDEV_CON_ID("pllc1_clk", &pllc1_clk), CLKDEV_CON_ID("pllc1_div2_clk", &pllc1_div2_clk), CLKDEV_CON_ID("usb24s", &usb24s_clk), + CLKDEV_CON_ID("hdmi1", &hdmi1_clk), + CLKDEV_CON_ID("hdmi2", &hdmi2_clk), + CLKDEV_CON_ID("video1", &div6_reparent_clks[DIV6_VCLK1]), + CLKDEV_CON_ID("video2", &div6_reparent_clks[DIV6_VCLK2]), + CLKDEV_CON_ID("fsiack", &fsiack_clk), + CLKDEV_CON_ID("fsibck", &fsibck_clk), /* DIV4 clocks */ CLKDEV_CON_ID("i_clk", &div4_clks[DIV4_I]), @@ -430,6 +558,8 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), CLKDEV_DEV_ID("sh_mobile_lcdc_fb.1", &mstp_clks[MSTP117]), CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), + CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), + CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP128]), CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[MSTP201]), @@ -438,7 +568,10 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), - + CLKDEV_DEV_ID("sh-dma-engine.3", &mstp_clks[MSTP214]), + CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), + CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), + CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP222]), CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP230]), @@ -459,6 +592,10 @@ static struct clk_lookup lookups[] = { CLKDEV_ICK_ID("phy", "renesas_usbhs", &mstp_clks[MSTP406]), CLKDEV_ICK_ID("pci", "renesas_usbhs", &div4_clks[DIV4_USBP]), CLKDEV_ICK_ID("usb24", "renesas_usbhs", &usb24_clk), + CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]), + + CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]), + CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]), }; void __init r8a7740_clock_init(u8 md_ck) @@ -495,7 +632,14 @@ void __init r8a7740_clock_init(u8 md_ck) ret = sh_clk_div6_register(div6_clks, DIV6_NR); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); + ret = sh_clk_div6_reparent_register(div6_reparent_clks, + DIV6_REPARENT_NR); + + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); + + for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++) + ret = clk_register(late_main_clks[k]); clkdev_add_table(lookups, ARRAY_SIZE(lookups)); diff --git a/arch/arm/mach-shmobile/clock-r8a7779.c b/arch/arm/mach-shmobile/clock-r8a7779.c index 7d6e9fe47b56..339c62c824d5 100644 --- a/arch/arm/mach-shmobile/clock-r8a7779.c +++ b/arch/arm/mach-shmobile/clock-r8a7779.c @@ -162,7 +162,7 @@ void __init r8a7779_clock_init(void) ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++) ret = clk_register(late_main_clks[k]); diff --git a/arch/arm/mach-shmobile/clock-sh7367.c b/arch/arm/mach-shmobile/clock-sh7367.c index 006e7b5d304c..162b791b8984 100644 --- a/arch/arm/mach-shmobile/clock-sh7367.c +++ b/arch/arm/mach-shmobile/clock-sh7367.c @@ -344,7 +344,7 @@ void __init sh7367_clock_init(void) ret = sh_clk_div6_register(div6_clks, DIV6_NR); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); clkdev_add_table(lookups, ARRAY_SIZE(lookups)); diff --git 
a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c index 94d1f88246d3..5a2894b1c965 100644 --- a/arch/arm/mach-shmobile/clock-sh7372.c +++ b/arch/arm/mach-shmobile/clock-sh7372.c @@ -704,7 +704,7 @@ void __init sh7372_clock_init(void) ret = sh_clk_div6_reparent_register(div6_reparent_clks, DIV6_REPARENT_NR); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++) ret = clk_register(late_main_clks[k]); diff --git a/arch/arm/mach-shmobile/clock-sh7377.c b/arch/arm/mach-shmobile/clock-sh7377.c index 0798a15936c3..85f2a3ec2c44 100644 --- a/arch/arm/mach-shmobile/clock-sh7377.c +++ b/arch/arm/mach-shmobile/clock-sh7377.c @@ -355,7 +355,7 @@ void __init sh7377_clock_init(void) ret = sh_clk_div6_register(div6_clks, DIV6_NR); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); clkdev_add_table(lookups, ARRAY_SIZE(lookups)); diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c index 3946c4ba2aa8..7f8da18a8580 100644 --- a/arch/arm/mach-shmobile/clock-sh73a0.c +++ b/arch/arm/mach-shmobile/clock-sh73a0.c @@ -475,9 +475,9 @@ static struct clk *late_main_clks[] = { enum { MSTP001, MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100, - MSTP219, MSTP218, + MSTP219, MSTP218, MSTP217, MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, - MSTP331, MSTP329, MSTP325, MSTP323, + MSTP331, MSTP329, MSTP328, MSTP325, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, MSTP311, MSTP303, MSTP302, MSTP301, MSTP300, MSTP411, MSTP410, MSTP403, @@ -498,6 +498,7 @@ static struct clk mstp_clks[MSTP_NR] = { [MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ [MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */ [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* SY-DMAC */ + [MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* MP-DMAC */ [MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */ [MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */ [MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */ @@ -507,8 +508,10 @@ static struct clk mstp_clks[MSTP_NR] = { [MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */ [MSTP331] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 31, 0), /* SCIFA6 */ [MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */ + [MSTP328] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 28, 0), /*FSI*/ [MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */ [MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */ + [MSTP322] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 22, 0), /* USB */ [MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */ [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */ [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */ @@ -553,6 +556,7 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */ + CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* MP-DMAC */ CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ @@ -562,8 +566,10 @@ static struct clk_lookup lookups[] 
= { CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */ CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */ CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */ + CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI */ CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */ CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */ + CLKDEV_DEV_ID("renesas_usbhs", &mstp_clks[MSTP322]), /* USB */ CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */ CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */ @@ -612,7 +618,7 @@ void __init sh73a0_clock_init(void) ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR); if (!ret) - ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++) ret = clk_register(late_main_clks[k]); diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h index 01e2bc014f15..45e61dada030 100644 --- a/arch/arm/mach-shmobile/include/mach/common.h +++ b/arch/arm/mach-shmobile/include/mach/common.h @@ -77,6 +77,7 @@ extern void r8a7779_add_standard_devices(void); extern void r8a7779_clock_init(void); extern void r8a7779_pinmux_init(void); extern void r8a7779_pm_init(void); +extern void r8a7740_meram_workaround(void); extern unsigned int r8a7779_get_core_count(void); extern int r8a7779_platform_cpu_kill(unsigned int cpu); diff --git a/arch/arm/mach-shmobile/include/mach/dma-register.h b/arch/arm/mach-shmobile/include/mach/dma-register.h new file mode 100644 index 000000000000..97c40bd9b94f --- /dev/null +++ b/arch/arm/mach-shmobile/include/mach/dma-register.h @@ -0,0 +1,84 @@ +/* + * SH-ARM CPU-specific DMA definitions, used by both DMA drivers + * + * Copyright (C) 2012 Renesas Solutions Corp + * + * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + * + * Based on arch/sh/include/cpu-sh4/cpu/dma-register.h + * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef DMA_REGISTER_H +#define DMA_REGISTER_H + +/* + * Direct Memory Access Controller + */ + +/* Transmit sizes and respective CHCR register values */ +enum { + XMIT_SZ_8BIT = 0, + XMIT_SZ_16BIT = 1, + XMIT_SZ_32BIT = 2, + XMIT_SZ_64BIT = 7, + XMIT_SZ_128BIT = 3, + XMIT_SZ_256BIT = 4, + XMIT_SZ_512BIT = 5, +}; + +/* log2(size / 8) - used to calculate number of transfers */ +static const unsigned int dma_ts_shift[] = { + [XMIT_SZ_8BIT] = 0, + [XMIT_SZ_16BIT] = 1, + [XMIT_SZ_32BIT] = 2, + [XMIT_SZ_64BIT] = 3, + [XMIT_SZ_128BIT] = 4, + [XMIT_SZ_256BIT] = 5, + [XMIT_SZ_512BIT] = 6, +}; + +#define TS_LOW_BIT 0x3 /* --xx */ +#define TS_HI_BIT 0xc /* xx-- */ + +#define TS_LOW_SHIFT (3) +#define TS_HI_SHIFT (20 - 2) /* 2 bits for shifted low TS */ + +#define TS_INDEX2VAL(i) \ + ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\ + (((i) & TS_HI_BIT) << TS_HI_SHIFT)) + +#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz))) +#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz))) + + +/* + * USB High-Speed DMAC + */ +/* Transmit sizes and respective CHCR register values */ +enum { + USBTS_XMIT_SZ_8BYTE = 0, + USBTS_XMIT_SZ_16BYTE = 1, + USBTS_XMIT_SZ_32BYTE = 2, +}; + +/* log2(size / 8) - used to calculate number of transfers */ +static const unsigned int dma_usbts_shift[] = { + [USBTS_XMIT_SZ_8BYTE] = 3, + [USBTS_XMIT_SZ_16BYTE] = 4, + [USBTS_XMIT_SZ_32BYTE] = 5, +}; + +#define USBTS_LOW_BIT 0x3 /* --xx */ +#define USBTS_HI_BIT 0x0 /* ---- */ + +#define USBTS_LOW_SHIFT 6 +#define USBTS_HI_SHIFT 0 + +#define USBTS_INDEX2VAL(i) (((i) & 3) << 6) + +#endif /* DMA_REGISTER_H */ diff --git a/arch/arm/mach-shmobile/include/mach/gpio.h b/arch/arm/mach-shmobile/include/mach/gpio.h index de795b42232a..844507d937cb 100644 --- a/arch/arm/mach-shmobile/include/mach/gpio.h +++ b/arch/arm/mach-shmobile/include/mach/gpio.h @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/sh_pfc.h> +#include <linux/io.h> #ifdef CONFIG_GPIOLIB @@ -27,4 +28,35 @@ static inline int irq_to_gpio(unsigned int irq) #endif /* CONFIG_GPIOLIB */ +/* + * FIXME !! + * + * current gpio frame work doesn't have + * the method to control only pull up/down/free. + * this function should be replaced by correct gpio function + */ +static inline void __init gpio_direction_none(u32 addr) +{ + __raw_writeb(0x00, addr); +} + +static inline void __init gpio_request_pullup(u32 addr) +{ + u8 data = __raw_readb(addr); + + data &= 0x0F; + data |= 0xC0; + __raw_writeb(data, addr); +} + +static inline void __init gpio_request_pulldown(u32 addr) +{ + u8 data = __raw_readb(addr); + + data &= 0x0F; + data |= 0xA0; + + __raw_writeb(data, addr); +} + #endif /* __ASM_ARCH_GPIO_H */ diff --git a/arch/arm/mach-shmobile/include/mach/pm-rmobile.h b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h new file mode 100644 index 000000000000..5a402840fe28 --- /dev/null +++ b/arch/arm/mach-shmobile/include/mach/pm-rmobile.h @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2012 Renesas Solutions Corp. + * + * Kuninori Morimoto <morimoto.kuninori@renesas.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#ifndef PM_RMOBILE_H +#define PM_RMOBILE_H + +#include <linux/pm_domain.h> + +struct platform_device; + +struct rmobile_pm_domain { + struct generic_pm_domain genpd; + struct dev_power_governor *gov; + int (*suspend)(void); + void (*resume)(void); + unsigned int bit_shift; + bool no_debug; +}; + +static inline +struct rmobile_pm_domain *to_rmobile_pd(struct generic_pm_domain *d) +{ + return container_of(d, struct rmobile_pm_domain, genpd); +} + +#ifdef CONFIG_PM +extern void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd); +extern void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd, + struct platform_device *pdev); +extern void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd, + struct rmobile_pm_domain *rmobile_sd); +#else +#define rmobile_init_pm_domain(pd) do { } while (0) +#define rmobile_add_device_to_domain(pd, pdev) do { } while (0) +#define rmobile_pm_add_subdomain(pd, sd) do { } while (0) +#endif /* CONFIG_PM */ + +#endif /* PM_RMOBILE_H */ diff --git a/arch/arm/mach-shmobile/include/mach/r8a7740.h b/arch/arm/mach-shmobile/include/mach/r8a7740.h index 9d447abb969c..7143147780df 100644 --- a/arch/arm/mach-shmobile/include/mach/r8a7740.h +++ b/arch/arm/mach-shmobile/include/mach/r8a7740.h @@ -19,6 +19,8 @@ #ifndef __ASM_R8A7740_H__ #define __ASM_R8A7740_H__ +#include <mach/pm-rmobile.h> + /* * MD_CKx pin */ @@ -139,7 +141,7 @@ enum { GPIO_FN_DBGMD10, GPIO_FN_DBGMD11, GPIO_FN_DBGMD20, GPIO_FN_DBGMD21, - /* FSI */ + /* FSI-A */ GPIO_FN_FSIAISLD_PORT0, /* FSIAISLD Port 0/5 */ GPIO_FN_FSIAISLD_PORT5, GPIO_FN_FSIASPDIF_PORT9, /* FSIASPDIF Port 9/18 */ @@ -150,6 +152,9 @@ enum { GPIO_FN_FSIACK, GPIO_FN_FSIAILR, GPIO_FN_FSIAIBT, + /* FSI-B */ + GPIO_FN_FSIBCK, + /* FMSI */ GPIO_FN_FMSISLD_PORT1, /* FMSISLD Port 1/6 */ GPIO_FN_FMSISLD_PORT6, @@ -565,6 +570,10 @@ enum { GPIO_FN_RESETP_PULLUP, GPIO_FN_RESETP_PLAIN, + /* HDMI */ + GPIO_FN_HDMI_HPD, + GPIO_FN_HDMI_CEC, + /* SDENC */ GPIO_FN_SDENC_CPG, GPIO_FN_SDENC_DV_CLKI, @@ -581,4 +590,26 @@ enum { GPIO_FN_TRACEAUD_FROM_MEMC, }; +/* DMA slave IDs */ +enum { + SHDMA_SLAVE_INVALID, + SHDMA_SLAVE_SDHI0_RX, + SHDMA_SLAVE_SDHI0_TX, + SHDMA_SLAVE_SDHI1_RX, + SHDMA_SLAVE_SDHI1_TX, + SHDMA_SLAVE_SDHI2_RX, + SHDMA_SLAVE_SDHI2_TX, + SHDMA_SLAVE_FSIA_RX, + SHDMA_SLAVE_FSIA_TX, + SHDMA_SLAVE_FSIB_TX, + SHDMA_SLAVE_USBHS_TX, + SHDMA_SLAVE_USBHS_RX, +}; + +#ifdef CONFIG_PM +extern struct rmobile_pm_domain r8a7740_pd_a4s; +extern struct rmobile_pm_domain r8a7740_pd_a3sp; +extern struct rmobile_pm_domain r8a7740_pd_a4lc; +#endif /* CONFIG_PM */ + #endif /* __ASM_R8A7740_H__ */ diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h index 915d0093da08..b59048e6d8fd 100644 --- a/arch/arm/mach-shmobile/include/mach/sh7372.h +++ b/arch/arm/mach-shmobile/include/mach/sh7372.h @@ -13,6 +13,7 @@ #include <linux/sh_clk.h> #include <linux/pm_domain.h> +#include <mach/pm-rmobile.h> /* * Pin Function Controller: @@ -477,42 +478,16 @@ extern struct clk sh7372_fsibck_clk; extern struct clk sh7372_fsidiva_clk; extern struct clk sh7372_fsidivb_clk; -struct platform_device; - -struct sh7372_pm_domain { - struct generic_pm_domain genpd; - struct dev_power_governor *gov; - int (*suspend)(void); - void (*resume)(void); - unsigned int bit_shift; - bool no_debug; -}; - -static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d) -{ - return container_of(d, struct sh7372_pm_domain, genpd); -} - #ifdef CONFIG_PM -extern struct sh7372_pm_domain sh7372_a4lc; 
-extern struct sh7372_pm_domain sh7372_a4mp; -extern struct sh7372_pm_domain sh7372_d4; -extern struct sh7372_pm_domain sh7372_a4r; -extern struct sh7372_pm_domain sh7372_a3rv; -extern struct sh7372_pm_domain sh7372_a3ri; -extern struct sh7372_pm_domain sh7372_a4s; -extern struct sh7372_pm_domain sh7372_a3sp; -extern struct sh7372_pm_domain sh7372_a3sg; - -extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd); -extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, - struct platform_device *pdev); -extern void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd, - struct sh7372_pm_domain *sh7372_sd); -#else -#define sh7372_init_pm_domain(pd) do { } while(0) -#define sh7372_add_device_to_domain(pd, pdev) do { } while(0) -#define sh7372_pm_add_subdomain(pd, sd) do { } while(0) +extern struct rmobile_pm_domain sh7372_pd_a4lc; +extern struct rmobile_pm_domain sh7372_pd_a4mp; +extern struct rmobile_pm_domain sh7372_pd_d4; +extern struct rmobile_pm_domain sh7372_pd_a4r; +extern struct rmobile_pm_domain sh7372_pd_a3rv; +extern struct rmobile_pm_domain sh7372_pd_a3ri; +extern struct rmobile_pm_domain sh7372_pd_a4s; +extern struct rmobile_pm_domain sh7372_pd_a3sp; +extern struct rmobile_pm_domain sh7372_pd_a3sg; #endif /* CONFIG_PM */ extern void sh7372_intcs_suspend(void); diff --git a/arch/arm/mach-shmobile/include/mach/sh73a0.h b/arch/arm/mach-shmobile/include/mach/sh73a0.h index 398e2c10913b..fe950f25d793 100644 --- a/arch/arm/mach-shmobile/include/mach/sh73a0.h +++ b/arch/arm/mach-shmobile/include/mach/sh73a0.h @@ -516,6 +516,13 @@ enum { SHDMA_SLAVE_SDHI2_RX, SHDMA_SLAVE_MMCIF_TX, SHDMA_SLAVE_MMCIF_RX, + SHDMA_SLAVE_FSI2A_TX, + SHDMA_SLAVE_FSI2A_RX, + SHDMA_SLAVE_FSI2B_TX, + SHDMA_SLAVE_FSI2B_RX, + SHDMA_SLAVE_FSI2C_TX, + SHDMA_SLAVE_FSI2C_RX, + SHDMA_SLAVE_FSI2D_RX, }; /* diff --git a/arch/arm/mach-shmobile/intc-r8a7740.c b/arch/arm/mach-shmobile/intc-r8a7740.c index 09c42afcb22d..9a69a31918ba 100644 --- a/arch/arm/mach-shmobile/intc-r8a7740.c +++ b/arch/arm/mach-shmobile/intc-r8a7740.c @@ -71,10 +71,12 @@ enum { DMAC3_1_DEI0, DMAC3_1_DEI1, DMAC3_1_DEI2, DMAC3_1_DEI3, DMAC3_2_DEI4, DMAC3_2_DEI5, DMAC3_2_DADERR, SHWYSTAT_RT, SHWYSTAT_HS, SHWYSTAT_COM, + HDMI, USBH_INT, USBH_OHCI, USBH_EHCI, USBH_PME, USBH_BIND, RSPI_OVRF, RSPI_SPTEF, RSPI_SPRF, SPU2_0, SPU2_1, FSI, FMSI, + HDMI_SSS, HDMI_KEY, IPMMU, AP_ARM_CTIIRQ, AP_ARM_PMURQ, MFIS2, @@ -182,6 +184,7 @@ static struct intc_vect intca_vectors[] __initdata = { INTC_VECT(USBH_EHCI, 0x1580), INTC_VECT(USBH_PME, 0x15A0), INTC_VECT(USBH_BIND, 0x15C0), + INTC_VECT(HDMI, 0x1700), INTC_VECT(RSPI_OVRF, 0x1780), INTC_VECT(RSPI_SPTEF, 0x17A0), INTC_VECT(RSPI_SPRF, 0x17C0), @@ -189,6 +192,8 @@ static struct intc_vect intca_vectors[] __initdata = { INTC_VECT(SPU2_1, 0x1820), INTC_VECT(FSI, 0x1840), INTC_VECT(FMSI, 0x1860), + INTC_VECT(HDMI_SSS, 0x18A0), + INTC_VECT(HDMI_KEY, 0x18C0), INTC_VECT(IPMMU, 0x1920), INTC_VECT(AP_ARM_CTIIRQ, 0x1980), INTC_VECT(AP_ARM_PMURQ, 0x19A0), @@ -304,11 +309,11 @@ static struct intc_mask_reg intca_mask_registers[] __initdata = { USBH_EHCI, USBH_PME, USBH_BIND, 0 } }, /* IMR3A3 / IMCR3A3 */ { /* IMR4A3 / IMCR4A3 */ 0xe6950090, 0xe69500d0, 8, - { 0, 0, 0, 0, + { HDMI, 0, 0, 0, RSPI_OVRF, RSPI_SPTEF, RSPI_SPRF, 0 } }, { /* IMR5A3 / IMCR5A3 */ 0xe6950094, 0xe69500d4, 8, { SPU2_0, SPU2_1, FSI, FMSI, - 0, 0, 0, 0 } }, + 0, HDMI_SSS, HDMI_KEY, 0 } }, { /* IMR6A3 / IMCR6A3 */ 0xe6950098, 0xe69500d8, 8, { 0, IPMMU, 0, 0, AP_ARM_CTIIRQ, AP_ARM_PMURQ, 0, 0 } }, @@ -353,10 +358,10 @@ static 
struct intc_prio_reg intca_prio_registers[] __initdata = { { 0xe6950014, 0, 16, 4, /* IPRFA3 */ { USBH2, 0, 0, 0 } }, /* IPRGA3 */ /* IPRHA3 */ - /* IPRIA3 */ + { 0xe6950020, 0, 16, 4, /* IPRIA3 */ { HDMI, 0, 0, 0 } }, { 0xe6950024, 0, 16, 4, /* IPRJA3 */ { RSPI, 0, 0, 0 } }, { 0xe6950028, 0, 16, 4, /* IPRKA3 */ { SPU2, 0, FSI, FMSI } }, - /* IPRLA3 */ + { 0xe695002c, 0, 16, 4, /* IPRLA3 */ { 0, HDMI_SSS, HDMI_KEY, 0 } }, { 0xe6950030, 0, 16, 4, /* IPRMA3 */ { IPMMU, 0, 0, 0 } }, { 0xe6950034, 0, 16, 4, /* IPRNA3 */ { AP_ARM2, 0, 0, 0 } }, { 0xe6950038, 0, 16, 4, /* IPROA3 */ { MFIS2, CPORTR2S, diff --git a/arch/arm/mach-shmobile/pfc-r8a7740.c b/arch/arm/mach-shmobile/pfc-r8a7740.c index 670fe1869dbc..ce9e7fa5cc8a 100644 --- a/arch/arm/mach-shmobile/pfc-r8a7740.c +++ b/arch/arm/mach-shmobile/pfc-r8a7740.c @@ -169,7 +169,7 @@ enum { DBGMD10_MARK, DBGMD11_MARK, DBGMD20_MARK, DBGMD21_MARK, - /* FSI */ + /* FSI-A */ FSIAISLD_PORT0_MARK, /* FSIAISLD Port 0/5 */ FSIAISLD_PORT5_MARK, FSIASPDIF_PORT9_MARK, /* FSIASPDIF Port 9/18 */ @@ -178,6 +178,9 @@ enum { FSIAOBT_MARK, FSIAOSLD_MARK, FSIAOMC_MARK, FSIACK_MARK, FSIAILR_MARK, FSIAIBT_MARK, + /* FSI-B */ + FSIBCK_MARK, + /* FMSI */ FMSISLD_PORT1_MARK, /* FMSISLD Port 1/6 */ FMSISLD_PORT6_MARK, @@ -560,6 +563,9 @@ enum { /* SDENC */ SDENC_CPG_MARK, SDENC_DV_CLKI_MARK, + /* HDMI */ + HDMI_HPD_MARK, HDMI_CEC_MARK, + /* DEBUG */ EDEBGREQ_PULLUP_MARK, /* for JTAG */ EDEBGREQ_PULLDOWN_MARK, @@ -771,6 +777,7 @@ static pinmux_enum_t pinmux_data[] = { /* Port11 */ PINMUX_DATA(FSIACK_MARK, PORT11_FN1), + PINMUX_DATA(FSIBCK_MARK, PORT11_FN2), PINMUX_DATA(IRQ2_PORT11_MARK, PORT11_FN0, MSEL1CR_2_0), /* Port12 */ @@ -1254,7 +1261,7 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(A21_MARK, PORT120_FN1), PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT120_FN2), PINMUX_DATA(MSIOF1_TSYNC_PORT120_MARK, PORT120_FN3, MSEL4CR_10_0), - PINMUX_DATA(IRQ7_PORT120_MARK, PORT120_FN0, MSEL1CR_7_0), + PINMUX_DATA(IRQ7_PORT120_MARK, PORT120_FN0, MSEL1CR_7_1), /* Port121 */ PINMUX_DATA(A20_MARK, PORT121_FN1), @@ -1616,13 +1623,15 @@ static pinmux_enum_t pinmux_data[] = { /* Port209 */ PINMUX_DATA(VBUS_MARK, PORT209_FN1), - PINMUX_DATA(IRQ7_PORT209_MARK, PORT209_FN0, MSEL1CR_7_1), + PINMUX_DATA(IRQ7_PORT209_MARK, PORT209_FN0, MSEL1CR_7_0), /* Port210 */ PINMUX_DATA(IRQ9_PORT210_MARK, PORT210_FN0, MSEL1CR_9_1), + PINMUX_DATA(HDMI_HPD_MARK, PORT210_FN1), /* Port211 */ PINMUX_DATA(IRQ16_PORT211_MARK, PORT211_FN0, MSEL1CR_16_1), + PINMUX_DATA(HDMI_CEC_MARK, PORT211_FN1), /* LCDC select */ PINMUX_DATA(LCDC0_SELECT_MARK, MSEL3CR_6_0), @@ -1691,7 +1700,7 @@ static struct pinmux_gpio pinmux_gpios[] = { GPIO_FN(DBGMD10), GPIO_FN(DBGMD11), GPIO_FN(DBGMD20), GPIO_FN(DBGMD21), - /* FSI */ + /* FSI-A */ GPIO_FN(FSIAISLD_PORT0), /* FSIAISLD Port 0/5 */ GPIO_FN(FSIAISLD_PORT5), GPIO_FN(FSIASPDIF_PORT9), /* FSIASPDIF Port 9/18 */ @@ -1700,6 +1709,9 @@ static struct pinmux_gpio pinmux_gpios[] = { GPIO_FN(FSIAOBT), GPIO_FN(FSIAOSLD), GPIO_FN(FSIAOMC), GPIO_FN(FSIACK), GPIO_FN(FSIAILR), GPIO_FN(FSIAIBT), + /* FSI-B */ + GPIO_FN(FSIBCK), + /* FMSI */ GPIO_FN(FMSISLD_PORT1), /* FMSISLD Port 1/6 */ GPIO_FN(FMSISLD_PORT6), @@ -2097,6 +2109,10 @@ static struct pinmux_gpio pinmux_gpios[] = { GPIO_FN(SDENC_CPG), GPIO_FN(SDENC_DV_CLKI), + /* HDMI */ + GPIO_FN(HDMI_HPD), + GPIO_FN(HDMI_CEC), + /* SYSC */ GPIO_FN(RESETP_PULLUP), GPIO_FN(RESETP_PLAIN), diff --git a/arch/arm/mach-shmobile/pm-r8a7740.c b/arch/arm/mach-shmobile/pm-r8a7740.c new file mode 100644 index 000000000000..893504d012a6 --- /dev/null +++ 
b/arch/arm/mach-shmobile/pm-r8a7740.c @@ -0,0 +1,54 @@ +/* + * r8a7740 power management support + * + * Copyright (C) 2012 Renesas Solutions Corp. + * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include <linux/console.h> +#include <mach/pm-rmobile.h> + +#ifdef CONFIG_PM +static int r8a7740_pd_a4s_suspend(void) +{ + /* + * The A4S domain contains the CPU core and therefore it should + * only be turned off if the CPU is not in use. + */ + return -EBUSY; +} + +struct rmobile_pm_domain r8a7740_pd_a4s = { + .genpd.name = "A4S", + .bit_shift = 10, + .gov = &pm_domain_always_on_gov, + .no_debug = true, + .suspend = r8a7740_pd_a4s_suspend, +}; + +static int r8a7740_pd_a3sp_suspend(void) +{ + /* + * Serial consoles make use of SCIF hardware located in A3SP, + * keep such power domain on if "no_console_suspend" is set. + */ + return console_suspend_enabled ? 0 : -EBUSY; +} + +struct rmobile_pm_domain r8a7740_pd_a3sp = { + .genpd.name = "A3SP", + .bit_shift = 11, + .gov = &pm_domain_always_on_gov, + .no_debug = true, + .suspend = r8a7740_pd_a3sp_suspend, +}; + +struct rmobile_pm_domain r8a7740_pd_a4lc = { + .genpd.name = "A4LC", + .bit_shift = 1, +}; + +#endif /* CONFIG_PM */ diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c new file mode 100644 index 000000000000..a8562540f1d6 --- /dev/null +++ b/arch/arm/mach-shmobile/pm-rmobile.c @@ -0,0 +1,167 @@ +/* + * rmobile power management support + * + * Copyright (C) 2012 Renesas Solutions Corp. + * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + * + * based on pm-sh7372.c + * Copyright (C) 2011 Magnus Damm + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ */ +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_clock.h> +#include <asm/io.h> +#include <mach/pm-rmobile.h> + +/* SYSC */ +#define SPDCR 0xe6180008 +#define SWUCR 0xe6180014 +#define PSTR 0xe6180080 + +#define PSTR_RETRIES 100 +#define PSTR_DELAY_US 10 + +#ifdef CONFIG_PM +static int rmobile_pd_power_down(struct generic_pm_domain *genpd) +{ + struct rmobile_pm_domain *rmobile_pd = to_rmobile_pd(genpd); + unsigned int mask = 1 << rmobile_pd->bit_shift; + + if (rmobile_pd->suspend) { + int ret = rmobile_pd->suspend(); + + if (ret) + return ret; + } + + if (__raw_readl(PSTR) & mask) { + unsigned int retry_count; + __raw_writel(mask, SPDCR); + + for (retry_count = PSTR_RETRIES; retry_count; retry_count--) { + if (!(__raw_readl(SPDCR) & mask)) + break; + cpu_relax(); + } + } + + if (!rmobile_pd->no_debug) + pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n", + genpd->name, mask, __raw_readl(PSTR)); + + return 0; +} + +static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd, + bool do_resume) +{ + unsigned int mask = 1 << rmobile_pd->bit_shift; + unsigned int retry_count; + int ret = 0; + + if (__raw_readl(PSTR) & mask) + goto out; + + __raw_writel(mask, SWUCR); + + for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) { + if (!(__raw_readl(SWUCR) & mask)) + break; + if (retry_count > PSTR_RETRIES) + udelay(PSTR_DELAY_US); + else + cpu_relax(); + } + if (!retry_count) + ret = -EIO; + + if (!rmobile_pd->no_debug) + pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n", + rmobile_pd->genpd.name, mask, __raw_readl(PSTR)); + +out: + if (ret == 0 && rmobile_pd->resume && do_resume) + rmobile_pd->resume(); + + return ret; +} + +static int rmobile_pd_power_up(struct generic_pm_domain *genpd) +{ + return __rmobile_pd_power_up(to_rmobile_pd(genpd), true); +} + +static bool rmobile_pd_active_wakeup(struct device *dev) +{ + bool (*active_wakeup)(struct device *dev); + + active_wakeup = dev_gpd_data(dev)->ops.active_wakeup; + return active_wakeup ? active_wakeup(dev) : true; +} + +static int rmobile_pd_stop_dev(struct device *dev) +{ + int (*stop)(struct device *dev); + + stop = dev_gpd_data(dev)->ops.stop; + if (stop) { + int ret = stop(dev); + if (ret) + return ret; + } + return pm_clk_suspend(dev); +} + +static int rmobile_pd_start_dev(struct device *dev) +{ + int (*start)(struct device *dev); + int ret; + + ret = pm_clk_resume(dev); + if (ret) + return ret; + + start = dev_gpd_data(dev)->ops.start; + if (start) + ret = start(dev); + + return ret; +} + +void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd) +{ + struct generic_pm_domain *genpd = &rmobile_pd->genpd; + struct dev_power_governor *gov = rmobile_pd->gov; + + pm_genpd_init(genpd, gov ? 
: &simple_qos_governor, false); + genpd->dev_ops.stop = rmobile_pd_stop_dev; + genpd->dev_ops.start = rmobile_pd_start_dev; + genpd->dev_ops.active_wakeup = rmobile_pd_active_wakeup; + genpd->dev_irq_safe = true; + genpd->power_off = rmobile_pd_power_down; + genpd->power_on = rmobile_pd_power_up; + __rmobile_pd_power_up(rmobile_pd, false); +} + +void rmobile_add_device_to_domain(struct rmobile_pm_domain *rmobile_pd, + struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + pm_genpd_add_device(&rmobile_pd->genpd, dev); + if (pm_clk_no_clocks(dev)) + pm_clk_add(dev, NULL); +} + +void rmobile_pm_add_subdomain(struct rmobile_pm_domain *rmobile_pd, + struct rmobile_pm_domain *rmobile_sd) +{ + pm_genpd_add_subdomain(&rmobile_pd->genpd, &rmobile_sd->genpd); +} +#endif /* CONFIG_PM */ diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c index a3bdb12acde9..792037069226 100644 --- a/arch/arm/mach-shmobile/pm-sh7372.c +++ b/arch/arm/mach-shmobile/pm-sh7372.c @@ -26,6 +26,7 @@ #include <asm/suspend.h> #include <mach/common.h> #include <mach/sh7372.h> +#include <mach/pm-rmobile.h> /* DBG */ #define DBGREG1 0xe6100020 @@ -41,13 +42,10 @@ #define PLLC01STPCR 0xe61500c8 /* SYSC */ -#define SPDCR 0xe6180008 -#define SWUCR 0xe6180014 #define SBAR 0xe6180020 #define WUPRMSK 0xe6180028 #define WUPSMSK 0xe618002c #define WUPSMSK2 0xe6180048 -#define PSTR 0xe6180080 #define WUPSFAC 0xe6180098 #define IRQCR 0xe618022c #define IRQCR2 0xe6180238 @@ -71,188 +69,48 @@ /* AP-System Core */ #define APARMBAREA 0xe6f10020 -#define PSTR_RETRIES 100 -#define PSTR_DELAY_US 10 - #ifdef CONFIG_PM -static int pd_power_down(struct generic_pm_domain *genpd) -{ - struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd); - unsigned int mask = 1 << sh7372_pd->bit_shift; - - if (sh7372_pd->suspend) { - int ret = sh7372_pd->suspend(); - - if (ret) - return ret; - } - - if (__raw_readl(PSTR) & mask) { - unsigned int retry_count; - - __raw_writel(mask, SPDCR); - - for (retry_count = PSTR_RETRIES; retry_count; retry_count--) { - if (!(__raw_readl(SPDCR) & mask)) - break; - cpu_relax(); - } - } - - if (!sh7372_pd->no_debug) - pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n", - genpd->name, mask, __raw_readl(PSTR)); - - return 0; -} - -static int __pd_power_up(struct sh7372_pm_domain *sh7372_pd, bool do_resume) -{ - unsigned int mask = 1 << sh7372_pd->bit_shift; - unsigned int retry_count; - int ret = 0; - - if (__raw_readl(PSTR) & mask) - goto out; - - __raw_writel(mask, SWUCR); - - for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) { - if (!(__raw_readl(SWUCR) & mask)) - break; - if (retry_count > PSTR_RETRIES) - udelay(PSTR_DELAY_US); - else - cpu_relax(); - } - if (!retry_count) - ret = -EIO; - - if (!sh7372_pd->no_debug) - pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n", - sh7372_pd->genpd.name, mask, __raw_readl(PSTR)); - - out: - if (ret == 0 && sh7372_pd->resume && do_resume) - sh7372_pd->resume(); - - return ret; -} - -static int pd_power_up(struct generic_pm_domain *genpd) -{ - return __pd_power_up(to_sh7372_pd(genpd), true); -} - -static int sh7372_a4r_suspend(void) -{ - sh7372_intcs_suspend(); - __raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */ - return 0; -} - -static bool pd_active_wakeup(struct device *dev) -{ - bool (*active_wakeup)(struct device *dev); - - active_wakeup = dev_gpd_data(dev)->ops.active_wakeup; - return active_wakeup ? 
active_wakeup(dev) : true; -} - -static int sh7372_stop_dev(struct device *dev) -{ - int (*stop)(struct device *dev); - - stop = dev_gpd_data(dev)->ops.stop; - if (stop) { - int ret = stop(dev); - if (ret) - return ret; - } - return pm_clk_suspend(dev); -} - -static int sh7372_start_dev(struct device *dev) -{ - int (*start)(struct device *dev); - int ret; - - ret = pm_clk_resume(dev); - if (ret) - return ret; - - start = dev_gpd_data(dev)->ops.start; - if (start) - ret = start(dev); - - return ret; -} - -void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd) -{ - struct generic_pm_domain *genpd = &sh7372_pd->genpd; - struct dev_power_governor *gov = sh7372_pd->gov; - - pm_genpd_init(genpd, gov ? : &simple_qos_governor, false); - genpd->dev_ops.stop = sh7372_stop_dev; - genpd->dev_ops.start = sh7372_start_dev; - genpd->dev_ops.active_wakeup = pd_active_wakeup; - genpd->dev_irq_safe = true; - genpd->power_off = pd_power_down; - genpd->power_on = pd_power_up; - __pd_power_up(sh7372_pd, false); -} - -void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd, - struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - - pm_genpd_add_device(&sh7372_pd->genpd, dev); - if (pm_clk_no_clocks(dev)) - pm_clk_add(dev, NULL); -} - -void sh7372_pm_add_subdomain(struct sh7372_pm_domain *sh7372_pd, - struct sh7372_pm_domain *sh7372_sd) -{ - pm_genpd_add_subdomain(&sh7372_pd->genpd, &sh7372_sd->genpd); -} - -struct sh7372_pm_domain sh7372_a4lc = { +struct rmobile_pm_domain sh7372_pd_a4lc = { .genpd.name = "A4LC", .bit_shift = 1, }; -struct sh7372_pm_domain sh7372_a4mp = { +struct rmobile_pm_domain sh7372_pd_a4mp = { .genpd.name = "A4MP", .bit_shift = 2, }; -struct sh7372_pm_domain sh7372_d4 = { +struct rmobile_pm_domain sh7372_pd_d4 = { .genpd.name = "D4", .bit_shift = 3, }; -struct sh7372_pm_domain sh7372_a4r = { +static int sh7372_a4r_pd_suspend(void) +{ + sh7372_intcs_suspend(); + __raw_writel(0x300fffff, WUPRMSK); /* avoid wakeup */ + return 0; +} + +struct rmobile_pm_domain sh7372_pd_a4r = { .genpd.name = "A4R", .bit_shift = 5, - .suspend = sh7372_a4r_suspend, + .suspend = sh7372_a4r_pd_suspend, .resume = sh7372_intcs_resume, }; -struct sh7372_pm_domain sh7372_a3rv = { +struct rmobile_pm_domain sh7372_pd_a3rv = { .genpd.name = "A3RV", .bit_shift = 6, }; -struct sh7372_pm_domain sh7372_a3ri = { +struct rmobile_pm_domain sh7372_pd_a3ri = { .genpd.name = "A3RI", .bit_shift = 8, }; -static int sh7372_a4s_suspend(void) +static int sh7372_pd_a4s_suspend(void) { /* * The A4S domain contains the CPU core and therefore it should @@ -261,15 +119,15 @@ static int sh7372_a4s_suspend(void) return -EBUSY; } -struct sh7372_pm_domain sh7372_a4s = { +struct rmobile_pm_domain sh7372_pd_a4s = { .genpd.name = "A4S", .bit_shift = 10, .gov = &pm_domain_always_on_gov, .no_debug = true, - .suspend = sh7372_a4s_suspend, + .suspend = sh7372_pd_a4s_suspend, }; -static int sh7372_a3sp_suspend(void) +static int sh7372_a3sp_pd_suspend(void) { /* * Serial consoles make use of SCIF hardware located in A3SP, @@ -278,32 +136,22 @@ static int sh7372_a3sp_suspend(void) return console_suspend_enabled ? 
0 : -EBUSY; } -struct sh7372_pm_domain sh7372_a3sp = { +struct rmobile_pm_domain sh7372_pd_a3sp = { .genpd.name = "A3SP", .bit_shift = 11, .gov = &pm_domain_always_on_gov, .no_debug = true, - .suspend = sh7372_a3sp_suspend, + .suspend = sh7372_a3sp_pd_suspend, }; -struct sh7372_pm_domain sh7372_a3sg = { +struct rmobile_pm_domain sh7372_pd_a3sg = { .genpd.name = "A3SG", .bit_shift = 13, }; -#else /* !CONFIG_PM */ - -static inline void sh7372_a3sp_init(void) {} - -#endif /* !CONFIG_PM */ +#endif /* CONFIG_PM */ #if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE) -static int sh7372_do_idle_core_standby(unsigned long unused) -{ - cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */ - return 0; -} - static void sh7372_set_reset_vector(unsigned long address) { /* set reset vector, translate 4k */ @@ -311,21 +159,6 @@ static void sh7372_set_reset_vector(unsigned long address) __raw_writel(0, APARMBAREA); } -static void sh7372_enter_core_standby(void) -{ - sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc)); - - /* enter sleep mode with SYSTBCR to 0x10 */ - __raw_writel(0x10, SYSTBCR); - cpu_suspend(0, sh7372_do_idle_core_standby); - __raw_writel(0, SYSTBCR); - - /* disable reset vector translation */ - __raw_writel(0, SBAR); -} -#endif - -#ifdef CONFIG_SUSPEND static void sh7372_enter_sysc(int pllc0_on, unsigned long sleep_mode) { if (pllc0_on) @@ -465,22 +298,42 @@ static void sh7372_setup_sysc(unsigned long msk, unsigned long msk2) static void sh7372_enter_a3sm_common(int pllc0_on) { + /* use INTCA together with SYSC for wakeup */ + sh7372_setup_sysc(1 << 0, 0); sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc)); sh7372_enter_sysc(pllc0_on, 1 << 12); } +#endif /* CONFIG_SUSPEND || CONFIG_CPU_IDLE */ -static void sh7372_enter_a4s_common(int pllc0_on) +#ifdef CONFIG_CPU_IDLE +static int sh7372_do_idle_core_standby(unsigned long unused) { - sh7372_intca_suspend(); - memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100); - sh7372_set_reset_vector(SMFRAM); - sh7372_enter_sysc(pllc0_on, 1 << 10); - sh7372_intca_resume(); + cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */ + return 0; } -#endif +static void sh7372_enter_core_standby(void) +{ + sh7372_set_reset_vector(__pa(sh7372_resume_core_standby_sysc)); -#ifdef CONFIG_CPU_IDLE + /* enter sleep mode with SYSTBCR to 0x10 */ + __raw_writel(0x10, SYSTBCR); + cpu_suspend(0, sh7372_do_idle_core_standby); + __raw_writel(0, SYSTBCR); + + /* disable reset vector translation */ + __raw_writel(0, SBAR); +} + +static void sh7372_enter_a3sm_pll_on(void) +{ + sh7372_enter_a3sm_common(1); +} + +static void sh7372_enter_a3sm_pll_off(void) +{ + sh7372_enter_a3sm_common(0); +} static void sh7372_cpuidle_setup(struct cpuidle_driver *drv) { @@ -492,7 +345,24 @@ static void sh7372_cpuidle_setup(struct cpuidle_driver *drv) state->target_residency = 20 + 10; state->flags = CPUIDLE_FLAG_TIME_VALID; shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_core_standby; + drv->state_count++; + + state = &drv->states[drv->state_count]; + snprintf(state->name, CPUIDLE_NAME_LEN, "C3"); + strncpy(state->desc, "A3SM PLL ON", CPUIDLE_DESC_LEN); + state->exit_latency = 20; + state->target_residency = 30 + 20; + state->flags = CPUIDLE_FLAG_TIME_VALID; + shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_on; + drv->state_count++; + state = &drv->states[drv->state_count]; + snprintf(state->name, CPUIDLE_NAME_LEN, "C4"); + strncpy(state->desc, "A3SM PLL OFF", CPUIDLE_DESC_LEN); + state->exit_latency = 120; + 
state->target_residency = 30 + 120; + state->flags = CPUIDLE_FLAG_TIME_VALID; + shmobile_cpuidle_modes[drv->state_count] = sh7372_enter_a3sm_pll_off; drv->state_count++; } @@ -505,6 +375,14 @@ static void sh7372_cpuidle_init(void) {} #endif #ifdef CONFIG_SUSPEND +static void sh7372_enter_a4s_common(int pllc0_on) +{ + sh7372_intca_suspend(); + memcpy((void *)SMFRAM, sh7372_resume_core_standby_sysc, 0x100); + sh7372_set_reset_vector(SMFRAM); + sh7372_enter_sysc(pllc0_on, 1 << 10); + sh7372_intca_resume(); +} static int sh7372_enter_suspend(suspend_state_t suspend_state) { @@ -512,24 +390,21 @@ static int sh7372_enter_suspend(suspend_state_t suspend_state) /* check active clocks to determine potential wakeup sources */ if (sh7372_sysc_valid(&msk, &msk2)) { - /* convert INTC mask and sense to SYSC mask and sense */ - sh7372_setup_sysc(msk, msk2); - if (!console_suspend_enabled && - sh7372_a4s.genpd.status == GPD_STATE_POWER_OFF) { + sh7372_pd_a4s.genpd.status == GPD_STATE_POWER_OFF) { + /* convert INTC mask/sense to SYSC mask/sense */ + sh7372_setup_sysc(msk, msk2); + /* enter A4S sleep with PLLC0 off */ pr_debug("entering A4S\n"); sh7372_enter_a4s_common(0); - } else { - /* enter A3SM sleep with PLLC0 off */ - pr_debug("entering A3SM\n"); - sh7372_enter_a3sm_common(0); + return 0; } - } else { - /* default to Core Standby that supports all wakeup sources */ - pr_debug("entering Core Standby\n"); - sh7372_enter_core_standby(); } + + /* default to enter A3SM sleep with PLLC0 off */ + pr_debug("entering A3SM\n"); + sh7372_enter_a3sm_common(0); return 0; } @@ -550,7 +425,7 @@ static int sh7372_pm_notifier_fn(struct notifier_block *notifier, * executed during system suspend and resume, respectively, so * that those functions don't crash while accessing the INTCS. 
*/ - pm_genpd_poweron(&sh7372_a4r.genpd); + pm_genpd_poweron(&sh7372_pd_a4r.genpd); break; case PM_POST_SUSPEND: pm_genpd_poweroff_unused(); diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c index ec4eb49c1693..78948a9dba0e 100644 --- a/arch/arm/mach-shmobile/setup-r8a7740.c +++ b/arch/arm/mach-shmobile/setup-r8a7740.c @@ -23,9 +23,14 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/platform_device.h> +#include <linux/of_platform.h> #include <linux/serial_sci.h> +#include <linux/sh_dma.h> #include <linux/sh_timer.h> +#include <linux/dma-mapping.h> +#include <mach/dma-register.h> #include <mach/r8a7740.h> +#include <mach/pm-rmobile.h> #include <mach/common.h> #include <mach/irqs.h> #include <asm/mach-types.h> @@ -276,6 +281,272 @@ static struct platform_device *r8a7740_early_devices[] __initdata = { &cmt10_device, }; +/* DMA */ +static const struct sh_dmae_slave_config r8a7740_dmae_slaves[] = { + { + .slave_id = SHDMA_SLAVE_SDHI0_TX, + .addr = 0xe6850030, + .chcr = CHCR_TX(XMIT_SZ_16BIT), + .mid_rid = 0xc1, + }, { + .slave_id = SHDMA_SLAVE_SDHI0_RX, + .addr = 0xe6850030, + .chcr = CHCR_RX(XMIT_SZ_16BIT), + .mid_rid = 0xc2, + }, { + .slave_id = SHDMA_SLAVE_SDHI1_TX, + .addr = 0xe6860030, + .chcr = CHCR_TX(XMIT_SZ_16BIT), + .mid_rid = 0xc9, + }, { + .slave_id = SHDMA_SLAVE_SDHI1_RX, + .addr = 0xe6860030, + .chcr = CHCR_RX(XMIT_SZ_16BIT), + .mid_rid = 0xca, + }, { + .slave_id = SHDMA_SLAVE_SDHI2_TX, + .addr = 0xe6870030, + .chcr = CHCR_TX(XMIT_SZ_16BIT), + .mid_rid = 0xcd, + }, { + .slave_id = SHDMA_SLAVE_SDHI2_RX, + .addr = 0xe6870030, + .chcr = CHCR_RX(XMIT_SZ_16BIT), + .mid_rid = 0xce, + }, { + .slave_id = SHDMA_SLAVE_FSIA_TX, + .addr = 0xfe1f0024, + .chcr = CHCR_TX(XMIT_SZ_32BIT), + .mid_rid = 0xb1, + }, { + .slave_id = SHDMA_SLAVE_FSIA_RX, + .addr = 0xfe1f0020, + .chcr = CHCR_RX(XMIT_SZ_32BIT), + .mid_rid = 0xb2, + }, { + .slave_id = SHDMA_SLAVE_FSIB_TX, + .addr = 0xfe1f0064, + .chcr = CHCR_TX(XMIT_SZ_32BIT), + .mid_rid = 0xb5, + }, +}; + +#define DMA_CHANNEL(a, b, c) \ +{ \ + .offset = a, \ + .dmars = b, \ + .dmars_bit = c, \ + .chclr_offset = (0x220 - 0x20) + a \ +} + +static const struct sh_dmae_channel r8a7740_dmae_channels[] = { + DMA_CHANNEL(0x00, 0, 0), + DMA_CHANNEL(0x10, 0, 8), + DMA_CHANNEL(0x20, 4, 0), + DMA_CHANNEL(0x30, 4, 8), + DMA_CHANNEL(0x50, 8, 0), + DMA_CHANNEL(0x60, 8, 8), +}; + +static struct sh_dmae_pdata dma_platform_data = { + .slave = r8a7740_dmae_slaves, + .slave_num = ARRAY_SIZE(r8a7740_dmae_slaves), + .channel = r8a7740_dmae_channels, + .channel_num = ARRAY_SIZE(r8a7740_dmae_channels), + .ts_low_shift = TS_LOW_SHIFT, + .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT, + .ts_high_shift = TS_HI_SHIFT, + .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT, + .ts_shift = dma_ts_shift, + .ts_shift_num = ARRAY_SIZE(dma_ts_shift), + .dmaor_init = DMAOR_DME, + .chclr_present = 1, +}; + +/* Resource order important! */ +static struct resource r8a7740_dmae0_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xfe008020, + .end = 0xfe00828f, + .flags = IORESOURCE_MEM, + }, + { + /* DMARSx */ + .start = 0xfe009000, + .end = 0xfe00900b, + .flags = IORESOURCE_MEM, + }, + { + .name = "error_irq", + .start = evt2irq(0x20c0), + .end = evt2irq(0x20c0), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 0-5 */ + .start = evt2irq(0x2000), + .end = evt2irq(0x20a0), + .flags = IORESOURCE_IRQ, + }, +}; + +/* Resource order important! 
*/ +static struct resource r8a7740_dmae1_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xfe018020, + .end = 0xfe01828f, + .flags = IORESOURCE_MEM, + }, + { + /* DMARSx */ + .start = 0xfe019000, + .end = 0xfe01900b, + .flags = IORESOURCE_MEM, + }, + { + .name = "error_irq", + .start = evt2irq(0x21c0), + .end = evt2irq(0x21c0), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 0-5 */ + .start = evt2irq(0x2100), + .end = evt2irq(0x21a0), + .flags = IORESOURCE_IRQ, + }, +}; + +/* Resource order important! */ +static struct resource r8a7740_dmae2_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xfe028020, + .end = 0xfe02828f, + .flags = IORESOURCE_MEM, + }, + { + /* DMARSx */ + .start = 0xfe029000, + .end = 0xfe02900b, + .flags = IORESOURCE_MEM, + }, + { + .name = "error_irq", + .start = evt2irq(0x22c0), + .end = evt2irq(0x22c0), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 0-5 */ + .start = evt2irq(0x2200), + .end = evt2irq(0x22a0), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device dma0_device = { + .name = "sh-dma-engine", + .id = 0, + .resource = r8a7740_dmae0_resources, + .num_resources = ARRAY_SIZE(r8a7740_dmae0_resources), + .dev = { + .platform_data = &dma_platform_data, + }, +}; + +static struct platform_device dma1_device = { + .name = "sh-dma-engine", + .id = 1, + .resource = r8a7740_dmae1_resources, + .num_resources = ARRAY_SIZE(r8a7740_dmae1_resources), + .dev = { + .platform_data = &dma_platform_data, + }, +}; + +static struct platform_device dma2_device = { + .name = "sh-dma-engine", + .id = 2, + .resource = r8a7740_dmae2_resources, + .num_resources = ARRAY_SIZE(r8a7740_dmae2_resources), + .dev = { + .platform_data = &dma_platform_data, + }, +}; + +/* USB-DMAC */ +static const struct sh_dmae_channel r8a7740_usb_dma_channels[] = { + { + .offset = 0, + }, { + .offset = 0x20, + }, +}; + +static const struct sh_dmae_slave_config r8a7740_usb_dma_slaves[] = { + { + .slave_id = SHDMA_SLAVE_USBHS_TX, + .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE), + }, { + .slave_id = SHDMA_SLAVE_USBHS_RX, + .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE), + }, +}; + +static struct sh_dmae_pdata usb_dma_platform_data = { + .slave = r8a7740_usb_dma_slaves, + .slave_num = ARRAY_SIZE(r8a7740_usb_dma_slaves), + .channel = r8a7740_usb_dma_channels, + .channel_num = ARRAY_SIZE(r8a7740_usb_dma_channels), + .ts_low_shift = USBTS_LOW_SHIFT, + .ts_low_mask = USBTS_LOW_BIT << USBTS_LOW_SHIFT, + .ts_high_shift = USBTS_HI_SHIFT, + .ts_high_mask = USBTS_HI_BIT << USBTS_HI_SHIFT, + .ts_shift = dma_usbts_shift, + .ts_shift_num = ARRAY_SIZE(dma_usbts_shift), + .dmaor_init = DMAOR_DME, + .chcr_offset = 0x14, + .chcr_ie_bit = 1 << 5, + .dmaor_is_32bit = 1, + .needs_tend_set = 1, + .no_dmars = 1, + .slave_only = 1, +}; + +static struct resource r8a7740_usb_dma_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xe68a0020, + .end = 0xe68a0064 - 1, + .flags = IORESOURCE_MEM, + }, + { + /* VCR/SWR/DMICR */ + .start = 0xe68a0000, + .end = 0xe68a0014 - 1, + .flags = IORESOURCE_MEM, + }, + { + /* IRQ for channels */ + .start = evt2irq(0x0a00), + .end = evt2irq(0x0a00), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usb_dma_device = { + .name = "sh-dma-engine", + .id = 3, + .resource = r8a7740_usb_dma_resources, + .num_resources = ARRAY_SIZE(r8a7740_usb_dma_resources), + .dev = { + .platform_data = &usb_dma_platform_data, + }, +}; + /* I2C */ static struct resource i2c0_resources[] = { [0] = { @@ -322,8 +593,30 @@ 
static struct platform_device i2c1_device = { static struct platform_device *r8a7740_late_devices[] __initdata = { &i2c0_device, &i2c1_device, + &dma0_device, + &dma1_device, + &dma2_device, + &usb_dma_device, }; +/* + * The r8a7740 chip has a long-standing erratum in the MERAM buffer. + * This is a workaround for it. + * See + * "Media RAM (MERAM)" in the r8a7740 documentation. + */ +#define MEBUFCNTR 0xFE950098 +void r8a7740_meram_workaround(void) +{ + void __iomem *reg; + + reg = ioremap_nocache(MEBUFCNTR, 4); + if (reg) { + iowrite32(0x01600164, reg); + iounmap(reg); + } +} + #define ICCR 0x0004 #define ICSTART 0x0070 @@ -380,10 +673,31 @@ void __init r8a7740_add_standard_devices(void) r8a7740_i2c_workaround(&i2c0_device); r8a7740_i2c_workaround(&i2c1_device); + /* PM domain */ + rmobile_init_pm_domain(&r8a7740_pd_a4s); + rmobile_init_pm_domain(&r8a7740_pd_a3sp); + rmobile_init_pm_domain(&r8a7740_pd_a4lc); + + rmobile_pm_add_subdomain(&r8a7740_pd_a4s, &r8a7740_pd_a3sp); + + /* add devices */ platform_add_devices(r8a7740_early_devices, ARRAY_SIZE(r8a7740_early_devices)); platform_add_devices(r8a7740_late_devices, ARRAY_SIZE(r8a7740_late_devices)); + + /* add devices to PM domain */ + + rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif0_device); + rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif1_device); + rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif2_device); + rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif3_device); + rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif4_device); + rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif5_device); + rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif6_device); + rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scif7_device); + rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &scifb_device); + rmobile_add_device_to_domain(&r8a7740_pd_a3sp, &i2c1_device); } static void __init r8a7740_earlytimer_init(void) @@ -403,3 +717,49 @@ void __init r8a7740_add_early_devices(void) /* override timer setup with soc-specific code */ shmobile_timer.init = r8a7740_earlytimer_init; } + +#ifdef CONFIG_USE_OF + +void __init r8a7740_add_early_devices_dt(void) +{ + shmobile_setup_delay(800, 1, 3); /* Cortex-A9 @ 800MHz */ + + early_platform_add_devices(r8a7740_early_devices, + ARRAY_SIZE(r8a7740_early_devices)); + + /* setup early console here as well */ + shmobile_setup_console(); +} + +static const struct of_dev_auxdata r8a7740_auxdata_lookup[] __initconst = { + { } +}; + +void __init r8a7740_add_standard_devices_dt(void) +{ + /* clocks are setup late during boot in the case of DT */ + r8a7740_clock_init(0); + + platform_add_devices(r8a7740_early_devices, + ARRAY_SIZE(r8a7740_early_devices)); + + of_platform_populate(NULL, of_default_bus_match_table, + r8a7740_auxdata_lookup, NULL); +} + +static const char *r8a7740_boards_compat_dt[] __initdata = { + "renesas,r8a7740", + NULL, +}; + +DT_MACHINE_START(R8A7740_DT, "Generic R8A7740 (Flattened Device Tree)") + .map_io = r8a7740_map_io, + .init_early = r8a7740_add_early_devices_dt, + .init_irq = r8a7740_init_irq, + .handle_irq = shmobile_handle_irq_intc, + .init_machine = r8a7740_add_standard_devices_dt, + .timer = &shmobile_timer, + .dt_compat = r8a7740_boards_compat_dt, +MACHINE_END + +#endif /* CONFIG_USE_OF */ diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c index fafce9ce8218..838a87be1d5c 100644 --- a/arch/arm/mach-shmobile/setup-sh7372.c +++ b/arch/arm/mach-shmobile/setup-sh7372.c @@ -33,6 +33,7 @@ #include <linux/sh_timer.h> #include <linux/pm_domain.h> #include 
<linux/dma-mapping.h> +#include <mach/dma-register.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <mach/sh7372.h> @@ -335,151 +336,126 @@ static struct platform_device iic1_device = { }; /* DMA */ -/* Transmit sizes and respective CHCR register values */ -enum { - XMIT_SZ_8BIT = 0, - XMIT_SZ_16BIT = 1, - XMIT_SZ_32BIT = 2, - XMIT_SZ_64BIT = 7, - XMIT_SZ_128BIT = 3, - XMIT_SZ_256BIT = 4, - XMIT_SZ_512BIT = 5, -}; - -/* log2(size / 8) - used to calculate number of transfers */ -#define TS_SHIFT { \ - [XMIT_SZ_8BIT] = 0, \ - [XMIT_SZ_16BIT] = 1, \ - [XMIT_SZ_32BIT] = 2, \ - [XMIT_SZ_64BIT] = 3, \ - [XMIT_SZ_128BIT] = 4, \ - [XMIT_SZ_256BIT] = 5, \ - [XMIT_SZ_512BIT] = 6, \ -} - -#define TS_INDEX2VAL(i) ((((i) & 3) << 3) | \ - (((i) & 0xc) << (20 - 2))) - static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = { { .slave_id = SHDMA_SLAVE_SCIF0_TX, .addr = 0xe6c40020, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x21, }, { .slave_id = SHDMA_SLAVE_SCIF0_RX, .addr = 0xe6c40024, - .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x22, }, { .slave_id = SHDMA_SLAVE_SCIF1_TX, .addr = 0xe6c50020, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x25, }, { .slave_id = SHDMA_SLAVE_SCIF1_RX, .addr = 0xe6c50024, - .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x26, }, { .slave_id = SHDMA_SLAVE_SCIF2_TX, .addr = 0xe6c60020, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x29, }, { .slave_id = SHDMA_SLAVE_SCIF2_RX, .addr = 0xe6c60024, - .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x2a, }, { .slave_id = SHDMA_SLAVE_SCIF3_TX, .addr = 0xe6c70020, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x2d, }, { .slave_id = SHDMA_SLAVE_SCIF3_RX, .addr = 0xe6c70024, - .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x2e, }, { .slave_id = SHDMA_SLAVE_SCIF4_TX, .addr = 0xe6c80020, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x39, }, { .slave_id = SHDMA_SLAVE_SCIF4_RX, .addr = 0xe6c80024, - .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x3a, }, { .slave_id = SHDMA_SLAVE_SCIF5_TX, .addr = 0xe6cb0020, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x35, }, { .slave_id = SHDMA_SLAVE_SCIF5_RX, .addr = 0xe6cb0024, - .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x36, }, { .slave_id = SHDMA_SLAVE_SCIF6_TX, .addr = 0xe6c30040, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x3d, }, { .slave_id = SHDMA_SLAVE_SCIF6_RX, .addr = 0xe6c30060, - .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), + .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x3e, }, { .slave_id = SHDMA_SLAVE_SDHI0_TX, .addr = 0xe6850030, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), + .chcr = CHCR_TX(XMIT_SZ_16BIT), .mid_rid = 0xc1, }, { .slave_id = SHDMA_SLAVE_SDHI0_RX, .addr = 0xe6850030, - .chcr = DM_INC | SM_FIX | 0x800 | 
TS_INDEX2VAL(XMIT_SZ_16BIT), + .chcr = CHCR_RX(XMIT_SZ_16BIT), .mid_rid = 0xc2, }, { .slave_id = SHDMA_SLAVE_SDHI1_TX, .addr = 0xe6860030, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), + .chcr = CHCR_TX(XMIT_SZ_16BIT), .mid_rid = 0xc9, }, { .slave_id = SHDMA_SLAVE_SDHI1_RX, .addr = 0xe6860030, - .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), + .chcr = CHCR_RX(XMIT_SZ_16BIT), .mid_rid = 0xca, }, { .slave_id = SHDMA_SLAVE_SDHI2_TX, .addr = 0xe6870030, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), + .chcr = CHCR_TX(XMIT_SZ_16BIT), .mid_rid = 0xcd, }, { .slave_id = SHDMA_SLAVE_SDHI2_RX, .addr = 0xe6870030, - .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT), + .chcr = CHCR_RX(XMIT_SZ_16BIT), .mid_rid = 0xce, }, { .slave_id = SHDMA_SLAVE_FSIA_TX, .addr = 0xfe1f0024, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .chcr = CHCR_TX(XMIT_SZ_32BIT), .mid_rid = 0xb1, }, { .slave_id = SHDMA_SLAVE_FSIA_RX, .addr = 0xfe1f0020, - .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .chcr = CHCR_RX(XMIT_SZ_32BIT), .mid_rid = 0xb2, }, { .slave_id = SHDMA_SLAVE_MMCIF_TX, .addr = 0xe6bd0034, - .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .chcr = CHCR_TX(XMIT_SZ_32BIT), .mid_rid = 0xd1, }, { .slave_id = SHDMA_SLAVE_MMCIF_RX, .addr = 0xe6bd0034, - .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), + .chcr = CHCR_RX(XMIT_SZ_32BIT), .mid_rid = 0xd2, }, }; @@ -520,19 +496,17 @@ static const struct sh_dmae_channel sh7372_dmae_channels[] = { } }; -static const unsigned int ts_shift[] = TS_SHIFT; - static struct sh_dmae_pdata dma_platform_data = { .slave = sh7372_dmae_slaves, .slave_num = ARRAY_SIZE(sh7372_dmae_slaves), .channel = sh7372_dmae_channels, .channel_num = ARRAY_SIZE(sh7372_dmae_channels), - .ts_low_shift = 3, - .ts_low_mask = 0x18, - .ts_high_shift = (20 - 2), /* 2 bits for shifted low TS */ - .ts_high_mask = 0x00300000, - .ts_shift = ts_shift, - .ts_shift_num = ARRAY_SIZE(ts_shift), + .ts_low_shift = TS_LOW_SHIFT, + .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT, + .ts_high_shift = TS_HI_SHIFT, + .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT, + .ts_shift = dma_ts_shift, + .ts_shift_num = ARRAY_SIZE(dma_ts_shift), .dmaor_init = DMAOR_DME, .chclr_present = 1, }; @@ -654,17 +628,6 @@ static struct platform_device dma2_device = { /* * USB-DMAC */ - -unsigned int usbts_shift[] = {3, 4, 5}; - -enum { - XMIT_SZ_8BYTE = 0, - XMIT_SZ_16BYTE = 1, - XMIT_SZ_32BYTE = 2, -}; - -#define USBTS_INDEX2VAL(i) (((i) & 3) << 6) - static const struct sh_dmae_channel sh7372_usb_dmae_channels[] = { { .offset = 0, @@ -677,10 +640,10 @@ static const struct sh_dmae_channel sh7372_usb_dmae_channels[] = { static const struct sh_dmae_slave_config sh7372_usb_dmae0_slaves[] = { { .slave_id = SHDMA_SLAVE_USB0_TX, - .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE), + .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE), }, { .slave_id = SHDMA_SLAVE_USB0_RX, - .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE), + .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE), }, }; @@ -689,12 +652,12 @@ static struct sh_dmae_pdata usb_dma0_platform_data = { .slave_num = ARRAY_SIZE(sh7372_usb_dmae0_slaves), .channel = sh7372_usb_dmae_channels, .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels), - .ts_low_shift = 6, - .ts_low_mask = 0xc0, - .ts_high_shift = 0, - .ts_high_mask = 0, - .ts_shift = usbts_shift, - .ts_shift_num = ARRAY_SIZE(usbts_shift), + .ts_low_shift = USBTS_LOW_SHIFT, + .ts_low_mask = USBTS_LOW_BIT << USBTS_LOW_SHIFT, + .ts_high_shift = USBTS_HI_SHIFT, 
+ .ts_high_mask = USBTS_HI_BIT << USBTS_HI_SHIFT, + .ts_shift = dma_usbts_shift, + .ts_shift_num = ARRAY_SIZE(dma_usbts_shift), .dmaor_init = DMAOR_DME, .chcr_offset = 0x14, .chcr_ie_bit = 1 << 5, @@ -739,10 +702,10 @@ static struct platform_device usb_dma0_device = { static const struct sh_dmae_slave_config sh7372_usb_dmae1_slaves[] = { { .slave_id = SHDMA_SLAVE_USB1_TX, - .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE), + .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE), }, { .slave_id = SHDMA_SLAVE_USB1_RX, - .chcr = USBTS_INDEX2VAL(XMIT_SZ_8BYTE), + .chcr = USBTS_INDEX2VAL(USBTS_XMIT_SZ_8BYTE), }, }; @@ -751,12 +714,12 @@ static struct sh_dmae_pdata usb_dma1_platform_data = { .slave_num = ARRAY_SIZE(sh7372_usb_dmae1_slaves), .channel = sh7372_usb_dmae_channels, .channel_num = ARRAY_SIZE(sh7372_usb_dmae_channels), - .ts_low_shift = 6, - .ts_low_mask = 0xc0, - .ts_high_shift = 0, - .ts_high_mask = 0, - .ts_shift = usbts_shift, - .ts_shift_num = ARRAY_SIZE(usbts_shift), + .ts_low_shift = USBTS_LOW_SHIFT, + .ts_low_mask = USBTS_LOW_BIT << USBTS_LOW_SHIFT, + .ts_high_shift = USBTS_HI_SHIFT, + .ts_high_mask = USBTS_HI_BIT << USBTS_HI_SHIFT, + .ts_shift = dma_usbts_shift, + .ts_shift_num = ARRAY_SIZE(dma_usbts_shift), .dmaor_init = DMAOR_DME, .chcr_offset = 0x14, .chcr_ie_bit = 1 << 5, @@ -1038,21 +1001,21 @@ static struct platform_device *sh7372_late_devices[] __initdata = { void __init sh7372_add_standard_devices(void) { - sh7372_init_pm_domain(&sh7372_a4lc); - sh7372_init_pm_domain(&sh7372_a4mp); - sh7372_init_pm_domain(&sh7372_d4); - sh7372_init_pm_domain(&sh7372_a4r); - sh7372_init_pm_domain(&sh7372_a3rv); - sh7372_init_pm_domain(&sh7372_a3ri); - sh7372_init_pm_domain(&sh7372_a4s); - sh7372_init_pm_domain(&sh7372_a3sp); - sh7372_init_pm_domain(&sh7372_a3sg); - - sh7372_pm_add_subdomain(&sh7372_a4lc, &sh7372_a3rv); - sh7372_pm_add_subdomain(&sh7372_a4r, &sh7372_a4lc); - - sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sg); - sh7372_pm_add_subdomain(&sh7372_a4s, &sh7372_a3sp); + rmobile_init_pm_domain(&sh7372_pd_a4lc); + rmobile_init_pm_domain(&sh7372_pd_a4mp); + rmobile_init_pm_domain(&sh7372_pd_d4); + rmobile_init_pm_domain(&sh7372_pd_a4r); + rmobile_init_pm_domain(&sh7372_pd_a3rv); + rmobile_init_pm_domain(&sh7372_pd_a3ri); + rmobile_init_pm_domain(&sh7372_pd_a4s); + rmobile_init_pm_domain(&sh7372_pd_a3sp); + rmobile_init_pm_domain(&sh7372_pd_a3sg); + + rmobile_pm_add_subdomain(&sh7372_pd_a4lc, &sh7372_pd_a3rv); + rmobile_pm_add_subdomain(&sh7372_pd_a4r, &sh7372_pd_a4lc); + + rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sg); + rmobile_pm_add_subdomain(&sh7372_pd_a4s, &sh7372_pd_a3sp); platform_add_devices(sh7372_early_devices, ARRAY_SIZE(sh7372_early_devices)); @@ -1060,30 +1023,30 @@ void __init sh7372_add_standard_devices(void) platform_add_devices(sh7372_late_devices, ARRAY_SIZE(sh7372_late_devices)); - sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device); - sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device); - sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &scif0_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &scif1_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &scif2_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &scif3_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &scif4_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &scif5_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &scif6_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &iic1_device); - sh7372_add_device_to_domain(&sh7372_a3sp, 
&dma0_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &dma1_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &dma2_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &usb_dma0_device); - sh7372_add_device_to_domain(&sh7372_a3sp, &usb_dma1_device); - sh7372_add_device_to_domain(&sh7372_a4r, &iic0_device); - sh7372_add_device_to_domain(&sh7372_a4r, &veu0_device); - sh7372_add_device_to_domain(&sh7372_a4r, &veu1_device); - sh7372_add_device_to_domain(&sh7372_a4r, &veu2_device); - sh7372_add_device_to_domain(&sh7372_a4r, &veu3_device); - sh7372_add_device_to_domain(&sh7372_a4r, &jpu_device); - sh7372_add_device_to_domain(&sh7372_a4r, &tmu00_device); - sh7372_add_device_to_domain(&sh7372_a4r, &tmu01_device); + rmobile_add_device_to_domain(&sh7372_pd_a3rv, &vpu_device); + rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu0_device); + rmobile_add_device_to_domain(&sh7372_pd_a4mp, &spu1_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif0_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif1_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif2_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif3_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif4_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif5_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &scif6_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &iic1_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma0_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma1_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &dma2_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma0_device); + rmobile_add_device_to_domain(&sh7372_pd_a3sp, &usb_dma1_device); + rmobile_add_device_to_domain(&sh7372_pd_a4r, &iic0_device); + rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu0_device); + rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu1_device); + rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu2_device); + rmobile_add_device_to_domain(&sh7372_pd_a4r, &veu3_device); + rmobile_add_device_to_domain(&sh7372_pd_a4r, &jpu_device); + rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu00_device); + rmobile_add_device_to_domain(&sh7372_pd_a4r, &tmu01_device); } static void __init sh7372_earlytimer_init(void) diff --git a/arch/arm/mach-shmobile/setup-sh7377.c b/arch/arm/mach-shmobile/setup-sh7377.c index d576a6abbade..855b1506caf8 100644 --- a/arch/arm/mach-shmobile/setup-sh7377.c +++ b/arch/arm/mach-shmobile/setup-sh7377.c @@ -22,6 +22,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/platform_device.h> +#include <linux/of_platform.h> #include <linux/uio_driver.h> #include <linux/delay.h> #include <linux/input.h> @@ -500,3 +501,49 @@ void __init sh7377_add_early_devices(void) /* override timer setup with soc-specific code */ shmobile_timer.init = sh7377_earlytimer_init; } + +#ifdef CONFIG_USE_OF + +void __init sh7377_add_early_devices_dt(void) +{ + shmobile_setup_delay(600, 1, 3); /* Cortex-A8 @ 600MHz */ + + early_platform_add_devices(sh7377_early_devices, + ARRAY_SIZE(sh7377_early_devices)); + + /* setup early console here as well */ + shmobile_setup_console(); +} + +static const struct of_dev_auxdata sh7377_auxdata_lookup[] __initconst = { + { } +}; + +void __init sh7377_add_standard_devices_dt(void) +{ + /* clocks are setup late during boot in the case of DT */ + sh7377_clock_init(); + + platform_add_devices(sh7377_early_devices, + ARRAY_SIZE(sh7377_early_devices)); + + of_platform_populate(NULL, 
of_default_bus_match_table, + sh7377_auxdata_lookup, NULL); +} + +static const char *sh7377_boards_compat_dt[] __initdata = { + "renesas,sh7377", + NULL, +}; + +DT_MACHINE_START(SH7377_DT, "Generic SH7377 (Flattened Device Tree)") + .map_io = sh7377_map_io, + .init_early = sh7377_add_early_devices_dt, + .init_irq = sh7377_init_irq, + .handle_irq = shmobile_handle_irq_intc, + .init_machine = sh7377_add_standard_devices_dt, + .timer = &shmobile_timer, + .dt_compat = sh7377_boards_compat_dt, +MACHINE_END + +#endif /* CONFIG_USE_OF */ diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c index 04a0dfe75493..d230af656fc9 100644 --- a/arch/arm/mach-shmobile/setup-sh73a0.c +++ b/arch/arm/mach-shmobile/setup-sh73a0.c @@ -30,6 +30,7 @@ #include <linux/sh_dma.h> #include <linux/sh_intc.h> #include <linux/sh_timer.h> +#include <mach/dma-register.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <mach/sh73a0.h> @@ -415,32 +416,6 @@ static struct platform_device i2c4_device = { .num_resources = ARRAY_SIZE(i2c4_resources), }; -/* Transmit sizes and respective CHCR register values */ -enum { - XMIT_SZ_8BIT = 0, - XMIT_SZ_16BIT = 1, - XMIT_SZ_32BIT = 2, - XMIT_SZ_64BIT = 7, - XMIT_SZ_128BIT = 3, - XMIT_SZ_256BIT = 4, - XMIT_SZ_512BIT = 5, -}; - -/* log2(size / 8) - used to calculate number of transfers */ -#define TS_SHIFT { \ - [XMIT_SZ_8BIT] = 0, \ - [XMIT_SZ_16BIT] = 1, \ - [XMIT_SZ_32BIT] = 2, \ - [XMIT_SZ_64BIT] = 3, \ - [XMIT_SZ_128BIT] = 4, \ - [XMIT_SZ_256BIT] = 5, \ - [XMIT_SZ_512BIT] = 6, \ -} - -#define TS_INDEX2VAL(i) ((((i) & 3) << 3) | (((i) & 0xc) << (20 - 2))) -#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz))) -#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz))) - static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = { { .slave_id = SHDMA_SLAVE_SCIF0_TX, @@ -604,19 +579,17 @@ static const struct sh_dmae_channel sh73a0_dmae_channels[] = { DMAE_CHANNEL(0x8980), }; -static const unsigned int ts_shift[] = TS_SHIFT; - static struct sh_dmae_pdata sh73a0_dmae_platform_data = { .slave = sh73a0_dmae_slaves, .slave_num = ARRAY_SIZE(sh73a0_dmae_slaves), .channel = sh73a0_dmae_channels, .channel_num = ARRAY_SIZE(sh73a0_dmae_channels), - .ts_low_shift = 3, - .ts_low_mask = 0x18, - .ts_high_shift = (20 - 2), /* 2 bits for shifted low TS */ - .ts_high_mask = 0x00300000, - .ts_shift = ts_shift, - .ts_shift_num = ARRAY_SIZE(ts_shift), + .ts_low_shift = TS_LOW_SHIFT, + .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT, + .ts_high_shift = TS_HI_SHIFT, + .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT, + .ts_shift = dma_ts_shift, + .ts_shift_num = ARRAY_SIZE(dma_ts_shift), .dmaor_init = DMAOR_DME, }; @@ -651,6 +624,116 @@ static struct platform_device dma0_device = { }, }; +/* MPDMAC */ +static const struct sh_dmae_slave_config sh73a0_mpdma_slaves[] = { + { + .slave_id = SHDMA_SLAVE_FSI2A_RX, + .addr = 0xec230020, + .chcr = CHCR_RX(XMIT_SZ_32BIT), + .mid_rid = 0xd6, /* CHECK ME */ + }, { + .slave_id = SHDMA_SLAVE_FSI2A_TX, + .addr = 0xec230024, + .chcr = CHCR_TX(XMIT_SZ_32BIT), + .mid_rid = 0xd5, /* CHECK ME */ + }, { + .slave_id = SHDMA_SLAVE_FSI2C_RX, + .addr = 0xec230060, + .chcr = CHCR_RX(XMIT_SZ_32BIT), + .mid_rid = 0xda, /* CHECK ME */ + }, { + .slave_id = SHDMA_SLAVE_FSI2C_TX, + .addr = 0xec230064, + .chcr = CHCR_TX(XMIT_SZ_32BIT), + .mid_rid = 0xd9, /* CHECK ME */ + }, { + .slave_id = SHDMA_SLAVE_FSI2B_RX, + .addr = 0xec240020, + .chcr = CHCR_RX(XMIT_SZ_32BIT), + .mid_rid = 0x8e, /* CHECK ME */ + }, { + 
.slave_id = SHDMA_SLAVE_FSI2B_TX, + .addr = 0xec240024, + .chcr = CHCR_RX(XMIT_SZ_32BIT), + .mid_rid = 0x8d, /* CHECK ME */ + }, { + .slave_id = SHDMA_SLAVE_FSI2D_RX, + .addr = 0xec240060, + .chcr = CHCR_RX(XMIT_SZ_32BIT), + .mid_rid = 0x9a, /* CHECK ME */ + }, +}; + +#define MPDMA_CHANNEL(a, b, c) \ +{ \ + .offset = a, \ + .dmars = b, \ + .dmars_bit = c, \ + .chclr_offset = (0x220 - 0x20) + a \ +} + +static const struct sh_dmae_channel sh73a0_mpdma_channels[] = { + MPDMA_CHANNEL(0x00, 0, 0), + MPDMA_CHANNEL(0x10, 0, 8), + MPDMA_CHANNEL(0x20, 4, 0), + MPDMA_CHANNEL(0x30, 4, 8), + MPDMA_CHANNEL(0x50, 8, 0), + MPDMA_CHANNEL(0x70, 8, 8), +}; + +static struct sh_dmae_pdata sh73a0_mpdma_platform_data = { + .slave = sh73a0_mpdma_slaves, + .slave_num = ARRAY_SIZE(sh73a0_mpdma_slaves), + .channel = sh73a0_mpdma_channels, + .channel_num = ARRAY_SIZE(sh73a0_mpdma_channels), + .ts_low_shift = TS_LOW_SHIFT, + .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT, + .ts_high_shift = TS_HI_SHIFT, + .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT, + .ts_shift = dma_ts_shift, + .ts_shift_num = ARRAY_SIZE(dma_ts_shift), + .dmaor_init = DMAOR_DME, + .chclr_present = 1, +}; + +/* Resource order important! */ +static struct resource sh73a0_mpdma_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xec618020, + .end = 0xec61828f, + .flags = IORESOURCE_MEM, + }, + { + /* DMARSx */ + .start = 0xec619000, + .end = 0xec61900b, + .flags = IORESOURCE_MEM, + }, + { + .name = "error_irq", + .start = gic_spi(181), + .end = gic_spi(181), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 0-5 */ + .start = gic_spi(175), + .end = gic_spi(180), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device mpdma0_device = { + .name = "sh-dma-engine", + .id = 1, + .resource = sh73a0_mpdma_resources, + .num_resources = ARRAY_SIZE(sh73a0_mpdma_resources), + .dev = { + .platform_data = &sh73a0_mpdma_platform_data, + }, +}; + static struct platform_device *sh73a0_early_devices[] __initdata = { &scif0_device, &scif1_device, @@ -673,6 +756,7 @@ static struct platform_device *sh73a0_late_devices[] __initdata = { &i2c3_device, &i2c4_device, &dma0_device, + &mpdma0_device, }; #define SRCR2 0xe61580b0 diff --git a/arch/arm/mach-tegra/board-dt-tegra20.c b/arch/arm/mach-tegra/board-dt-tegra20.c index d0de9c1192f7..c0999633a9ab 100644 --- a/arch/arm/mach-tegra/board-dt-tegra20.c +++ b/arch/arm/mach-tegra/board-dt-tegra20.c @@ -64,7 +64,8 @@ struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = { &tegra_ehci2_pdata), OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB3_BASE, "tegra-ehci.2", &tegra_ehci3_pdata), - OF_DEV_AUXDATA("nvidia,tegra20-apbdma", 0x6000a000, "tegra-apbdma", NULL), + OF_DEV_AUXDATA("nvidia,tegra20-apbdma", TEGRA_APB_DMA_BASE, "tegra-apbdma", NULL), + OF_DEV_AUXDATA("nvidia,tegra20-pwm", TEGRA_PWFM_BASE, "tegra-pwm", NULL), {} }; diff --git a/arch/arm/mach-tegra/board-dt-tegra30.c b/arch/arm/mach-tegra/board-dt-tegra30.c index ee48214bfd89..53bf60f11580 100644 --- a/arch/arm/mach-tegra/board-dt-tegra30.c +++ b/arch/arm/mach-tegra/board-dt-tegra30.c @@ -33,6 +33,8 @@ #include <asm/mach/arch.h> #include <asm/hardware/gic.h> +#include <mach/iomap.h> + #include "board.h" #include "clock.h" @@ -48,6 +50,7 @@ struct of_dev_auxdata tegra30_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000D000, "tegra-i2c.4", NULL), OF_DEV_AUXDATA("nvidia,tegra30-ahub", 0x70080000, "tegra30-ahub", NULL), OF_DEV_AUXDATA("nvidia,tegra30-apbdma", 0x6000a000, "tegra-apbdma", NULL), + 
OF_DEV_AUXDATA("nvidia,tegra30-pwm", TEGRA_PWFM_BASE, "tegra-pwm", NULL), {} }; diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index a310222951da..8f421c0ca45c 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -15,6 +15,7 @@ #include <linux/platform_device.h> #include <linux/io.h> #include <linux/i2c.h> +#include <linux/platform_data/i2c-nomadik.h> #include <linux/gpio.h> #include <linux/amba/bus.h> #include <linux/amba/pl022.h> @@ -40,7 +41,6 @@ #include <asm/mach/arch.h> #include <asm/hardware/gic.h> -#include <plat/i2c.h> #include <plat/ste_dma40.h> #include <plat/gpio-nomadik.h> diff --git a/arch/arm/mach-ux500/devices-common.h b/arch/arm/mach-ux500/devices-common.h index 6e4706560266..ecdd8386cffb 100644 --- a/arch/arm/mach-ux500/devices-common.h +++ b/arch/arm/mach-ux500/devices-common.h @@ -12,7 +12,7 @@ #include <linux/dma-mapping.h> #include <linux/sys_soc.h> #include <linux/amba/bus.h> -#include <plat/i2c.h> +#include <linux/platform_data/i2c-nomadik.h> #include <mach/crypto-ux500.h> struct spi_master_cntlr; @@ -56,27 +56,15 @@ dbx500_add_uart(struct device *parent, const char *name, resource_size_t base, struct nmk_i2c_controller; -static inline struct platform_device * +static inline struct amba_device * dbx500_add_i2c(struct device *parent, int id, resource_size_t base, int irq, struct nmk_i2c_controller *data) { - struct resource res[] = { - DEFINE_RES_MEM(base, SZ_4K), - DEFINE_RES_IRQ(irq), - }; + /* Conjure a name similar to what the platform device used to have */ + char name[16]; - struct platform_device_info pdevinfo = { - .parent = parent, - .name = "nmk-i2c", - .id = id, - .res = res, - .num_res = ARRAY_SIZE(res), - .data = data, - .size_data = sizeof(*data), - .dma_mask = DMA_BIT_MASK(32), - }; - - return platform_device_register_full(&pdevinfo); + snprintf(name, sizeof(name), "nmk-i2c.%d", id); + return amba_apb_device_add(parent, name, base, SZ_4K, irq, 0, data, 0); } static inline struct amba_device * diff --git a/arch/arm/mach-vt8500/Makefile b/arch/arm/mach-vt8500/Makefile index 54e69973f39b..7ce51767c99c 100644 --- a/arch/arm/mach-vt8500/Makefile +++ b/arch/arm/mach-vt8500/Makefile @@ -5,5 +5,3 @@ obj-$(CONFIG_VTWM_VERSION_WM8505) += devices-wm8505.o obj-$(CONFIG_MACH_BV07) += bv07.o obj-$(CONFIG_MACH_WM8505_7IN_NETBOOK) += wm8505_7in.o - -obj-$(CONFIG_HAVE_PWM) += pwm.o diff --git a/arch/arm/mach-vt8500/pwm.c b/arch/arm/mach-vt8500/pwm.c deleted file mode 100644 index 8ad825e93592..000000000000 --- a/arch/arm/mach-vt8500/pwm.c +++ /dev/null @@ -1,265 +0,0 @@ -/* - * arch/arm/mach-vt8500/pwm.c - * - * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/err.h> -#include <linux/io.h> -#include <linux/pwm.h> -#include <linux/delay.h> - -#include <asm/div64.h> - -#define VT8500_NR_PWMS 4 - -static DEFINE_MUTEX(pwm_lock); -static LIST_HEAD(pwm_list); - -struct pwm_device { - struct list_head node; - struct platform_device *pdev; - - const char *label; - - void __iomem *regbase; - - unsigned int use_count; - unsigned int pwm_id; -}; - -#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) -static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask) -{ - int loops = msecs_to_loops(10); - while ((readb(reg) & bitmask) && --loops) - cpu_relax(); - - if (unlikely(!loops)) - pr_warning("Waiting for status bits 0x%x to clear timed out\n", - bitmask); -} - -int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) -{ - unsigned long long c; - unsigned long period_cycles, prescale, pv, dc; - - if (pwm == NULL || period_ns == 0 || duty_ns > period_ns) - return -EINVAL; - - c = 25000000/2; /* wild guess --- need to implement clocks */ - c = c * period_ns; - do_div(c, 1000000000); - period_cycles = c; - - if (period_cycles < 1) - period_cycles = 1; - prescale = (period_cycles - 1) / 4096; - pv = period_cycles / (prescale + 1) - 1; - if (pv > 4095) - pv = 4095; - - if (prescale > 1023) - return -EINVAL; - - c = (unsigned long long)pv * duty_ns; - do_div(c, period_ns); - dc = c; - - pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 1)); - writel(prescale, pwm->regbase + 0x4 + (pwm->pwm_id << 4)); - - pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 2)); - writel(pv, pwm->regbase + 0x8 + (pwm->pwm_id << 4)); - - pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 3)); - writel(dc, pwm->regbase + 0xc + (pwm->pwm_id << 4)); - - return 0; -} -EXPORT_SYMBOL(pwm_config); - -int pwm_enable(struct pwm_device *pwm) -{ - pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 0)); - writel(5, pwm->regbase + (pwm->pwm_id << 4)); - return 0; -} -EXPORT_SYMBOL(pwm_enable); - -void pwm_disable(struct pwm_device *pwm) -{ - pwm_busy_wait(pwm->regbase + 0x40 + pwm->pwm_id, (1 << 0)); - writel(0, pwm->regbase + (pwm->pwm_id << 4)); -} -EXPORT_SYMBOL(pwm_disable); - -struct pwm_device *pwm_request(int pwm_id, const char *label) -{ - struct pwm_device *pwm; - int found = 0; - - mutex_lock(&pwm_lock); - - list_for_each_entry(pwm, &pwm_list, node) { - if (pwm->pwm_id == pwm_id) { - found = 1; - break; - } - } - - if (found) { - if (pwm->use_count == 0) { - pwm->use_count++; - pwm->label = label; - } else { - pwm = ERR_PTR(-EBUSY); - } - } else { - pwm = ERR_PTR(-ENOENT); - } - - mutex_unlock(&pwm_lock); - return pwm; -} -EXPORT_SYMBOL(pwm_request); - -void pwm_free(struct pwm_device *pwm) -{ - mutex_lock(&pwm_lock); - - if (pwm->use_count) { - pwm->use_count--; - pwm->label = NULL; - } else { - pr_warning("PWM device already freed\n"); - } - - mutex_unlock(&pwm_lock); -} -EXPORT_SYMBOL(pwm_free); - -static inline void __add_pwm(struct pwm_device *pwm) -{ - mutex_lock(&pwm_lock); - list_add_tail(&pwm->node, &pwm_list); - mutex_unlock(&pwm_lock); -} - -static int __devinit pwm_probe(struct platform_device *pdev) -{ - struct pwm_device *pwms; - struct resource *r; - int ret = 0; - int i; - - pwms = kzalloc(sizeof(struct pwm_device) * VT8500_NR_PWMS, GFP_KERNEL); - if (pwms == NULL) { - dev_err(&pdev->dev, "failed to allocate memory\n"); - return -ENOMEM; - } - - for (i = 0; i < VT8500_NR_PWMS; i++) { - 
pwms[i].use_count = 0; - pwms[i].pwm_id = i; - pwms[i].pdev = pdev; - } - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - dev_err(&pdev->dev, "no memory resource defined\n"); - ret = -ENODEV; - goto err_free; - } - - r = request_mem_region(r->start, resource_size(r), pdev->name); - if (r == NULL) { - dev_err(&pdev->dev, "failed to request memory resource\n"); - ret = -EBUSY; - goto err_free; - } - - pwms[0].regbase = ioremap(r->start, resource_size(r)); - if (pwms[0].regbase == NULL) { - dev_err(&pdev->dev, "failed to ioremap() registers\n"); - ret = -ENODEV; - goto err_free_mem; - } - - for (i = 1; i < VT8500_NR_PWMS; i++) - pwms[i].regbase = pwms[0].regbase; - - for (i = 0; i < VT8500_NR_PWMS; i++) - __add_pwm(&pwms[i]); - - platform_set_drvdata(pdev, pwms); - return 0; - -err_free_mem: - release_mem_region(r->start, resource_size(r)); -err_free: - kfree(pwms); - return ret; -} - -static int __devexit pwm_remove(struct platform_device *pdev) -{ - struct pwm_device *pwms; - struct resource *r; - int i; - - pwms = platform_get_drvdata(pdev); - if (pwms == NULL) - return -ENODEV; - - mutex_lock(&pwm_lock); - - for (i = 0; i < VT8500_NR_PWMS; i++) - list_del(&pwms[i].node); - mutex_unlock(&pwm_lock); - - iounmap(pwms[0].regbase); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(r->start, resource_size(r)); - - kfree(pwms); - return 0; -} - -static struct platform_driver pwm_driver = { - .driver = { - .name = "vt8500-pwm", - .owner = THIS_MODULE, - }, - .probe = pwm_probe, - .remove = __devexit_p(pwm_remove), -}; - -static int __init pwm_init(void) -{ - return platform_driver_register(&pwm_driver); -} -arch_initcall(pwm_init); - -static void __exit pwm_exit(void) -{ - platform_driver_unregister(&pwm_driver); -} -module_exit(pwm_exit); - -MODULE_LICENSE("GPL"); diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 5cfc98994076..c2cdf6500f75 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -22,6 +22,7 @@ #include <linux/memblock.h> #include <linux/slab.h> #include <linux/iommu.h> +#include <linux/io.h> #include <linux/vmalloc.h> #include <linux/sizes.h> @@ -72,7 +73,7 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - if (!arch_is_coherent()) + if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __dma_page_cpu_to_dev(page, offset, size, dir); return pfn_to_dma(dev, page_to_pfn(page)) + offset; } @@ -95,7 +96,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { - if (!arch_is_coherent()) + if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), handle & ~PAGE_MASK, size, dir); } @@ -124,6 +125,7 @@ struct dma_map_ops arm_dma_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, .mmap = arm_dma_mmap, + .get_sgtable = arm_dma_get_sgtable, .map_page = arm_dma_map_page, .unmap_page = arm_dma_unmap_page, .map_sg = arm_dma_map_sg, @@ -217,115 +219,70 @@ static void __dma_free_buffer(struct page *page, size_t size) } #ifdef CONFIG_MMU +#ifdef CONFIG_HUGETLB_PAGE +#error ARM Coherent DMA allocator does not (yet) support huge TLB +#endif -#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - consistent_base) >> PAGE_SHIFT) -#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> 
PMD_SHIFT) - -/* - * These are the page tables (2MB each) covering uncached, DMA consistent allocations - */ -static pte_t **consistent_pte; - -#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M +static void *__alloc_from_contiguous(struct device *dev, size_t size, + pgprot_t prot, struct page **ret_page); -static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; +static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, + pgprot_t prot, struct page **ret_page, + const void *caller); -void __init init_consistent_dma_size(unsigned long size) +static void * +__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, + const void *caller) { - unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M); + struct vm_struct *area; + unsigned long addr; - BUG_ON(consistent_pte); /* Check we're called before DMA region init */ - BUG_ON(base < VMALLOC_END); + /* + * DMA allocation can be mapped to user space, so lets + * set VM_USERMAP flags too. + */ + area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, + caller); + if (!area) + return NULL; + addr = (unsigned long)area->addr; + area->phys_addr = __pfn_to_phys(page_to_pfn(page)); - /* Grow region to accommodate specified size */ - if (base < consistent_base) - consistent_base = base; + if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) { + vunmap((void *)addr); + return NULL; + } + return (void *)addr; } -#include "vmregion.h" - -static struct arm_vmregion_head consistent_head = { - .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock), - .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), - .vm_end = CONSISTENT_END, -}; - -#ifdef CONFIG_HUGETLB_PAGE -#error ARM Coherent DMA allocator does not (yet) support huge TLB -#endif - -/* - * Initialise the consistent memory allocation. 
- */ -static int __init consistent_init(void) +static void __dma_free_remap(void *cpu_addr, size_t size) { - int ret = 0; - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - int i = 0; - unsigned long base = consistent_base; - unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; - - if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) - return 0; - - consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); - if (!consistent_pte) { - pr_err("%s: no memory\n", __func__); - return -ENOMEM; + unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP; + struct vm_struct *area = find_vm_area(cpu_addr); + if (!area || (area->flags & flags) != flags) { + WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); + return; } - - pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END); - consistent_head.vm_start = base; - - do { - pgd = pgd_offset(&init_mm, base); - - pud = pud_alloc(&init_mm, pgd, base); - if (!pud) { - pr_err("%s: no pud tables\n", __func__); - ret = -ENOMEM; - break; - } - - pmd = pmd_alloc(&init_mm, pud, base); - if (!pmd) { - pr_err("%s: no pmd tables\n", __func__); - ret = -ENOMEM; - break; - } - WARN_ON(!pmd_none(*pmd)); - - pte = pte_alloc_kernel(pmd, base); - if (!pte) { - pr_err("%s: no pte tables\n", __func__); - ret = -ENOMEM; - break; - } - - consistent_pte[i++] = pte; - base += PMD_SIZE; - } while (base < CONSISTENT_END); - - return ret; + unmap_kernel_range((unsigned long)cpu_addr, size); + vunmap(cpu_addr); } -core_initcall(consistent_init); -static void *__alloc_from_contiguous(struct device *dev, size_t size, - pgprot_t prot, struct page **ret_page); - -static struct arm_vmregion_head coherent_head = { - .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock), - .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), +struct dma_pool { + size_t size; + spinlock_t lock; + unsigned long *bitmap; + unsigned long nr_pages; + void *vaddr; + struct page *page; }; -static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; +static struct dma_pool atomic_pool = { + .size = SZ_256K, +}; static int __init early_coherent_pool(char *p) { - coherent_pool_size = memparse(p, &p); + atomic_pool.size = memparse(p, &p); return 0; } early_param("coherent_pool", early_coherent_pool); @@ -333,32 +290,45 @@ early_param("coherent_pool", early_coherent_pool); /* * Initialise the coherent pool for atomic allocations. 
*/ -static int __init coherent_init(void) +static int __init atomic_pool_init(void) { + struct dma_pool *pool = &atomic_pool; pgprot_t prot = pgprot_dmacoherent(pgprot_kernel); - size_t size = coherent_pool_size; + unsigned long nr_pages = pool->size >> PAGE_SHIFT; + unsigned long *bitmap; struct page *page; void *ptr; + int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long); - if (!IS_ENABLED(CONFIG_CMA)) - return 0; + bitmap = kzalloc(bitmap_size, GFP_KERNEL); + if (!bitmap) + goto no_bitmap; - ptr = __alloc_from_contiguous(NULL, size, prot, &page); + if (IS_ENABLED(CONFIG_CMA)) + ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page); + else + ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, + &page, NULL); if (ptr) { - coherent_head.vm_start = (unsigned long) ptr; - coherent_head.vm_end = (unsigned long) ptr + size; - printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n", - (unsigned)size / 1024); + spin_lock_init(&pool->lock); + pool->vaddr = ptr; + pool->page = page; + pool->bitmap = bitmap; + pool->nr_pages = nr_pages; + pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n", + (unsigned)pool->size / 1024); return 0; } - printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", - (unsigned)size / 1024); + kfree(bitmap); +no_bitmap: + pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", + (unsigned)pool->size / 1024); return -ENOMEM; } /* * CMA is activated by core_initcall, so we must be called after it. */ -postcore_initcall(coherent_init); +postcore_initcall(atomic_pool_init); struct dma_contig_early_reserve { phys_addr_t base; @@ -406,112 +376,6 @@ void __init dma_contiguous_remap(void) } } -static void * -__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, - const void *caller) -{ - struct arm_vmregion *c; - size_t align; - int bit; - - if (!consistent_pte) { - pr_err("%s: not initialised\n", __func__); - dump_stack(); - return NULL; - } - - /* - * Align the virtual region allocation - maximum alignment is - * a section size, minimum is a page size. This helps reduce - * fragmentation of the DMA space, and also prevents allocations - * smaller than a section from crossing a section boundary. - */ - bit = fls(size - 1); - if (bit > SECTION_SHIFT) - bit = SECTION_SHIFT; - align = 1 << bit; - - /* - * Allocate a virtual address in the consistent mapping region. 
- */ - c = arm_vmregion_alloc(&consistent_head, align, size, - gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller); - if (c) { - pte_t *pte; - int idx = CONSISTENT_PTE_INDEX(c->vm_start); - u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); - - pte = consistent_pte[idx] + off; - c->priv = page; - - do { - BUG_ON(!pte_none(*pte)); - - set_pte_ext(pte, mk_pte(page, prot), 0); - page++; - pte++; - off++; - if (off >= PTRS_PER_PTE) { - off = 0; - pte = consistent_pte[++idx]; - } - } while (size -= PAGE_SIZE); - - dsb(); - - return (void *)c->vm_start; - } - return NULL; -} - -static void __dma_free_remap(void *cpu_addr, size_t size) -{ - struct arm_vmregion *c; - unsigned long addr; - pte_t *ptep; - int idx; - u32 off; - - c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr); - if (!c) { - pr_err("%s: trying to free invalid coherent area: %p\n", - __func__, cpu_addr); - dump_stack(); - return; - } - - if ((c->vm_end - c->vm_start) != size) { - pr_err("%s: freeing wrong coherent size (%ld != %d)\n", - __func__, c->vm_end - c->vm_start, size); - dump_stack(); - size = c->vm_end - c->vm_start; - } - - idx = CONSISTENT_PTE_INDEX(c->vm_start); - off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); - ptep = consistent_pte[idx] + off; - addr = c->vm_start; - do { - pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); - - ptep++; - addr += PAGE_SIZE; - off++; - if (off >= PTRS_PER_PTE) { - off = 0; - ptep = consistent_pte[++idx]; - } - - if (pte_none(pte) || !pte_present(pte)) - pr_crit("%s: bad page in kernel page table\n", - __func__); - } while (size -= PAGE_SIZE); - - flush_tlb_kernel_range(c->vm_start, c->vm_end); - - arm_vmregion_free(&consistent_head, c); -} - static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr, void *data) { @@ -552,16 +416,17 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, return ptr; } -static void *__alloc_from_pool(struct device *dev, size_t size, - struct page **ret_page, const void *caller) +static void *__alloc_from_pool(size_t size, struct page **ret_page) { - struct arm_vmregion *c; + struct dma_pool *pool = &atomic_pool; + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + unsigned int pageno; + unsigned long flags; + void *ptr = NULL; size_t align; - if (!coherent_head.vm_start) { - printk(KERN_ERR "%s: coherent pool not initialised!\n", - __func__); - dump_stack(); + if (!pool->vaddr) { + WARN(1, "coherent pool not initialised!\n"); return NULL; } @@ -571,35 +436,41 @@ static void *__alloc_from_pool(struct device *dev, size_t size, * size. This helps reduce fragmentation of the DMA space. 
*/ align = PAGE_SIZE << get_order(size); - c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller); - if (c) { - void *ptr = (void *)c->vm_start; - struct page *page = virt_to_page(ptr); - *ret_page = page; - return ptr; + + spin_lock_irqsave(&pool->lock, flags); + pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages, + 0, count, (1 << align) - 1); + if (pageno < pool->nr_pages) { + bitmap_set(pool->bitmap, pageno, count); + ptr = pool->vaddr + PAGE_SIZE * pageno; + *ret_page = pool->page + pageno; } - return NULL; + spin_unlock_irqrestore(&pool->lock, flags); + + return ptr; } -static int __free_from_pool(void *cpu_addr, size_t size) +static int __free_from_pool(void *start, size_t size) { - unsigned long start = (unsigned long)cpu_addr; - unsigned long end = start + size; - struct arm_vmregion *c; + struct dma_pool *pool = &atomic_pool; + unsigned long pageno, count; + unsigned long flags; - if (start < coherent_head.vm_start || end > coherent_head.vm_end) + if (start < pool->vaddr || start > pool->vaddr + pool->size) return 0; - c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start); - - if ((c->vm_end - c->vm_start) != size) { - printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", - __func__, c->vm_end - c->vm_start, size); - dump_stack(); - size = c->vm_end - c->vm_start; + if (start + size > pool->vaddr + pool->size) { + WARN(1, "freeing wrong coherent size from pool\n"); + return 0; } - arm_vmregion_free(&coherent_head, c); + pageno = (start - pool->vaddr) >> PAGE_SHIFT; + count = size >> PAGE_SHIFT; + + spin_lock_irqsave(&pool->lock, flags); + bitmap_clear(pool->bitmap, pageno, count); + spin_unlock_irqrestore(&pool->lock, flags); + return 1; } @@ -644,7 +515,7 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) #define __get_dma_pgprot(attrs, prot) __pgprot(0) #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL -#define __alloc_from_pool(dev, size, ret_page, c) NULL +#define __alloc_from_pool(size, ret_page) NULL #define __alloc_from_contiguous(dev, size, prot, ret) NULL #define __free_from_pool(cpu_addr, size) 0 #define __free_from_contiguous(dev, page, size) do { } while (0) @@ -702,10 +573,10 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, if (arch_is_coherent() || nommu()) addr = __alloc_simple_buffer(dev, size, gfp, &page); + else if (gfp & GFP_ATOMIC) + addr = __alloc_from_pool(size, &page); else if (!IS_ENABLED(CONFIG_CMA)) addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); - else if (gfp & GFP_ATOMIC) - addr = __alloc_from_pool(dev, size, &page, caller); else addr = __alloc_from_contiguous(dev, size, prot, &page); @@ -741,16 +612,22 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, { int ret = -ENXIO; #ifdef CONFIG_MMU + unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long pfn = dma_to_pfn(dev, dma_addr); + unsigned long off = vma->vm_pgoff; + vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) return ret; - ret = remap_pfn_range(vma, vma->vm_start, - pfn + vma->vm_pgoff, - vma->vm_end - vma->vm_start, - vma->vm_page_prot); + if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { + ret = remap_pfn_range(vma, vma->vm_start, + pfn + off, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); + } #endif /* CONFIG_MMU */ return ret; @@ -785,6 +662,21 @@ void 
arm_dma_free(struct device *dev, size_t size, void *cpu_addr, } } +int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size, + struct dma_attrs *attrs) +{ + struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); + int ret; + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (unlikely(ret)) + return ret; + + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + return 0; +} + static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, void (*op)(const void *, size_t, int)) @@ -998,9 +890,6 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask) static int __init dma_debug_do_init(void) { -#ifdef CONFIG_MMU - arm_vmregion_create_proc("dma-mappings", &consistent_head); -#endif dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } @@ -1088,7 +977,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t return pages; error: - while (--i) + while (i--) if (pages[i]) __free_pages(pages[i], 0); if (array_size <= PAGE_SIZE) @@ -1117,61 +1006,32 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s * Create a CPU mapping for a specified pages */ static void * -__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot) +__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, + const void *caller) { - struct arm_vmregion *c; - size_t align; - size_t count = size >> PAGE_SHIFT; - int bit; + unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; + struct vm_struct *area; + unsigned long p; - if (!consistent_pte[0]) { - pr_err("%s: not initialised\n", __func__); - dump_stack(); + area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, + caller); + if (!area) return NULL; - } - - /* - * Align the virtual region allocation - maximum alignment is - * a section size, minimum is a page size. This helps reduce - * fragmentation of the DMA space, and also prevents allocations - * smaller than a section from crossing a section boundary. - */ - bit = fls(size - 1); - if (bit > SECTION_SHIFT) - bit = SECTION_SHIFT; - align = 1 << bit; - - /* - * Allocate a virtual address in the consistent mapping region. 
- */ - c = arm_vmregion_alloc(&consistent_head, align, size, - gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL); - if (c) { - pte_t *pte; - int idx = CONSISTENT_PTE_INDEX(c->vm_start); - int i = 0; - u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); - - pte = consistent_pte[idx] + off; - c->priv = pages; - - do { - BUG_ON(!pte_none(*pte)); - - set_pte_ext(pte, mk_pte(pages[i], prot), 0); - pte++; - off++; - i++; - if (off >= PTRS_PER_PTE) { - off = 0; - pte = consistent_pte[++idx]; - } - } while (i < count); - dsb(); + area->pages = pages; + area->nr_pages = nr_pages; + p = (unsigned long)area->addr; - return (void *)c->vm_start; + for (i = 0; i < nr_pages; i++) { + phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i])); + if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot)) + goto err; + p += PAGE_SIZE; } + return area->addr; +err: + unmap_kernel_range((unsigned long)area->addr, size); + vunmap(area->addr); return NULL; } @@ -1230,6 +1090,19 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si return 0; } +static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) +{ + struct vm_struct *area; + + if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) + return cpu_addr; + + area = find_vm_area(cpu_addr); + if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) + return area->pages; + return NULL; +} + static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { @@ -1248,7 +1121,11 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, if (*handle == DMA_ERROR_CODE) goto err_buffer; - addr = __iommu_alloc_remap(pages, size, gfp, prot); + if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) + return pages; + + addr = __iommu_alloc_remap(pages, size, gfp, prot, + __builtin_return_address(0)); if (!addr) goto err_mapping; @@ -1265,31 +1142,25 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) { - struct arm_vmregion *c; + unsigned long uaddr = vma->vm_start; + unsigned long usize = vma->vm_end - vma->vm_start; + struct page **pages = __iommu_get_pages(cpu_addr, attrs); vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); - c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr); - - if (c) { - struct page **pages = c->priv; - - unsigned long uaddr = vma->vm_start; - unsigned long usize = vma->vm_end - vma->vm_start; - int i = 0; - do { - int ret; + if (!pages) + return -ENXIO; - ret = vm_insert_page(vma, uaddr, pages[i++]); - if (ret) { - pr_err("Remapping memory, error: %d\n", ret); - return ret; - } + do { + int ret = vm_insert_page(vma, uaddr, *pages++); + if (ret) { + pr_err("Remapping memory failed: %d\n", ret); + return ret; + } + uaddr += PAGE_SIZE; + usize -= PAGE_SIZE; + } while (usize > 0); - uaddr += PAGE_SIZE; - usize -= PAGE_SIZE; - } while (usize > 0); - } return 0; } @@ -1300,16 +1171,35 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) { - struct arm_vmregion *c; + struct page **pages = __iommu_get_pages(cpu_addr, attrs); size = PAGE_ALIGN(size); - c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr); - if (c) { - struct page **pages = c->priv; - __dma_free_remap(cpu_addr, size); - __iommu_remove_mapping(dev, handle, size); - __iommu_free_buffer(dev, pages, 
size); + if (!pages) { + WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); + return; } + + if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { + unmap_kernel_range((unsigned long)cpu_addr, size); + vunmap(cpu_addr); + } + + __iommu_remove_mapping(dev, handle, size); + __iommu_free_buffer(dev, pages, size); +} + +static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size, struct dma_attrs *attrs) +{ + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; + struct page **pages = __iommu_get_pages(cpu_addr, attrs); + + if (!pages) + return -ENXIO; + + return sg_alloc_table_from_pages(sgt, pages, count, 0, size, + GFP_KERNEL); } /* @@ -1317,7 +1207,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, */ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, size_t size, dma_addr_t *handle, - enum dma_data_direction dir) + enum dma_data_direction dir, struct dma_attrs *attrs) { struct dma_iommu_mapping *mapping = dev->archdata.mapping; dma_addr_t iova, iova_base; @@ -1336,7 +1226,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, phys_addr_t phys = page_to_phys(sg_page(s)); unsigned int len = PAGE_ALIGN(s->offset + s->length); - if (!arch_is_coherent()) + if (!arch_is_coherent() && + !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); ret = iommu_map(mapping->domain, iova, phys, len, 0); @@ -1383,7 +1274,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { if (__map_sg_chunk(dev, start, size, &dma->dma_address, - dir) < 0) + dir, attrs) < 0) goto bad_mapping; dma->dma_address += offset; @@ -1396,7 +1287,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, } size += s->length; } - if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0) + if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0) goto bad_mapping; dma->dma_address += offset; @@ -1430,7 +1321,8 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, if (sg_dma_len(s)) __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); - if (!arch_is_coherent()) + if (!arch_is_coherent() && + !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); } @@ -1492,7 +1384,7 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, dma_addr_t dma_addr; int ret, len = PAGE_ALIGN(size + offset); - if (!arch_is_coherent()) + if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __dma_page_cpu_to_dev(page, offset, size, dir); dma_addr = __alloc_iova(mapping, len); @@ -1531,7 +1423,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, if (!iova) return; - if (!arch_is_coherent()) + if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) __dma_page_dev_to_cpu(page, offset, size, dir); iommu_unmap(mapping->domain, iova, len); @@ -1571,6 +1463,7 @@ struct dma_map_ops iommu_ops = { .alloc = arm_iommu_alloc_attrs, .free = arm_iommu_free_attrs, .mmap = arm_iommu_mmap_attrs, + .get_sgtable = arm_iommu_get_sgtable, .map_page = arm_iommu_map_page, .unmap_page = arm_iommu_unmap_page, diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 2e8a1efdf7b8..6776160618ef 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -59,6 +59,9 @@ extern void 
__flush_dcache_page(struct address_space *mapping, struct page *page #define VM_ARM_MTYPE(mt) ((mt) << 20) #define VM_ARM_MTYPE_MASK (0x1f << 20) +/* consistent regions used by dma_alloc_attrs() */ +#define VM_ARM_DMA_CONSISTENT 0x20000000 + #endif #ifdef CONFIG_ZONE_DMA diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig index c722f9ce6918..baf9064c0844 100644 --- a/arch/arm/plat-mxc/Kconfig +++ b/arch/arm/plat-mxc/Kconfig @@ -47,12 +47,6 @@ config MXC_TZIC config MXC_AVIC bool -config MXC_PWM - tristate "Enable PWM driver" - select HAVE_PWM - help - Enable support for the i.MX PWM controller(s). - config MXC_DEBUG_BOARD bool "Enable MXC debug board(for 3-stack)" help diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile index 63b064b5c1d5..6ac720031150 100644 --- a/arch/arm/plat-mxc/Makefile +++ b/arch/arm/plat-mxc/Makefile @@ -11,7 +11,6 @@ obj-$(CONFIG_MXC_AVIC) += avic.o obj-$(CONFIG_IMX_HAVE_IOMUX_V1) += iomux-v1.o obj-$(CONFIG_ARCH_MXC_IOMUX_V3) += iomux-v3.o obj-$(CONFIG_IRAM_ALLOC) += iram_alloc.o -obj-$(CONFIG_MXC_PWM) += pwm.o obj-$(CONFIG_MXC_ULPI) += ulpi.o obj-$(CONFIG_MXC_USE_EPIT) += epit.o obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o diff --git a/arch/arm/plat-mxc/include/mach/i2c.h b/arch/arm/plat-mxc/include/mach/i2c.h index 375cdd0cf876..8289d915e615 100644 --- a/arch/arm/plat-mxc/include/mach/i2c.h +++ b/arch/arm/plat-mxc/include/mach/i2c.h @@ -15,7 +15,7 @@ * **/ struct imxi2c_platform_data { - int bitrate; + u32 bitrate; }; #endif /* __ASM_ARCH_I2C_H_ */ diff --git a/arch/arm/plat-pxa/Makefile b/arch/arm/plat-pxa/Makefile index f302d048392d..af8e484001e5 100644 --- a/arch/arm/plat-pxa/Makefile +++ b/arch/arm/plat-pxa/Makefile @@ -8,5 +8,4 @@ obj-$(CONFIG_PXA3xx) += mfp.o obj-$(CONFIG_PXA95x) += mfp.o obj-$(CONFIG_ARCH_MMP) += mfp.o -obj-$(CONFIG_HAVE_PWM) += pwm.o obj-$(CONFIG_PXA_SSP) += ssp.o diff --git a/arch/arm/plat-pxa/pwm.c b/arch/arm/plat-pxa/pwm.c deleted file mode 100644 index ef32686feef9..000000000000 --- a/arch/arm/plat-pxa/pwm.c +++ /dev/null @@ -1,304 +0,0 @@ -/* - * linux/arch/arm/mach-pxa/pwm.c - * - * simple driver for PWM (Pulse Width Modulator) controller - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * 2008-02-13 initial version - * eric miao <eric.miao@marvell.com> - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/err.h> -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/pwm.h> - -#include <asm/div64.h> - -#define HAS_SECONDARY_PWM 0x10 -#define PWM_ID_BASE(d) ((d) & 0xf) - -static const struct platform_device_id pwm_id_table[] = { - /* PWM has_secondary_pwm? 
*/ - { "pxa25x-pwm", 0 }, - { "pxa27x-pwm", 0 | HAS_SECONDARY_PWM }, - { "pxa168-pwm", 1 }, - { "pxa910-pwm", 1 }, - { }, -}; -MODULE_DEVICE_TABLE(platform, pwm_id_table); - -/* PWM registers and bits definitions */ -#define PWMCR (0x00) -#define PWMDCR (0x04) -#define PWMPCR (0x08) - -#define PWMCR_SD (1 << 6) -#define PWMDCR_FD (1 << 10) - -struct pwm_device { - struct list_head node; - struct pwm_device *secondary; - struct platform_device *pdev; - - const char *label; - struct clk *clk; - int clk_enabled; - void __iomem *mmio_base; - - unsigned int use_count; - unsigned int pwm_id; -}; - -/* - * period_ns = 10^9 * (PRESCALE + 1) * (PV + 1) / PWM_CLK_RATE - * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE - */ -int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) -{ - unsigned long long c; - unsigned long period_cycles, prescale, pv, dc; - - if (pwm == NULL || period_ns == 0 || duty_ns > period_ns) - return -EINVAL; - - c = clk_get_rate(pwm->clk); - c = c * period_ns; - do_div(c, 1000000000); - period_cycles = c; - - if (period_cycles < 1) - period_cycles = 1; - prescale = (period_cycles - 1) / 1024; - pv = period_cycles / (prescale + 1) - 1; - - if (prescale > 63) - return -EINVAL; - - if (duty_ns == period_ns) - dc = PWMDCR_FD; - else - dc = (pv + 1) * duty_ns / period_ns; - - /* NOTE: the clock to PWM has to be enabled first - * before writing to the registers - */ - clk_enable(pwm->clk); - __raw_writel(prescale, pwm->mmio_base + PWMCR); - __raw_writel(dc, pwm->mmio_base + PWMDCR); - __raw_writel(pv, pwm->mmio_base + PWMPCR); - clk_disable(pwm->clk); - - return 0; -} -EXPORT_SYMBOL(pwm_config); - -int pwm_enable(struct pwm_device *pwm) -{ - int rc = 0; - - if (!pwm->clk_enabled) { - rc = clk_enable(pwm->clk); - if (!rc) - pwm->clk_enabled = 1; - } - return rc; -} -EXPORT_SYMBOL(pwm_enable); - -void pwm_disable(struct pwm_device *pwm) -{ - if (pwm->clk_enabled) { - clk_disable(pwm->clk); - pwm->clk_enabled = 0; - } -} -EXPORT_SYMBOL(pwm_disable); - -static DEFINE_MUTEX(pwm_lock); -static LIST_HEAD(pwm_list); - -struct pwm_device *pwm_request(int pwm_id, const char *label) -{ - struct pwm_device *pwm; - int found = 0; - - mutex_lock(&pwm_lock); - - list_for_each_entry(pwm, &pwm_list, node) { - if (pwm->pwm_id == pwm_id) { - found = 1; - break; - } - } - - if (found) { - if (pwm->use_count == 0) { - pwm->use_count++; - pwm->label = label; - } else - pwm = ERR_PTR(-EBUSY); - } else - pwm = ERR_PTR(-ENOENT); - - mutex_unlock(&pwm_lock); - return pwm; -} -EXPORT_SYMBOL(pwm_request); - -void pwm_free(struct pwm_device *pwm) -{ - mutex_lock(&pwm_lock); - - if (pwm->use_count) { - pwm->use_count--; - pwm->label = NULL; - } else - pr_warning("PWM device already freed\n"); - - mutex_unlock(&pwm_lock); -} -EXPORT_SYMBOL(pwm_free); - -static inline void __add_pwm(struct pwm_device *pwm) -{ - mutex_lock(&pwm_lock); - list_add_tail(&pwm->node, &pwm_list); - mutex_unlock(&pwm_lock); -} - -static int __devinit pwm_probe(struct platform_device *pdev) -{ - const struct platform_device_id *id = platform_get_device_id(pdev); - struct pwm_device *pwm, *secondary = NULL; - struct resource *r; - int ret = 0; - - pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL); - if (pwm == NULL) { - dev_err(&pdev->dev, "failed to allocate memory\n"); - return -ENOMEM; - } - - pwm->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(pwm->clk)) { - ret = PTR_ERR(pwm->clk); - goto err_free; - } - pwm->clk_enabled = 0; - - pwm->use_count = 0; - pwm->pwm_id = PWM_ID_BASE(id->driver_data) + pdev->id; - pwm->pdev 
= pdev; - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - dev_err(&pdev->dev, "no memory resource defined\n"); - ret = -ENODEV; - goto err_free_clk; - } - - r = request_mem_region(r->start, resource_size(r), pdev->name); - if (r == NULL) { - dev_err(&pdev->dev, "failed to request memory resource\n"); - ret = -EBUSY; - goto err_free_clk; - } - - pwm->mmio_base = ioremap(r->start, resource_size(r)); - if (pwm->mmio_base == NULL) { - dev_err(&pdev->dev, "failed to ioremap() registers\n"); - ret = -ENODEV; - goto err_free_mem; - } - - if (id->driver_data & HAS_SECONDARY_PWM) { - secondary = kzalloc(sizeof(struct pwm_device), GFP_KERNEL); - if (secondary == NULL) { - ret = -ENOMEM; - goto err_free_mem; - } - - *secondary = *pwm; - pwm->secondary = secondary; - - /* registers for the second PWM has offset of 0x10 */ - secondary->mmio_base = pwm->mmio_base + 0x10; - secondary->pwm_id = pdev->id + 2; - } - - __add_pwm(pwm); - if (secondary) - __add_pwm(secondary); - - platform_set_drvdata(pdev, pwm); - return 0; - -err_free_mem: - release_mem_region(r->start, resource_size(r)); -err_free_clk: - clk_put(pwm->clk); -err_free: - kfree(pwm); - return ret; -} - -static int __devexit pwm_remove(struct platform_device *pdev) -{ - struct pwm_device *pwm; - struct resource *r; - - pwm = platform_get_drvdata(pdev); - if (pwm == NULL) - return -ENODEV; - - mutex_lock(&pwm_lock); - - if (pwm->secondary) { - list_del(&pwm->secondary->node); - kfree(pwm->secondary); - } - - list_del(&pwm->node); - mutex_unlock(&pwm_lock); - - iounmap(pwm->mmio_base); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(r->start, resource_size(r)); - - clk_put(pwm->clk); - kfree(pwm); - return 0; -} - -static struct platform_driver pwm_driver = { - .driver = { - .name = "pxa25x-pwm", - .owner = THIS_MODULE, - }, - .probe = pwm_probe, - .remove = __devexit_p(pwm_remove), - .id_table = pwm_id_table, -}; - -static int __init pwm_init(void) -{ - return platform_driver_register(&pwm_driver); -} -arch_initcall(pwm_init); - -static void __exit pwm_exit(void) -{ - platform_driver_unregister(&pwm_driver); -} -module_exit(pwm_exit); - -MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile index b78717496677..9e40e8d00740 100644 --- a/arch/arm/plat-samsung/Makefile +++ b/arch/arm/plat-samsung/Makefile @@ -59,7 +59,3 @@ obj-$(CONFIG_SAMSUNG_WAKEMASK) += wakeup-mask.o obj-$(CONFIG_S5P_PM) += s5p-pm.o s5p-irq-pm.o obj-$(CONFIG_S5P_SLEEP) += s5p-sleep.o - -# PWM support - -obj-$(CONFIG_HAVE_PWM) += pwm.o diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 9b765107e15c..ec44fc6c34ca 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -1002,16 +1002,6 @@ config BFIN_GPTIMERS To compile this driver as a module, choose M here: the module will be called gptimers. -config HAVE_PWM - tristate "Enable PWM API support" - depends on BFIN_GPTIMERS - help - Enable support for the Pulse Width Modulation framework (as - found in linux/pwm.h). - - To compile this driver as a module, choose M here: the module - will be called pwm. 
- choice prompt "Uncached DMA region" default DMA_UNCACHED_1M diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile index 08e6625106be..735f24e07425 100644 --- a/arch/blackfin/kernel/Makefile +++ b/arch/blackfin/kernel/Makefile @@ -21,7 +21,6 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o CFLAGS_REMOVE_ftrace.o = -pg -obj-$(CONFIG_HAVE_PWM) += pwm.o obj-$(CONFIG_IPIPE) += ipipe.o obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o obj-$(CONFIG_CPLB_INFO) += cplbinfo.o diff --git a/arch/blackfin/kernel/pwm.c b/arch/blackfin/kernel/pwm.c deleted file mode 100644 index 33f5942733bd..000000000000 --- a/arch/blackfin/kernel/pwm.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Blackfin Pulse Width Modulation (PWM) core - * - * Copyright (c) 2011 Analog Devices Inc. - * - * Licensed under the GPL-2 or later. - */ - -#include <linux/module.h> -#include <linux/pwm.h> -#include <linux/slab.h> - -#include <asm/gptimers.h> -#include <asm/portmux.h> - -struct pwm_device { - unsigned id; - unsigned short pin; -}; - -static const unsigned short pwm_to_gptimer_per[] = { - P_TMR0, P_TMR1, P_TMR2, P_TMR3, P_TMR4, P_TMR5, - P_TMR6, P_TMR7, P_TMR8, P_TMR9, P_TMR10, P_TMR11, -}; - -struct pwm_device *pwm_request(int pwm_id, const char *label) -{ - struct pwm_device *pwm; - int ret; - - /* XXX: pwm_id really should be unsigned */ - if (pwm_id < 0) - return NULL; - - pwm = kzalloc(sizeof(*pwm), GFP_KERNEL); - if (!pwm) - return pwm; - - pwm->id = pwm_id; - if (pwm->id >= ARRAY_SIZE(pwm_to_gptimer_per)) - goto err; - - pwm->pin = pwm_to_gptimer_per[pwm->id]; - ret = peripheral_request(pwm->pin, label); - if (ret) - goto err; - - return pwm; - err: - kfree(pwm); - return NULL; -} -EXPORT_SYMBOL(pwm_request); - -void pwm_free(struct pwm_device *pwm) -{ - peripheral_free(pwm->pin); - kfree(pwm); -} -EXPORT_SYMBOL(pwm_free); - -int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) -{ - unsigned long period, duty; - unsigned long long val; - - if (duty_ns < 0 || duty_ns > period_ns) - return -EINVAL; - - val = (unsigned long long)get_sclk() * period_ns; - do_div(val, NSEC_PER_SEC); - period = val; - - val = (unsigned long long)period * duty_ns; - do_div(val, period_ns); - duty = period - val; - - if (duty >= period) - duty = period - 1; - - set_gptimer_config(pwm->id, TIMER_MODE_PWM | TIMER_PERIOD_CNT); - set_gptimer_pwidth(pwm->id, duty); - set_gptimer_period(pwm->id, period); - - return 0; -} -EXPORT_SYMBOL(pwm_config); - -int pwm_enable(struct pwm_device *pwm) -{ - enable_gptimer(pwm->id); - return 0; -} -EXPORT_SYMBOL(pwm_enable); - -void pwm_disable(struct pwm_device *pwm) -{ - disable_gptimer(pwm->id); -} -EXPORT_SYMBOL(pwm_disable); diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 62678e365ca0..78160874809a 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h @@ -27,7 +27,10 @@ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size, extern void dma_direct_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs); - +extern int dma_direct_mmap_coherent(struct device *dev, + struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t handle, + size_t size, struct dma_attrs *attrs); #ifdef CONFIG_NOT_COHERENT_CACHE /* @@ -207,11 +210,8 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define 
dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -extern int dma_mmap_coherent(struct device *, struct vm_area_struct *, - void *, dma_addr_t, size_t); #define ARCH_HAS_DMA_MMAP_COHERENT - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, enum dma_data_direction direction) { diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index bcfdcd22c766..2d7bb8ced136 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -109,6 +109,7 @@ static u64 dma_iommu_get_required_mask(struct device *dev) struct dma_map_ops dma_iommu_ops = { .alloc = dma_iommu_alloc_coherent, .free = dma_iommu_free_coherent, + .mmap = dma_direct_mmap_coherent, .map_sg = dma_iommu_map_sg, .unmap_sg = dma_iommu_unmap_sg, .dma_supported = dma_iommu_dma_supported, diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c index 4ab88dafb235..46943651da23 100644 --- a/arch/powerpc/kernel/dma-swiotlb.c +++ b/arch/powerpc/kernel/dma-swiotlb.c @@ -49,6 +49,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev) struct dma_map_ops swiotlb_dma_ops = { .alloc = dma_direct_alloc_coherent, .free = dma_direct_free_coherent, + .mmap = dma_direct_mmap_coherent, .map_sg = swiotlb_map_sg_attrs, .unmap_sg = swiotlb_unmap_sg_attrs, .dma_supported = swiotlb_dma_supported, diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 289be751cd75..355b9d84b0f8 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c @@ -67,6 +67,24 @@ void dma_direct_free_coherent(struct device *dev, size_t size, #endif } +int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t handle, size_t size, + struct dma_attrs *attrs) +{ + unsigned long pfn; + +#ifdef CONFIG_NOT_COHERENT_CACHE + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr); +#else + pfn = page_to_pfn(virt_to_page(cpu_addr)); +#endif + return remap_pfn_range(vma, vma->vm_start, + pfn + vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction direction, struct dma_attrs *attrs) @@ -156,6 +174,7 @@ static inline void dma_direct_sync_single(struct device *dev, struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc_coherent, .free = dma_direct_free_coherent, + .mmap = dma_direct_mmap_coherent, .map_sg = dma_direct_map_sg, .unmap_sg = dma_direct_unmap_sg, .dma_supported = dma_direct_dma_supported, @@ -219,20 +238,3 @@ static int __init dma_init(void) } fs_initcall(dma_init); -int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t handle, size_t size) -{ - unsigned long pfn; - -#ifdef CONFIG_NOT_COHERENT_CACHE - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr); -#else - pfn = page_to_pfn(virt_to_page(cpu_addr)); -#endif - return remap_pfn_range(vma, vma->vm_start, - pfn + vma->vm_pgoff, - vma->vm_end - vma->vm_start, - vma->vm_page_prot); -} -EXPORT_SYMBOL_GPL(dma_mmap_coherent); diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 3052a931f2b5..02b32216bbc3 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -611,6 +611,7 @@ static u64 vio_dma_get_required_mask(struct device *dev) struct dma_map_ops vio_dma_mapping_ops = { .alloc = vio_dma_iommu_alloc_coherent, 
.free = vio_dma_iommu_free_coherent, + .mmap = dma_direct_mmap_coherent, .map_sg = vio_dma_iommu_map_sg, .unmap_sg = vio_dma_iommu_unmap_sg, .map_page = vio_dma_iommu_map_page, diff --git a/drivers/Kconfig b/drivers/Kconfig index bfc918633fd9..805c432c9439 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -148,4 +148,6 @@ source "drivers/iio/Kconfig" source "drivers/vme/Kconfig" +source "drivers/pwm/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index 2ba29ffef2cb..bd36f09f2246 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -8,6 +8,7 @@ # GPIO must come after pinctrl as gpios may need to mux pins etc obj-y += pinctrl/ obj-y += gpio/ +obj-y += pwm/ obj-$(CONFIG_PCI) += pci/ obj-$(CONFIG_PARISC) += parisc/ obj-$(CONFIG_RAPIDIO) += rapidio/ diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 6f3676f1559f..3fbedc75e7c5 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -10,6 +10,7 @@ #include <linux/dma-mapping.h> #include <linux/export.h> #include <linux/gfp.h> +#include <asm-generic/dma-coherent.h> /* * Managed DMA API @@ -217,4 +218,52 @@ void dmam_release_declared_memory(struct device *dev) } EXPORT_SYMBOL(dmam_release_declared_memory); +/* + * Create scatter-list for the already allocated DMA buffer. + */ +int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t handle, size_t size) +{ + struct page *page = virt_to_page(cpu_addr); + int ret; + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (unlikely(ret)) + return ret; + + sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); + return 0; +} +EXPORT_SYMBOL(dma_common_get_sgtable); + #endif + +/* + * Create userspace mapping for the DMA-coherent memory. + */ +int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size) +{ + int ret = -ENXIO; +#ifdef CONFIG_MMU + unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; + unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr)); + unsigned long off = vma->vm_pgoff; + + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + return ret; + + if (off < count && user_count <= (count - off)) { + ret = remap_pfn_range(vma, vma->vm_start, + pfn + off, + user_count << PAGE_SHIFT, + vma->vm_page_prot); + } +#endif /* CONFIG_MMU */ + + return ret; +} +EXPORT_SYMBOL(dma_common_mmap); diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c index a88331644ebf..e64c253cb169 100644 --- a/drivers/cpufreq/exynos5250-cpufreq.c +++ b/drivers/cpufreq/exynos5250-cpufreq.c @@ -65,20 +65,20 @@ static unsigned int clkdiv_cpu0_5250[CPUFREQ_LEVEL_END][8] = { * Clock divider value for following * { ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2 } */ - { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1700 MHz - N/A */ - { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1600 MHz - N/A */ - { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1500 MHz - N/A */ - { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1400 MHz */ - { 0, 3, 7, 7, 6, 1, 3, 0 }, /* 1300 MHz */ - { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1200 MHz */ - { 0, 2, 7, 7, 5, 1, 2, 0 }, /* 1100 MHz */ - { 0, 2, 7, 7, 4, 1, 2, 0 }, /* 1000 MHz */ - { 0, 2, 7, 7, 4, 1, 2, 0 }, /* 900 MHz */ - { 0, 2, 7, 7, 3, 1, 1, 0 }, /* 800 MHz */ + { 0, 3, 7, 7, 7, 3, 5, 0 }, /* 1700 MHz */ + { 0, 3, 7, 7, 7, 1, 4, 0 }, /* 1600 MHz */ + { 0, 2, 7, 7, 7, 1, 4, 0 }, /* 1500 MHz */ + { 0, 2, 7, 7, 6, 1, 4, 0 }, /* 
1400 MHz */ + { 0, 2, 7, 7, 6, 1, 3, 0 }, /* 1300 MHz */ + { 0, 2, 7, 7, 5, 1, 3, 0 }, /* 1200 MHz */ + { 0, 3, 7, 7, 5, 1, 3, 0 }, /* 1100 MHz */ + { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 1000 MHz */ + { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 900 MHz */ + { 0, 1, 7, 7, 4, 1, 2, 0 }, /* 800 MHz */ { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 700 MHz */ - { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 600 MHz */ + { 0, 1, 7, 7, 3, 1, 1, 0 }, /* 600 MHz */ { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 500 MHz */ - { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 400 MHz */ + { 0, 1, 7, 7, 2, 1, 1, 0 }, /* 400 MHz */ { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 300 MHz */ { 0, 1, 7, 7, 1, 1, 1, 0 }, /* 200 MHz */ }; @@ -87,9 +87,9 @@ static unsigned int clkdiv_cpu1_5250[CPUFREQ_LEVEL_END][2] = { /* Clock divider value for following * { COPY, HPM } */ - { 0, 2 }, /* 1700 MHz - N/A */ - { 0, 2 }, /* 1600 MHz - N/A */ - { 0, 2 }, /* 1500 MHz - N/A */ + { 0, 2 }, /* 1700 MHz */ + { 0, 2 }, /* 1600 MHz */ + { 0, 2 }, /* 1500 MHz */ { 0, 2 }, /* 1400 MHz */ { 0, 2 }, /* 1300 MHz */ { 0, 2 }, /* 1200 MHz */ @@ -106,10 +106,10 @@ static unsigned int clkdiv_cpu1_5250[CPUFREQ_LEVEL_END][2] = { }; static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = { - (0), /* 1700 MHz - N/A */ - (0), /* 1600 MHz - N/A */ - (0), /* 1500 MHz - N/A */ - (0), /* 1400 MHz */ + ((425 << 16) | (6 << 8) | 0), /* 1700 MHz */ + ((200 << 16) | (3 << 8) | 0), /* 1600 MHz */ + ((250 << 16) | (4 << 8) | 0), /* 1500 MHz */ + ((175 << 16) | (3 << 8) | 0), /* 1400 MHz */ ((325 << 16) | (6 << 8) | 0), /* 1300 MHz */ ((200 << 16) | (4 << 8) | 0), /* 1200 MHz */ ((275 << 16) | (6 << 8) | 0), /* 1100 MHz */ @@ -126,9 +126,10 @@ static unsigned int exynos5_apll_pms_table[CPUFREQ_LEVEL_END] = { /* ASV group voltage table */ static const unsigned int asv_voltage_5250[CPUFREQ_LEVEL_END] = { - 0, 0, 0, 0, 0, 0, 0, /* 1700 MHz ~ 1100 MHz Not supported */ - 1175000, 1125000, 1075000, 1050000, 1000000, - 950000, 925000, 925000, 900000 + 1300000, 1250000, 1225000, 1200000, 1150000, + 1125000, 1100000, 1075000, 1050000, 1025000, + 1012500, 1000000, 975000, 950000, 937500, + 925000 }; static void set_clkdiv(unsigned int div_index) @@ -248,15 +249,7 @@ static void __init set_volt_table(void) { unsigned int i; - exynos5250_freq_table[L0].frequency = CPUFREQ_ENTRY_INVALID; - exynos5250_freq_table[L1].frequency = CPUFREQ_ENTRY_INVALID; - exynos5250_freq_table[L2].frequency = CPUFREQ_ENTRY_INVALID; - exynos5250_freq_table[L3].frequency = CPUFREQ_ENTRY_INVALID; - exynos5250_freq_table[L4].frequency = CPUFREQ_ENTRY_INVALID; - exynos5250_freq_table[L5].frequency = CPUFREQ_ENTRY_INVALID; - exynos5250_freq_table[L6].frequency = CPUFREQ_ENTRY_INVALID; - - max_support_idx = L7; + max_support_idx = L0; for (i = 0 ; i < CPUFREQ_LEVEL_END ; i++) exynos5250_volt_table[i] = asv_voltage_5250[i]; diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index fdffa1beca17..409b92b8d346 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -7,7 +7,7 @@ menuconfig EDAC bool "EDAC (Error Detection And Correction) reporting" depends on HAS_IOMEM - depends on X86 || PPC || TILE + depends on X86 || PPC || TILE || ARM help EDAC is designed to report errors in the core system. These are low-level errors that are reported in the CPU or @@ -31,6 +31,14 @@ if EDAC comment "Reporting subsystems" +config EDAC_LEGACY_SYSFS + bool "EDAC legacy sysfs" + default y + help + Enable the compatibility sysfs nodes. + Use 'Y' if your edac utilities aren't ported to work with the newer + structures. 
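A note on the powerpc DMA changes at the top of this group of hunks: with dma_direct_mmap_coherent() filling the .mmap slot of dma_direct_ops, dma_iommu_ops, swiotlb_dma_ops and vio_dma_mapping_ops, drivers no longer need the arch-private dma_mmap_coherent() prototype that the first hunk removes; the generic dma_mmap_coherent()/dma_mmap_attrs() wrappers reach the same code through ops->mmap. A minimal sketch of a character-device mmap handler built on that path, assuming the buffer came from dma_alloc_coherent() (my_drv and its field names are illustrative only, not part of this series):

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver context; names are illustrative only. */
struct my_drv {
	struct device *dev;	/* the DMA-capable device */
	void *buf_cpu;		/* CPU address from dma_alloc_coherent() */
	dma_addr_t buf_dma;	/* bus address from dma_alloc_coherent() */
	size_t buf_size;
};

static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_drv *drv = file->private_data;

	/*
	 * dma_mmap_coherent() dispatches to ops->mmap, which on these
	 * powerpc configurations is now dma_direct_mmap_coherent().
	 */
	return dma_mmap_coherent(drv->dev, vma, drv->buf_cpu,
				 drv->buf_dma, drv->buf_size);
}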
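On the exynos5250-cpufreq hunk above: the 1700 to 1400 MHz rows gain real APLL PMS encodings in place of the old zero placeholders, and the ASV voltage table is extended to cover all sixteen levels. Assuming the usual Samsung PLL relation FOUT = MDIV * FIN / (PDIV * 2^SDIV) with a 24 MHz FIN (an assumption; the patch itself does not state the input clock), the new entries all land on their nominal frequencies. A quick standalone check:

#include <stdio.h>

/* Sanity check of the new APLL PMS encodings, assuming
 * FOUT = MDIV * FIN / (PDIV << SDIV) with FIN = 24 MHz. */
static unsigned long apll_fout_khz(unsigned int pms)
{
	unsigned int mdiv = (pms >> 16) & 0x3ff;
	unsigned int pdiv = (pms >> 8) & 0x3f;
	unsigned int sdiv = pms & 0x7;

	return 24000UL * mdiv / (pdiv << sdiv);
}

int main(void)
{
	/* 1700 MHz row: ((425 << 16) | (6 << 8) | 0) -> 1700000 kHz */
	printf("%lu kHz\n", apll_fout_khz((425 << 16) | (6 << 8) | 0));
	/* 1400 MHz row: ((175 << 16) | (3 << 8) | 0) -> 1400000 kHz */
	printf("%lu kHz\n", apll_fout_khz((175 << 16) | (3 << 8) | 0));
	return 0;
}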
+ config EDAC_DEBUG bool "Debugging" help @@ -294,4 +302,18 @@ config EDAC_TILE Support for error detection and correction on the Tilera memory controller. +config EDAC_HIGHBANK_MC + tristate "Highbank Memory Controller" + depends on EDAC_MM_EDAC && ARCH_HIGHBANK + help + Support for error detection and correction on the + Calxeda Highbank memory controller. + +config EDAC_HIGHBANK_L2 + tristate "Highbank L2 Cache" + depends on EDAC_MM_EDAC && ARCH_HIGHBANK + help + Support for error detection and correction on the + Calxeda Highbank memory controller. + endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 196a63dd37c5..7e5129a733f8 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -55,3 +55,6 @@ obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o obj-$(CONFIG_EDAC_TILE) += tile_edac.o + +obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o +obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 7be9b7288e90..5a297a26211d 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -321,8 +321,8 @@ found: return edac_mc_find((int)node_id); err_no_match: - debugf2("sys_addr 0x%lx doesn't match any node\n", - (unsigned long)sys_addr); + edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n", + (unsigned long)sys_addr); return NULL; } @@ -393,15 +393,15 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) mask = ~mask; if ((input_addr & mask) == (base & mask)) { - debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", - (unsigned long)input_addr, csrow, - pvt->mc_node_id); + edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n", + (unsigned long)input_addr, csrow, + pvt->mc_node_id); return csrow; } } - debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", - (unsigned long)input_addr, pvt->mc_node_id); + edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n", + (unsigned long)input_addr, pvt->mc_node_id); return -1; } @@ -430,20 +430,20 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, /* only revE and later have the DRAM Hole Address Register */ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) { - debugf1(" revision %d for node %d does not support DHAR\n", - pvt->ext_model, pvt->mc_node_id); + edac_dbg(1, " revision %d for node %d does not support DHAR\n", + pvt->ext_model, pvt->mc_node_id); return 1; } /* valid for Fam10h and above */ if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) { - debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); + edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n"); return 1; } if (!dhar_valid(pvt)) { - debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", - pvt->mc_node_id); + edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n", + pvt->mc_node_id); return 1; } @@ -475,9 +475,9 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, else *hole_offset = k8_dhar_offset(pvt); - debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", - pvt->mc_node_id, (unsigned long)*hole_base, - (unsigned long)*hole_offset, (unsigned long)*hole_size); + edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", + pvt->mc_node_id, (unsigned long)*hole_base, + (unsigned long)*hole_offset, (unsigned long)*hole_size); return 0; } @@ -528,10 +528,9 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) /* use DHAR to 
translate SysAddr to DramAddr */ dram_addr = sys_addr - hole_offset; - debugf2("using DHAR to translate SysAddr 0x%lx to " - "DramAddr 0x%lx\n", - (unsigned long)sys_addr, - (unsigned long)dram_addr); + edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n", + (unsigned long)sys_addr, + (unsigned long)dram_addr); return dram_addr; } @@ -548,9 +547,8 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) */ dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base; - debugf2("using DRAM Base register to translate SysAddr 0x%lx to " - "DramAddr 0x%lx\n", (unsigned long)sys_addr, - (unsigned long)dram_addr); + edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n", + (unsigned long)sys_addr, (unsigned long)dram_addr); return dram_addr; } @@ -586,9 +584,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) + (dram_addr & 0xfff); - debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", - intlv_shift, (unsigned long)dram_addr, - (unsigned long)input_addr); + edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", + intlv_shift, (unsigned long)dram_addr, + (unsigned long)input_addr); return input_addr; } @@ -604,8 +602,8 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) input_addr = dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr)); - debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n", - (unsigned long)sys_addr, (unsigned long)input_addr); + edac_dbg(2, "SysAdddr 0x%lx translates to InputAddr 0x%lx\n", + (unsigned long)sys_addr, (unsigned long)input_addr); return input_addr; } @@ -637,8 +635,8 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); if (intlv_shift == 0) { - debugf1(" InputAddr 0x%lx translates to DramAddr of " - "same value\n", (unsigned long)input_addr); + edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n", + (unsigned long)input_addr); return input_addr; } @@ -649,9 +647,9 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1); dram_addr = bits + (intlv_sel << 12); - debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " - "(%d node interleave bits)\n", (unsigned long)input_addr, - (unsigned long)dram_addr, intlv_shift); + edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n", + (unsigned long)input_addr, + (unsigned long)dram_addr, intlv_shift); return dram_addr; } @@ -673,9 +671,9 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) (dram_addr < (hole_base + hole_size))) { sys_addr = dram_addr + hole_offset; - debugf1("using DHAR to translate DramAddr 0x%lx to " - "SysAddr 0x%lx\n", (unsigned long)dram_addr, - (unsigned long)sys_addr); + edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n", + (unsigned long)dram_addr, + (unsigned long)sys_addr); return sys_addr; } @@ -697,9 +695,9 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) */ sys_addr |= ~((sys_addr & (1ull << 39)) - 1); - debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", - pvt->mc_node_id, (unsigned long)dram_addr, - (unsigned long)sys_addr); + edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n", + pvt->mc_node_id, (unsigned long)dram_addr, + (unsigned long)sys_addr); return sys_addr; 
} @@ -768,49 +766,48 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8); static void amd64_dump_dramcfg_low(u32 dclr, int chan) { - debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); + edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr); - debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n", - (dclr & BIT(16)) ? "un" : "", - (dclr & BIT(19)) ? "yes" : "no"); + edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n", + (dclr & BIT(16)) ? "un" : "", + (dclr & BIT(19)) ? "yes" : "no"); - debugf1(" PAR/ERR parity: %s\n", - (dclr & BIT(8)) ? "enabled" : "disabled"); + edac_dbg(1, " PAR/ERR parity: %s\n", + (dclr & BIT(8)) ? "enabled" : "disabled"); if (boot_cpu_data.x86 == 0x10) - debugf1(" DCT 128bit mode width: %s\n", - (dclr & BIT(11)) ? "128b" : "64b"); + edac_dbg(1, " DCT 128bit mode width: %s\n", + (dclr & BIT(11)) ? "128b" : "64b"); - debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", - (dclr & BIT(12)) ? "yes" : "no", - (dclr & BIT(13)) ? "yes" : "no", - (dclr & BIT(14)) ? "yes" : "no", - (dclr & BIT(15)) ? "yes" : "no"); + edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", + (dclr & BIT(12)) ? "yes" : "no", + (dclr & BIT(13)) ? "yes" : "no", + (dclr & BIT(14)) ? "yes" : "no", + (dclr & BIT(15)) ? "yes" : "no"); } /* Display and decode various NB registers for debug purposes. */ static void dump_misc_regs(struct amd64_pvt *pvt) { - debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); + edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); - debugf1(" NB two channel DRAM capable: %s\n", - (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); + edac_dbg(1, " NB two channel DRAM capable: %s\n", + (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); - debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", - (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", - (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); + edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n", + (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", + (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); amd64_dump_dramcfg_low(pvt->dclr0, 0); - debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); + edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare); - debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " - "offset: 0x%08x\n", - pvt->dhar, dhar_base(pvt), - (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt) - : f10_dhar_offset(pvt)); + edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n", + pvt->dhar, dhar_base(pvt), + (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt) + : f10_dhar_offset(pvt)); - debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); + edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? 
"yes" : "no"); amd64_debug_display_dimm_sizes(pvt, 0); @@ -857,15 +854,15 @@ static void read_dct_base_mask(struct amd64_pvt *pvt) u32 *base1 = &pvt->csels[1].csbases[cs]; if (!amd64_read_dct_pci_cfg(pvt, reg0, base0)) - debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", - cs, *base0, reg0); + edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n", + cs, *base0, reg0); if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) continue; if (!amd64_read_dct_pci_cfg(pvt, reg1, base1)) - debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", - cs, *base1, reg1); + edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n", + cs, *base1, reg1); } for_each_chip_select_mask(cs, 0, pvt) { @@ -875,15 +872,15 @@ static void read_dct_base_mask(struct amd64_pvt *pvt) u32 *mask1 = &pvt->csels[1].csmasks[cs]; if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0)) - debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", - cs, *mask0, reg0); + edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n", + cs, *mask0, reg0); if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) continue; if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1)) - debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", - cs, *mask1, reg1); + edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n", + cs, *mask1, reg1); } } @@ -1049,24 +1046,22 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, if (!src_mci) { amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n", (unsigned long)sys_addr); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, offset, syndrome, -1, -1, -1, - EDAC_MOD_STR, "failed to map error addr to a node", - NULL); + ""); return; } /* Now map the sys_addr to a CSROW */ csrow = sys_addr_to_csrow(src_mci, sys_addr); if (csrow < 0) { - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, offset, syndrome, -1, -1, -1, - EDAC_MOD_STR, "failed to map error addr to a csrow", - NULL); + ""); return; } @@ -1082,12 +1077,11 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - " "possible error reporting race\n", syndrome); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, offset, syndrome, csrow, -1, -1, - EDAC_MOD_STR, "unknown syndrome - possible error reporting race", - NULL); + ""); return; } } else { @@ -1102,10 +1096,10 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, channel = ((sys_addr & BIT(3)) != 0); } - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci, 1, page, offset, syndrome, csrow, channel, -1, - EDAC_MOD_STR, "", NULL); + "", ""); } static int ddr2_cs_size(unsigned i, bool dct_width) @@ -1193,7 +1187,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt) * Need to check DCT0[0] and DCT1[0] to see if only one of them has * their CSEnable bit on. If so, then SINGLE DIMM case. 
*/ - debugf0("Data width is not 128 bits - need more decoding\n"); + edac_dbg(0, "Data width is not 128 bits - need more decoding\n"); /* * Check DRAM Bank Address Mapping values for each DIMM to see if there @@ -1272,25 +1266,24 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt) return; if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { - debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", - pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); + edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", + pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); - debugf0(" DCTs operate in %s mode.\n", - (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); + edac_dbg(0, " DCTs operate in %s mode\n", + (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); if (!dct_ganging_enabled(pvt)) - debugf0(" Address range split per DCT: %s\n", - (dct_high_range_enabled(pvt) ? "yes" : "no")); + edac_dbg(0, " Address range split per DCT: %s\n", + (dct_high_range_enabled(pvt) ? "yes" : "no")); - debugf0(" data interleave for ECC: %s, " - "DRAM cleared since last warm reset: %s\n", - (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), - (dct_memory_cleared(pvt) ? "yes" : "no")); + edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n", + (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), + (dct_memory_cleared(pvt) ? "yes" : "no")); - debugf0(" channel interleave: %s, " - "interleave bits selector: 0x%x\n", - (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), - dct_sel_interleave_addr(pvt)); + edac_dbg(0, " channel interleave: %s, " + "interleave bits selector: 0x%x\n", + (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), + dct_sel_interleave_addr(pvt)); } amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi); @@ -1428,7 +1421,7 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct) pvt = mci->pvt_info; - debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct); + edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct); for_each_chip_select(csrow, dct, pvt) { if (!csrow_enabled(csrow, dct, pvt)) @@ -1436,19 +1429,18 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct) get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); - debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", - csrow, cs_base, cs_mask); + edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", + csrow, cs_base, cs_mask); cs_mask = ~cs_mask; - debugf1(" (InputAddr & ~CSMask)=0x%llx " - "(CSBase & ~CSMask)=0x%llx\n", - (in_addr & cs_mask), (cs_base & cs_mask)); + edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n", + (in_addr & cs_mask), (cs_base & cs_mask)); if ((in_addr & cs_mask) == (cs_base & cs_mask)) { cs_found = f10_process_possible_spare(pvt, dct, csrow); - debugf1(" MATCH csrow=%d\n", cs_found); + edac_dbg(1, " MATCH csrow=%d\n", cs_found); break; } } @@ -1505,8 +1497,8 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, u8 intlv_en = dram_intlv_en(pvt, range); u32 intlv_sel = dram_intlv_sel(pvt, range); - debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", - range, sys_addr, get_dram_limit(pvt, range)); + edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", + range, sys_addr, get_dram_limit(pvt, range)); if (dhar_valid(pvt) && dhar_base(pvt) <= sys_addr && @@ -1562,7 +1554,7 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, (chan_addr & 0xfff); } - debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr); + edac_dbg(1, " Normalized DCT addr: 0x%llx\n", 
chan_addr); cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); @@ -1616,12 +1608,11 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); if (csrow < 0) { - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, offset, syndrome, -1, -1, -1, - EDAC_MOD_STR, "failed to map error addr to a csrow", - NULL); + ""); return; } @@ -1633,10 +1624,10 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, if (dct_ganging_enabled(pvt)) chan = get_channel_from_ecc_syndrome(mci, syndrome); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, offset, syndrome, csrow, chan, -1, - EDAC_MOD_STR, "", NULL); + "", ""); } /* @@ -1664,7 +1655,8 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases : pvt->csels[0].csbases; - debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); + edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", + ctrl, dbam); edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl); @@ -1840,7 +1832,7 @@ static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs, } } - debugf0("syndrome(%x) not found\n", syndrome); + edac_dbg(0, "syndrome(%x) not found\n", syndrome); return -1; } @@ -1917,12 +1909,11 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m) /* Ensure that the Error Address is VALID */ if (!(m->status & MCI_STATUS_ADDRV)) { amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, - EDAC_MOD_STR, "HW has no ERROR_ADDRESS available", - NULL); + ""); return; } @@ -1946,12 +1937,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m) if (!(m->status & MCI_STATUS_ADDRV)) { amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, - EDAC_MOD_STR, "HW has no ERROR_ADDRESS available", - NULL); + ""); return; } @@ -1966,11 +1956,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m) if (!src_mci) { amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n", (unsigned long)sys_addr); - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, page, offset, 0, -1, -1, -1, - EDAC_MOD_STR, - "ERROR ADDRESS NOT mapped to a MC", NULL); + "ERROR ADDRESS NOT mapped to a MC", + ""); return; } @@ -1980,17 +1970,16 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m) if (csrow < 0) { amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n", (unsigned long)sys_addr); - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, page, offset, 0, -1, -1, -1, - EDAC_MOD_STR, "ERROR ADDRESS NOT mapped to CS", - NULL); + ""); } else { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, page, offset, 0, csrow, -1, -1, - EDAC_MOD_STR, "", NULL); + "", ""); } } @@ -2047,9 +2036,9 @@ static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id) return -ENODEV; } - debugf1("F1: %s\n", pci_name(pvt->F1)); - debugf1("F2: 
%s\n", pci_name(pvt->F2)); - debugf1("F3: %s\n", pci_name(pvt->F3)); + edac_dbg(1, "F1: %s\n", pci_name(pvt->F1)); + edac_dbg(1, "F2: %s\n", pci_name(pvt->F2)); + edac_dbg(1, "F3: %s\n", pci_name(pvt->F3)); return 0; } @@ -2076,15 +2065,15 @@ static void read_mc_regs(struct amd64_pvt *pvt) * those are Read-As-Zero */ rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem); - debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem); + edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem); /* check first whether TOP_MEM2 is enabled */ rdmsrl(MSR_K8_SYSCFG, msr_val); if (msr_val & (1U << 21)) { rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2); - debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2); + edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2); } else - debugf0(" TOP_MEM2 disabled.\n"); + edac_dbg(0, " TOP_MEM2 disabled\n"); amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); @@ -2100,17 +2089,17 @@ static void read_mc_regs(struct amd64_pvt *pvt) if (!rw) continue; - debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", - range, - get_dram_base(pvt, range), - get_dram_limit(pvt, range)); + edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", + range, + get_dram_base(pvt, range), + get_dram_limit(pvt, range)); - debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", - dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", - (rw & 0x1) ? "R" : "-", - (rw & 0x2) ? "W" : "-", - dram_intlv_sel(pvt, range), - dram_dst_node(pvt, range)); + edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", + dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", + (rw & 0x1) ? "R" : "-", + (rw & 0x2) ? "W" : "-", + dram_intlv_sel(pvt, range), + dram_dst_node(pvt, range)); } read_dct_base_mask(pvt); @@ -2191,9 +2180,9 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); - debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); - debugf0(" nr_pages/channel= %u channel-count = %d\n", - nr_pages, pvt->channel_count); + edac_dbg(0, " (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); + edac_dbg(0, " nr_pages/channel= %u channel-count = %d\n", + nr_pages, pvt->channel_count); return nr_pages; } @@ -2205,6 +2194,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) static int init_csrows(struct mem_ctl_info *mci) { struct csrow_info *csrow; + struct dimm_info *dimm; struct amd64_pvt *pvt = mci->pvt_info; u64 base, mask; u32 val; @@ -2217,22 +2207,19 @@ static int init_csrows(struct mem_ctl_info *mci) pvt->nbcfg = val; - debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", - pvt->mc_node_id, val, - !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); + edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", + pvt->mc_node_id, val, + !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); for_each_chip_select(i, 0, pvt) { - csrow = &mci->csrows[i]; + csrow = mci->csrows[i]; if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) { - debugf1("----CSROW %d EMPTY for node %d\n", i, - pvt->mc_node_id); + edac_dbg(1, "----CSROW %d VALID for MC node %d\n", + i, pvt->mc_node_id); continue; } - debugf1("----CSROW %d VALID for MC node %d\n", - i, pvt->mc_node_id); - empty = 0; if (csrow_enabled(i, 0, pvt)) nr_pages = amd64_csrow_nr_pages(pvt, 0, i); @@ -2244,8 +2231,9 @@ static int init_csrows(struct mem_ctl_info *mci) mtype = amd64_determine_memory_type(pvt, i); - debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); - debugf1(" nr_pages: 
%u\n", nr_pages * pvt->channel_count); + edac_dbg(1, " for MC node %d csrow %d:\n", pvt->mc_node_id, i); + edac_dbg(1, " nr_pages: %u\n", + nr_pages * pvt->channel_count); /* * determine whether CHIPKILL or JUST ECC or NO ECC is operating @@ -2257,9 +2245,10 @@ static int init_csrows(struct mem_ctl_info *mci) edac_mode = EDAC_NONE; for (j = 0; j < pvt->channel_count; j++) { - csrow->channels[j].dimm->mtype = mtype; - csrow->channels[j].dimm->edac_mode = edac_mode; - csrow->channels[j].dimm->nr_pages = nr_pages; + dimm = csrow->channels[j]->dimm; + dimm->mtype = mtype; + dimm->edac_mode = edac_mode; + dimm->nr_pages = nr_pages; } } @@ -2296,9 +2285,9 @@ static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid) struct msr *reg = per_cpu_ptr(msrs, cpu); nbe = reg->l & MSR_MCGCTL_NBE; - debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", - cpu, reg->q, - (nbe ? "enabled" : "disabled")); + edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", + cpu, reg->q, + (nbe ? "enabled" : "disabled")); if (!nbe) goto out; @@ -2369,8 +2358,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, amd64_read_pci_cfg(F3, NBCFG, &value); - debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", - nid, value, !!(value & NBCFG_ECC_ENABLE)); + edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", + nid, value, !!(value & NBCFG_ECC_ENABLE)); if (!(value & NBCFG_ECC_ENABLE)) { amd64_warn("DRAM ECC disabled on this node, enabling...\n"); @@ -2394,8 +2383,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, s->flags.nb_ecc_prev = 1; } - debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", - nid, value, !!(value & NBCFG_ECC_ENABLE)); + edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", + nid, value, !!(value & NBCFG_ECC_ENABLE)); return ret; } @@ -2463,26 +2452,29 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid) return true; } -struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) + - ARRAY_SIZE(amd64_inj_attrs) + - 1]; - -struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } }; - -static void set_mc_sysfs_attrs(struct mem_ctl_info *mci) +static int set_mc_sysfs_attrs(struct mem_ctl_info *mci) { - unsigned int i = 0, j = 0; + int rc; - for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++) - sysfs_attrs[i] = amd64_dbg_attrs[i]; + rc = amd64_create_sysfs_dbg_files(mci); + if (rc < 0) + return rc; - if (boot_cpu_data.x86 >= 0x10) - for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++) - sysfs_attrs[i] = amd64_inj_attrs[j]; + if (boot_cpu_data.x86 >= 0x10) { + rc = amd64_create_sysfs_inject_files(mci); + if (rc < 0) + return rc; + } + + return 0; +} - sysfs_attrs[i] = terminator; +static void del_mc_sysfs_attrs(struct mem_ctl_info *mci) +{ + amd64_remove_sysfs_dbg_files(mci); - mci->mc_driver_sysfs_attributes = sysfs_attrs; + if (boot_cpu_data.x86 >= 0x10) + amd64_remove_sysfs_inject_files(mci); } static void setup_mci_misc_attrs(struct mem_ctl_info *mci, @@ -2601,20 +2593,22 @@ static int amd64_init_one_instance(struct pci_dev *F2) goto err_siblings; mci->pvt_info = pvt; - mci->dev = &pvt->F2->dev; + mci->pdev = &pvt->F2->dev; setup_mci_misc_attrs(mci, fam_type); if (init_csrows(mci)) mci->edac_cap = EDAC_FLAG_NONE; - set_mc_sysfs_attrs(mci); - ret = -ENODEV; if (edac_mc_add_mc(mci)) { - debugf1("failed edac_mc_add_mc()\n"); + edac_dbg(1, "failed edac_mc_add_mc()\n"); goto err_add_mc; } + if (set_mc_sysfs_attrs(mci)) { + edac_dbg(1, "failed edac_mc_add_mc()\n"); + goto err_add_sysfs; + } /* register stuff with EDAC MCE */ if 
(report_gart_errors) @@ -2628,6 +2622,8 @@ static int amd64_init_one_instance(struct pci_dev *F2) return 0; +err_add_sysfs: + edac_mc_del_mc(mci->pdev); err_add_mc: edac_mc_free(mci); @@ -2651,7 +2647,7 @@ static int __devinit amd64_probe_one_instance(struct pci_dev *pdev, ret = pci_enable_device(pdev); if (ret < 0) { - debugf0("ret=%d\n", ret); + edac_dbg(0, "ret=%d\n", ret); return -EIO; } @@ -2698,6 +2694,8 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev) struct pci_dev *F3 = node_to_amd_nb(nid)->misc; struct ecc_settings *s = ecc_stngs[nid]; + mci = find_mci_by_dev(&pdev->dev); + del_mc_sysfs_attrs(mci); /* Remove from EDAC CORE tracking list */ mci = edac_mc_del_mc(&pdev->dev); if (!mci) diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 9a666cb985b2..8d4804732bac 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -413,20 +413,33 @@ struct ecc_settings { }; #ifdef CONFIG_EDAC_DEBUG -#define NUM_DBG_ATTRS 5 +int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci); +void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci); + #else -#define NUM_DBG_ATTRS 0 +static inline int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci) +{ + return 0; +} +static void inline amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci) +{ +} #endif #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION -#define NUM_INJ_ATTRS 5 +int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci); +void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci); + #else -#define NUM_INJ_ATTRS 0 +static inline int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci) +{ + return 0; +} +static inline void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci) +{ +} #endif -extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS], - amd64_inj_attrs[NUM_INJ_ATTRS]; - /* * Each of the PCI Device IDs types have their own set of hardware accessor * functions and per device encoding/decoding logic. 
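The amd64_edac.h hunk above replaces the NUM_DBG_ATTRS/NUM_INJ_ATTRS attribute arrays with create/remove entry points that amd64_init_one_instance() now calls only after edac_mc_add_mc() has succeeded, pairing them with del_mc_sysfs_attrs() on the error and removal paths. Under the new scheme each file is a plain struct device_attribute hung off mci->dev, and the show/store callbacks recover the mem_ctl_info through the container_of()-based to_mci() helper added in the following hunk. A minimal sketch of one such read-only file in the same style (the attribute name and show function here are hypothetical; the register printed is pvt->nbcap):

/* Hypothetical attribute mirroring the dbg/inject conversion pattern. */
static ssize_t amd64_example_show(struct device *dev,
				  struct device_attribute *mattr, char *buf)
{
	struct mem_ctl_info *mci = to_mci(dev);
	struct amd64_pvt *pvt = mci->pvt_info;

	return sprintf(buf, "0x%08x\n", pvt->nbcap);
}
static DEVICE_ATTR(example_reg, S_IRUGO, amd64_example_show, NULL);

/* Created and torn down from set_mc_sysfs_attrs()/del_mc_sysfs_attrs():
 *	device_create_file(&mci->dev, &dev_attr_example_reg);
 *	device_remove_file(&mci->dev, &dev_attr_example_reg);
 */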
@@ -460,3 +473,5 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, u64 *hole_offset, u64 *hole_size); + +#define to_mci(k) container_of(k, struct mem_ctl_info, dev) diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c index e3562288f4ce..2c1bbf740605 100644 --- a/drivers/edac/amd64_edac_dbg.c +++ b/drivers/edac/amd64_edac_dbg.c @@ -1,8 +1,11 @@ #include "amd64_edac.h" #define EDAC_DCT_ATTR_SHOW(reg) \ -static ssize_t amd64_##reg##_show(struct mem_ctl_info *mci, char *data) \ +static ssize_t amd64_##reg##_show(struct device *dev, \ + struct device_attribute *mattr, \ + char *data) \ { \ + struct mem_ctl_info *mci = to_mci(dev); \ struct amd64_pvt *pvt = mci->pvt_info; \ return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \ } @@ -12,8 +15,12 @@ EDAC_DCT_ATTR_SHOW(dbam0); EDAC_DCT_ATTR_SHOW(top_mem); EDAC_DCT_ATTR_SHOW(top_mem2); -static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data) +static ssize_t amd64_hole_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); + u64 hole_base = 0; u64 hole_offset = 0; u64 hole_size = 0; @@ -27,46 +34,40 @@ static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data) /* * update NUM_DBG_ATTRS in case you add new members */ -struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { +static DEVICE_ATTR(dhar, S_IRUGO, amd64_dhar_show, NULL); +static DEVICE_ATTR(dbam, S_IRUGO, amd64_dbam0_show, NULL); +static DEVICE_ATTR(topmem, S_IRUGO, amd64_top_mem_show, NULL); +static DEVICE_ATTR(topmem2, S_IRUGO, amd64_top_mem2_show, NULL); +static DEVICE_ATTR(dram_hole, S_IRUGO, amd64_hole_show, NULL); + +int amd64_create_sysfs_dbg_files(struct mem_ctl_info *mci) +{ + int rc; + + rc = device_create_file(&mci->dev, &dev_attr_dhar); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_dbam); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_topmem); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_topmem2); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_dram_hole); + if (rc < 0) + return rc; - { - .attr = { - .name = "dhar", - .mode = (S_IRUGO) - }, - .show = amd64_dhar_show, - .store = NULL, - }, - { - .attr = { - .name = "dbam", - .mode = (S_IRUGO) - }, - .show = amd64_dbam0_show, - .store = NULL, - }, - { - .attr = { - .name = "topmem", - .mode = (S_IRUGO) - }, - .show = amd64_top_mem_show, - .store = NULL, - }, - { - .attr = { - .name = "topmem2", - .mode = (S_IRUGO) - }, - .show = amd64_top_mem2_show, - .store = NULL, - }, - { - .attr = { - .name = "dram_hole", - .mode = (S_IRUGO) - }, - .show = amd64_hole_show, - .store = NULL, - }, -}; + return 0; +} + +void amd64_remove_sysfs_dbg_files(struct mem_ctl_info *mci) +{ + device_remove_file(&mci->dev, &dev_attr_dhar); + device_remove_file(&mci->dev, &dev_attr_dbam); + device_remove_file(&mci->dev, &dev_attr_topmem); + device_remove_file(&mci->dev, &dev_attr_topmem2); + device_remove_file(&mci->dev, &dev_attr_dram_hole); +} diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c index 303f10e03dda..53d972e00dfb 100644 --- a/drivers/edac/amd64_edac_inj.c +++ b/drivers/edac/amd64_edac_inj.c @@ -1,7 +1,10 @@ #include "amd64_edac.h" -static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf) +static ssize_t amd64_inject_section_show(struct device *dev, + struct device_attribute *mattr, + char *buf) { + 
struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; return sprintf(buf, "0x%x\n", pvt->injection.section); } @@ -12,9 +15,11 @@ static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf) * * range: 0..3 */ -static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci, +static ssize_t amd64_inject_section_store(struct device *dev, + struct device_attribute *mattr, const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; unsigned long value; int ret = 0; @@ -33,8 +38,11 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci, return ret; } -static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf) +static ssize_t amd64_inject_word_show(struct device *dev, + struct device_attribute *mattr, + char *buf) { + struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; return sprintf(buf, "0x%x\n", pvt->injection.word); } @@ -45,9 +53,11 @@ static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf) * * range: 0..8 */ -static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci, - const char *data, size_t count) +static ssize_t amd64_inject_word_store(struct device *dev, + struct device_attribute *mattr, + const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; unsigned long value; int ret = 0; @@ -66,8 +76,11 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci, return ret; } -static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf) +static ssize_t amd64_inject_ecc_vector_show(struct device *dev, + struct device_attribute *mattr, + char *buf) { + struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; return sprintf(buf, "0x%x\n", pvt->injection.bit_map); } @@ -77,9 +90,11 @@ static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf) * corresponding bit within the error injection word above. When used during a * DRAM ECC read, it holds the contents of the of the DRAM ECC bits. */ -static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci, - const char *data, size_t count) +static ssize_t amd64_inject_ecc_vector_store(struct device *dev, + struct device_attribute *mattr, + const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; unsigned long value; int ret = 0; @@ -103,9 +118,11 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci, * Do a DRAM ECC read. Assemble staged values in the pvt area, format into * fields needed by the injection registers and read the NB Array Data Port. */ -static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci, - const char *data, size_t count) +static ssize_t amd64_inject_read_store(struct device *dev, + struct device_attribute *mattr, + const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; unsigned long value; u32 section, word_bits; @@ -125,7 +142,8 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci, /* Issue 'word' and 'bit' along with the READ request */ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); - debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); + edac_dbg(0, "section=0x%x word_bits=0x%x\n", + section, word_bits); return count; } @@ -136,9 +154,11 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci, * Do a DRAM ECC write. 
Assemble staged values in the pvt area and format into * fields needed by the injection registers. */ -static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci, +static ssize_t amd64_inject_write_store(struct device *dev, + struct device_attribute *mattr, const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); struct amd64_pvt *pvt = mci->pvt_info; unsigned long value; u32 section, word_bits; @@ -158,7 +178,8 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci, /* Issue 'word' and 'bit' along with the READ request */ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); - debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); + edac_dbg(0, "section=0x%x word_bits=0x%x\n", + section, word_bits); return count; } @@ -168,46 +189,47 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci, /* * update NUM_INJ_ATTRS in case you add new members */ -struct mcidev_sysfs_attribute amd64_inj_attrs[] = { - - { - .attr = { - .name = "inject_section", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = amd64_inject_section_show, - .store = amd64_inject_section_store, - }, - { - .attr = { - .name = "inject_word", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = amd64_inject_word_show, - .store = amd64_inject_word_store, - }, - { - .attr = { - .name = "inject_ecc_vector", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = amd64_inject_ecc_vector_show, - .store = amd64_inject_ecc_vector_store, - }, - { - .attr = { - .name = "inject_write", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = NULL, - .store = amd64_inject_write_store, - }, - { - .attr = { - .name = "inject_read", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = NULL, - .store = amd64_inject_read_store, - }, -}; + +static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR, + amd64_inject_section_show, amd64_inject_section_store); +static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR, + amd64_inject_word_show, amd64_inject_word_store); +static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR, + amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store); +static DEVICE_ATTR(inject_write, S_IRUGO | S_IWUSR, + NULL, amd64_inject_write_store); +static DEVICE_ATTR(inject_read, S_IRUGO | S_IWUSR, + NULL, amd64_inject_read_store); + + +int amd64_create_sysfs_inject_files(struct mem_ctl_info *mci) +{ + int rc; + + rc = device_create_file(&mci->dev, &dev_attr_inject_section); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_inject_word); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_inject_ecc_vector); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_inject_write); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_inject_read); + if (rc < 0) + return rc; + + return 0; +} + +void amd64_remove_sysfs_inject_files(struct mem_ctl_info *mci) +{ + device_remove_file(&mci->dev, &dev_attr_inject_section); + device_remove_file(&mci->dev, &dev_attr_inject_word); + device_remove_file(&mci->dev, &dev_attr_inject_ecc_vector); + device_remove_file(&mci->dev, &dev_attr_inject_write); + device_remove_file(&mci->dev, &dev_attr_inject_read); +} diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c index 9774d443fa57..29eeb68a200c 100644 --- a/drivers/edac/amd76x_edac.c +++ b/drivers/edac/amd76x_edac.c @@ -105,7 +105,7 @@ static void amd76x_get_error_info(struct mem_ctl_info *mci, { struct pci_dev *pdev; - pdev = to_pci_dev(mci->dev); + pdev = to_pci_dev(mci->pdev); pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, 
&info->ecc_mode_status); @@ -145,10 +145,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci, if (handle_errors) { row = (info->ecc_mode_status >> 4) & 0xf; - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, - mci->csrows[row].first_page, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, + mci->csrows[row]->first_page, 0, 0, row, 0, -1, - mci->ctl_name, "", NULL); + mci->ctl_name, ""); } } @@ -160,10 +160,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci, if (handle_errors) { row = info->ecc_mode_status & 0xf; - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, - mci->csrows[row].first_page, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, + mci->csrows[row]->first_page, 0, 0, row, 0, -1, - mci->ctl_name, "", NULL); + mci->ctl_name, ""); } } @@ -180,7 +180,7 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci, static void amd76x_check(struct mem_ctl_info *mci) { struct amd76x_error_info info; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); amd76x_get_error_info(mci, &info); amd76x_process_error_info(mci, &info, 1); } @@ -194,8 +194,8 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, int index; for (index = 0; index < mci->nr_csrows; index++) { - csrow = &mci->csrows[index]; - dimm = csrow->channels[0].dimm; + csrow = mci->csrows[index]; + dimm = csrow->channels[0]->dimm; /* find the DRAM Chip Select Base address and mask */ pci_read_config_dword(pdev, @@ -241,7 +241,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) u32 ems_mode; struct amd76x_error_info discard; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); ems_mode = (ems >> 10) & 0x3; @@ -256,8 +256,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) if (mci == NULL) return -ENOMEM; - debugf0("%s(): mci = %p\n", __func__, mci); - mci->dev = &pdev->dev; + edac_dbg(0, "mci = %p\n", mci); + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_RDDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; mci->edac_cap = ems_mode ? @@ -276,7 +276,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) * type of memory controller. The ID is therefore hardcoded to 0. 
*/ if (edac_mc_add_mc(mci)) { - debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "failed edac_mc_add_mc()\n"); goto fail; } @@ -292,7 +292,7 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) } /* get this far and it's successful */ - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); return 0; fail: @@ -304,7 +304,7 @@ fail: static int __devinit amd76x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* don't need to call pci_enable_device() */ return amd76x_probe1(pdev, ent->driver_data); @@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); if (amd76x_pci) edac_pci_release_generic_ctl(amd76x_pci); diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c index 69ee6aab5c71..a1bbd8edd257 100644 --- a/drivers/edac/cell_edac.c +++ b/drivers/edac/cell_edac.c @@ -33,10 +33,10 @@ struct cell_edac_priv static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) { struct cell_edac_priv *priv = mci->pvt_info; - struct csrow_info *csrow = &mci->csrows[0]; + struct csrow_info *csrow = mci->csrows[0]; unsigned long address, pfn, offset, syndrome; - dev_dbg(mci->dev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n", + dev_dbg(mci->pdev, "ECC CE err on node %d, channel %d, ar = 0x%016llx\n", priv->node, chan, ar); /* Address decoding is likely a bit bogus, to dbl check */ @@ -48,18 +48,18 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar) syndrome = (ar & 0x000000001fe00000ul) >> 21; /* TODO: Decoding of the error address */ - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, csrow->first_page + pfn, offset, syndrome, - 0, chan, -1, "", "", NULL); + 0, chan, -1, "", ""); } static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) { struct cell_edac_priv *priv = mci->pvt_info; - struct csrow_info *csrow = &mci->csrows[0]; + struct csrow_info *csrow = mci->csrows[0]; unsigned long address, pfn, offset; - dev_dbg(mci->dev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n", + dev_dbg(mci->pdev, "ECC UE err on node %d, channel %d, ar = 0x%016llx\n", priv->node, chan, ar); /* Address decoding is likely a bit bogus, to dbl check */ @@ -70,9 +70,9 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) offset = address & ~PAGE_MASK; /* TODO: Decoding of the error address */ - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, csrow->first_page + pfn, offset, 0, - 0, chan, -1, "", "", NULL); + 0, chan, -1, "", ""); } static void cell_edac_check(struct mem_ctl_info *mci) @@ -83,7 +83,7 @@ static void cell_edac_check(struct mem_ctl_info *mci) fir = in_be64(&priv->regs->mic_fir); #ifdef DEBUG if (fir != priv->prev_fir) { - dev_dbg(mci->dev, "fir change : 0x%016lx\n", fir); + dev_dbg(mci->pdev, "fir change : 0x%016lx\n", fir); priv->prev_fir = fir; } #endif @@ -119,14 +119,14 @@ static void cell_edac_check(struct mem_ctl_info *mci) mb(); /* sync up */ #ifdef DEBUG fir = in_be64(&priv->regs->mic_fir); - dev_dbg(mci->dev, "fir clear : 0x%016lx\n", fir); + dev_dbg(mci->pdev, "fir clear : 0x%016lx\n", fir); #endif } } static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci) { - struct csrow_info *csrow = &mci->csrows[0]; + struct csrow_info *csrow = mci->csrows[0]; struct 
dimm_info *dimm; struct cell_edac_priv *priv = mci->pvt_info; struct device_node *np; @@ -150,12 +150,12 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci) csrow->last_page = csrow->first_page + nr_pages - 1; for (j = 0; j < csrow->nr_channels; j++) { - dimm = csrow->channels[j].dimm; + dimm = csrow->channels[j]->dimm; dimm->mtype = MEM_XDR; dimm->edac_mode = EDAC_SECDED; dimm->nr_pages = nr_pages / csrow->nr_channels; } - dev_dbg(mci->dev, + dev_dbg(mci->pdev, "Initialized on node %d, chanmask=0x%x," " first_page=0x%lx, nr_pages=0x%x\n", priv->node, priv->chanmask, @@ -212,7 +212,7 @@ static int __devinit cell_edac_probe(struct platform_device *pdev) priv->regs = regs; priv->node = pdev->id; priv->chanmask = chanmask; - mci->dev = &pdev->dev; + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_XDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_EC | EDAC_FLAG_SECDED; diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c index e22030a9de66..c2ef13495873 100644 --- a/drivers/edac/cpc925_edac.c +++ b/drivers/edac/cpc925_edac.c @@ -316,13 +316,12 @@ static void get_total_mem(struct cpc925_mc_pdata *pdata) reg += aw; size = of_read_number(reg, sw); reg += sw; - debugf1("%s: start 0x%lx, size 0x%lx\n", __func__, - start, size); + edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size); pdata->total_mem += size; } while (reg < reg_end); of_node_put(np); - debugf0("%s: total_mem 0x%lx\n", __func__, pdata->total_mem); + edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem); } static void cpc925_init_csrows(struct mem_ctl_info *mci) @@ -330,8 +329,9 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci) struct cpc925_mc_pdata *pdata = mci->pvt_info; struct csrow_info *csrow; struct dimm_info *dimm; + enum dev_type dtype; int index, j; - u32 mbmr, mbbar, bba; + u32 mbmr, mbbar, bba, grain; unsigned long row_size, nr_pages, last_nr_pages = 0; get_total_mem(pdata); @@ -347,7 +347,7 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci) if (bba == 0) continue; /* not populated */ - csrow = &mci->csrows[index]; + csrow = mci->csrows[index]; row_size = bba * (1UL << 28); /* 256M */ csrow->first_page = last_nr_pages; @@ -355,37 +355,36 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci) csrow->last_page = csrow->first_page + nr_pages - 1; last_nr_pages = csrow->last_page + 1; + switch (csrow->nr_channels) { + case 1: /* Single channel */ + grain = 32; /* four-beat burst of 32 bytes */ + break; + case 2: /* Dual channel */ + default: + grain = 64; /* four-beat burst of 64 bytes */ + break; + } + switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { + case 6: /* 0110, no way to differentiate X8 VS X16 */ + case 5: /* 0101 */ + case 8: /* 1000 */ + dtype = DEV_X16; + break; + case 7: /* 0111 */ + case 9: /* 1001 */ + dtype = DEV_X8; + break; + default: + dtype = DEV_UNKNOWN; + break; + } for (j = 0; j < csrow->nr_channels; j++) { - dimm = csrow->channels[j].dimm; - + dimm = csrow->channels[j]->dimm; dimm->nr_pages = nr_pages / csrow->nr_channels; dimm->mtype = MEM_RDDR; dimm->edac_mode = EDAC_SECDED; - - switch (csrow->nr_channels) { - case 1: /* Single channel */ - dimm->grain = 32; /* four-beat burst of 32 bytes */ - break; - case 2: /* Dual channel */ - default: - dimm->grain = 64; /* four-beat burst of 64 bytes */ - break; - } - - switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { - case 6: /* 0110, no way to differentiate X8 VS X16 */ - case 5: /* 0101 */ - case 8: /* 1000 */ - dimm->dtype = DEV_X16; - 
break; - case 7: /* 0111 */ - case 9: /* 1001 */ - dimm->dtype = DEV_X8; - break; - default: - dimm->dtype = DEV_UNKNOWN; - break; - } + dimm->grain = grain; + dimm->dtype = dtype; } } } @@ -463,7 +462,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, *csrow = rank; #ifdef CONFIG_EDAC_DEBUG - if (mci->csrows[rank].first_page == 0) { + if (mci->csrows[rank]->first_page == 0) { cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a " "non-populated csrow, broken hardware?\n"); return; @@ -471,7 +470,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, #endif /* Revert csrow number */ - pa = mci->csrows[rank].first_page << PAGE_SHIFT; + pa = mci->csrows[rank]->first_page << PAGE_SHIFT; /* Revert column address */ col += bcnt; @@ -512,7 +511,7 @@ static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear, *offset = pa & (PAGE_SIZE - 1); *pfn = pa >> PAGE_SHIFT; - debugf0("%s: ECC physical address 0x%lx\n", __func__, pa); + edac_dbg(0, "ECC physical address 0x%lx\n", pa); } static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) @@ -555,18 +554,18 @@ static void cpc925_mc_check(struct mem_ctl_info *mci) if (apiexcp & CECC_EXCP_DETECTED) { cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); channel = cpc925_mc_find_channel(mci, syndrome); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, pfn, offset, syndrome, csrow, channel, -1, - mci->ctl_name, "", NULL); + mci->ctl_name, ""); } if (apiexcp & UECC_EXCP_DETECTED) { cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, pfn, offset, 0, csrow, -1, -1, - mci->ctl_name, "", NULL); + mci->ctl_name, ""); } cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); @@ -852,8 +851,8 @@ static void cpc925_add_edac_devices(void __iomem *vbase) goto err2; } - debugf0("%s: Successfully added edac device for %s\n", - __func__, dev_info->ctl_name); + edac_dbg(0, "Successfully added edac device for %s\n", + dev_info->ctl_name); continue; @@ -884,8 +883,8 @@ static void cpc925_del_edac_devices(void) if (dev_info->exit) dev_info->exit(dev_info); - debugf0("%s: Successfully deleted edac device for %s\n", - __func__, dev_info->ctl_name); + edac_dbg(0, "Successfully deleted edac device for %s\n", + dev_info->ctl_name); } } @@ -900,7 +899,7 @@ static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci) mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; - debugf0("%s, Mem Scrub Ctrl Register 0x%x\n", __func__, mscr); + edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr); if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || (si == 0)) { @@ -928,8 +927,7 @@ static int cpc925_mc_get_channels(void __iomem *vbase) ((mbcr & MBCR_64BITBUS_MASK) == 0)) dual = 1; - debugf0("%s: %s channel\n", __func__, - (dual > 0) ? "Dual" : "Single"); + edac_dbg(0, "%s channel\n", (dual > 0) ? 
"Dual" : "Single"); return dual; } @@ -944,7 +942,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev) struct resource *r; int res = 0, nr_channels; - debugf0("%s: %s platform device found!\n", __func__, pdev->name); + edac_dbg(0, "%s platform device found!\n", pdev->name); if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { res = -ENOMEM; @@ -995,7 +993,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev) pdata->edac_idx = edac_mc_idx++; pdata->name = pdev->name; - mci->dev = &pdev->dev; + mci->pdev = &pdev->dev; platform_set_drvdata(pdev, mci); mci->dev_name = dev_name(&pdev->dev); mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; @@ -1026,7 +1024,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev) cpc925_add_edac_devices(vbase); /* get this far and it's successful */ - debugf0("%s: success\n", __func__); + edac_dbg(0, "success\n"); res = 0; goto out; diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index 3186512c9739..a5ed6b795fd4 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c @@ -309,7 +309,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, u32 remap; struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); if (page < pvt->tolm) return page; @@ -335,7 +335,7 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, int i; struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); /* convert the addr to 4k page */ page = sec1_add >> (PAGE_SHIFT - 4); @@ -371,10 +371,10 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, channel = !(error_one & 1); /* e752x mc reads 34:6 of the DRAM linear address */ - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, offset_in_page(sec1_add << 4), sec1_syndrome, row, channel, -1, - "e752x CE", "", NULL); + "e752x CE", ""); } static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, @@ -394,7 +394,7 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, int row; struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); if (error_one & 0x0202) { error_2b = ded_add; @@ -408,11 +408,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, edac_mc_find_csrow_by_page(mci, block_page); /* e752x mc reads 34:6 of the DRAM linear address */ - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, block_page, offset_in_page(error_2b << 4), 0, row, -1, -1, - "e752x UE from Read", "", NULL); + "e752x UE from Read", ""); } if (error_one & 0x0404) { @@ -427,11 +427,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, edac_mc_find_csrow_by_page(mci, block_page); /* e752x mc reads 34:6 of the DRAM linear address */ - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, block_page, offset_in_page(error_2b << 4), 0, row, -1, -1, - "e752x UE from Scruber", "", NULL); + "e752x UE from Scruber", ""); } } @@ -453,10 +453,10 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci, if (!handle_error) return; - debugf3("%s()\n", __func__); - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, + edac_dbg(3, "\n"); + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, - "e752x UE log memory write", "", 
NULL); + "e752x UE log memory write", ""); } static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, @@ -982,7 +982,7 @@ static void e752x_check(struct mem_ctl_info *mci) { struct e752x_error_info info; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); e752x_get_error_info(mci, &info); e752x_process_error_info(mci, &info, 1); } @@ -1069,6 +1069,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, u16 ddrcsr) { struct csrow_info *csrow; + enum edac_type edac_mode; unsigned long last_cumul_size; int index, mem_dev, drc_chan; int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ @@ -1095,14 +1096,13 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { /* mem_dev 0=x8, 1=x4 */ mem_dev = (dra >> (index * 4 + 2)) & 0x3; - csrow = &mci->csrows[remap_csrow_index(mci, index)]; + csrow = mci->csrows[remap_csrow_index(mci, index)]; mem_dev = (mem_dev == 2); pci_read_config_byte(pdev, E752X_DRB + index, &value); /* convert a 128 or 64 MiB DRB to a page size. */ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); - debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, - cumul_size); + edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); if (cumul_size == last_cumul_size) continue; /* not populated */ @@ -1111,29 +1111,29 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; + /* + * if single channel or x8 devices then SECDED + * if dual channel and x4 then S4ECD4ED + */ + if (drc_ddim) { + if (drc_chan && mem_dev) { + edac_mode = EDAC_S4ECD4ED; + mci->edac_cap |= EDAC_FLAG_S4ECD4ED; + } else { + edac_mode = EDAC_SECDED; + mci->edac_cap |= EDAC_FLAG_SECDED; + } + } else + edac_mode = EDAC_NONE; for (i = 0; i < csrow->nr_channels; i++) { - struct dimm_info *dimm = csrow->channels[i].dimm; + struct dimm_info *dimm = csrow->channels[i]->dimm; - debugf3("Initializing rank at (%i,%i)\n", index, i); + edac_dbg(3, "Initializing rank at (%i,%i)\n", index, i); dimm->nr_pages = nr_pages / csrow->nr_channels; dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */ dimm->mtype = MEM_RDDR; /* only one type supported */ dimm->dtype = mem_dev ? DEV_X4 : DEV_X8; - - /* - * if single channel or x8 devices then SECDED - * if dual channel and x4 then S4ECD4ED - */ - if (drc_ddim) { - if (drc_chan && mem_dev) { - dimm->edac_mode = EDAC_S4ECD4ED; - mci->edac_cap |= EDAC_FLAG_S4ECD4ED; - } else { - dimm->edac_mode = EDAC_SECDED; - mci->edac_cap |= EDAC_FLAG_SECDED; - } - } else - dimm->edac_mode = EDAC_NONE; + dimm->edac_mode = edac_mode; } } } @@ -1269,8 +1269,8 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) int drc_chan; /* Number of channels 0=1chan,1=2chan */ struct e752x_error_info discard; - debugf0("%s(): mci\n", __func__); - debugf0("Starting Probe1\n"); + edac_dbg(0, "mci\n"); + edac_dbg(0, "Starting Probe1\n"); /* check to see if device 0 function 1 is enabled; if it isn't, we * assume the BIOS has reserved it for a reason and is expecting @@ -1300,7 +1300,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) if (mci == NULL) return -ENOMEM; - debugf3("%s(): init mci\n", __func__); + edac_dbg(3, "init mci\n"); mci->mtype_cap = MEM_FLAG_RDDR; /* 3100 IMCH supports SECDEC only */ mci->edac_ctl_cap = (dev_idx == I3100) ? 
EDAC_FLAG_SECDED : @@ -1308,9 +1308,9 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) /* FIXME - what if different memory types are in different csrows? */ mci->mod_name = EDAC_MOD_STR; mci->mod_ver = E752X_REVISION; - mci->dev = &pdev->dev; + mci->pdev = &pdev->dev; - debugf3("%s(): init pvt\n", __func__); + edac_dbg(3, "init pvt\n"); pvt = (struct e752x_pvt *)mci->pvt_info; pvt->dev_info = &e752x_devs[dev_idx]; pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); @@ -1320,7 +1320,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) return -ENODEV; } - debugf3("%s(): more mci init\n", __func__); + edac_dbg(3, "more mci init\n"); mci->ctl_name = pvt->dev_info->ctl_name; mci->dev_name = pci_name(pdev); mci->edac_check = e752x_check; @@ -1342,7 +1342,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */ else mci->edac_cap |= EDAC_FLAG_NONE; - debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); + edac_dbg(3, "tolm, remapbase, remaplimit\n"); /* load the top of low memory, remap base, and remap limit vars */ pci_read_config_word(pdev, E752X_TOLM, &pci_data); @@ -1359,7 +1359,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) * type of memory controller. The ID is therefore hardcoded to 0. */ if (edac_mc_add_mc(mci)) { - debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "failed edac_mc_add_mc()\n"); goto fail; } @@ -1377,7 +1377,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) } /* get this far and it's successful */ - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); return 0; fail: @@ -1393,7 +1393,7 @@ fail: static int __devinit e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* wake up and enable device */ if (pci_enable_device(pdev) < 0) @@ -1407,7 +1407,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev) struct mem_ctl_info *mci; struct e752x_pvt *pvt; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); if (e752x_pci) edac_pci_release_generic_ctl(e752x_pci); @@ -1453,7 +1453,7 @@ static int __init e752x_init(void) { int pci_rc; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -1464,7 +1464,7 @@ static int __init e752x_init(void) static void __exit e752x_exit(void) { - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); pci_unregister_driver(&e752x_driver); } diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c index 9a9c1a546797..9ff57f361a43 100644 --- a/drivers/edac/e7xxx_edac.c +++ b/drivers/edac/e7xxx_edac.c @@ -166,7 +166,7 @@ static const struct e7xxx_dev_info e7xxx_devs[] = { /* FIXME - is this valid for both SECDED and S4ECD4ED? 
*/ static inline int e7xxx_find_channel(u16 syndrome) { - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); if ((syndrome & 0xff00) == 0) return 0; @@ -186,7 +186,7 @@ static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, u32 remap; struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); if ((page < pvt->tolm) || ((page >= 0x100000) && (page < pvt->remapbase))) @@ -208,7 +208,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) int row; int channel; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); /* read the error address */ error_1b = info->dram_celog_add; /* FIXME - should use PAGE_SHIFT */ @@ -219,15 +219,15 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) row = edac_mc_find_csrow_by_page(mci, page); /* convert syndrome to channel */ channel = e7xxx_find_channel(syndrome); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome, - row, channel, -1, "e7xxx CE", "", NULL); + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, 0, syndrome, + row, channel, -1, "e7xxx CE", ""); } static void process_ce_no_info(struct mem_ctl_info *mci) { - debugf3("%s()\n", __func__); - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1, - "e7xxx CE log register overflow", "", NULL); + edac_dbg(3, "\n"); + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, + "e7xxx CE log register overflow", ""); } static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) @@ -235,23 +235,23 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) u32 error_2b, block_page; int row; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); /* read the error address */ error_2b = info->dram_uelog_add; /* FIXME - should use PAGE_SHIFT */ block_page = error_2b >> 6; /* convert to 4k address */ row = edac_mc_find_csrow_by_page(mci, block_page); - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0, - row, -1, -1, "e7xxx UE", "", NULL); + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, block_page, 0, 0, + row, -1, -1, "e7xxx UE", ""); } static void process_ue_no_info(struct mem_ctl_info *mci) { - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1, - "e7xxx UE log register overflow", "", NULL); + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, + "e7xxx UE log register overflow", ""); } static void e7xxx_get_error_info(struct mem_ctl_info *mci, @@ -334,7 +334,7 @@ static void e7xxx_check(struct mem_ctl_info *mci) { struct e7xxx_error_info info; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); e7xxx_get_error_info(mci, &info); e7xxx_process_error_info(mci, &info, 1); } @@ -362,6 +362,7 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, int drc_chan, drc_drbg, drc_ddim, mem_dev; struct csrow_info *csrow; struct dimm_info *dimm; + enum edac_type edac_mode; pci_read_config_dword(pdev, E7XXX_DRA, &dra); drc_chan = dual_channel_active(drc, dev_idx); @@ -377,13 +378,12 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, for (index = 0; index < mci->nr_csrows; index++) { /* mem_dev 0=x8, 1=x4 */ mem_dev = (dra >> (index * 4 + 3)) & 0x1; - csrow = &mci->csrows[index]; + csrow = mci->csrows[index]; pci_read_config_byte(pdev, E7XXX_DRB + index, &value); /* convert a 64 or 32 MiB DRB to a page size. 
*/ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); - debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, - cumul_size); + edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); if (cumul_size == last_cumul_size) continue; /* not populated */ @@ -392,28 +392,29 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; + /* + * if single channel or x8 devices then SECDED + * if dual channel and x4 then S4ECD4ED + */ + if (drc_ddim) { + if (drc_chan && mem_dev) { + edac_mode = EDAC_S4ECD4ED; + mci->edac_cap |= EDAC_FLAG_S4ECD4ED; + } else { + edac_mode = EDAC_SECDED; + mci->edac_cap |= EDAC_FLAG_SECDED; + } + } else + edac_mode = EDAC_NONE; + for (j = 0; j < drc_chan + 1; j++) { - dimm = csrow->channels[j].dimm; + dimm = csrow->channels[j]->dimm; dimm->nr_pages = nr_pages / (drc_chan + 1); dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */ dimm->mtype = MEM_RDDR; /* only one type supported */ dimm->dtype = mem_dev ? DEV_X4 : DEV_X8; - - /* - * if single channel or x8 devices then SECDED - * if dual channel and x4 then S4ECD4ED - */ - if (drc_ddim) { - if (drc_chan && mem_dev) { - dimm->edac_mode = EDAC_S4ECD4ED; - mci->edac_cap |= EDAC_FLAG_S4ECD4ED; - } else { - dimm->edac_mode = EDAC_SECDED; - mci->edac_cap |= EDAC_FLAG_SECDED; - } - } else - dimm->edac_mode = EDAC_NONE; + dimm->edac_mode = edac_mode; } } } @@ -428,7 +429,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) int drc_chan; struct e7xxx_error_info discard; - debugf0("%s(): mci\n", __func__); + edac_dbg(0, "mci\n"); pci_read_config_dword(pdev, E7XXX_DRC, &drc); @@ -451,15 +452,15 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) if (mci == NULL) return -ENOMEM; - debugf3("%s(): init mci\n", __func__); + edac_dbg(3, "init mci\n"); mci->mtype_cap = MEM_FLAG_RDDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED; /* FIXME - what if different memory types are in different csrows? */ mci->mod_name = EDAC_MOD_STR; mci->mod_ver = E7XXX_REVISION; - mci->dev = &pdev->dev; - debugf3("%s(): init pvt\n", __func__); + mci->pdev = &pdev->dev; + edac_dbg(3, "init pvt\n"); pvt = (struct e7xxx_pvt *)mci->pvt_info; pvt->dev_info = &e7xxx_devs[dev_idx]; pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, @@ -472,14 +473,14 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) goto fail0; } - debugf3("%s(): more mci init\n", __func__); + edac_dbg(3, "more mci init\n"); mci->ctl_name = pvt->dev_info->ctl_name; mci->dev_name = pci_name(pdev); mci->edac_check = e7xxx_check; mci->ctl_page_to_phys = ctl_page_to_phys; e7xxx_init_csrows(mci, pdev, dev_idx, drc); mci->edac_cap |= EDAC_FLAG_NONE; - debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); + edac_dbg(3, "tolm, remapbase, remaplimit\n"); /* load the top of low memory, remap base, and remap limit vars */ pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); pvt->tolm = ((u32) pci_data) << 4; @@ -498,7 +499,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) * type of memory controller. The ID is therefore hardcoded to 0. 
*/ if (edac_mc_add_mc(mci)) { - debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "failed edac_mc_add_mc()\n"); goto fail1; } @@ -514,7 +515,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) } /* get this far and it's successful */ - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); return 0; fail1: @@ -530,7 +531,7 @@ fail0: static int __devinit e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* wake up and enable device */ return pci_enable_device(pdev) ? @@ -542,7 +543,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev) struct mem_ctl_info *mci; struct e7xxx_pvt *pvt; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); if (e7xxx_pci) edac_pci_release_generic_ctl(e7xxx_pci); diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index 117490d4f835..23bb99fa44f1 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h @@ -71,26 +71,21 @@ extern const char *edac_mem_types[]; #ifdef CONFIG_EDAC_DEBUG extern int edac_debug_level; -#define edac_debug_printk(level, fmt, arg...) \ - do { \ - if (level <= edac_debug_level) \ - edac_printk(KERN_DEBUG, EDAC_DEBUG, \ - "%s: " fmt, __func__, ##arg); \ - } while (0) - -#define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ ) -#define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ ) -#define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ ) -#define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ ) -#define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ ) +#define edac_dbg(level, fmt, ...) \ +do { \ + if (level <= edac_debug_level) \ + edac_printk(KERN_DEBUG, EDAC_DEBUG, \ + "%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) #else /* !CONFIG_EDAC_DEBUG */ -#define debugf0( ... ) -#define debugf1( ... ) -#define debugf2( ... ) -#define debugf3( ... ) -#define debugf4( ... ) +#define edac_dbg(level, fmt, ...) 
\ +do { \ + if (0) \ + edac_printk(KERN_DEBUG, EDAC_DEBUG, \ + "%s: " fmt, __func__, ##__VA_ARGS__); \ +} while (0) #endif /* !CONFIG_EDAC_DEBUG */ @@ -460,15 +455,15 @@ extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page); void edac_mc_handle_error(const enum hw_event_mc_err_type type, struct mem_ctl_info *mci, + const u16 error_count, const unsigned long page_frame_number, const unsigned long offset_in_page, const unsigned long syndrome, - const int layer0, - const int layer1, - const int layer2, + const int top_layer, + const int mid_layer, + const int low_layer, const char *msg, - const char *other_detail, - const void *mcelog); + const char *other_detail); /* * edac_device APIs diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index ee3f1f810c1e..211021dfec73 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -40,12 +40,13 @@ static LIST_HEAD(edac_device_list); #ifdef CONFIG_EDAC_DEBUG static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) { - debugf3("\tedac_dev = %p dev_idx=%d \n", edac_dev, edac_dev->dev_idx); - debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check); - debugf3("\tdev = %p\n", edac_dev->dev); - debugf3("\tmod_name:ctl_name = %s:%s\n", - edac_dev->mod_name, edac_dev->ctl_name); - debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info); + edac_dbg(3, "\tedac_dev = %p dev_idx=%d\n", + edac_dev, edac_dev->dev_idx); + edac_dbg(4, "\tedac_dev->edac_check = %p\n", edac_dev->edac_check); + edac_dbg(3, "\tdev = %p\n", edac_dev->dev); + edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n", + edac_dev->mod_name, edac_dev->ctl_name); + edac_dbg(3, "\tpvt_info = %p\n\n", edac_dev->pvt_info); } #endif /* CONFIG_EDAC_DEBUG */ @@ -82,8 +83,7 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info( void *pvt, *p; int err; - debugf4("%s() instances=%d blocks=%d\n", - __func__, nr_instances, nr_blocks); + edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks); /* Calculate the size of memory we need to allocate AND * determine the offsets of the various item arrays @@ -156,8 +156,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info( /* Name of this edac device */ snprintf(dev_ctl->name,sizeof(dev_ctl->name),"%s",edac_device_name); - debugf4("%s() edac_dev=%p next after end=%p\n", - __func__, dev_ctl, pvt + sz_private ); + edac_dbg(4, "edac_dev=%p next after end=%p\n", + dev_ctl, pvt + sz_private); /* Initialize every Instance */ for (instance = 0; instance < nr_instances; instance++) { @@ -178,10 +178,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info( snprintf(blk->name, sizeof(blk->name), "%s%d", edac_block_name, block+offset_value); - debugf4("%s() instance=%d inst_p=%p block=#%d " - "block_p=%p name='%s'\n", - __func__, instance, inst, block, - blk, blk->name); + edac_dbg(4, "instance=%d inst_p=%p block=#%d block_p=%p name='%s'\n", + instance, inst, block, blk, blk->name); /* if there are NO attributes OR no attribute pointer * then continue on to next block iteration @@ -194,8 +192,8 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info( attrib_p = &dev_attrib[block*nr_instances*nr_attrib]; blk->block_attributes = attrib_p; - debugf4("%s() THIS BLOCK_ATTRIB=%p\n", - __func__, blk->block_attributes); + edac_dbg(4, "THIS BLOCK_ATTRIB=%p\n", + blk->block_attributes); /* Initialize every user specified attribute in this * block with the data the caller passed in @@ -214,11 +212,10 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info( attrib->block = blk; 
/* up link */ - debugf4("%s() alloc-attrib=%p attrib_name='%s' " - "attrib-spec=%p spec-name=%s\n", - __func__, attrib, attrib->attr.name, - &attrib_spec[attr], - attrib_spec[attr].attr.name + edac_dbg(4, "alloc-attrib=%p attrib_name='%s' attrib-spec=%p spec-name=%s\n", + attrib, attrib->attr.name, + &attrib_spec[attr], + attrib_spec[attr].attr.name ); } } @@ -273,7 +270,7 @@ static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev) struct edac_device_ctl_info *edac_dev; struct list_head *item; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); list_for_each(item, &edac_device_list) { edac_dev = list_entry(item, struct edac_device_ctl_info, link); @@ -408,7 +405,7 @@ static void edac_device_workq_function(struct work_struct *work_req) void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, unsigned msec) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* take the arg 'msec' and set it into the control structure * to used in the time period calculation @@ -496,7 +493,7 @@ EXPORT_SYMBOL_GPL(edac_device_alloc_index); */ int edac_device_add_device(struct edac_device_ctl_info *edac_dev) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); #ifdef CONFIG_EDAC_DEBUG if (edac_debug_level >= 3) @@ -570,7 +567,7 @@ struct edac_device_ctl_info *edac_device_del_device(struct device *dev) { struct edac_device_ctl_info *edac_dev; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); mutex_lock(&device_ctls_mutex); diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index b4ea185ccebf..fb68a06ad683 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c @@ -202,7 +202,7 @@ static void edac_device_ctrl_master_release(struct kobject *kobj) { struct edac_device_ctl_info *edac_dev = to_edacdev(kobj); - debugf4("%s() control index=%d\n", __func__, edac_dev->dev_idx); + edac_dbg(4, "control index=%d\n", edac_dev->dev_idx); /* decrement the EDAC CORE module ref count */ module_put(edac_dev->owner); @@ -233,12 +233,12 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) struct bus_type *edac_subsys; int err; - debugf1("%s()\n", __func__); + edac_dbg(1, "\n"); /* get the /sys/devices/system/edac reference */ edac_subsys = edac_get_sysfs_subsys(); if (edac_subsys == NULL) { - debugf1("%s() no edac_subsys error\n", __func__); + edac_dbg(1, "no edac_subsys error\n"); err = -ENODEV; goto err_out; } @@ -264,8 +264,8 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) &edac_subsys->dev_root->kobj, "%s", edac_dev->name); if (err) { - debugf1("%s()Failed to register '.../edac/%s'\n", - __func__, edac_dev->name); + edac_dbg(1, "Failed to register '.../edac/%s'\n", + edac_dev->name); goto err_kobj_reg; } kobject_uevent(&edac_dev->kobj, KOBJ_ADD); @@ -274,8 +274,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) * edac_device_unregister_sysfs_main_kobj() must be used */ - debugf4("%s() Registered '.../edac/%s' kobject\n", - __func__, edac_dev->name); + edac_dbg(4, "Registered '.../edac/%s' kobject\n", edac_dev->name); return 0; @@ -296,9 +295,8 @@ err_out: */ void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev) { - debugf0("%s()\n", __func__); - debugf4("%s() name of kobject is: %s\n", - __func__, kobject_name(&dev->kobj)); + edac_dbg(0, "\n"); + edac_dbg(4, "name of kobject is: %s\n", kobject_name(&dev->kobj)); /* * Unregister the edac device's kobject and @@ -336,7 +334,7 @@ static void 
edac_device_ctrl_instance_release(struct kobject *kobj) { struct edac_device_instance *instance; - debugf1("%s()\n", __func__); + edac_dbg(1, "\n"); /* map from this kobj to the main control struct * and then dec the main kobj count @@ -442,7 +440,7 @@ static void edac_device_ctrl_block_release(struct kobject *kobj) { struct edac_device_block *block; - debugf1("%s()\n", __func__); + edac_dbg(1, "\n"); /* get the container of the kobj */ block = to_block(kobj); @@ -524,10 +522,10 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev, struct edac_dev_sysfs_block_attribute *sysfs_attrib; struct kobject *main_kobj; - debugf4("%s() Instance '%s' inst_p=%p block '%s' block_p=%p\n", - __func__, instance->name, instance, block->name, block); - debugf4("%s() block kobj=%p block kobj->parent=%p\n", - __func__, &block->kobj, &block->kobj.parent); + edac_dbg(4, "Instance '%s' inst_p=%p block '%s' block_p=%p\n", + instance->name, instance, block->name, block); + edac_dbg(4, "block kobj=%p block kobj->parent=%p\n", + &block->kobj, &block->kobj.parent); /* init this block's kobject */ memset(&block->kobj, 0, sizeof(struct kobject)); @@ -546,8 +544,7 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev, &instance->kobj, "%s", block->name); if (err) { - debugf1("%s() Failed to register instance '%s'\n", - __func__, block->name); + edac_dbg(1, "Failed to register instance '%s'\n", block->name); kobject_put(main_kobj); err = -ENODEV; goto err_out; @@ -560,11 +557,9 @@ static int edac_device_create_block(struct edac_device_ctl_info *edac_dev, if (sysfs_attrib && block->nr_attribs) { for (i = 0; i < block->nr_attribs; i++, sysfs_attrib++) { - debugf4("%s() creating block attrib='%s' " - "attrib->%p to kobj=%p\n", - __func__, - sysfs_attrib->attr.name, - sysfs_attrib, &block->kobj); + edac_dbg(4, "creating block attrib='%s' attrib->%p to kobj=%p\n", + sysfs_attrib->attr.name, + sysfs_attrib, &block->kobj); /* Create each block_attribute file */ err = sysfs_create_file(&block->kobj, @@ -647,14 +642,14 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev, err = kobject_init_and_add(&instance->kobj, &ktype_instance_ctrl, &edac_dev->kobj, "%s", instance->name); if (err != 0) { - debugf2("%s() Failed to register instance '%s'\n", - __func__, instance->name); + edac_dbg(2, "Failed to register instance '%s'\n", + instance->name); kobject_put(main_kobj); goto err_out; } - debugf4("%s() now register '%d' blocks for instance %d\n", - __func__, instance->nr_blocks, idx); + edac_dbg(4, "now register '%d' blocks for instance %d\n", + instance->nr_blocks, idx); /* register all blocks of this instance */ for (i = 0; i < instance->nr_blocks; i++) { @@ -670,8 +665,8 @@ static int edac_device_create_instance(struct edac_device_ctl_info *edac_dev, } kobject_uevent(&instance->kobj, KOBJ_ADD); - debugf4("%s() Registered instance %d '%s' kobject\n", - __func__, idx, instance->name); + edac_dbg(4, "Registered instance %d '%s' kobject\n", + idx, instance->name); return 0; @@ -715,7 +710,7 @@ static int edac_device_create_instances(struct edac_device_ctl_info *edac_dev) int i, j; int err; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* iterate over creation of the instances */ for (i = 0; i < edac_dev->nr_instances; i++) { @@ -817,12 +812,12 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev) int err; struct kobject *edac_kobj = &edac_dev->kobj; - debugf0("%s() idx=%d\n", __func__, edac_dev->dev_idx); + edac_dbg(0, "idx=%d\n", 
edac_dev->dev_idx); /* go create any main attributes callers wants */ err = edac_device_add_main_sysfs_attributes(edac_dev); if (err) { - debugf0("%s() failed to add sysfs attribs\n", __func__); + edac_dbg(0, "failed to add sysfs attribs\n"); goto err_out; } @@ -832,8 +827,7 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev) err = sysfs_create_link(edac_kobj, &edac_dev->dev->kobj, EDAC_DEVICE_SYMLINK); if (err) { - debugf0("%s() sysfs_create_link() returned err= %d\n", - __func__, err); + edac_dbg(0, "sysfs_create_link() returned err= %d\n", err); goto err_remove_main_attribs; } @@ -843,14 +837,13 @@ int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev) */ err = edac_device_create_instances(edac_dev); if (err) { - debugf0("%s() edac_device_create_instances() " - "returned err= %d\n", __func__, err); + edac_dbg(0, "edac_device_create_instances() returned err= %d\n", + err); goto err_remove_link; } - debugf4("%s() create-instances done, idx=%d\n", - __func__, edac_dev->dev_idx); + edac_dbg(4, "create-instances done, idx=%d\n", edac_dev->dev_idx); return 0; @@ -873,7 +866,7 @@ err_out: */ void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* remove any main attributes for this device */ edac_device_remove_main_sysfs_attributes(edac_dev); diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index de5ba86e8b89..616d90bcb3a4 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -27,70 +27,95 @@ #include <linux/list.h> #include <linux/ctype.h> #include <linux/edac.h> +#include <linux/bitops.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/edac.h> #include "edac_core.h" #include "edac_module.h" +#define CREATE_TRACE_POINTS +#define TRACE_INCLUDE_PATH ../../include/ras +#include <ras/ras_event.h> + /* lock to memory controller's control array */ static DEFINE_MUTEX(mem_ctls_mutex); static LIST_HEAD(mc_devices); +unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, + unsigned len) +{ + struct mem_ctl_info *mci = dimm->mci; + int i, n, count = 0; + char *p = buf; + + for (i = 0; i < mci->n_layers; i++) { + n = snprintf(p, len, "%s %d ", + edac_layer_name[mci->layers[i].type], + dimm->location[i]); + p += n; + len -= n; + count += n; + if (!len) + break; + } + + return count; +} + #ifdef CONFIG_EDAC_DEBUG static void edac_mc_dump_channel(struct rank_info *chan) { - debugf4("\tchannel = %p\n", chan); - debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx); - debugf4("\tchannel->csrow = %p\n\n", chan->csrow); - debugf4("\tchannel->dimm = %p\n", chan->dimm); + edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx); + edac_dbg(4, " channel = %p\n", chan); + edac_dbg(4, " channel->csrow = %p\n", chan->csrow); + edac_dbg(4, " channel->dimm = %p\n", chan->dimm); } -static void edac_mc_dump_dimm(struct dimm_info *dimm) +static void edac_mc_dump_dimm(struct dimm_info *dimm, int number) { - int i; - - debugf4("\tdimm = %p\n", dimm); - debugf4("\tdimm->label = '%s'\n", dimm->label); - debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages); - debugf4("\tdimm location "); - for (i = 0; i < dimm->mci->n_layers; i++) { - printk(KERN_CONT "%d", dimm->location[i]); - if (i < dimm->mci->n_layers - 1) - printk(KERN_CONT "."); - } - printk(KERN_CONT "\n"); - debugf4("\tdimm->grain = %d\n", dimm->grain); - debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages); + char location[80]; + + edac_dimm_info_location(dimm, location, sizeof(location)); + + edac_dbg(4, "%s%i: 
%smapped as virtual row %d, chan %d\n", + dimm->mci->mem_is_per_rank ? "rank" : "dimm", + number, location, dimm->csrow, dimm->cschannel); + edac_dbg(4, " dimm = %p\n", dimm); + edac_dbg(4, " dimm->label = '%s'\n", dimm->label); + edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); + edac_dbg(4, " dimm->grain = %d\n", dimm->grain); + edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); } static void edac_mc_dump_csrow(struct csrow_info *csrow) { - debugf4("\tcsrow = %p\n", csrow); - debugf4("\tcsrow->csrow_idx = %d\n", csrow->csrow_idx); - debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page); - debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page); - debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask); - debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels); - debugf4("\tcsrow->channels = %p\n", csrow->channels); - debugf4("\tcsrow->mci = %p\n\n", csrow->mci); + edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx); + edac_dbg(4, " csrow = %p\n", csrow); + edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page); + edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page); + edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask); + edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels); + edac_dbg(4, " csrow->channels = %p\n", csrow->channels); + edac_dbg(4, " csrow->mci = %p\n", csrow->mci); } static void edac_mc_dump_mci(struct mem_ctl_info *mci) { - debugf3("\tmci = %p\n", mci); - debugf3("\tmci->mtype_cap = %lx\n", mci->mtype_cap); - debugf3("\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); - debugf3("\tmci->edac_cap = %lx\n", mci->edac_cap); - debugf4("\tmci->edac_check = %p\n", mci->edac_check); - debugf3("\tmci->nr_csrows = %d, csrows = %p\n", - mci->nr_csrows, mci->csrows); - debugf3("\tmci->nr_dimms = %d, dimms = %p\n", - mci->tot_dimms, mci->dimms); - debugf3("\tdev = %p\n", mci->dev); - debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name); - debugf3("\tpvt_info = %p\n\n", mci->pvt_info); + edac_dbg(3, "\tmci = %p\n", mci); + edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap); + edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); + edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap); + edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check); + edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n", + mci->nr_csrows, mci->csrows); + edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n", + mci->tot_dimms, mci->dimms); + edac_dbg(3, "\tdev = %p\n", mci->pdev); + edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n", + mci->mod_name, mci->ctl_name); + edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info); } #endif /* CONFIG_EDAC_DEBUG */ @@ -205,15 +230,15 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, { struct mem_ctl_info *mci; struct edac_mc_layer *layer; - struct csrow_info *csi, *csr; - struct rank_info *chi, *chp, *chan; + struct csrow_info *csr; + struct rank_info *chan; struct dimm_info *dimm; u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; unsigned pos[EDAC_MAX_LAYERS]; unsigned size, tot_dimms = 1, count = 1; unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0; void *pvt, *p, *ptr = NULL; - int i, j, err, row, chn, n, len; + int i, j, row, chn, n, len, off; bool per_rank = false; BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0); @@ -239,26 +264,24 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, */ mci = edac_align_ptr(&ptr, sizeof(*mci), 1); layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers); - csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows); - chi = 
edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels); - dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms); for (i = 0; i < n_layers; i++) { count *= layers[i].size; - debugf4("%s: errcount layer %d size %d\n", __func__, i, count); + edac_dbg(4, "errcount layer %d size %d\n", i, count); ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); tot_errcount += 2 * count; } - debugf4("%s: allocating %d error counters\n", __func__, tot_errcount); + edac_dbg(4, "allocating %d error counters\n", tot_errcount); pvt = edac_align_ptr(&ptr, sz_pvt, 1); size = ((unsigned long)pvt) + sz_pvt; - debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", - __func__, size, - tot_dimms, - per_rank ? "ranks" : "dimms", - tot_csrows * tot_channels); + edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", + size, + tot_dimms, + per_rank ? "ranks" : "dimms", + tot_csrows * tot_channels); + mci = kzalloc(size, GFP_KERNEL); if (mci == NULL) return NULL; @@ -267,9 +290,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, * rather than an imaginary chunk of memory located at address 0. */ layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); - csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi)); - chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi)); - dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm)); for (i = 0; i < n_layers; i++) { mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i])); mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i])); @@ -278,8 +298,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, /* setup index and various internal pointers */ mci->mc_idx = mc_num; - mci->csrows = csi; - mci->dimms = dimm; mci->tot_dimms = tot_dimms; mci->pvt_info = pvt; mci->n_layers = n_layers; @@ -290,40 +308,57 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, mci->mem_is_per_rank = per_rank; /* - * Fill the csrow struct + * Alocate and fill the csrow/channels structs */ + mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL); + if (!mci->csrows) + goto error; for (row = 0; row < tot_csrows; row++) { - csr = &csi[row]; + csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL); + if (!csr) + goto error; + mci->csrows[row] = csr; csr->csrow_idx = row; csr->mci = mci; csr->nr_channels = tot_channels; - chp = &chi[row * tot_channels]; - csr->channels = chp; + csr->channels = kcalloc(sizeof(*csr->channels), tot_channels, + GFP_KERNEL); + if (!csr->channels) + goto error; for (chn = 0; chn < tot_channels; chn++) { - chan = &chp[chn]; + chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL); + if (!chan) + goto error; + csr->channels[chn] = chan; chan->chan_idx = chn; chan->csrow = csr; } } /* - * Fill the dimm struct + * Allocate and fill the dimm structs */ + mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL); + if (!mci->dimms) + goto error; + memset(&pos, 0, sizeof(pos)); row = 0; chn = 0; - debugf4("%s: initializing %d %s\n", __func__, tot_dimms, - per_rank ? 
"ranks" : "dimms"); for (i = 0; i < tot_dimms; i++) { - chan = &csi[row].channels[chn]; - dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers, - pos[0], pos[1], pos[2]); - dimm->mci = mci; + chan = mci->csrows[row]->channels[chn]; + off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]); + if (off < 0 || off >= tot_dimms) { + edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal data access\n"); + goto error; + } - debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__, - i, per_rank ? "rank" : "dimm", (dimm - mci->dimms), - pos[0], pos[1], pos[2], row, chn); + dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL); + if (!dimm) + goto error; + mci->dimms[off] = dimm; + dimm->mci = mci; /* * Copy DIMM location and initialize it. @@ -367,16 +402,6 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, } mci->op_state = OP_ALLOC; - INIT_LIST_HEAD(&mci->grp_kobj_list); - - /* - * Initialize the 'root' kobj for the edac_mc controller - */ - err = edac_mc_register_sysfs_main_kobj(mci); - if (err) { - kfree(mci); - return NULL; - } /* at this point, the root kobj is valid, and in order to * 'free' the object, then the function: @@ -384,7 +409,30 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, * which will perform kobj unregistration and the actual free * will occur during the kobject callback operation */ + return mci; + +error: + if (mci->dimms) { + for (i = 0; i < tot_dimms; i++) + kfree(mci->dimms[i]); + kfree(mci->dimms); + } + if (mci->csrows) { + for (chn = 0; chn < tot_channels; chn++) { + csr = mci->csrows[chn]; + if (csr) { + for (chn = 0; chn < tot_channels; chn++) + kfree(csr->channels[chn]); + kfree(csr); + } + kfree(mci->csrows[i]); + } + kfree(mci->csrows); + } + kfree(mci); + + return NULL; } EXPORT_SYMBOL_GPL(edac_mc_alloc); @@ -395,12 +443,10 @@ EXPORT_SYMBOL_GPL(edac_mc_alloc); */ void edac_mc_free(struct mem_ctl_info *mci) { - debugf1("%s()\n", __func__); + edac_dbg(1, "\n"); - edac_mc_unregister_sysfs_main_kobj(mci); - - /* free the mci instance memory here */ - kfree(mci); + /* the mci instance is freed here, when the sysfs object is dropped */ + edac_unregister_sysfs(mci); } EXPORT_SYMBOL_GPL(edac_mc_free); @@ -417,12 +463,12 @@ struct mem_ctl_info *find_mci_by_dev(struct device *dev) struct mem_ctl_info *mci; struct list_head *item; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); - if (mci->dev == dev) + if (mci->pdev == dev) return mci; } @@ -485,7 +531,7 @@ static void edac_mc_workq_function(struct work_struct *work_req) */ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* if this instance is not in the POLL state, then simply return */ if (mci->op_state != OP_RUNNING_POLL) @@ -512,8 +558,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci) status = cancel_delayed_work(&mci->work); if (status == 0) { - debugf0("%s() not canceled, flush the queue\n", - __func__); + edac_dbg(0, "not canceled, flush the queue\n"); /* workq instance might be running, wait for it */ flush_workqueue(edac_workqueue); @@ -574,7 +619,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci) insert_before = &mc_devices; - p = find_mci_by_dev(mci->dev); + p = find_mci_by_dev(mci->pdev); if (unlikely(p != NULL)) goto fail0; @@ -596,7 +641,7 @@ static int add_mc_to_global_list(struct mem_ctl_info *mci) fail0: edac_printk(KERN_WARNING, EDAC_MC, - "%s (%s) %s %s already 
assigned %d\n", dev_name(p->dev), + "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev), edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); return 1; @@ -660,7 +705,7 @@ EXPORT_SYMBOL(edac_mc_find); /* FIXME - should a warning be printed if no error detection? correction? */ int edac_mc_add_mc(struct mem_ctl_info *mci) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); #ifdef CONFIG_EDAC_DEBUG if (edac_debug_level >= 3) @@ -670,15 +715,22 @@ int edac_mc_add_mc(struct mem_ctl_info *mci) int i; for (i = 0; i < mci->nr_csrows; i++) { + struct csrow_info *csrow = mci->csrows[i]; + u32 nr_pages = 0; int j; - edac_mc_dump_csrow(&mci->csrows[i]); - for (j = 0; j < mci->csrows[i].nr_channels; j++) - edac_mc_dump_channel(&mci->csrows[i]. - channels[j]); + for (j = 0; j < csrow->nr_channels; j++) + nr_pages += csrow->channels[j]->dimm->nr_pages; + if (!nr_pages) + continue; + edac_mc_dump_csrow(csrow); + for (j = 0; j < csrow->nr_channels; j++) + if (csrow->channels[j]->dimm->nr_pages) + edac_mc_dump_channel(csrow->channels[j]); } for (i = 0; i < mci->tot_dimms; i++) - edac_mc_dump_dimm(&mci->dimms[i]); + if (mci->dimms[i]->nr_pages) + edac_mc_dump_dimm(mci->dimms[i], i); } #endif mutex_lock(&mem_ctls_mutex); @@ -732,7 +784,7 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev) { struct mem_ctl_info *mci; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); mutex_lock(&mem_ctls_mutex); @@ -770,7 +822,7 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset, void *virt_addr; unsigned long flags = 0; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); /* ECC error page was not in our memory. Ignore it. */ if (!pfn_valid(page)) @@ -797,26 +849,26 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset, /* FIXME - should return -1 */ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) { - struct csrow_info *csrows = mci->csrows; + struct csrow_info **csrows = mci->csrows; int row, i, j, n; - debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page); + edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page); row = -1; for (i = 0; i < mci->nr_csrows; i++) { - struct csrow_info *csrow = &csrows[i]; + struct csrow_info *csrow = csrows[i]; n = 0; for (j = 0; j < csrow->nr_channels; j++) { - struct dimm_info *dimm = csrow->channels[j].dimm; + struct dimm_info *dimm = csrow->channels[j]->dimm; n += dimm->nr_pages; } if (n == 0) continue; - debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) " - "mask(0x%lx)\n", mci->mc_idx, __func__, - csrow->first_page, page, csrow->last_page, - csrow->page_mask); + edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n", + mci->mc_idx, + csrow->first_page, page, csrow->last_page, + csrow->page_mask); if ((page >= csrow->first_page) && (page <= csrow->last_page) && @@ -845,15 +897,16 @@ const char *edac_layer_name[] = { EXPORT_SYMBOL_GPL(edac_layer_name); static void edac_inc_ce_error(struct mem_ctl_info *mci, - bool enable_per_layer_report, - const int pos[EDAC_MAX_LAYERS]) + bool enable_per_layer_report, + const int pos[EDAC_MAX_LAYERS], + const u16 count) { int i, index = 0; - mci->ce_mc++; + mci->ce_mc += count; if (!enable_per_layer_report) { - mci->ce_noinfo_count++; + mci->ce_noinfo_count += count; return; } @@ -861,7 +914,7 @@ static void edac_inc_ce_error(struct mem_ctl_info *mci, if (pos[i] < 0) break; index += pos[i]; - mci->ce_per_layer[i][index]++; + mci->ce_per_layer[i][index] += count; if (i < mci->n_layers - 1) index *= mci->layers[i + 1].size; @@ -870,14 
+923,15 @@ static void edac_inc_ce_error(struct mem_ctl_info *mci, static void edac_inc_ue_error(struct mem_ctl_info *mci, bool enable_per_layer_report, - const int pos[EDAC_MAX_LAYERS]) + const int pos[EDAC_MAX_LAYERS], + const u16 count) { int i, index = 0; - mci->ue_mc++; + mci->ue_mc += count; if (!enable_per_layer_report) { - mci->ce_noinfo_count++; + mci->ce_noinfo_count += count; return; } @@ -885,7 +939,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci, if (pos[i] < 0) break; index += pos[i]; - mci->ue_per_layer[i][index]++; + mci->ue_per_layer[i][index] += count; if (i < mci->n_layers - 1) index *= mci->layers[i + 1].size; @@ -893,6 +947,7 @@ static void edac_inc_ue_error(struct mem_ctl_info *mci, } static void edac_ce_error(struct mem_ctl_info *mci, + const u16 error_count, const int pos[EDAC_MAX_LAYERS], const char *msg, const char *location, @@ -902,23 +957,25 @@ static void edac_ce_error(struct mem_ctl_info *mci, const bool enable_per_layer_report, const unsigned long page_frame_number, const unsigned long offset_in_page, - u32 grain) + long grain) { unsigned long remapped_page; if (edac_mc_get_log_ce()) { if (other_detail && *other_detail) edac_mc_printk(mci, KERN_WARNING, - "CE %s on %s (%s%s - %s)\n", + "%d CE %s on %s (%s %s - %s)\n", + error_count, msg, label, location, detail, other_detail); else edac_mc_printk(mci, KERN_WARNING, - "CE %s on %s (%s%s)\n", + "%d CE %s on %s (%s %s)\n", + error_count, msg, label, location, detail); } - edac_inc_ce_error(mci, enable_per_layer_report, pos); + edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count); if (mci->scrub_mode & SCRUB_SW_SRC) { /* @@ -942,6 +999,7 @@ static void edac_ce_error(struct mem_ctl_info *mci, } static void edac_ue_error(struct mem_ctl_info *mci, + const u16 error_count, const int pos[EDAC_MAX_LAYERS], const char *msg, const char *location, @@ -953,12 +1011,14 @@ static void edac_ue_error(struct mem_ctl_info *mci, if (edac_mc_get_log_ue()) { if (other_detail && *other_detail) edac_mc_printk(mci, KERN_WARNING, - "UE %s on %s (%s%s - %s)\n", + "%d UE %s on %s (%s %s - %s)\n", + error_count, msg, label, location, detail, other_detail); else edac_mc_printk(mci, KERN_WARNING, - "UE %s on %s (%s%s)\n", + "%d UE %s on %s (%s %s)\n", + error_count, msg, label, location, detail); } @@ -971,33 +1031,53 @@ static void edac_ue_error(struct mem_ctl_info *mci, msg, label, location, detail); } - edac_inc_ue_error(mci, enable_per_layer_report, pos); + edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count); } #define OTHER_LABEL " or " + +/** + * edac_mc_handle_error - reports a memory event to userspace + * + * @type: severity of the error (CE/UE/Fatal) + * @mci: a struct mem_ctl_info pointer + * @error_count: Number of errors of the same type + * @page_frame_number: mem page where the error occurred + * @offset_in_page: offset of the error inside the page + * @syndrome: ECC syndrome + * @top_layer: Memory layer[0] position + * @mid_layer: Memory layer[1] position + * @low_layer: Memory layer[2] position + * @msg: Message meaningful to the end users that + * explains the event + * @other_detail: Technical details about the event that + * may help hardware manufacturers and + * EDAC developers to analyse the event + */ void edac_mc_handle_error(const enum hw_event_mc_err_type type, struct mem_ctl_info *mci, + const u16 error_count, const unsigned long page_frame_number, const unsigned long offset_in_page, const unsigned long syndrome, - const int layer0, - const int layer1, - const int layer2, + 
const int top_layer, + const int mid_layer, + const int low_layer, const char *msg, - const char *other_detail, - const void *mcelog) + const char *other_detail) { /* FIXME: too much for stack: move it to some pre-alocated area */ char detail[80], location[80]; char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms]; char *p; int row = -1, chan = -1; - int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 }; + int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer }; int i; - u32 grain; + long grain; bool enable_per_layer_report = false; + u8 grain_bits; - debugf3("MC%d: %s()\n", mci->mc_idx, __func__); + edac_dbg(3, "MC%d\n", mci->mc_idx); /* * Check if the event report is consistent and if the memory @@ -1043,13 +1123,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, p = label; *p = '\0'; for (i = 0; i < mci->tot_dimms; i++) { - struct dimm_info *dimm = &mci->dimms[i]; + struct dimm_info *dimm = mci->dimms[i]; - if (layer0 >= 0 && layer0 != dimm->location[0]) + if (top_layer >= 0 && top_layer != dimm->location[0]) continue; - if (layer1 >= 0 && layer1 != dimm->location[1]) + if (mid_layer >= 0 && mid_layer != dimm->location[1]) continue; - if (layer2 >= 0 && layer2 != dimm->location[2]) + if (low_layer >= 0 && low_layer != dimm->location[2]) continue; /* get the max grain, over the error match range */ @@ -1075,11 +1155,9 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, * get csrow/channel of the DIMM, in order to allow * incrementing the compat API counters */ - debugf4("%s: %s csrows map: (%d,%d)\n", - __func__, - mci->mem_is_per_rank ? "rank" : "dimm", - dimm->csrow, dimm->cschannel); - + edac_dbg(4, "%s csrows map: (%d,%d)\n", + mci->mem_is_per_rank ? "rank" : "dimm", + dimm->csrow, dimm->cschannel); if (row == -1) row = dimm->csrow; else if (row >= 0 && row != dimm->csrow) @@ -1095,19 +1173,18 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, if (!enable_per_layer_report) { strcpy(label, "any memory"); } else { - debugf4("%s: csrow/channel to increment: (%d,%d)\n", - __func__, row, chan); + edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan); if (p == label) strcpy(label, "unknown memory"); if (type == HW_EVENT_ERR_CORRECTED) { if (row >= 0) { - mci->csrows[row].ce_count++; + mci->csrows[row]->ce_count += error_count; if (chan >= 0) - mci->csrows[row].channels[chan].ce_count++; + mci->csrows[row]->channels[chan]->ce_count += error_count; } } else if (row >= 0) - mci->csrows[row].ue_count++; + mci->csrows[row]->ue_count += error_count; } /* Fill the RAM location data */ @@ -1120,23 +1197,33 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, edac_layer_name[mci->layers[i].type], pos[i]); } + if (p > location) + *(p - 1) = '\0'; + + /* Report the error via the trace interface */ + + grain_bits = fls_long(grain) + 1; + trace_mc_event(type, msg, label, error_count, + mci->mc_idx, top_layer, mid_layer, low_layer, + PAGES_TO_MiB(page_frame_number) | offset_in_page, + grain_bits, syndrome, other_detail); /* Memory type dependent details about the error */ if (type == HW_EVENT_ERR_CORRECTED) { snprintf(detail, sizeof(detail), - "page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx", + "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx", page_frame_number, offset_in_page, grain, syndrome); - edac_ce_error(mci, pos, msg, location, label, detail, - other_detail, enable_per_layer_report, + edac_ce_error(mci, error_count, pos, msg, location, label, + detail, other_detail, enable_per_layer_report, 
page_frame_number, offset_in_page, grain); } else { snprintf(detail, sizeof(detail), - "page:0x%lx offset:0x%lx grain:%d", + "page:0x%lx offset:0x%lx grain:%ld", page_frame_number, offset_in_page, grain); - edac_ue_error(mci, pos, msg, location, label, detail, - other_detail, enable_per_layer_report); + edac_ue_error(mci, error_count, pos, msg, location, label, + detail, other_detail, enable_per_layer_report); } } EXPORT_SYMBOL_GPL(edac_mc_handle_error); diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index f6a29b0eedc8..ed0bc07b8503 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -7,17 +7,21 @@ * * Written Doug Thompson <norsk5@xmission.com> www.softwarebitmaker.com * + * (c) 2012 - Mauro Carvalho Chehab <mchehab@redhat.com> + * The entire API were re-written, and ported to use struct device + * */ #include <linux/ctype.h> #include <linux/slab.h> #include <linux/edac.h> #include <linux/bug.h> +#include <linux/pm_runtime.h> +#include <linux/uaccess.h> #include "edac_core.h" #include "edac_module.h" - /* MC EDAC Controls, setable by module parameter, and sysfs */ static int edac_mc_log_ue = 1; static int edac_mc_log_ce = 1; @@ -78,6 +82,8 @@ module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int, &edac_mc_poll_msec, 0644); MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds"); +static struct device *mci_pdev; + /* * various constants for Memory Controllers */ @@ -125,317 +131,526 @@ static const char *edac_caps[] = { [EDAC_S16ECD16ED] = "S16ECD16ED" }; -/* EDAC sysfs CSROW data structures and methods +#ifdef CONFIG_EDAC_LEGACY_SYSFS +/* + * EDAC sysfs CSROW data structures and methods + */ + +#define to_csrow(k) container_of(k, struct csrow_info, dev) + +/* + * We need it to avoid namespace conflicts between the legacy API + * and the per-dimm/per-rank one */ +#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ + struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) + +struct dev_ch_attribute { + struct device_attribute attr; + int channel; +}; + +#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \ + struct dev_ch_attribute dev_attr_legacy_##_name = \ + { __ATTR(_name, _mode, _show, _store), (_var) } + +#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel) /* Set of more default csrow<id> attribute show/store functions */ -static ssize_t csrow_ue_count_show(struct csrow_info *csrow, char *data, - int private) +static ssize_t csrow_ue_count_show(struct device *dev, + struct device_attribute *mattr, char *data) { + struct csrow_info *csrow = to_csrow(dev); + return sprintf(data, "%u\n", csrow->ue_count); } -static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data, - int private) +static ssize_t csrow_ce_count_show(struct device *dev, + struct device_attribute *mattr, char *data) { + struct csrow_info *csrow = to_csrow(dev); + return sprintf(data, "%u\n", csrow->ce_count); } -static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, - int private) +static ssize_t csrow_size_show(struct device *dev, + struct device_attribute *mattr, char *data) { + struct csrow_info *csrow = to_csrow(dev); int i; u32 nr_pages = 0; for (i = 0; i < csrow->nr_channels; i++) - nr_pages += csrow->channels[i].dimm->nr_pages; - + nr_pages += csrow->channels[i]->dimm->nr_pages; return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); } -static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, - int private) +static ssize_t 
csrow_mem_type_show(struct device *dev, + struct device_attribute *mattr, char *data) { - return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]); + struct csrow_info *csrow = to_csrow(dev); + + return sprintf(data, "%s\n", mem_types[csrow->channels[0]->dimm->mtype]); } -static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, - int private) +static ssize_t csrow_dev_type_show(struct device *dev, + struct device_attribute *mattr, char *data) { - return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]); + struct csrow_info *csrow = to_csrow(dev); + + return sprintf(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]); } -static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, - int private) +static ssize_t csrow_edac_mode_show(struct device *dev, + struct device_attribute *mattr, + char *data) { - return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]); + struct csrow_info *csrow = to_csrow(dev); + + return sprintf(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]); } /* show/store functions for DIMM Label attributes */ -static ssize_t channel_dimm_label_show(struct csrow_info *csrow, - char *data, int channel) +static ssize_t channel_dimm_label_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct csrow_info *csrow = to_csrow(dev); + unsigned chan = to_channel(mattr); + struct rank_info *rank = csrow->channels[chan]; + /* if field has not been initialized, there is nothing to send */ - if (!csrow->channels[channel].dimm->label[0]) + if (!rank->dimm->label[0]) return 0; return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", - csrow->channels[channel].dimm->label); + rank->dimm->label); } -static ssize_t channel_dimm_label_store(struct csrow_info *csrow, - const char *data, - size_t count, int channel) +static ssize_t channel_dimm_label_store(struct device *dev, + struct device_attribute *mattr, + const char *data, size_t count) { + struct csrow_info *csrow = to_csrow(dev); + unsigned chan = to_channel(mattr); + struct rank_info *rank = csrow->channels[chan]; + ssize_t max_size = 0; max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); - strncpy(csrow->channels[channel].dimm->label, data, max_size); - csrow->channels[channel].dimm->label[max_size] = '\0'; + strncpy(rank->dimm->label, data, max_size); + rank->dimm->label[max_size] = '\0'; return max_size; } /* show function for dynamic chX_ce_count attribute */ -static ssize_t channel_ce_count_show(struct csrow_info *csrow, - char *data, int channel) +static ssize_t channel_ce_count_show(struct device *dev, + struct device_attribute *mattr, char *data) { - return sprintf(data, "%u\n", csrow->channels[channel].ce_count); + struct csrow_info *csrow = to_csrow(dev); + unsigned chan = to_channel(mattr); + struct rank_info *rank = csrow->channels[chan]; + + return sprintf(data, "%u\n", rank->ce_count); } -/* csrow specific attribute structure */ -struct csrowdev_attribute { - struct attribute attr; - ssize_t(*show) (struct csrow_info *, char *, int); - ssize_t(*store) (struct csrow_info *, const char *, size_t, int); - int private; -}; +/* cwrow<id>/attribute files */ +DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL); +DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL); +DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL); +DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL); +DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL); 
+DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL); -#define to_csrow(k) container_of(k, struct csrow_info, kobj) -#define to_csrowdev_attr(a) container_of(a, struct csrowdev_attribute, attr) +/* default attributes of the CSROW<id> object */ +static struct attribute *csrow_attrs[] = { + &dev_attr_legacy_dev_type.attr, + &dev_attr_legacy_mem_type.attr, + &dev_attr_legacy_edac_mode.attr, + &dev_attr_legacy_size_mb.attr, + &dev_attr_legacy_ue_count.attr, + &dev_attr_legacy_ce_count.attr, + NULL, +}; -/* Set of show/store higher level functions for default csrow attributes */ -static ssize_t csrowdev_show(struct kobject *kobj, - struct attribute *attr, char *buffer) -{ - struct csrow_info *csrow = to_csrow(kobj); - struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); +static struct attribute_group csrow_attr_grp = { + .attrs = csrow_attrs, +}; - if (csrowdev_attr->show) - return csrowdev_attr->show(csrow, - buffer, csrowdev_attr->private); - return -EIO; -} +static const struct attribute_group *csrow_attr_groups[] = { + &csrow_attr_grp, + NULL +}; -static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) +static void csrow_attr_release(struct device *dev) { - struct csrow_info *csrow = to_csrow(kobj); - struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); - - if (csrowdev_attr->store) - return csrowdev_attr->store(csrow, - buffer, - count, csrowdev_attr->private); - return -EIO; -} + struct csrow_info *csrow = container_of(dev, struct csrow_info, dev); -static const struct sysfs_ops csrowfs_ops = { - .show = csrowdev_show, - .store = csrowdev_store -}; + edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev)); + kfree(csrow); +} -#define CSROWDEV_ATTR(_name,_mode,_show,_store,_private) \ -static struct csrowdev_attribute attr_##_name = { \ - .attr = {.name = __stringify(_name), .mode = _mode }, \ - .show = _show, \ - .store = _store, \ - .private = _private, \ +static struct device_type csrow_attr_type = { + .groups = csrow_attr_groups, + .release = csrow_attr_release, }; -/* default cwrow<id>/attribute files */ -CSROWDEV_ATTR(size_mb, S_IRUGO, csrow_size_show, NULL, 0); -CSROWDEV_ATTR(dev_type, S_IRUGO, csrow_dev_type_show, NULL, 0); -CSROWDEV_ATTR(mem_type, S_IRUGO, csrow_mem_type_show, NULL, 0); -CSROWDEV_ATTR(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL, 0); -CSROWDEV_ATTR(ue_count, S_IRUGO, csrow_ue_count_show, NULL, 0); -CSROWDEV_ATTR(ce_count, S_IRUGO, csrow_ce_count_show, NULL, 0); +/* + * possible dynamic channel DIMM Label attribute files + * + */ -/* default attributes of the CSROW<id> object */ -static struct csrowdev_attribute *default_csrow_attr[] = { - &attr_dev_type, - &attr_mem_type, - &attr_edac_mode, - &attr_size_mb, - &attr_ue_count, - &attr_ce_count, - NULL, -}; +#define EDAC_NR_CHANNELS 6 -/* possible dynamic channel DIMM Label attribute files */ -CSROWDEV_ATTR(ch0_dimm_label, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 0); -CSROWDEV_ATTR(ch1_dimm_label, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 1); -CSROWDEV_ATTR(ch2_dimm_label, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 2); -CSROWDEV_ATTR(ch3_dimm_label, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 3); 
-CSROWDEV_ATTR(ch4_dimm_label, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 4); -CSROWDEV_ATTR(ch5_dimm_label, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR, channel_dimm_label_show, channel_dimm_label_store, 5); /* Total possible dynamic DIMM Label attribute file table */ -static struct csrowdev_attribute *dynamic_csrow_dimm_attr[] = { - &attr_ch0_dimm_label, - &attr_ch1_dimm_label, - &attr_ch2_dimm_label, - &attr_ch3_dimm_label, - &attr_ch4_dimm_label, - &attr_ch5_dimm_label +static struct device_attribute *dynamic_csrow_dimm_attr[] = { + &dev_attr_legacy_ch0_dimm_label.attr, + &dev_attr_legacy_ch1_dimm_label.attr, + &dev_attr_legacy_ch2_dimm_label.attr, + &dev_attr_legacy_ch3_dimm_label.attr, + &dev_attr_legacy_ch4_dimm_label.attr, + &dev_attr_legacy_ch5_dimm_label.attr }; /* possible dynamic channel ce_count attribute files */ -CSROWDEV_ATTR(ch0_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 0); -CSROWDEV_ATTR(ch1_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 1); -CSROWDEV_ATTR(ch2_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 2); -CSROWDEV_ATTR(ch3_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 3); -CSROWDEV_ATTR(ch4_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 4); -CSROWDEV_ATTR(ch5_ce_count, S_IRUGO | S_IWUSR, channel_ce_count_show, NULL, 5); +DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR, + channel_ce_count_show, NULL, 0); +DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR, + channel_ce_count_show, NULL, 1); +DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR, + channel_ce_count_show, NULL, 2); +DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR, + channel_ce_count_show, NULL, 3); +DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR, + channel_ce_count_show, NULL, 4); +DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR, + channel_ce_count_show, NULL, 5); /* Total possible dynamic ce_count attribute file table */ -static struct csrowdev_attribute *dynamic_csrow_ce_count_attr[] = { - &attr_ch0_ce_count, - &attr_ch1_ce_count, - &attr_ch2_ce_count, - &attr_ch3_ce_count, - &attr_ch4_ce_count, - &attr_ch5_ce_count +static struct device_attribute *dynamic_csrow_ce_count_attr[] = { + &dev_attr_legacy_ch0_ce_count.attr, + &dev_attr_legacy_ch1_ce_count.attr, + &dev_attr_legacy_ch2_ce_count.attr, + &dev_attr_legacy_ch3_ce_count.attr, + &dev_attr_legacy_ch4_ce_count.attr, + &dev_attr_legacy_ch5_ce_count.attr }; -#define EDAC_NR_CHANNELS 6 +static inline int nr_pages_per_csrow(struct csrow_info *csrow) +{ + int chan, nr_pages = 0; + + for (chan = 0; chan < csrow->nr_channels; chan++) + nr_pages += csrow->channels[chan]->dimm->nr_pages; + + return nr_pages; +} -/* Create dynamic CHANNEL files, indexed by 'chan', under specifed CSROW */ -static int edac_create_channel_files(struct kobject *kobj, int chan) +/* Create a CSROW object under specifed edac_mc_device */ +static int edac_create_csrow_object(struct mem_ctl_info *mci, + struct csrow_info *csrow, int index) { - int err = -ENODEV; + int err, chan; + + if (csrow->nr_channels >= EDAC_NR_CHANNELS) + return -ENODEV; + + csrow->dev.type = &csrow_attr_type; + csrow->dev.bus = &mci->bus; + device_initialize(&csrow->dev); + csrow->dev.parent = &mci->dev; + dev_set_name(&csrow->dev, "csrow%d", index); + dev_set_drvdata(&csrow->dev, csrow); - if (chan >= EDAC_NR_CHANNELS) + edac_dbg(0, "creating (virtual) csrow node %s\n", + dev_name(&csrow->dev)); + + err = device_add(&csrow->dev); + if (err < 0) return err; - /* 
create the DIMM label attribute file */ - err = sysfs_create_file(kobj, - (struct attribute *) - dynamic_csrow_dimm_attr[chan]); - - if (!err) { - /* create the CE Count attribute file */ - err = sysfs_create_file(kobj, - (struct attribute *) - dynamic_csrow_ce_count_attr[chan]); - } else { - debugf1("%s() dimm labels and ce_count files created", - __func__); + for (chan = 0; chan < csrow->nr_channels; chan++) { + /* Only expose populated DIMMs */ + if (!csrow->channels[chan]->dimm->nr_pages) + continue; + err = device_create_file(&csrow->dev, + dynamic_csrow_dimm_attr[chan]); + if (err < 0) + goto error; + err = device_create_file(&csrow->dev, + dynamic_csrow_ce_count_attr[chan]); + if (err < 0) { + device_remove_file(&csrow->dev, + dynamic_csrow_dimm_attr[chan]); + goto error; + } + } + + return 0; + +error: + for (--chan; chan >= 0; chan--) { + device_remove_file(&csrow->dev, + dynamic_csrow_dimm_attr[chan]); + device_remove_file(&csrow->dev, + dynamic_csrow_ce_count_attr[chan]); } + put_device(&csrow->dev); return err; } -/* No memory to release for this kobj */ -static void edac_csrow_instance_release(struct kobject *kobj) +/* Create a CSROW object under specifed edac_mc_device */ +static int edac_create_csrow_objects(struct mem_ctl_info *mci) { - struct mem_ctl_info *mci; - struct csrow_info *cs; + int err, i, chan; + struct csrow_info *csrow; + + for (i = 0; i < mci->nr_csrows; i++) { + csrow = mci->csrows[i]; + if (!nr_pages_per_csrow(csrow)) + continue; + err = edac_create_csrow_object(mci, mci->csrows[i], i); + if (err < 0) + goto error; + } + return 0; - debugf1("%s()\n", __func__); +error: + for (--i; i >= 0; i--) { + csrow = mci->csrows[i]; + if (!nr_pages_per_csrow(csrow)) + continue; + for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { + if (!csrow->channels[chan]->dimm->nr_pages) + continue; + device_remove_file(&csrow->dev, + dynamic_csrow_dimm_attr[chan]); + device_remove_file(&csrow->dev, + dynamic_csrow_ce_count_attr[chan]); + } + put_device(&mci->csrows[i]->dev); + } - cs = container_of(kobj, struct csrow_info, kobj); - mci = cs->mci; + return err; +} - kobject_put(&mci->edac_mci_kobj); +static void edac_delete_csrow_objects(struct mem_ctl_info *mci) +{ + int i, chan; + struct csrow_info *csrow; + + for (i = mci->nr_csrows - 1; i >= 0; i--) { + csrow = mci->csrows[i]; + if (!nr_pages_per_csrow(csrow)) + continue; + for (chan = csrow->nr_channels - 1; chan >= 0; chan--) { + if (!csrow->channels[chan]->dimm->nr_pages) + continue; + edac_dbg(1, "Removing csrow %d channel %d sysfs nodes\n", + i, chan); + device_remove_file(&csrow->dev, + dynamic_csrow_dimm_attr[chan]); + device_remove_file(&csrow->dev, + dynamic_csrow_ce_count_attr[chan]); + } + put_device(&mci->csrows[i]->dev); + device_del(&mci->csrows[i]->dev); + } } +#endif -/* the kobj_type instance for a CSROW */ -static struct kobj_type ktype_csrow = { - .release = edac_csrow_instance_release, - .sysfs_ops = &csrowfs_ops, - .default_attrs = (struct attribute **)default_csrow_attr, +/* + * Per-dimm (or per-rank) devices + */ + +#define to_dimm(k) container_of(k, struct dimm_info, dev) + +/* show/store functions for DIMM Label attributes */ +static ssize_t dimmdev_location_show(struct device *dev, + struct device_attribute *mattr, char *data) +{ + struct dimm_info *dimm = to_dimm(dev); + + return edac_dimm_info_location(dimm, data, PAGE_SIZE); +} + +static ssize_t dimmdev_label_show(struct device *dev, + struct device_attribute *mattr, char *data) +{ + struct dimm_info *dimm = to_dimm(dev); + + /* if field has 
not been initialized, there is nothing to send */ + if (!dimm->label[0]) + return 0; + + return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", dimm->label); +} + +static ssize_t dimmdev_label_store(struct device *dev, + struct device_attribute *mattr, + const char *data, + size_t count) +{ + struct dimm_info *dimm = to_dimm(dev); + + ssize_t max_size = 0; + + max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); + strncpy(dimm->label, data, max_size); + dimm->label[max_size] = '\0'; + + return max_size; +} + +static ssize_t dimmdev_size_show(struct device *dev, + struct device_attribute *mattr, char *data) +{ + struct dimm_info *dimm = to_dimm(dev); + + return sprintf(data, "%u\n", PAGES_TO_MiB(dimm->nr_pages)); +} + +static ssize_t dimmdev_mem_type_show(struct device *dev, + struct device_attribute *mattr, char *data) +{ + struct dimm_info *dimm = to_dimm(dev); + + return sprintf(data, "%s\n", mem_types[dimm->mtype]); +} + +static ssize_t dimmdev_dev_type_show(struct device *dev, + struct device_attribute *mattr, char *data) +{ + struct dimm_info *dimm = to_dimm(dev); + + return sprintf(data, "%s\n", dev_types[dimm->dtype]); +} + +static ssize_t dimmdev_edac_mode_show(struct device *dev, + struct device_attribute *mattr, + char *data) +{ + struct dimm_info *dimm = to_dimm(dev); + + return sprintf(data, "%s\n", edac_caps[dimm->edac_mode]); +} + +/* dimm/rank attribute files */ +static DEVICE_ATTR(dimm_label, S_IRUGO | S_IWUSR, + dimmdev_label_show, dimmdev_label_store); +static DEVICE_ATTR(dimm_location, S_IRUGO, dimmdev_location_show, NULL); +static DEVICE_ATTR(size, S_IRUGO, dimmdev_size_show, NULL); +static DEVICE_ATTR(dimm_mem_type, S_IRUGO, dimmdev_mem_type_show, NULL); +static DEVICE_ATTR(dimm_dev_type, S_IRUGO, dimmdev_dev_type_show, NULL); +static DEVICE_ATTR(dimm_edac_mode, S_IRUGO, dimmdev_edac_mode_show, NULL); + +/* attributes of the dimm<id>/rank<id> object */ +static struct attribute *dimm_attrs[] = { + &dev_attr_dimm_label.attr, + &dev_attr_dimm_location.attr, + &dev_attr_size.attr, + &dev_attr_dimm_mem_type.attr, + &dev_attr_dimm_dev_type.attr, + &dev_attr_dimm_edac_mode.attr, + NULL, }; -/* Create a CSROW object under specifed edac_mc_device */ -static int edac_create_csrow_object(struct mem_ctl_info *mci, - struct csrow_info *csrow, int index) +static struct attribute_group dimm_attr_grp = { + .attrs = dimm_attrs, +}; + +static const struct attribute_group *dimm_attr_groups[] = { + &dimm_attr_grp, + NULL +}; + +static void dimm_attr_release(struct device *dev) { - struct kobject *kobj_mci = &mci->edac_mci_kobj; - struct kobject *kobj; - int chan; - int err; + struct dimm_info *dimm = container_of(dev, struct dimm_info, dev); - /* generate ..../edac/mc/mc<id>/csrow<index> */ - memset(&csrow->kobj, 0, sizeof(csrow->kobj)); - csrow->mci = mci; /* include container up link */ + edac_dbg(1, "Releasing dimm device %s\n", dev_name(dev)); + kfree(dimm); +} - /* bump the mci instance's kobject's ref count */ - kobj = kobject_get(&mci->edac_mci_kobj); - if (!kobj) { - err = -ENODEV; - goto err_out; - } +static struct device_type dimm_attr_type = { + .groups = dimm_attr_groups, + .release = dimm_attr_release, +}; + +/* Create a DIMM object under specifed memory controller device */ +static int edac_create_dimm_object(struct mem_ctl_info *mci, + struct dimm_info *dimm, + int index) +{ + int err; + dimm->mci = mci; - /* Instanstiate the csrow object */ - err = kobject_init_and_add(&csrow->kobj, &ktype_csrow, kobj_mci, - "csrow%d", index); - if (err) - goto err_release_top_kobj; 
+ dimm->dev.type = &dimm_attr_type; + dimm->dev.bus = &mci->bus; + device_initialize(&dimm->dev); - /* At this point, to release a csrow kobj, one must - * call the kobject_put and allow that tear down - * to work the releasing - */ + dimm->dev.parent = &mci->dev; + if (mci->mem_is_per_rank) + dev_set_name(&dimm->dev, "rank%d", index); + else + dev_set_name(&dimm->dev, "dimm%d", index); + dev_set_drvdata(&dimm->dev, dimm); + pm_runtime_forbid(&mci->dev); - /* Create the dyanmic attribute files on this csrow, - * namely, the DIMM labels and the channel ce_count - */ - for (chan = 0; chan < csrow->nr_channels; chan++) { - err = edac_create_channel_files(&csrow->kobj, chan); - if (err) { - /* special case the unregister here */ - kobject_put(&csrow->kobj); - goto err_out; - } - } - kobject_uevent(&csrow->kobj, KOBJ_ADD); - return 0; + err = device_add(&dimm->dev); - /* error unwind stack */ -err_release_top_kobj: - kobject_put(&mci->edac_mci_kobj); + edac_dbg(0, "creating rank/dimm device %s\n", dev_name(&dimm->dev)); -err_out: return err; } -/* default sysfs methods and data structures for the main MCI kobject */ +/* + * Memory controller device + */ + +#define to_mci(k) container_of(k, struct mem_ctl_info, dev) -static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, +static ssize_t mci_reset_counters_store(struct device *dev, + struct device_attribute *mattr, const char *data, size_t count) { - int row, chan; - - mci->ue_noinfo_count = 0; - mci->ce_noinfo_count = 0; + struct mem_ctl_info *mci = to_mci(dev); + int cnt, row, chan, i; mci->ue_mc = 0; mci->ce_mc = 0; + mci->ue_noinfo_count = 0; + mci->ce_noinfo_count = 0; for (row = 0; row < mci->nr_csrows; row++) { - struct csrow_info *ri = &mci->csrows[row]; + struct csrow_info *ri = mci->csrows[row]; ri->ue_count = 0; ri->ce_count = 0; for (chan = 0; chan < ri->nr_channels; chan++) - ri->channels[chan].ce_count = 0; + ri->channels[chan]->ce_count = 0; + } + + cnt = 1; + for (i = 0; i < mci->n_layers; i++) { + cnt *= mci->layers[i].size; + memset(mci->ce_per_layer[i], 0, cnt * sizeof(u32)); + memset(mci->ue_per_layer[i], 0, cnt * sizeof(u32)); } mci->start_time = jiffies; @@ -451,9 +666,11 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, * Negative value still means that an error has occurred while setting * the scrub rate. */ -static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, +static ssize_t mci_sdram_scrub_rate_store(struct device *dev, + struct device_attribute *mattr, const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); unsigned long bandwidth = 0; int new_bw = 0; @@ -476,8 +693,11 @@ static ssize_t mci_sdram_scrub_rate_store(struct mem_ctl_info *mci, /* * ->get_sdram_scrub_rate() return value semantics same as above. 
*/ -static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) +static ssize_t mci_sdram_scrub_rate_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); int bandwidth = 0; if (!mci->get_sdram_scrub_rate) @@ -493,45 +713,72 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data) } /* default attribute files for the MCI object */ -static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data) +static ssize_t mci_ue_count_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); + return sprintf(data, "%d\n", mci->ue_mc); } -static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data) +static ssize_t mci_ce_count_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); + return sprintf(data, "%d\n", mci->ce_mc); } -static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data) +static ssize_t mci_ce_noinfo_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); + return sprintf(data, "%d\n", mci->ce_noinfo_count); } -static ssize_t mci_ue_noinfo_show(struct mem_ctl_info *mci, char *data) +static ssize_t mci_ue_noinfo_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); + return sprintf(data, "%d\n", mci->ue_noinfo_count); } -static ssize_t mci_seconds_show(struct mem_ctl_info *mci, char *data) +static ssize_t mci_seconds_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); + return sprintf(data, "%ld\n", (jiffies - mci->start_time) / HZ); } -static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data) +static ssize_t mci_ctl_name_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); + return sprintf(data, "%s\n", mci->ctl_name); } -static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) +static ssize_t mci_size_mb_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); int total_pages = 0, csrow_idx, j; for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { - struct csrow_info *csrow = &mci->csrows[csrow_idx]; + struct csrow_info *csrow = mci->csrows[csrow_idx]; for (j = 0; j < csrow->nr_channels; j++) { - struct dimm_info *dimm = csrow->channels[j].dimm; + struct dimm_info *dimm = csrow->channels[j]->dimm; total_pages += dimm->nr_pages; } @@ -540,361 +787,187 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); } -#define to_mci(k) container_of(k, struct mem_ctl_info, edac_mci_kobj) -#define to_mcidev_attr(a) container_of(a,struct mcidev_sysfs_attribute,attr) - -/* MCI show/store functions for top most object */ -static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr, - char *buffer) +static ssize_t mci_max_location_show(struct device *dev, + struct device_attribute *mattr, + char *data) { - struct mem_ctl_info *mem_ctl_info = to_mci(kobj); - struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); - - debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); + struct mem_ctl_info *mci = to_mci(dev); + int i; + char *p = data; - if (mcidev_attr->show) - return mcidev_attr->show(mem_ctl_info, 
buffer); + for (i = 0; i < mci->n_layers; i++) { + p += sprintf(p, "%s %d ", + edac_layer_name[mci->layers[i].type], + mci->layers[i].size - 1); + } - return -EIO; + return p - data; } -static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) +#ifdef CONFIG_EDAC_DEBUG +static ssize_t edac_fake_inject_write(struct file *file, + const char __user *data, + size_t count, loff_t *ppos) { - struct mem_ctl_info *mem_ctl_info = to_mci(kobj); - struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); - - debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); - - if (mcidev_attr->store) - return mcidev_attr->store(mem_ctl_info, buffer, count); + struct device *dev = file->private_data; + struct mem_ctl_info *mci = to_mci(dev); + static enum hw_event_mc_err_type type; + u16 errcount = mci->fake_inject_count; + + if (!errcount) + errcount = 1; + + type = mci->fake_inject_ue ? HW_EVENT_ERR_UNCORRECTED + : HW_EVENT_ERR_CORRECTED; + + printk(KERN_DEBUG + "Generating %d %s fake error%s to %d.%d.%d to test core handling. NOTE: this won't test the driver-specific decoding logic.\n", + errcount, + (type == HW_EVENT_ERR_UNCORRECTED) ? "UE" : "CE", + errcount > 1 ? "s" : "", + mci->fake_inject_layer[0], + mci->fake_inject_layer[1], + mci->fake_inject_layer[2] + ); + edac_mc_handle_error(type, mci, errcount, 0, 0, 0, + mci->fake_inject_layer[0], + mci->fake_inject_layer[1], + mci->fake_inject_layer[2], + "FAKE ERROR", "for EDAC testing only"); - return -EIO; + return count; } -/* Intermediate show/store table */ -static const struct sysfs_ops mci_ops = { - .show = mcidev_show, - .store = mcidev_store -}; +static int debugfs_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} -#define MCIDEV_ATTR(_name,_mode,_show,_store) \ -static struct mcidev_sysfs_attribute mci_attr_##_name = { \ - .attr = {.name = __stringify(_name), .mode = _mode }, \ - .show = _show, \ - .store = _store, \ +static const struct file_operations debug_fake_inject_fops = { + .open = debugfs_open, + .write = edac_fake_inject_write, + .llseek = generic_file_llseek, }; +#endif /* default Control file */ -MCIDEV_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); +DEVICE_ATTR(reset_counters, S_IWUSR, NULL, mci_reset_counters_store); /* default Attribute files */ -MCIDEV_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); -MCIDEV_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); -MCIDEV_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); -MCIDEV_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); -MCIDEV_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); -MCIDEV_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); -MCIDEV_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); +DEVICE_ATTR(mc_name, S_IRUGO, mci_ctl_name_show, NULL); +DEVICE_ATTR(size_mb, S_IRUGO, mci_size_mb_show, NULL); +DEVICE_ATTR(seconds_since_reset, S_IRUGO, mci_seconds_show, NULL); +DEVICE_ATTR(ue_noinfo_count, S_IRUGO, mci_ue_noinfo_show, NULL); +DEVICE_ATTR(ce_noinfo_count, S_IRUGO, mci_ce_noinfo_show, NULL); +DEVICE_ATTR(ue_count, S_IRUGO, mci_ue_count_show, NULL); +DEVICE_ATTR(ce_count, S_IRUGO, mci_ce_count_show, NULL); +DEVICE_ATTR(max_location, S_IRUGO, mci_max_location_show, NULL); /* memory scrubber attribute file */ -MCIDEV_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show, +DEVICE_ATTR(sdram_scrub_rate, S_IRUGO | S_IWUSR, mci_sdram_scrub_rate_show, mci_sdram_scrub_rate_store); -static struct 
mcidev_sysfs_attribute *mci_attr[] = { - &mci_attr_reset_counters, - &mci_attr_mc_name, - &mci_attr_size_mb, - &mci_attr_seconds_since_reset, - &mci_attr_ue_noinfo_count, - &mci_attr_ce_noinfo_count, - &mci_attr_ue_count, - &mci_attr_ce_count, - &mci_attr_sdram_scrub_rate, +static struct attribute *mci_attrs[] = { + &dev_attr_reset_counters.attr, + &dev_attr_mc_name.attr, + &dev_attr_size_mb.attr, + &dev_attr_seconds_since_reset.attr, + &dev_attr_ue_noinfo_count.attr, + &dev_attr_ce_noinfo_count.attr, + &dev_attr_ue_count.attr, + &dev_attr_ce_count.attr, + &dev_attr_sdram_scrub_rate.attr, + &dev_attr_max_location.attr, NULL }; +static struct attribute_group mci_attr_grp = { + .attrs = mci_attrs, +}; -/* - * Release of a MC controlling instance - * - * each MC control instance has the following resources upon entry: - * a) a ref count on the top memctl kobj - * b) a ref count on this module - * - * this function must decrement those ref counts and then - * issue a free on the instance's memory - */ -static void edac_mci_control_release(struct kobject *kobj) -{ - struct mem_ctl_info *mci; - - mci = to_mci(kobj); +static const struct attribute_group *mci_attr_groups[] = { + &mci_attr_grp, + NULL +}; - debugf0("%s() mci instance idx=%d releasing\n", __func__, mci->mc_idx); +static void mci_attr_release(struct device *dev) +{ + struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev); - /* decrement the module ref count */ - module_put(mci->owner); + edac_dbg(1, "Releasing csrow device %s\n", dev_name(dev)); + kfree(mci); } -static struct kobj_type ktype_mci = { - .release = edac_mci_control_release, - .sysfs_ops = &mci_ops, - .default_attrs = (struct attribute **)mci_attr, +static struct device_type mci_attr_type = { + .groups = mci_attr_groups, + .release = mci_attr_release, }; -/* EDAC memory controller sysfs kset: - * /sys/devices/system/edac/mc - */ -static struct kset *mc_kset; +#ifdef CONFIG_EDAC_DEBUG +static struct dentry *edac_debugfs; -/* - * edac_mc_register_sysfs_main_kobj - * - * setups and registers the main kobject for each mci - */ -int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci) +int __init edac_debugfs_init(void) { - struct kobject *kobj_mci; - int err; - - debugf1("%s()\n", __func__); - - kobj_mci = &mci->edac_mci_kobj; - - /* Init the mci's kobject */ - memset(kobj_mci, 0, sizeof(*kobj_mci)); - - /* Record which module 'owns' this control structure - * and bump the ref count of the module - */ - mci->owner = THIS_MODULE; - - /* bump ref count on this module */ - if (!try_module_get(mci->owner)) { - err = -ENODEV; - goto fail_out; - } - - /* this instance become part of the mc_kset */ - kobj_mci->kset = mc_kset; - - /* register the mc<id> kobject to the mc_kset */ - err = kobject_init_and_add(kobj_mci, &ktype_mci, NULL, - "mc%d", mci->mc_idx); - if (err) { - debugf1("%s()Failed to register '.../edac/mc%d'\n", - __func__, mci->mc_idx); - goto kobj_reg_fail; + edac_debugfs = debugfs_create_dir("edac", NULL); + if (IS_ERR(edac_debugfs)) { + edac_debugfs = NULL; + return -ENOMEM; } - kobject_uevent(kobj_mci, KOBJ_ADD); - - /* At this point, to 'free' the control struct, - * edac_mc_unregister_sysfs_main_kobj() must be used - */ - - debugf1("%s() Registered '.../edac/mc%d' kobject\n", - __func__, mci->mc_idx); - return 0; - - /* Error exit stack */ - -kobj_reg_fail: - module_put(mci->owner); - -fail_out: - return err; -} - -/* - * edac_mc_register_sysfs_main_kobj - * - * tears down and the main mci kobject from the mc_kset - */ -void 
edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci) -{ - debugf1("%s()\n", __func__); - - /* delete the kobj from the mc_kset */ - kobject_put(&mci->edac_mci_kobj); -} - -#define EDAC_DEVICE_SYMLINK "device" - -#define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci) - -/* MCI show/store functions for top most object */ -static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr, - char *buffer) -{ - struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj); - struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); - - debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); - - if (mcidev_attr->show) - return mcidev_attr->show(mem_ctl_info, buffer); - - return -EIO; } -static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr, - const char *buffer, size_t count) +void __exit edac_debugfs_exit(void) { - struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj); - struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr); - - debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info); - - if (mcidev_attr->store) - return mcidev_attr->store(mem_ctl_info, buffer, count); - - return -EIO; + debugfs_remove(edac_debugfs); } -/* No memory to release for this kobj */ -static void edac_inst_grp_release(struct kobject *kobj) +int edac_create_debug_nodes(struct mem_ctl_info *mci) { - struct mcidev_sysfs_group_kobj *grp; - struct mem_ctl_info *mci; - - debugf1("%s()\n", __func__); - - grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj); - mci = grp->mci; -} - -/* Intermediate show/store table */ -static struct sysfs_ops inst_grp_ops = { - .show = inst_grp_show, - .store = inst_grp_store -}; - -/* the kobj_type instance for a instance group */ -static struct kobj_type ktype_inst_grp = { - .release = edac_inst_grp_release, - .sysfs_ops = &inst_grp_ops, -}; - + struct dentry *d, *parent; + char name[80]; + int i; -/* - * edac_create_mci_instance_attributes - * create MC driver specific attributes bellow an specified kobj - * This routine calls itself recursively, in order to create an entire - * object tree. 
- */ -static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, - const struct mcidev_sysfs_attribute *sysfs_attrib, - struct kobject *kobj) -{ - int err; + if (!edac_debugfs) + return -ENODEV; - debugf4("%s()\n", __func__); - - while (sysfs_attrib) { - debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); - if (sysfs_attrib->grp) { - struct mcidev_sysfs_group_kobj *grp_kobj; - - grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL); - if (!grp_kobj) - return -ENOMEM; - - grp_kobj->grp = sysfs_attrib->grp; - grp_kobj->mci = mci; - list_add_tail(&grp_kobj->list, &mci->grp_kobj_list); - - debugf0("%s() grp %s, mci %p\n", __func__, - sysfs_attrib->grp->name, mci); - - err = kobject_init_and_add(&grp_kobj->kobj, - &ktype_inst_grp, - &mci->edac_mci_kobj, - sysfs_attrib->grp->name); - if (err < 0) { - printk(KERN_ERR "kobject_init_and_add failed: %d\n", err); - return err; - } - err = edac_create_mci_instance_attributes(mci, - grp_kobj->grp->mcidev_attr, - &grp_kobj->kobj); - - if (err < 0) - return err; - } else if (sysfs_attrib->attr.name) { - debugf4("%s() file %s\n", __func__, - sysfs_attrib->attr.name); - - err = sysfs_create_file(kobj, &sysfs_attrib->attr); - if (err < 0) { - printk(KERN_ERR "sysfs_create_file failed: %d\n", err); - return err; - } - } else - break; - - sysfs_attrib++; + d = debugfs_create_dir(mci->dev.kobj.name, edac_debugfs); + if (!d) + return -ENOMEM; + parent = d; + + for (i = 0; i < mci->n_layers; i++) { + sprintf(name, "fake_inject_%s", + edac_layer_name[mci->layers[i].type]); + d = debugfs_create_u8(name, S_IRUGO | S_IWUSR, parent, + &mci->fake_inject_layer[i]); + if (!d) + goto nomem; } - return 0; -} + d = debugfs_create_bool("fake_inject_ue", S_IRUGO | S_IWUSR, parent, + &mci->fake_inject_ue); + if (!d) + goto nomem; -/* - * edac_remove_mci_instance_attributes - * remove MC driver specific attributes at the topmost level - * directory of this mci instance. 
- */ -static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci, - const struct mcidev_sysfs_attribute *sysfs_attrib, - struct kobject *kobj, int count) -{ - struct mcidev_sysfs_group_kobj *grp_kobj, *tmp; + d = debugfs_create_u16("fake_inject_count", S_IRUGO | S_IWUSR, parent, + &mci->fake_inject_count); + if (!d) + goto nomem; - debugf1("%s()\n", __func__); - - /* - * loop if there are attributes and until we hit a NULL entry - * Remove first all the attributes - */ - while (sysfs_attrib) { - debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); - if (sysfs_attrib->grp) { - debugf4("%s() seeking for group %s\n", - __func__, sysfs_attrib->grp->name); - list_for_each_entry(grp_kobj, - &mci->grp_kobj_list, list) { - debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp); - if (grp_kobj->grp == sysfs_attrib->grp) { - edac_remove_mci_instance_attributes(mci, - grp_kobj->grp->mcidev_attr, - &grp_kobj->kobj, count + 1); - debugf4("%s() group %s\n", __func__, - sysfs_attrib->grp->name); - kobject_put(&grp_kobj->kobj); - } - } - debugf4("%s() end of seeking for group %s\n", - __func__, sysfs_attrib->grp->name); - } else if (sysfs_attrib->attr.name) { - debugf4("%s() file %s\n", __func__, - sysfs_attrib->attr.name); - sysfs_remove_file(kobj, &sysfs_attrib->attr); - } else - break; - sysfs_attrib++; - } + d = debugfs_create_file("fake_inject", S_IWUSR, parent, + &mci->dev, + &debug_fake_inject_fops); + if (!d) + goto nomem; - /* Remove the group objects */ - if (count) - return; - list_for_each_entry_safe(grp_kobj, tmp, - &mci->grp_kobj_list, list) { - list_del(&grp_kobj->list); - kfree(grp_kobj); - } + mci->debugfs = parent; + return 0; +nomem: + debugfs_remove(mci->debugfs); + return -ENOMEM; } - +#endif /* * Create a new Memory Controller kobject instance, @@ -906,77 +979,87 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci, */ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) { - int i, j; - int err; - struct csrow_info *csrow; - struct kobject *kobj_mci = &mci->edac_mci_kobj; + int i, err; - debugf0("%s() idx=%d\n", __func__, mci->mc_idx); - - INIT_LIST_HEAD(&mci->grp_kobj_list); + /* + * The memory controller needs its own bus, in order to avoid + * namespace conflicts at /sys/bus/edac. + */ + mci->bus.name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx); + if (!mci->bus.name) + return -ENOMEM; + edac_dbg(0, "creating bus %s\n", mci->bus.name); + err = bus_register(&mci->bus); + if (err < 0) + return err; - /* create a symlink for the device */ - err = sysfs_create_link(kobj_mci, &mci->dev->kobj, - EDAC_DEVICE_SYMLINK); - if (err) { - debugf1("%s() failure to create symlink\n", __func__); - goto fail0; + /* get the /sys/devices/system/edac subsys reference */ + mci->dev.type = &mci_attr_type; + device_initialize(&mci->dev); + + mci->dev.parent = mci_pdev; + mci->dev.bus = &mci->bus; + dev_set_name(&mci->dev, "mc%d", mci->mc_idx); + dev_set_drvdata(&mci->dev, mci); + pm_runtime_forbid(&mci->dev); + + edac_dbg(0, "creating device %s\n", dev_name(&mci->dev)); + err = device_add(&mci->dev); + if (err < 0) { + bus_unregister(&mci->bus); + kfree(mci->bus.name); + return err; } - /* If the low level driver desires some attributes, - * then create them now for the driver. 
+ /* + * Create the dimm/rank devices */ - if (mci->mc_driver_sysfs_attributes) { - err = edac_create_mci_instance_attributes(mci, - mci->mc_driver_sysfs_attributes, - &mci->edac_mci_kobj); + for (i = 0; i < mci->tot_dimms; i++) { + struct dimm_info *dimm = mci->dimms[i]; + /* Only expose populated DIMMs */ + if (dimm->nr_pages == 0) + continue; +#ifdef CONFIG_EDAC_DEBUG + edac_dbg(1, "creating dimm%d, located at ", i); + if (edac_debug_level >= 1) { + int lay; + for (lay = 0; lay < mci->n_layers; lay++) + printk(KERN_CONT "%s %d ", + edac_layer_name[mci->layers[lay].type], + dimm->location[lay]); + printk(KERN_CONT "\n"); + } +#endif + err = edac_create_dimm_object(mci, dimm, i); if (err) { - debugf1("%s() failure to create mci attributes\n", - __func__); - goto fail0; + edac_dbg(1, "failure: create dimm %d obj\n", i); + goto fail; } } - /* Make directories for each CSROW object under the mc<id> kobject - */ - for (i = 0; i < mci->nr_csrows; i++) { - int nr_pages = 0; - - csrow = &mci->csrows[i]; - for (j = 0; j < csrow->nr_channels; j++) - nr_pages += csrow->channels[j].dimm->nr_pages; - - if (nr_pages > 0) { - err = edac_create_csrow_object(mci, csrow, i); - if (err) { - debugf1("%s() failure: create csrow %d obj\n", - __func__, i); - goto fail1; - } - } - } +#ifdef CONFIG_EDAC_LEGACY_SYSFS + err = edac_create_csrow_objects(mci); + if (err < 0) + goto fail; +#endif +#ifdef CONFIG_EDAC_DEBUG + edac_create_debug_nodes(mci); +#endif return 0; -fail1: +fail: for (i--; i >= 0; i--) { - int nr_pages = 0; - - csrow = &mci->csrows[i]; - for (j = 0; j < csrow->nr_channels; j++) - nr_pages += csrow->channels[j].dimm->nr_pages; - if (nr_pages > 0) - kobject_put(&mci->csrows[i].kobj); + struct dimm_info *dimm = mci->dimms[i]; + if (dimm->nr_pages == 0) + continue; + put_device(&dimm->dev); + device_del(&dimm->dev); } - - /* remove the mci instance's attributes, if any */ - edac_remove_mci_instance_attributes(mci, - mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0); - - /* remove the symlink */ - sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK); - -fail0: + put_device(&mci->dev); + device_del(&mci->dev); + bus_unregister(&mci->bus); + kfree(mci->bus.name); return err; } @@ -985,98 +1068,84 @@ fail0: */ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) { - struct csrow_info *csrow; - int i, j; - - debugf0("%s()\n", __func__); - - /* remove all csrow kobjects */ - debugf4("%s() unregister this mci kobj\n", __func__); - for (i = 0; i < mci->nr_csrows; i++) { - int nr_pages = 0; - - csrow = &mci->csrows[i]; - for (j = 0; j < csrow->nr_channels; j++) - nr_pages += csrow->channels[j].dimm->nr_pages; - if (nr_pages > 0) { - debugf0("%s() unreg csrow-%d\n", __func__, i); - kobject_put(&mci->csrows[i].kobj); - } - } + int i; - /* remove this mci instance's attribtes */ - if (mci->mc_driver_sysfs_attributes) { - debugf4("%s() unregister mci private attributes\n", __func__); - edac_remove_mci_instance_attributes(mci, - mci->mc_driver_sysfs_attributes, - &mci->edac_mci_kobj, 0); + edac_dbg(0, "\n"); + +#ifdef CONFIG_EDAC_DEBUG + debugfs_remove(mci->debugfs); +#endif +#ifdef CONFIG_EDAC_LEGACY_SYSFS + edac_delete_csrow_objects(mci); +#endif + + for (i = 0; i < mci->tot_dimms; i++) { + struct dimm_info *dimm = mci->dimms[i]; + if (dimm->nr_pages == 0) + continue; + edac_dbg(0, "removing device %s\n", dev_name(&dimm->dev)); + put_device(&dimm->dev); + device_del(&dimm->dev); } - - /* remove the symlink */ - debugf4("%s() remove_link\n", __func__); - sysfs_remove_link(&mci->edac_mci_kobj, 
EDAC_DEVICE_SYMLINK); - - /* unregister this instance's kobject */ - debugf4("%s() remove_mci_instance\n", __func__); - kobject_put(&mci->edac_mci_kobj); } +void edac_unregister_sysfs(struct mem_ctl_info *mci) +{ + edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev)); + put_device(&mci->dev); + device_del(&mci->dev); + bus_unregister(&mci->bus); + kfree(mci->bus.name); +} +static void mc_attr_release(struct device *dev) +{ + /* + * There's no container structure here, as this is just the mci + * parent device, used to create the /sys/devices/mc sysfs node. + * So, there are no attributes on it. + */ + edac_dbg(1, "Releasing device %s\n", dev_name(dev)); + kfree(dev); +} - +static struct device_type mc_attr_type = { + .release = mc_attr_release, +}; /* - * edac_setup_sysfs_mc_kset(void) - * - * Initialize the mc_kset for the 'mc' entry - * This requires creating the top 'mc' directory with a kset - * and its controls/attributes. - * - * To this 'mc' kset, instance 'mci' will be grouped as children. - * - * Return: 0 SUCCESS - * !0 FAILURE error code + * Init/exit code for the module. Basically, creates/removes /sys/class/rc */ -int edac_sysfs_setup_mc_kset(void) +int __init edac_mc_sysfs_init(void) { - int err = -EINVAL; struct bus_type *edac_subsys; - - debugf1("%s()\n", __func__); + int err; /* get the /sys/devices/system/edac subsys reference */ edac_subsys = edac_get_sysfs_subsys(); if (edac_subsys == NULL) { - debugf1("%s() no edac_subsys error=%d\n", __func__, err); - goto fail_out; + edac_dbg(1, "no edac_subsys\n"); + return -EINVAL; } - /* Init the MC's kobject */ - mc_kset = kset_create_and_add("mc", NULL, &edac_subsys->dev_root->kobj); - if (!mc_kset) { - err = -ENOMEM; - debugf1("%s() Failed to register '.../edac/mc'\n", __func__); - goto fail_kset; - } + mci_pdev = kzalloc(sizeof(*mci_pdev), GFP_KERNEL); - debugf1("%s() Registered '.../edac/mc' kobject\n", __func__); + mci_pdev->bus = edac_subsys; + mci_pdev->type = &mc_attr_type; + device_initialize(mci_pdev); + dev_set_name(mci_pdev, "mc"); - return 0; + err = device_add(mci_pdev); + if (err < 0) + return err; -fail_kset: - edac_put_sysfs_subsys(); + edac_dbg(0, "device %s created\n", dev_name(mci_pdev)); -fail_out: - return err; + return 0; } -/* - * edac_sysfs_teardown_mc_kset - * - * deconstruct the mc_ket for memory controllers - */ -void edac_sysfs_teardown_mc_kset(void) +void __exit edac_mc_sysfs_exit(void) { - kset_unregister(mc_kset); + put_device(mci_pdev); + device_del(mci_pdev); edac_put_sysfs_subsys(); } - diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c index 5ddaa86d6a6e..58a28d838f37 100644 --- a/drivers/edac/edac_module.c +++ b/drivers/edac/edac_module.c @@ -15,7 +15,7 @@ #include "edac_core.h" #include "edac_module.h" -#define EDAC_VERSION "Ver: 2.1.0" +#define EDAC_VERSION "Ver: 3.0.0" #ifdef CONFIG_EDAC_DEBUG /* Values of 0 to 4 will generate output */ @@ -90,26 +90,21 @@ static int __init edac_init(void) */ edac_pci_clear_parity_errors(); - /* - * now set up the mc_kset under the edac class object - */ - err = edac_sysfs_setup_mc_kset(); + err = edac_mc_sysfs_init(); if (err) goto error; + edac_debugfs_init(); + /* Setup/Initialize the workq for this core */ err = edac_workqueue_setup(); if (err) { edac_printk(KERN_ERR, EDAC_MC, "init WorkQueue failure\n"); - goto workq_fail; + goto error; } return 0; - /* Error teardown stack */ -workq_fail: - edac_sysfs_teardown_mc_kset(); - error: return err; } @@ -120,11 +115,12 @@ error: */ static void __exit edac_exit(void) { - 
debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* tear down the various subsystems */ edac_workqueue_teardown(); - edac_sysfs_teardown_mc_kset(); + edac_mc_sysfs_exit(); + edac_debugfs_exit(); } /* diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index 0ea7d14cb930..3d139c6e7fe3 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h @@ -19,12 +19,12 @@ * * edac_mc objects */ -extern int edac_sysfs_setup_mc_kset(void); -extern void edac_sysfs_teardown_mc_kset(void); -extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci); -extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci); + /* on edac_mc_sysfs.c */ +int edac_mc_sysfs_init(void); +void edac_mc_sysfs_exit(void); extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci); extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); +void edac_unregister_sysfs(struct mem_ctl_info *mci); extern int edac_get_log_ue(void); extern int edac_get_log_ce(void); extern int edac_get_panic_on_ue(void); @@ -34,6 +34,10 @@ extern int edac_mc_get_panic_on_ue(void); extern int edac_get_poll_msec(void); extern int edac_mc_get_poll_msec(void); +unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, + unsigned len); + + /* on edac_device.c */ extern int edac_device_register_sysfs_main_kobj( struct edac_device_ctl_info *edac_dev); extern void edac_device_unregister_sysfs_main_kobj( @@ -53,6 +57,20 @@ extern void edac_mc_reset_delay_period(int value); extern void *edac_align_ptr(void **p, unsigned size, int n_elems); /* + * EDAC debugfs functions + */ +#ifdef CONFIG_EDAC_DEBUG +int edac_debugfs_init(void); +void edac_debugfs_exit(void); +#else +static inline int edac_debugfs_init(void) +{ + return -ENODEV; +} +static inline void edac_debugfs_exit(void) {} +#endif + +/* * EDAC PCI functions */ #ifdef CONFIG_PCI diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index f1ac86649886..ee87ef972ead 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c @@ -45,7 +45,7 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt, void *p = NULL, *pvt; unsigned int size; - debugf1("%s()\n", __func__); + edac_dbg(1, "\n"); pci = edac_align_ptr(&p, sizeof(*pci), 1); pvt = edac_align_ptr(&p, 1, sz_pvt); @@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(edac_pci_alloc_ctl_info); */ void edac_pci_free_ctl_info(struct edac_pci_ctl_info *pci) { - debugf1("%s()\n", __func__); + edac_dbg(1, "\n"); edac_pci_remove_sysfs(pci); } @@ -97,7 +97,7 @@ static struct edac_pci_ctl_info *find_edac_pci_by_dev(struct device *dev) struct edac_pci_ctl_info *pci; struct list_head *item; - debugf1("%s()\n", __func__); + edac_dbg(1, "\n"); list_for_each(item, &edac_pci_list) { pci = list_entry(item, struct edac_pci_ctl_info, link); @@ -122,7 +122,7 @@ static int add_edac_pci_to_global_list(struct edac_pci_ctl_info *pci) struct list_head *item, *insert_before; struct edac_pci_ctl_info *rover; - debugf1("%s()\n", __func__); + edac_dbg(1, "\n"); insert_before = &edac_pci_list; @@ -226,7 +226,7 @@ static void edac_pci_workq_function(struct work_struct *work_req) int msec; unsigned long delay; - debugf3("%s() checking\n", __func__); + edac_dbg(3, "checking\n"); mutex_lock(&edac_pci_ctls_mutex); @@ -261,7 +261,7 @@ static void edac_pci_workq_function(struct work_struct *work_req) static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci, unsigned int msec) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function); 
queue_delayed_work(edac_workqueue, &pci->work, @@ -276,7 +276,7 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci) { int status; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); status = cancel_delayed_work(&pci->work); if (status == 0) @@ -293,7 +293,7 @@ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci) void edac_pci_reset_delay_period(struct edac_pci_ctl_info *pci, unsigned long value) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); edac_pci_workq_teardown(pci); @@ -333,7 +333,7 @@ EXPORT_SYMBOL_GPL(edac_pci_alloc_index); */ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); pci->pci_idx = edac_idx; pci->start_time = jiffies; @@ -393,7 +393,7 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev) { struct edac_pci_ctl_info *pci; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); mutex_lock(&edac_pci_ctls_mutex); @@ -430,7 +430,7 @@ EXPORT_SYMBOL_GPL(edac_pci_del_device); */ static void edac_pci_generic_check(struct edac_pci_ctl_info *pci) { - debugf4("%s()\n", __func__); + edac_dbg(4, "\n"); edac_pci_do_parity_check(); } @@ -475,7 +475,7 @@ struct edac_pci_ctl_info *edac_pci_create_generic_ctl(struct device *dev, pdata->edac_idx = edac_pci_idx++; if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { - debugf3("%s(): failed edac_pci_add_device()\n", __func__); + edac_dbg(3, "failed edac_pci_add_device()\n"); edac_pci_free_ctl_info(pci); return NULL; } @@ -491,7 +491,7 @@ EXPORT_SYMBOL_GPL(edac_pci_create_generic_ctl); */ void edac_pci_release_generic_ctl(struct edac_pci_ctl_info *pci) { - debugf0("%s() pci mod=%s\n", __func__, pci->mod_name); + edac_dbg(0, "pci mod=%s\n", pci->mod_name); edac_pci_del_device(pci->dev); edac_pci_free_ctl_info(pci); diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 97f5064e3992..e164c555a337 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -78,7 +78,7 @@ static void edac_pci_instance_release(struct kobject *kobj) { struct edac_pci_ctl_info *pci; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* Form pointer to containing struct, the pci control struct */ pci = to_instance(kobj); @@ -161,7 +161,7 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx) struct kobject *main_kobj; int err; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* First bump the ref count on the top main kobj, which will * track the number of PCI instances we have, and thus nest @@ -177,14 +177,13 @@ static int edac_pci_create_instance_kobj(struct edac_pci_ctl_info *pci, int idx) err = kobject_init_and_add(&pci->kobj, &ktype_pci_instance, edac_pci_top_main_kobj, "pci%d", idx); if (err != 0) { - debugf2("%s() failed to register instance pci%d\n", - __func__, idx); + edac_dbg(2, "failed to register instance pci%d\n", idx); kobject_put(edac_pci_top_main_kobj); goto error_out; } kobject_uevent(&pci->kobj, KOBJ_ADD); - debugf1("%s() Register instance 'pci%d' kobject\n", __func__, idx); + edac_dbg(1, "Register instance 'pci%d' kobject\n", idx); return 0; @@ -201,7 +200,7 @@ error_out: static void edac_pci_unregister_sysfs_instance_kobj( struct edac_pci_ctl_info *pci) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* Unregister the instance kobject and allow its release * function release the main reference count and then @@ -317,7 +316,7 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = { */ static void edac_pci_release_main_kobj(struct kobject 
*kobj) { - debugf0("%s() here to module_put(THIS_MODULE)\n", __func__); + edac_dbg(0, "here to module_put(THIS_MODULE)\n"); kfree(kobj); @@ -345,7 +344,7 @@ static int edac_pci_main_kobj_setup(void) int err; struct bus_type *edac_subsys; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* check and count if we have already created the main kobject */ if (atomic_inc_return(&edac_pci_sysfs_refcount) != 1) @@ -356,7 +355,7 @@ static int edac_pci_main_kobj_setup(void) */ edac_subsys = edac_get_sysfs_subsys(); if (edac_subsys == NULL) { - debugf1("%s() no edac_subsys\n", __func__); + edac_dbg(1, "no edac_subsys\n"); err = -ENODEV; goto decrement_count_fail; } @@ -366,14 +365,14 @@ static int edac_pci_main_kobj_setup(void) * level main kobj for EDAC PCI */ if (!try_module_get(THIS_MODULE)) { - debugf1("%s() try_module_get() failed\n", __func__); + edac_dbg(1, "try_module_get() failed\n"); err = -ENODEV; goto mod_get_fail; } edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); if (!edac_pci_top_main_kobj) { - debugf1("Failed to allocate\n"); + edac_dbg(1, "Failed to allocate\n"); err = -ENOMEM; goto kzalloc_fail; } @@ -383,7 +382,7 @@ static int edac_pci_main_kobj_setup(void) &ktype_edac_pci_main_kobj, &edac_subsys->dev_root->kobj, "pci"); if (err) { - debugf1("Failed to register '.../edac/pci'\n"); + edac_dbg(1, "Failed to register '.../edac/pci'\n"); goto kobject_init_and_add_fail; } @@ -392,7 +391,7 @@ static int edac_pci_main_kobj_setup(void) * must be used, for resources to be cleaned up properly */ kobject_uevent(edac_pci_top_main_kobj, KOBJ_ADD); - debugf1("Registered '.../edac/pci' kobject\n"); + edac_dbg(1, "Registered '.../edac/pci' kobject\n"); return 0; @@ -421,15 +420,14 @@ decrement_count_fail: */ static void edac_pci_main_kobj_teardown(void) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* Decrement the count and only if no more controller instances * are connected perform the unregisteration of the top level * main kobj */ if (atomic_dec_return(&edac_pci_sysfs_refcount) == 0) { - debugf0("%s() called kobject_put on main kobj\n", - __func__); + edac_dbg(0, "called kobject_put on main kobj\n"); kobject_put(edac_pci_top_main_kobj); } edac_put_sysfs_subsys(); @@ -446,7 +444,7 @@ int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci) int err; struct kobject *edac_kobj = &pci->kobj; - debugf0("%s() idx=%d\n", __func__, pci->pci_idx); + edac_dbg(0, "idx=%d\n", pci->pci_idx); /* create the top main EDAC PCI kobject, IF needed */ err = edac_pci_main_kobj_setup(); @@ -460,8 +458,7 @@ int edac_pci_create_sysfs(struct edac_pci_ctl_info *pci) err = sysfs_create_link(edac_kobj, &pci->dev->kobj, EDAC_PCI_SYMLINK); if (err) { - debugf0("%s() sysfs_create_link() returned err= %d\n", - __func__, err); + edac_dbg(0, "sysfs_create_link() returned err= %d\n", err); goto symlink_fail; } @@ -484,7 +481,7 @@ unregister_cleanup: */ void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci) { - debugf0("%s() index=%d\n", __func__, pci->pci_idx); + edac_dbg(0, "index=%d\n", pci->pci_idx); /* Remove the symlink */ sysfs_remove_link(&pci->kobj, EDAC_PCI_SYMLINK); @@ -496,7 +493,7 @@ void edac_pci_remove_sysfs(struct edac_pci_ctl_info *pci) * if this 'pci' is the last instance. 
* If it is, the main kobject will be unregistered as a result */ - debugf0("%s() calling edac_pci_main_kobj_teardown()\n", __func__); + edac_dbg(0, "calling edac_pci_main_kobj_teardown()\n"); edac_pci_main_kobj_teardown(); } @@ -572,7 +569,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) local_irq_restore(flags); - debugf4("PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev)); + edac_dbg(4, "PCI STATUS= 0x%04x %s\n", status, dev_name(&dev->dev)); /* check the status reg for errors on boards NOT marked as broken * if broken, we cannot trust any of the status bits @@ -603,13 +600,15 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) } - debugf4("PCI HEADER TYPE= 0x%02x %s\n", header_type, dev_name(&dev->dev)); + edac_dbg(4, "PCI HEADER TYPE= 0x%02x %s\n", + header_type, dev_name(&dev->dev)); if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* On bridges, need to examine secondary status register */ status = get_pci_parity_status(dev, 1); - debugf4("PCI SEC_STATUS= 0x%04x %s\n", status, dev_name(&dev->dev)); + edac_dbg(4, "PCI SEC_STATUS= 0x%04x %s\n", + status, dev_name(&dev->dev)); /* check the secondary status reg for errors, * on NOT broken boards @@ -671,7 +670,7 @@ void edac_pci_do_parity_check(void) { int before_count; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); /* if policy has PCI check off, leave now */ if (!check_pci_errors) diff --git a/drivers/edac/highbank_l2_edac.c b/drivers/edac/highbank_l2_edac.c new file mode 100644 index 000000000000..e599b00c05a8 --- /dev/null +++ b/drivers/edac/highbank_l2_edac.c @@ -0,0 +1,149 @@ +/* + * Copyright 2011-2012 Calxeda, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/ctype.h> +#include <linux/edac.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/of_platform.h> + +#include "edac_core.h" +#include "edac_module.h" + +#define SR_CLR_SB_ECC_INTR 0x0 +#define SR_CLR_DB_ECC_INTR 0x4 + +struct hb_l2_drvdata { + void __iomem *base; + int sb_irq; + int db_irq; +}; + +static irqreturn_t highbank_l2_err_handler(int irq, void *dev_id) +{ + struct edac_device_ctl_info *dci = dev_id; + struct hb_l2_drvdata *drvdata = dci->pvt_info; + + if (irq == drvdata->sb_irq) { + writel(1, drvdata->base + SR_CLR_SB_ECC_INTR); + edac_device_handle_ce(dci, 0, 0, dci->ctl_name); + } + if (irq == drvdata->db_irq) { + writel(1, drvdata->base + SR_CLR_DB_ECC_INTR); + edac_device_handle_ue(dci, 0, 0, dci->ctl_name); + } + + return IRQ_HANDLED; +} + +static int __devinit highbank_l2_err_probe(struct platform_device *pdev) +{ + struct edac_device_ctl_info *dci; + struct hb_l2_drvdata *drvdata; + struct resource *r; + int res = 0; + + dci = edac_device_alloc_ctl_info(sizeof(*drvdata), "cpu", + 1, "L", 1, 2, NULL, 0, 0); + if (!dci) + return -ENOMEM; + + drvdata = dci->pvt_info; + dci->dev = &pdev->dev; + platform_set_drvdata(pdev, dci); + + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "Unable to get mem resource\n"); + res = -ENODEV; + goto err; + } + + if (!devm_request_mem_region(&pdev->dev, r->start, + resource_size(r), dev_name(&pdev->dev))) { + dev_err(&pdev->dev, "Error while requesting mem region\n"); + res = -EBUSY; + goto err; + } + + drvdata->base = devm_ioremap(&pdev->dev, r->start, resource_size(r)); + if (!drvdata->base) { + dev_err(&pdev->dev, "Unable to map regs\n"); + res = -ENOMEM; + goto err; + } + + drvdata->db_irq = platform_get_irq(pdev, 0); + res = devm_request_irq(&pdev->dev, drvdata->db_irq, + highbank_l2_err_handler, + 0, dev_name(&pdev->dev), dci); + if (res < 0) + goto err; + + drvdata->sb_irq = platform_get_irq(pdev, 1); + res = devm_request_irq(&pdev->dev, drvdata->sb_irq, + highbank_l2_err_handler, + 0, dev_name(&pdev->dev), dci); + if (res < 0) + goto err; + + dci->mod_name = dev_name(&pdev->dev); + dci->dev_name = dev_name(&pdev->dev); + + if (edac_device_add_device(dci)) + goto err; + + devres_close_group(&pdev->dev, NULL); + return 0; +err: + devres_release_group(&pdev->dev, NULL); + edac_device_free_ctl_info(dci); + return res; +} + +static int highbank_l2_err_remove(struct platform_device *pdev) +{ + struct edac_device_ctl_info *dci = platform_get_drvdata(pdev); + + edac_device_del_device(&pdev->dev); + edac_device_free_ctl_info(dci); + return 0; +} + +static const struct of_device_id hb_l2_err_of_match[] = { + { .compatible = "calxeda,hb-sregs-l2-ecc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, hb_l2_err_of_match); + +static struct platform_driver highbank_l2_edac_driver = { + .probe = highbank_l2_err_probe, + .remove = highbank_l2_err_remove, + .driver = { + .name = "hb_l2_edac", + .of_match_table = hb_l2_err_of_match, + }, +}; + +module_platform_driver(highbank_l2_edac_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Calxeda, Inc."); +MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank L2 Cache"); diff --git a/drivers/edac/highbank_mc_edac.c b/drivers/edac/highbank_mc_edac.c new file mode 100644 index 000000000000..c769f477fd22 --- /dev/null +++ b/drivers/edac/highbank_mc_edac.c @@ -0,0 +1,264 @@ +/* + * Copyright 2011-2012 
Calxeda, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/ctype.h> +#include <linux/edac.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/of_platform.h> +#include <linux/uaccess.h> + +#include "edac_core.h" +#include "edac_module.h" + +/* DDR Ctrlr Error Registers */ +#define HB_DDR_ECC_OPT 0x128 +#define HB_DDR_ECC_U_ERR_ADDR 0x130 +#define HB_DDR_ECC_U_ERR_STAT 0x134 +#define HB_DDR_ECC_U_ERR_DATAL 0x138 +#define HB_DDR_ECC_U_ERR_DATAH 0x13c +#define HB_DDR_ECC_C_ERR_ADDR 0x140 +#define HB_DDR_ECC_C_ERR_STAT 0x144 +#define HB_DDR_ECC_C_ERR_DATAL 0x148 +#define HB_DDR_ECC_C_ERR_DATAH 0x14c +#define HB_DDR_ECC_INT_STATUS 0x180 +#define HB_DDR_ECC_INT_ACK 0x184 +#define HB_DDR_ECC_U_ERR_ID 0x424 +#define HB_DDR_ECC_C_ERR_ID 0x428 + +#define HB_DDR_ECC_INT_STAT_CE 0x8 +#define HB_DDR_ECC_INT_STAT_DOUBLE_CE 0x10 +#define HB_DDR_ECC_INT_STAT_UE 0x20 +#define HB_DDR_ECC_INT_STAT_DOUBLE_UE 0x40 + +#define HB_DDR_ECC_OPT_MODE_MASK 0x3 +#define HB_DDR_ECC_OPT_FWC 0x100 +#define HB_DDR_ECC_OPT_XOR_SHIFT 16 + +struct hb_mc_drvdata { + void __iomem *mc_vbase; +}; + +static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id) +{ + struct mem_ctl_info *mci = dev_id; + struct hb_mc_drvdata *drvdata = mci->pvt_info; + u32 status, err_addr; + + /* Read the interrupt status register */ + status = readl(drvdata->mc_vbase + HB_DDR_ECC_INT_STATUS); + + if (status & HB_DDR_ECC_INT_STAT_UE) { + err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_U_ERR_ADDR); + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, + err_addr >> PAGE_SHIFT, + err_addr & ~PAGE_MASK, 0, + 0, 0, -1, + mci->ctl_name, ""); + } + if (status & HB_DDR_ECC_INT_STAT_CE) { + u32 syndrome = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_STAT); + syndrome = (syndrome >> 8) & 0xff; + err_addr = readl(drvdata->mc_vbase + HB_DDR_ECC_C_ERR_ADDR); + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, + err_addr >> PAGE_SHIFT, + err_addr & ~PAGE_MASK, syndrome, + 0, 0, -1, + mci->ctl_name, ""); + } + + /* clear the error, clears the interrupt */ + writel(status, drvdata->mc_vbase + HB_DDR_ECC_INT_ACK); + return IRQ_HANDLED; +} + +#ifdef CONFIG_EDAC_DEBUG +static ssize_t highbank_mc_err_inject_write(struct file *file, + const char __user *data, + size_t count, loff_t *ppos) +{ + struct mem_ctl_info *mci = file->private_data; + struct hb_mc_drvdata *pdata = mci->pvt_info; + char buf[32]; + size_t buf_size; + u32 reg; + u8 synd; + + buf_size = min(count, (sizeof(buf)-1)); + if (copy_from_user(buf, data, buf_size)) + return -EFAULT; + buf[buf_size] = 0; + + if (!kstrtou8(buf, 16, &synd)) { + reg = readl(pdata->mc_vbase + HB_DDR_ECC_OPT); + reg &= HB_DDR_ECC_OPT_MODE_MASK; + reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC; + writel(reg, pdata->mc_vbase + HB_DDR_ECC_OPT); + } + + return count; +} + +static int debugfs_open(struct inode *inode, struct file *file) +{ + 
file->private_data = inode->i_private; + return 0; +} + +static const struct file_operations highbank_mc_debug_inject_fops = { + .open = debugfs_open, + .write = highbank_mc_err_inject_write, + .llseek = generic_file_llseek, +}; + +static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci) +{ + if (mci->debugfs) + debugfs_create_file("inject_ctrl", S_IWUSR, mci->debugfs, mci, + &highbank_mc_debug_inject_fops); +; +} +#else +static void __devinit highbank_mc_create_debugfs_nodes(struct mem_ctl_info *mci) +{} +#endif + +static int __devinit highbank_mc_probe(struct platform_device *pdev) +{ + struct edac_mc_layer layers[2]; + struct mem_ctl_info *mci; + struct hb_mc_drvdata *drvdata; + struct dimm_info *dimm; + struct resource *r; + u32 control; + int irq; + int res = 0; + + layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; + layers[0].size = 1; + layers[0].is_virt_csrow = true; + layers[1].type = EDAC_MC_LAYER_CHANNEL; + layers[1].size = 1; + layers[1].is_virt_csrow = false; + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, + sizeof(struct hb_mc_drvdata)); + if (!mci) + return -ENOMEM; + + mci->pdev = &pdev->dev; + drvdata = mci->pvt_info; + platform_set_drvdata(pdev, mci); + + if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) + return -ENOMEM; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "Unable to get mem resource\n"); + res = -ENODEV; + goto err; + } + + if (!devm_request_mem_region(&pdev->dev, r->start, + resource_size(r), dev_name(&pdev->dev))) { + dev_err(&pdev->dev, "Error while requesting mem region\n"); + res = -EBUSY; + goto err; + } + + drvdata->mc_vbase = devm_ioremap(&pdev->dev, + r->start, resource_size(r)); + if (!drvdata->mc_vbase) { + dev_err(&pdev->dev, "Unable to map regs\n"); + res = -ENOMEM; + goto err; + } + + control = readl(drvdata->mc_vbase + HB_DDR_ECC_OPT) & 0x3; + if (!control || (control == 0x2)) { + dev_err(&pdev->dev, "No ECC present, or ECC disabled\n"); + res = -ENODEV; + goto err; + } + + irq = platform_get_irq(pdev, 0); + res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler, + 0, dev_name(&pdev->dev), mci); + if (res < 0) { + dev_err(&pdev->dev, "Unable to request irq %d\n", irq); + goto err; + } + + mci->mtype_cap = MEM_FLAG_DDR3; + mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; + mci->edac_cap = EDAC_FLAG_SECDED; + mci->mod_name = dev_name(&pdev->dev); + mci->mod_ver = "1"; + mci->ctl_name = dev_name(&pdev->dev); + mci->scrub_mode = SCRUB_SW_SRC; + + /* Only a single 4GB DIMM is supported */ + dimm = *mci->dimms; + dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1; + dimm->grain = 8; + dimm->dtype = DEV_X8; + dimm->mtype = MEM_DDR3; + dimm->edac_mode = EDAC_SECDED; + + res = edac_mc_add_mc(mci); + if (res < 0) + goto err; + + highbank_mc_create_debugfs_nodes(mci); + + devres_close_group(&pdev->dev, NULL); + return 0; +err: + devres_release_group(&pdev->dev, NULL); + edac_mc_free(mci); + return res; +} + +static int highbank_mc_remove(struct platform_device *pdev) +{ + struct mem_ctl_info *mci = platform_get_drvdata(pdev); + + edac_mc_del_mc(&pdev->dev); + edac_mc_free(mci); + return 0; +} + +static const struct of_device_id hb_ddr_ctrl_of_match[] = { + { .compatible = "calxeda,hb-ddr-ctrl", }, + {}, +}; +MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match); + +static struct platform_driver highbank_mc_edac_driver = { + .probe = highbank_mc_probe, + .remove = highbank_mc_remove, + .driver = { + .name = "hb_mc_edac", + .of_match_table = hb_ddr_ctrl_of_match, + }, +}; + 
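(Aside, not part of the patch itself: a sketch of how the inject_ctrl debugfs node added above can be driven from userspace. The path below is an assumption — it presumes debugfs is mounted at /sys/kernel/debug and that the EDAC core names this controller's debugfs directory mc0; adjust for the system at hand. A hex byte written to the node reaches highbank_mc_err_inject_write(), which parses it with kstrtou8() and programs it into HB_DDR_ECC_OPT as the XOR syndrome together with the force-write-check bit.)

/* Hypothetical userspace helper for the inject_ctrl node.
 * Assumptions: debugfs mounted at /sys/kernel/debug, controller
 * directory named mc0 by the EDAC core. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/edac/mc0/inject_ctrl";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* "10" is parsed base-16 by the driver (kstrtou8), i.e. syndrome
	 * 0x10, which the write handler ORs into HB_DDR_ECC_OPT along
	 * with HB_DDR_ECC_OPT_FWC. */
	fputs("10\n", f);
	fclose(f);
	return 0;
}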
+module_platform_driver(highbank_mc_edac_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Calxeda, Inc."); +MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank"); diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c index 8ad1744faacd..d3d19cc4e9a1 100644 --- a/drivers/edac/i3000_edac.c +++ b/drivers/edac/i3000_edac.c @@ -194,7 +194,7 @@ static void i3000_get_error_info(struct mem_ctl_info *mci, { struct pci_dev *pdev; - pdev = to_pci_dev(mci->dev); + pdev = to_pci_dev(mci->pdev); /* * This is a mess because there is no atomic way to read all the @@ -236,7 +236,7 @@ static int i3000_process_error_info(struct mem_ctl_info *mci, int row, multi_chan, channel; unsigned long pfn, offset; - multi_chan = mci->csrows[0].nr_channels - 1; + multi_chan = mci->csrows[0]->nr_channels - 1; if (!(info->errsts & I3000_ERRSTS_BITS)) return 0; @@ -245,9 +245,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci, return 1; if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, - "UE overwrote CE", "", NULL); + "UE overwrote CE", ""); info->errsts = info->errsts2; } @@ -258,15 +258,15 @@ static int i3000_process_error_info(struct mem_ctl_info *mci, row = edac_mc_find_csrow_by_page(mci, pfn); if (info->errsts & I3000_ERRSTS_UE) - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, pfn, offset, 0, row, -1, -1, - "i3000 UE", "", NULL); + "i3000 UE", ""); else - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, pfn, offset, info->derrsyn, row, multi_chan ? channel : 0, -1, - "i3000 CE", "", NULL); + "i3000 CE", ""); return 1; } @@ -275,7 +275,7 @@ static void i3000_check(struct mem_ctl_info *mci) { struct i3000_error_info info; - debugf1("MC%d: %s()\n", mci->mc_idx, __func__); + edac_dbg(1, "MC%d\n", mci->mc_idx); i3000_get_error_info(mci, &info); i3000_process_error_info(mci, &info, 1); } @@ -322,7 +322,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) unsigned long mchbar; void __iomem *window; - debugf0("MC: %s()\n", __func__); + edac_dbg(0, "MC:\n"); pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) & mchbar); mchbar &= I3000_MCHBAR_MASK; @@ -366,9 +366,9 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) if (!mci) return -ENOMEM; - debugf3("MC: %s(): init mci\n", __func__); + edac_dbg(3, "MC: init mci\n"); - mci->dev = &pdev->dev; + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_SECDED; @@ -393,14 +393,13 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) { u8 value; u32 cumul_size; - struct csrow_info *csrow = &mci->csrows[i]; + struct csrow_info *csrow = mci->csrows[i]; value = drb[i]; cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT); if (interleaved) cumul_size <<= 1; - debugf3("MC: %s(): (%d) cumul_size 0x%x\n", - __func__, i, cumul_size); + edac_dbg(3, "MC: (%d) cumul_size 0x%x\n", i, cumul_size); if (cumul_size == last_cumul_size) continue; @@ -410,7 +409,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) last_cumul_size = cumul_size; for (j = 0; j < nr_channels; j++) { - struct dimm_info *dimm = csrow->channels[j].dimm; + struct dimm_info *dimm = csrow->channels[j]->dimm; dimm->nr_pages = nr_pages / nr_channels; dimm->grain = I3000_DEAP_GRAIN; @@ -429,7 +428,7 @@ static int 
i3000_probe1(struct pci_dev *pdev, int dev_idx) rc = -ENODEV; if (edac_mc_add_mc(mci)) { - debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "MC: failed edac_mc_add_mc()\n"); goto fail; } @@ -445,7 +444,7 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) } /* get this far and it's successful */ - debugf3("MC: %s(): success\n", __func__); + edac_dbg(3, "MC: success\n"); return 0; fail: @@ -461,7 +460,7 @@ static int __devinit i3000_init_one(struct pci_dev *pdev, { int rc; - debugf0("MC: %s()\n", __func__); + edac_dbg(0, "MC:\n"); if (pci_enable_device(pdev) < 0) return -EIO; @@ -477,7 +476,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); if (i3000_pci) edac_pci_release_generic_ctl(i3000_pci); @@ -511,7 +510,7 @@ static int __init i3000_init(void) { int pci_rc; - debugf3("MC: %s()\n", __func__); + edac_dbg(3, "MC:\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -525,14 +524,14 @@ static int __init i3000_init(void) mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_3000_HB, NULL); if (!mci_pdev) { - debugf0("i3000 pci_get_device fail\n"); + edac_dbg(0, "i3000 pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl); if (pci_rc < 0) { - debugf0("i3000 init fail\n"); + edac_dbg(0, "i3000 init fail\n"); pci_rc = -ENODEV; goto fail1; } @@ -552,7 +551,7 @@ fail0: static void __exit i3000_exit(void) { - debugf3("MC: %s()\n", __func__); + edac_dbg(3, "MC:\n"); pci_unregister_driver(&i3000_driver); if (!i3000_registered) { diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c index bbe43ef71823..47180a08edad 100644 --- a/drivers/edac/i3200_edac.c +++ b/drivers/edac/i3200_edac.c @@ -110,10 +110,10 @@ static int how_many_channels(struct pci_dev *pdev) pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b); if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ - debugf0("In single channel mode.\n"); + edac_dbg(0, "In single channel mode\n"); return 1; } else { - debugf0("In dual channel mode.\n"); + edac_dbg(0, "In dual channel mode\n"); return 2; } } @@ -159,7 +159,7 @@ static void i3200_clear_error_info(struct mem_ctl_info *mci) { struct pci_dev *pdev; - pdev = to_pci_dev(mci->dev); + pdev = to_pci_dev(mci->pdev); /* * Clear any error bits. 
@@ -176,7 +176,7 @@ static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci, struct i3200_priv *priv = mci->pvt_info; void __iomem *window = priv->window; - pdev = to_pci_dev(mci->dev); + pdev = to_pci_dev(mci->pdev); /* * This is a mess because there is no atomic way to read all the @@ -218,25 +218,25 @@ static void i3200_process_error_info(struct mem_ctl_info *mci, return; if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, - -1, -1, -1, "UE overwrote CE", "", NULL); + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, + -1, -1, -1, "UE overwrote CE", ""); info->errsts = info->errsts2; } for (channel = 0; channel < nr_channels; channel++) { log = info->eccerrlog[channel]; if (log & I3200_ECCERRLOG_UE) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, eccerrlog_row(channel, log), -1, -1, - "i3000 UE", "", NULL); + "i3000 UE", ""); } else if (log & I3200_ECCERRLOG_CE) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, eccerrlog_syndrome(log), eccerrlog_row(channel, log), -1, -1, - "i3000 UE", "", NULL); + "i3000 UE", ""); } } } @@ -245,7 +245,7 @@ static void i3200_check(struct mem_ctl_info *mci) { struct i3200_error_info info; - debugf1("MC%d: %s()\n", mci->mc_idx, __func__); + edac_dbg(1, "MC%d\n", mci->mc_idx); i3200_get_and_clear_error_info(mci, &info); i3200_process_error_info(mci, &info); } @@ -332,7 +332,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx) void __iomem *window; struct i3200_priv *priv; - debugf0("MC: %s()\n", __func__); + edac_dbg(0, "MC:\n"); window = i3200_map_mchbar(pdev); if (!window) @@ -352,9 +352,9 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx) if (!mci) return -ENOMEM; - debugf3("MC: %s(): init mci\n", __func__); + edac_dbg(3, "MC: init mci\n"); - mci->dev = &pdev->dev; + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_SECDED; @@ -379,7 +379,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx) */ for (i = 0; i < mci->nr_csrows; i++) { unsigned long nr_pages; - struct csrow_info *csrow = &mci->csrows[i]; + struct csrow_info *csrow = mci->csrows[i]; nr_pages = drb_to_nr_pages(drbs, stacked, i / I3200_RANKS_PER_CHANNEL, @@ -389,7 +389,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx) continue; for (j = 0; j < nr_channels; j++) { - struct dimm_info *dimm = csrow->channels[j].dimm; + struct dimm_info *dimm = csrow->channels[j]->dimm; dimm->nr_pages = nr_pages / nr_channels; dimm->grain = nr_pages << PAGE_SHIFT; @@ -403,12 +403,12 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx) rc = -ENODEV; if (edac_mc_add_mc(mci)) { - debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "MC: failed edac_mc_add_mc()\n"); goto fail; } /* get this far and it's successful */ - debugf3("MC: %s(): success\n", __func__); + edac_dbg(3, "MC: success\n"); return 0; fail: @@ -424,7 +424,7 @@ static int __devinit i3200_init_one(struct pci_dev *pdev, { int rc; - debugf0("MC: %s()\n", __func__); + edac_dbg(0, "MC:\n"); if (pci_enable_device(pdev) < 0) return -EIO; @@ -441,7 +441,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev) struct mem_ctl_info *mci; struct i3200_priv *priv; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); mci = edac_mc_del_mc(&pdev->dev); if (!mci) @@ -475,7 +475,7 @@ static int __init i3200_init(void) { 
int pci_rc; - debugf3("MC: %s()\n", __func__); + edac_dbg(3, "MC:\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -489,14 +489,14 @@ static int __init i3200_init(void) mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_3200_HB, NULL); if (!mci_pdev) { - debugf0("i3200 pci_get_device fail\n"); + edac_dbg(0, "i3200 pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl); if (pci_rc < 0) { - debugf0("i3200 init fail\n"); + edac_dbg(0, "i3200 init fail\n"); pci_rc = -ENODEV; goto fail1; } @@ -516,7 +516,7 @@ fail0: static void __exit i3200_exit(void) { - debugf3("MC: %s()\n", __func__); + edac_dbg(3, "MC:\n"); pci_unregister_driver(&i3200_driver); if (!i3200_registered) { diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index 11ea835f155a..39c63757c2a1 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c @@ -273,7 +273,7 @@ #define CHANNELS_PER_BRANCH 2 #define MAX_BRANCHES 2 -/* Defines to extract the vaious fields from the +/* Defines to extract the various fields from the * MTRx - Memory Technology Registers */ #define MTR_DIMMS_PRESENT(mtr) ((mtr) & (0x1 << 8)) @@ -287,22 +287,6 @@ #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) -#ifdef CONFIG_EDAC_DEBUG -static char *numrow_toString[] = { - "8,192 - 13 rows", - "16,384 - 14 rows", - "32,768 - 15 rows", - "reserved" -}; - -static char *numcol_toString[] = { - "1,024 - 10 columns", - "2,048 - 11 columns", - "4,096 - 12 columns", - "reserved" -}; -#endif - /* enables the report of miscellaneous messages as CE errors - default off */ static int misc_messages; @@ -344,7 +328,13 @@ struct i5000_pvt { struct pci_dev *branch_1; /* 22.0 */ u16 tolm; /* top of low memory */ - u64 ambase; /* AMB BAR */ + union { + u64 ambase; /* AMB BAR */ + struct { + u32 ambase_bottom; + u32 ambase_top; + } u __packed; + }; u16 mir0, mir1, mir2; @@ -494,10 +484,9 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci, ras = NREC_RAS(info->nrecmemb); cas = NREC_CAS(info->nrecmemb); - debugf0("\t\tCSROW= %d Channel= %d " - "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", - rank, channel, bank, - rdwr ? "Write" : "Read", ras, cas); + edac_dbg(0, "\t\tCSROW= %d Channel= %d (DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", + rank, channel, bank, + rdwr ? "Write" : "Read", ras, cas); /* Only 1 bit will be on */ switch (allErrors) { @@ -536,10 +525,10 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci, bank, ras, cas, allErrors, specific); /* Call the helper to output message */ - edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0, channel >> 1, channel & 1, rank, rdwr ? 
"Write error" : "Read error", - msg, NULL); + msg); } /* @@ -574,7 +563,7 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, /* ONLY ONE of the possible error bits will be set, as per the docs */ ue_errors = allErrors & FERR_NF_UNCORRECTABLE; if (ue_errors) { - debugf0("\tUncorrected bits= 0x%x\n", ue_errors); + edac_dbg(0, "\tUncorrected bits= 0x%x\n", ue_errors); branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); @@ -590,11 +579,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, ras = NREC_RAS(info->nrecmemb); cas = NREC_CAS(info->nrecmemb); - debugf0 - ("\t\tCSROW= %d Channels= %d,%d (Branch= %d " - "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", - rank, channel, channel + 1, branch >> 1, bank, - rdwr ? "Write" : "Read", ras, cas); + edac_dbg(0, "\t\tCSROW= %d Channels= %d,%d (Branch= %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", + rank, channel, channel + 1, branch >> 1, bank, + rdwr ? "Write" : "Read", ras, cas); switch (ue_errors) { case FERR_NF_M12ERR: @@ -637,16 +624,16 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, rank, bank, ras, cas, ue_errors, specific); /* Call the helper to output message */ - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, channel >> 1, -1, rank, rdwr ? "Write error" : "Read error", - msg, NULL); + msg); } /* Check correctable errors */ ce_errors = allErrors & FERR_NF_CORRECTABLE; if (ce_errors) { - debugf0("\tCorrected bits= 0x%x\n", ce_errors); + edac_dbg(0, "\tCorrected bits= 0x%x\n", ce_errors); branch = EXTRACT_FBDCHAN_INDX(info->ferr_nf_fbd); @@ -664,10 +651,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, ras = REC_RAS(info->recmemb); cas = REC_CAS(info->recmemb); - debugf0("\t\tCSROW= %d Channel= %d (Branch %d " - "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", - rank, channel, branch >> 1, bank, - rdwr ? "Write" : "Read", ras, cas); + edac_dbg(0, "\t\tCSROW= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", + rank, channel, branch >> 1, bank, + rdwr ? "Write" : "Read", ras, cas); switch (ce_errors) { case FERR_NF_M17ERR: @@ -692,10 +678,10 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, specific); /* Call the helper to output message */ - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, channel >> 1, channel % 2, rank, rdwr ? 
"Write error" : "Read error", - msg, NULL); + msg); } if (!misc_messages) @@ -738,9 +724,9 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci, "Err=%#x (%s)", misc_errors, specific); /* Call the helper to output message */ - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, branch >> 1, -1, -1, - "Misc error", msg, NULL); + "Misc error", msg); } } @@ -779,7 +765,7 @@ static void i5000_clear_error(struct mem_ctl_info *mci) static void i5000_check_error(struct mem_ctl_info *mci) { struct i5000_error_info info; - debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); + edac_dbg(4, "MC%d\n", mci->mc_idx); i5000_get_error_info(mci, &info); i5000_process_error_info(mci, &info, 1); } @@ -850,15 +836,16 @@ static int i5000_get_devices(struct mem_ctl_info *mci, int dev_idx) pvt->fsb_error_regs = pdev; - debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", - pci_name(pvt->system_address), - pvt->system_address->vendor, pvt->system_address->device); - debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", - pci_name(pvt->branchmap_werrors), - pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); - debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", - pci_name(pvt->fsb_error_regs), - pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); + edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n", + pci_name(pvt->system_address), + pvt->system_address->vendor, pvt->system_address->device); + edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", + pci_name(pvt->branchmap_werrors), + pvt->branchmap_werrors->vendor, + pvt->branchmap_werrors->device); + edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n", + pci_name(pvt->fsb_error_regs), + pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); pdev = NULL; pdev = pci_get_device(PCI_VENDOR_ID_INTEL, @@ -981,16 +968,25 @@ static void decode_mtr(int slot_row, u16 mtr) ans = MTR_DIMMS_PRESENT(mtr); - debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, - ans ? "Present" : "NOT Present"); + edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n", + slot_row, mtr, ans ? "" : "NOT "); if (!ans) return; - debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); - debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); - debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single"); - debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); - debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); + edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); + edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); + edac_dbg(2, "\t\tNUMRANK: %s\n", + MTR_DIMM_RANK(mtr) ? "double" : "single"); + edac_dbg(2, "\t\tNUMROW: %s\n", + MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" : + MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" : + MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" : + "reserved"); + edac_dbg(2, "\t\tNUMCOL: %s\n", + MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" : + MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" : + MTR_DIMM_COLS(mtr) == 2 ? 
"4,096 - 12 columns" : + "reserved"); } static void handle_channel(struct i5000_pvt *pvt, int slot, int channel, @@ -1061,7 +1057,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt) "--------------------------------"); p += n; space -= n; - debugf2("%s\n", mem_buffer); + edac_dbg(2, "%s\n", mem_buffer); p = mem_buffer; space = PAGE_SIZE; } @@ -1082,7 +1078,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt) } p += n; space -= n; - debugf2("%s\n", mem_buffer); + edac_dbg(2, "%s\n", mem_buffer); p = mem_buffer; space = PAGE_SIZE; } @@ -1092,7 +1088,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt) "--------------------------------"); p += n; space -= n; - debugf2("%s\n", mem_buffer); + edac_dbg(2, "%s\n", mem_buffer); p = mem_buffer; space = PAGE_SIZE; @@ -1105,7 +1101,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt) p += n; space -= n; } - debugf2("%s\n", mem_buffer); + edac_dbg(2, "%s\n", mem_buffer); p = mem_buffer; space = PAGE_SIZE; @@ -1118,7 +1114,7 @@ static void calculate_dimm_size(struct i5000_pvt *pvt) } /* output the last message and free buffer */ - debugf2("%s\n", mem_buffer); + edac_dbg(2, "%s\n", mem_buffer); kfree(mem_buffer); } @@ -1141,24 +1137,25 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci) pvt = mci->pvt_info; pci_read_config_dword(pvt->system_address, AMBASE, - (u32 *) & pvt->ambase); + &pvt->u.ambase_bottom); pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), - ((u32 *) & pvt->ambase) + sizeof(u32)); + &pvt->u.ambase_top); maxdimmperch = pvt->maxdimmperch; maxch = pvt->maxch; - debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", - (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); + edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", + (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); /* Get the Branch Map regs */ pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); pvt->tolm >>= 12; - debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, - pvt->tolm); + edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n", + pvt->tolm, pvt->tolm); actual_tolm = pvt->tolm << 28; - debugf2("Actual TOLM byte addr=%u (0x%x)\n", actual_tolm, actual_tolm); + edac_dbg(2, "Actual TOLM byte addr=%u (0x%x)\n", + actual_tolm, actual_tolm); pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); @@ -1168,15 +1165,18 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci) limit = (pvt->mir0 >> 4) & 0x0FFF; way0 = pvt->mir0 & 0x1; way1 = pvt->mir0 & 0x2; - debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); + edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", + limit, way1, way0); limit = (pvt->mir1 >> 4) & 0x0FFF; way0 = pvt->mir1 & 0x1; way1 = pvt->mir1 & 0x2; - debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); + edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", + limit, way1, way0); limit = (pvt->mir2 >> 4) & 0x0FFF; way0 = pvt->mir2 & 0x1; way1 = pvt->mir2 & 0x2; - debugf2("MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); + edac_dbg(2, "MIR2: limit= 0x%x WAY1= %u WAY0= %x\n", + limit, way1, way0); /* Get the MTR[0-3] regs */ for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { @@ -1185,31 +1185,31 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci) pci_read_config_word(pvt->branch_0, where, &pvt->b0_mtr[slot_row]); - debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, - pvt->b0_mtr[slot_row]); + edac_dbg(2, "MTR%d 
where=0x%x B0 value=0x%x\n", + slot_row, where, pvt->b0_mtr[slot_row]); if (pvt->maxch >= CHANNELS_PER_BRANCH) { pci_read_config_word(pvt->branch_1, where, &pvt->b1_mtr[slot_row]); - debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, - where, pvt->b1_mtr[slot_row]); + edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n", + slot_row, where, pvt->b1_mtr[slot_row]); } else { pvt->b1_mtr[slot_row] = 0; } } /* Read and dump branch 0's MTRs */ - debugf2("\nMemory Technology Registers:\n"); - debugf2(" Branch 0:\n"); + edac_dbg(2, "Memory Technology Registers:\n"); + edac_dbg(2, " Branch 0:\n"); for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { decode_mtr(slot_row, pvt->b0_mtr[slot_row]); } pci_read_config_word(pvt->branch_0, AMB_PRESENT_0, &pvt->b0_ambpresent0); - debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); + edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); pci_read_config_word(pvt->branch_0, AMB_PRESENT_1, &pvt->b0_ambpresent1); - debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); + edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); /* Only if we have 2 branchs (4 channels) */ if (pvt->maxch < CHANNELS_PER_BRANCH) { @@ -1217,18 +1217,18 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci) pvt->b1_ambpresent1 = 0; } else { /* Read and dump branch 1's MTRs */ - debugf2(" Branch 1:\n"); + edac_dbg(2, " Branch 1:\n"); for (slot_row = 0; slot_row < NUM_MTRS; slot_row++) { decode_mtr(slot_row, pvt->b1_mtr[slot_row]); } pci_read_config_word(pvt->branch_1, AMB_PRESENT_0, &pvt->b1_ambpresent0); - debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", - pvt->b1_ambpresent0); + edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n", + pvt->b1_ambpresent0); pci_read_config_word(pvt->branch_1, AMB_PRESENT_1, &pvt->b1_ambpresent1); - debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", - pvt->b1_ambpresent1); + edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n", + pvt->b1_ambpresent1); } /* Go and determine the size of each DIMM and place in an @@ -1363,10 +1363,9 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) int num_channels; int num_dimms_per_channel; - debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", - __FILE__, __func__, - pdev->bus->number, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n", + pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); /* We only are looking for func 0 of the set */ if (PCI_FUNC(pdev->devfn) != 0) @@ -1388,8 +1387,8 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, &num_channels); - debugf0("MC: %s(): Number of Branches=2 Channels= %d DIMMS= %d\n", - __func__, num_channels, num_dimms_per_channel); + edac_dbg(0, "MC: Number of Branches=2 Channels= %d DIMMS= %d\n", + num_channels, num_dimms_per_channel); /* allocate a new MC control structure */ @@ -1406,10 +1405,9 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) if (mci == NULL) return -ENOMEM; - kobject_get(&mci->edac_mci_kobj); - debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); + edac_dbg(0, "MC: mci = %p\n", mci); - mci->dev = &pdev->dev; /* record ptr to the generic device */ + mci->pdev = &pdev->dev; /* record ptr to the generic device */ pvt = mci->pvt_info; pvt->system_address = pdev; /* Record this device in our private */ @@ -1439,19 +1437,16 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) /* initialize the MC control structure 'csrows' table * with the mapping 
and control information */ if (i5000_init_csrows(mci)) { - debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" - " because i5000_init_csrows() returned nonzero " - "value\n"); + edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5000_init_csrows() returned nonzero value\n"); mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ } else { - debugf1("MC: Enable error reporting now\n"); + edac_dbg(1, "MC: Enable error reporting now\n"); i5000_enable_error_reporting(mci); } /* add this new MC control structure to EDAC's list of MCs */ if (edac_mc_add_mc(mci)) { - debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", - __FILE__, __func__); + edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ @@ -1479,7 +1474,6 @@ fail1: i5000_put_devices(mci); fail0: - kobject_put(&mci->edac_mci_kobj); edac_mc_free(mci); return -ENODEV; } @@ -1496,7 +1490,7 @@ static int __devinit i5000_init_one(struct pci_dev *pdev, { int rc; - debugf0("MC: %s: %s()\n", __FILE__, __func__); + edac_dbg(0, "MC:\n"); /* wake up device */ rc = pci_enable_device(pdev); @@ -1515,7 +1509,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0("%s: %s()\n", __FILE__, __func__); + edac_dbg(0, "\n"); if (i5000_pci) edac_pci_release_generic_ctl(i5000_pci); @@ -1525,7 +1519,6 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev) /* retrieve references to resources, and free those resources */ i5000_put_devices(mci); - kobject_put(&mci->edac_mci_kobj); edac_mc_free(mci); } @@ -1562,7 +1555,7 @@ static int __init i5000_init(void) { int pci_rc; - debugf2("MC: %s: %s()\n", __FILE__, __func__); + edac_dbg(2, "MC:\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -1578,7 +1571,7 @@ static int __init i5000_init(void) */ static void __exit i5000_exit(void) { - debugf2("MC: %s: %s()\n", __FILE__, __func__); + edac_dbg(2, "MC:\n"); pci_unregister_driver(&i5000_driver); } diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index e9e7c2a29dc3..c4b5e5f868e8 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c @@ -431,10 +431,10 @@ static void i5100_handle_ce(struct mem_ctl_info *mci, "bank %u, cas %u, ras %u\n", bank, cas, ras); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, syndrome, chan, rank, -1, - msg, detail, NULL); + msg, detail); } static void i5100_handle_ue(struct mem_ctl_info *mci, @@ -453,10 +453,10 @@ static void i5100_handle_ue(struct mem_ctl_info *mci, "bank %u, cas %u, ras %u\n", bank, cas, ras); - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, syndrome, chan, rank, -1, - msg, detail, NULL); + msg, detail); } static void i5100_read_log(struct mem_ctl_info *mci, int chan, @@ -859,8 +859,8 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci) i5100_rank_to_slot(mci, chan, rank)); } - debugf2("dimm channel %d, rank %d, size %ld\n", - chan, rank, (long)PAGES_TO_MiB(npages)); + edac_dbg(2, "dimm channel %d, rank %d, size %ld\n", + chan, rank, (long)PAGES_TO_MiB(npages)); } } @@ -943,7 +943,7 @@ static int __devinit i5100_init_one(struct pci_dev *pdev, goto bail_disable_ch1; } - mci->dev = &pdev->dev; + mci->pdev = &pdev->dev; priv = mci->pvt_info; priv->ranksperchan = ranksperch; diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c index 
6640c29e1885..277246998b80 100644 --- a/drivers/edac/i5400_edac.c +++ b/drivers/edac/i5400_edac.c @@ -300,24 +300,6 @@ static inline int extract_fbdchan_indx(u32 x) return (x>>28) & 0x3; } -#ifdef CONFIG_EDAC_DEBUG -/* MTR NUMROW */ -static const char *numrow_toString[] = { - "8,192 - 13 rows", - "16,384 - 14 rows", - "32,768 - 15 rows", - "65,536 - 16 rows" -}; - -/* MTR NUMCOL */ -static const char *numcol_toString[] = { - "1,024 - 10 columns", - "2,048 - 11 columns", - "4,096 - 12 columns", - "reserved" -}; -#endif - /* Device name and register DID (Device ID) */ struct i5400_dev_info { const char *ctl_name; /* name for this device */ @@ -345,7 +327,13 @@ struct i5400_pvt { struct pci_dev *branch_1; /* 22.0 */ u16 tolm; /* top of low memory */ - u64 ambase; /* AMB BAR */ + union { + u64 ambase; /* AMB BAR */ + struct { + u32 ambase_bottom; + u32 ambase_top; + } u __packed; + }; u16 mir0, mir1; @@ -560,10 +548,9 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci, ras = nrec_ras(info); cas = nrec_cas(info); - debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d " - "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n", - rank, channel, channel + 1, branch >> 1, bank, - buf_id, rdwr_str(rdwr), ras, cas); + edac_dbg(0, "\t\tDIMM= %d Channels= %d,%d (Branch= %d DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n", + rank, channel, channel + 1, branch >> 1, bank, + buf_id, rdwr_str(rdwr), ras, cas); /* Only 1 bit will be on */ errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); @@ -573,10 +560,10 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci, "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)", bank, buf_id, ras, cas, allErrors, error_name[errnum]); - edac_mc_handle_error(tp_event, mci, 0, 0, 0, + edac_mc_handle_error(tp_event, mci, 1, 0, 0, 0, branch >> 1, -1, rank, rdwr ? "Write error" : "Read error", - msg, NULL); + msg); } /* @@ -613,7 +600,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci, /* Correctable errors */ if (allErrors & ERROR_NF_CORRECTABLE) { - debugf0("\tCorrected bits= 0x%lx\n", allErrors); + edac_dbg(0, "\tCorrected bits= 0x%lx\n", allErrors); branch = extract_fbdchan_indx(info->ferr_nf_fbd); @@ -634,10 +621,9 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci, /* Only 1 bit will be on */ errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); - debugf0("\t\tDIMM= %d Channel= %d (Branch %d " - "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", - rank, channel, branch >> 1, bank, - rdwr_str(rdwr), ras, cas); + edac_dbg(0, "\t\tDIMM= %d Channel= %d (Branch %d DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", + rank, channel, branch >> 1, bank, + rdwr_str(rdwr), ras, cas); /* Form out message */ snprintf(msg, sizeof(msg), @@ -646,10 +632,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci, branch >> 1, bank, rdwr_str(rdwr), ras, cas, allErrors, error_name[errnum]); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, branch >> 1, channel % 2, rank, rdwr ? 
"Write error" : "Read error", - msg, NULL); + msg); return; } @@ -700,7 +686,7 @@ static void i5400_clear_error(struct mem_ctl_info *mci) static void i5400_check_error(struct mem_ctl_info *mci) { struct i5400_error_info info; - debugf4("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); + edac_dbg(4, "MC%d\n", mci->mc_idx); i5400_get_error_info(mci, &info); i5400_process_error_info(mci, &info); } @@ -786,15 +772,16 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx) } pvt->fsb_error_regs = pdev; - debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", - pci_name(pvt->system_address), - pvt->system_address->vendor, pvt->system_address->device); - debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", - pci_name(pvt->branchmap_werrors), - pvt->branchmap_werrors->vendor, pvt->branchmap_werrors->device); - debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", - pci_name(pvt->fsb_error_regs), - pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); + edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n", + pci_name(pvt->system_address), + pvt->system_address->vendor, pvt->system_address->device); + edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", + pci_name(pvt->branchmap_werrors), + pvt->branchmap_werrors->vendor, + pvt->branchmap_werrors->device); + edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n", + pci_name(pvt->fsb_error_regs), + pvt->fsb_error_regs->vendor, pvt->fsb_error_regs->device); pvt->branch_0 = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_FBD0, NULL); @@ -882,8 +869,8 @@ static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel) n = dimm; if (n >= DIMMS_PER_CHANNEL) { - debugf0("ERROR: trying to access an invalid dimm: %d\n", - dimm); + edac_dbg(0, "ERROR: trying to access an invalid dimm: %d\n", + dimm); return 0; } @@ -903,20 +890,29 @@ static void decode_mtr(int slot_row, u16 mtr) ans = MTR_DIMMS_PRESENT(mtr); - debugf2("\tMTR%d=0x%x: DIMMs are %s\n", slot_row, mtr, - ans ? "Present" : "NOT Present"); + edac_dbg(2, "\tMTR%d=0x%x: DIMMs are %sPresent\n", + slot_row, mtr, ans ? "" : "NOT "); if (!ans) return; - debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); - - debugf2("\t\tELECTRICAL THROTTLING is %s\n", - MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); - - debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); - debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANK(mtr) ? "double" : "single"); - debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); - debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); + edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); + + edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n", + MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); + + edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); + edac_dbg(2, "\t\tNUMRANK: %s\n", + MTR_DIMM_RANK(mtr) ? "double" : "single"); + edac_dbg(2, "\t\tNUMROW: %s\n", + MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" : + MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" : + MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" : + "65,536 - 16 rows"); + edac_dbg(2, "\t\tNUMCOL: %s\n", + MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" : + MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" : + MTR_DIMM_COLS(mtr) == 2 ? 
"4,096 - 12 columns" : + "reserved"); } static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel, @@ -989,7 +985,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt) "-------------------------------"); p += n; space -= n; - debugf2("%s\n", mem_buffer); + edac_dbg(2, "%s\n", mem_buffer); p = mem_buffer; space = PAGE_SIZE; } @@ -1004,7 +1000,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt) p += n; space -= n; } - debugf2("%s\n", mem_buffer); + edac_dbg(2, "%s\n", mem_buffer); p = mem_buffer; space = PAGE_SIZE; } @@ -1014,7 +1010,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt) "-------------------------------"); p += n; space -= n; - debugf2("%s\n", mem_buffer); + edac_dbg(2, "%s\n", mem_buffer); p = mem_buffer; space = PAGE_SIZE; @@ -1029,7 +1025,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt) } space -= n; - debugf2("%s\n", mem_buffer); + edac_dbg(2, "%s\n", mem_buffer); p = mem_buffer; space = PAGE_SIZE; @@ -1042,7 +1038,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt) } /* output the last message and free buffer */ - debugf2("%s\n", mem_buffer); + edac_dbg(2, "%s\n", mem_buffer); kfree(mem_buffer); } @@ -1065,25 +1061,25 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) pvt = mci->pvt_info; pci_read_config_dword(pvt->system_address, AMBASE, - (u32 *) &pvt->ambase); + &pvt->u.ambase_bottom); pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32), - ((u32 *) &pvt->ambase) + sizeof(u32)); + &pvt->u.ambase_top); maxdimmperch = pvt->maxdimmperch; maxch = pvt->maxch; - debugf2("AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", - (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); + edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n", + (long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch); /* Get the Branch Map regs */ pci_read_config_word(pvt->branchmap_werrors, TOLM, &pvt->tolm); pvt->tolm >>= 12; - debugf2("\nTOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, - pvt->tolm); + edac_dbg(2, "\nTOLM (number of 256M regions) =%u (0x%x)\n", + pvt->tolm, pvt->tolm); actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); - debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", - actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); + edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n", + actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); pci_read_config_word(pvt->branchmap_werrors, MIR0, &pvt->mir0); pci_read_config_word(pvt->branchmap_werrors, MIR1, &pvt->mir1); @@ -1092,11 +1088,13 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) limit = (pvt->mir0 >> 4) & 0x0fff; way0 = pvt->mir0 & 0x1; way1 = pvt->mir0 & 0x2; - debugf2("MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); + edac_dbg(2, "MIR0: limit= 0x%x WAY1= %u WAY0= %x\n", + limit, way1, way0); limit = (pvt->mir1 >> 4) & 0xfff; way0 = pvt->mir1 & 0x1; way1 = pvt->mir1 & 0x2; - debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); + edac_dbg(2, "MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", + limit, way1, way0); /* Get the set of MTR[0-3] regs by each branch */ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) { @@ -1106,8 +1104,8 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) pci_read_config_word(pvt->branch_0, where, &pvt->b0_mtr[slot_row]); - debugf2("MTR%d where=0x%x B0 value=0x%x\n", slot_row, where, - pvt->b0_mtr[slot_row]); + edac_dbg(2, "MTR%d where=0x%x B0 value=0x%x\n", + slot_row, where, pvt->b0_mtr[slot_row]); if (pvt->maxch < CHANNELS_PER_BRANCH) { 
pvt->b1_mtr[slot_row] = 0; @@ -1117,22 +1115,22 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) /* Branch 1 set of MTR registers */ pci_read_config_word(pvt->branch_1, where, &pvt->b1_mtr[slot_row]); - debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, where, - pvt->b1_mtr[slot_row]); + edac_dbg(2, "MTR%d where=0x%x B1 value=0x%x\n", + slot_row, where, pvt->b1_mtr[slot_row]); } /* Read and dump branch 0's MTRs */ - debugf2("\nMemory Technology Registers:\n"); - debugf2(" Branch 0:\n"); + edac_dbg(2, "Memory Technology Registers:\n"); + edac_dbg(2, " Branch 0:\n"); for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) decode_mtr(slot_row, pvt->b0_mtr[slot_row]); pci_read_config_word(pvt->branch_0, AMBPRESENT_0, &pvt->b0_ambpresent0); - debugf2("\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); + edac_dbg(2, "\t\tAMB-Branch 0-present0 0x%x:\n", pvt->b0_ambpresent0); pci_read_config_word(pvt->branch_0, AMBPRESENT_1, &pvt->b0_ambpresent1); - debugf2("\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); + edac_dbg(2, "\t\tAMB-Branch 0-present1 0x%x:\n", pvt->b0_ambpresent1); /* Only if we have 2 branchs (4 channels) */ if (pvt->maxch < CHANNELS_PER_BRANCH) { @@ -1140,18 +1138,18 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) pvt->b1_ambpresent1 = 0; } else { /* Read and dump branch 1's MTRs */ - debugf2(" Branch 1:\n"); + edac_dbg(2, " Branch 1:\n"); for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) decode_mtr(slot_row, pvt->b1_mtr[slot_row]); pci_read_config_word(pvt->branch_1, AMBPRESENT_0, &pvt->b1_ambpresent0); - debugf2("\t\tAMB-Branch 1-present0 0x%x:\n", - pvt->b1_ambpresent0); + edac_dbg(2, "\t\tAMB-Branch 1-present0 0x%x:\n", + pvt->b1_ambpresent0); pci_read_config_word(pvt->branch_1, AMBPRESENT_1, &pvt->b1_ambpresent1); - debugf2("\t\tAMB-Branch 1-present1 0x%x:\n", - pvt->b1_ambpresent1); + edac_dbg(2, "\t\tAMB-Branch 1-present1 0x%x:\n", + pvt->b1_ambpresent1); } /* Go and determine the size of each DIMM and place in an @@ -1203,10 +1201,9 @@ static int i5400_init_dimms(struct mem_ctl_info *mci) size_mb = pvt->dimm_info[slot][channel].megabytes; - debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n", - __func__, dimm - mci->dimms, - channel / 2, channel % 2, slot, - size_mb / 1000, size_mb % 1000); + edac_dbg(2, "dimm (branch %d channel %d slot %d): %d.%03d GB\n", + channel / 2, channel % 2, slot, + size_mb / 1000, size_mb % 1000); dimm->nr_pages = size_mb << 8; dimm->grain = 8; @@ -1227,7 +1224,7 @@ static int i5400_init_dimms(struct mem_ctl_info *mci) * With such single-DIMM mode, the SDCC algorithm degrades to SECDEC+. 
*/ if (ndimms == 1) - mci->dimms[0].edac_mode = EDAC_SECDED; + mci->dimms[0]->edac_mode = EDAC_SECDED; return (ndimms == 0); } @@ -1270,10 +1267,9 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) if (dev_idx >= ARRAY_SIZE(i5400_devs)) return -EINVAL; - debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", - __FILE__, __func__, - pdev->bus->number, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n", + pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); /* We only are looking for func 0 of the set */ if (PCI_FUNC(pdev->devfn) != 0) @@ -1297,9 +1293,9 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) if (mci == NULL) return -ENOMEM; - debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); + edac_dbg(0, "MC: mci = %p\n", mci); - mci->dev = &pdev->dev; /* record ptr to the generic device */ + mci->pdev = &pdev->dev; /* record ptr to the generic device */ pvt = mci->pvt_info; pvt->system_address = pdev; /* Record this device in our private */ @@ -1329,19 +1325,16 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) /* initialize the MC control structure 'dimms' table * with the mapping and control information */ if (i5400_init_dimms(mci)) { - debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" - " because i5400_init_dimms() returned nonzero " - "value\n"); + edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i5400_init_dimms() returned nonzero value\n"); mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */ } else { - debugf1("MC: Enable error reporting now\n"); + edac_dbg(1, "MC: Enable error reporting now\n"); i5400_enable_error_reporting(mci); } /* add this new MC control structure to EDAC's list of MCs */ if (edac_mc_add_mc(mci)) { - debugf0("MC: %s: %s(): failed edac_mc_add_mc()\n", - __FILE__, __func__); + edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ @@ -1385,7 +1378,7 @@ static int __devinit i5400_init_one(struct pci_dev *pdev, { int rc; - debugf0("MC: %s: %s()\n", __FILE__, __func__); + edac_dbg(0, "MC:\n"); /* wake up device */ rc = pci_enable_device(pdev); @@ -1404,7 +1397,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0("%s: %s()\n", __FILE__, __func__); + edac_dbg(0, "\n"); if (i5400_pci) edac_pci_release_generic_ctl(i5400_pci); @@ -1450,7 +1443,7 @@ static int __init i5400_init(void) { int pci_rc; - debugf2("MC: %s: %s()\n", __FILE__, __func__); + edac_dbg(2, "MC:\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -1466,7 +1459,7 @@ static int __init i5400_init(void) */ static void __exit i5400_exit(void) { - debugf2("MC: %s: %s()\n", __FILE__, __func__); + edac_dbg(2, "MC:\n"); pci_unregister_driver(&i5400_driver); } diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c index 97c22fd650ee..a09d0667f72a 100644 --- a/drivers/edac/i7300_edac.c +++ b/drivers/edac/i7300_edac.c @@ -182,24 +182,6 @@ static const u16 mtr_regs[MAX_SLOTS] = { #define MTR_DIMM_COLS(mtr) ((mtr) & 0x3) #define MTR_DIMM_COLS_ADDR_BITS(mtr) (MTR_DIMM_COLS(mtr) + 10) -#ifdef CONFIG_EDAC_DEBUG -/* MTR NUMROW */ -static const char *numrow_toString[] = { - "8,192 - 13 rows", - "16,384 - 14 rows", - "32,768 - 15 rows", - "65,536 - 16 rows" -}; - -/* MTR NUMCOL */ -static const char *numcol_toString[] = { - "1,024 - 10 columns", - "2,048 - 11 columns", - "4,096 - 12 columns", - "reserved" -}; 
-#endif - /************************************************ * i7300 Register definitions for error detection ************************************************/ @@ -467,10 +449,10 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci) "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))", bank, ras, cas, errors, specific); - edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0, branch, -1, rank, is_wr ? "Write error" : "Read error", - pvt->tmp_prt_buffer, NULL); + pvt->tmp_prt_buffer); } @@ -513,11 +495,11 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci) "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))", bank, ras, cas, errors, specific); - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, syndrome, branch >> 1, channel % 2, rank, is_wr ? "Write error" : "Read error", - pvt->tmp_prt_buffer, NULL); + pvt->tmp_prt_buffer); } return; } @@ -614,9 +596,8 @@ static int decode_mtr(struct i7300_pvt *pvt, mtr = pvt->mtr[slot][branch]; ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0; - debugf2("\tMTR%d CH%d: DIMMs are %s (mtr)\n", - slot, channel, - ans ? "Present" : "NOT Present"); + edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n", + slot, channel, ans ? "" : "NOT "); /* Determine if there is a DIMM present in this DIMM slot */ if (!ans) @@ -638,16 +619,25 @@ static int decode_mtr(struct i7300_pvt *pvt, dinfo->megabytes = 1 << addrBits; - debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); - - debugf2("\t\tELECTRICAL THROTTLING is %s\n", - MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); - - debugf2("\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); - debugf2("\t\tNUMRANK: %s\n", MTR_DIMM_RANKS(mtr) ? "double" : "single"); - debugf2("\t\tNUMROW: %s\n", numrow_toString[MTR_DIMM_ROWS(mtr)]); - debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); - debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes); + edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); + + edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n", + MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled"); + + edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr)); + edac_dbg(2, "\t\tNUMRANK: %s\n", + MTR_DIMM_RANKS(mtr) ? "double" : "single"); + edac_dbg(2, "\t\tNUMROW: %s\n", + MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" : + MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" : + MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" : + "65,536 - 16 rows"); + edac_dbg(2, "\t\tNUMCOL: %s\n", + MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" : + MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" : + MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" : + "reserved"); + edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes); /* * The type of error detection actually depends of the @@ -663,9 +653,9 @@ static int decode_mtr(struct i7300_pvt *pvt, dimm->mtype = MEM_FB_DDR2; if (IS_SINGLE_MODE(pvt->mc_settings_a)) { dimm->edac_mode = EDAC_SECDED; - debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); + edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); } else { - debugf2("\t\tECC code is on Lockstep mode\n"); + edac_dbg(2, "\t\tECC code is on Lockstep mode\n"); if (MTR_DRAM_WIDTH(mtr) == 8) dimm->edac_mode = EDAC_S8ECD8ED; else @@ -674,9 +664,9 @@ static int decode_mtr(struct i7300_pvt *pvt, /* ask what device type on this row */ if (MTR_DRAM_WIDTH(mtr) == 8) { - debugf2("\t\tScrub algorithm for x8 is on %s mode\n", - IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? 
- "enhanced" : "normal"); + edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n", + IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? + "enhanced" : "normal"); dimm->dtype = DEV_X8; } else @@ -710,14 +700,14 @@ static void print_dimm_size(struct i7300_pvt *pvt) p += n; space -= n; } - debugf2("%s\n", pvt->tmp_prt_buffer); + edac_dbg(2, "%s\n", pvt->tmp_prt_buffer); p = pvt->tmp_prt_buffer; space = PAGE_SIZE; n = snprintf(p, space, "-------------------------------" "------------------------------"); p += n; space -= n; - debugf2("%s\n", pvt->tmp_prt_buffer); + edac_dbg(2, "%s\n", pvt->tmp_prt_buffer); p = pvt->tmp_prt_buffer; space = PAGE_SIZE; @@ -733,7 +723,7 @@ static void print_dimm_size(struct i7300_pvt *pvt) space -= n; } - debugf2("%s\n", pvt->tmp_prt_buffer); + edac_dbg(2, "%s\n", pvt->tmp_prt_buffer); p = pvt->tmp_prt_buffer; space = PAGE_SIZE; } @@ -742,7 +732,7 @@ static void print_dimm_size(struct i7300_pvt *pvt) "------------------------------"); p += n; space -= n; - debugf2("%s\n", pvt->tmp_prt_buffer); + edac_dbg(2, "%s\n", pvt->tmp_prt_buffer); p = pvt->tmp_prt_buffer; space = PAGE_SIZE; #endif @@ -765,7 +755,7 @@ static int i7300_init_csrows(struct mem_ctl_info *mci) pvt = mci->pvt_info; - debugf2("Memory Technology Registers:\n"); + edac_dbg(2, "Memory Technology Registers:\n"); /* Get the AMB present registers for the four channels */ for (branch = 0; branch < MAX_BRANCHES; branch++) { @@ -774,15 +764,15 @@ static int i7300_init_csrows(struct mem_ctl_info *mci) pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], AMBPRESENT_0, &pvt->ambpresent[channel]); - debugf2("\t\tAMB-present CH%d = 0x%x:\n", - channel, pvt->ambpresent[channel]); + edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n", + channel, pvt->ambpresent[channel]); channel = to_channel(1, branch); pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], AMBPRESENT_1, &pvt->ambpresent[channel]); - debugf2("\t\tAMB-present CH%d = 0x%x:\n", - channel, pvt->ambpresent[channel]); + edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n", + channel, pvt->ambpresent[channel]); } /* Get the set of MTR[0-7] regs by each branch */ @@ -824,12 +814,11 @@ static int i7300_init_csrows(struct mem_ctl_info *mci) static void decode_mir(int mir_no, u16 mir[MAX_MIR]) { if (mir[mir_no] & 3) - debugf2("MIR%d: limit= 0x%x Branch(es) that participate:" - " %s %s\n", - mir_no, - (mir[mir_no] >> 4) & 0xfff, - (mir[mir_no] & 1) ? "B0" : "", - (mir[mir_no] & 2) ? "B1" : ""); + edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n", + mir_no, + (mir[mir_no] >> 4) & 0xfff, + (mir[mir_no] & 1) ? "B0" : "", + (mir[mir_no] & 2) ? 
"B1" : ""); } /** @@ -849,17 +838,17 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci) pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE, (u32 *) &pvt->ambase); - debugf2("AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase); + edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase); /* Get the Branch Map regs */ pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm); pvt->tolm >>= 12; - debugf2("TOLM (number of 256M regions) =%u (0x%x)\n", pvt->tolm, - pvt->tolm); + edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n", + pvt->tolm, pvt->tolm); actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28)); - debugf2("Actual TOLM byte addr=%u.%03u GB (0x%x)\n", - actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); + edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n", + actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28); /* Get memory controller settings */ pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS, @@ -868,15 +857,15 @@ static int i7300_get_mc_regs(struct mem_ctl_info *mci) &pvt->mc_settings_a); if (IS_SINGLE_MODE(pvt->mc_settings_a)) - debugf0("Memory controller operating on single mode\n"); + edac_dbg(0, "Memory controller operating on single mode\n"); else - debugf0("Memory controller operating on %s mode\n", - IS_MIRRORED(pvt->mc_settings) ? "mirrored" : "non-mirrored"); + edac_dbg(0, "Memory controller operating on %smirrored mode\n", + IS_MIRRORED(pvt->mc_settings) ? "" : "non-"); - debugf0("Error detection is %s\n", - IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); - debugf0("Retry is %s\n", - IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); + edac_dbg(0, "Error detection is %s\n", + IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled"); + edac_dbg(0, "Retry is %s\n", + IS_RETRY_ENABLED(pvt->mc_settings) ? 
"enabled" : "disabled"); /* Get Memory Interleave Range registers */ pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0, @@ -970,18 +959,18 @@ static int __devinit i7300_get_devices(struct mem_ctl_info *mci) } } - debugf1("System Address, processor bus- PCI Bus ID: %s %x:%x\n", - pci_name(pvt->pci_dev_16_0_fsb_ctlr), - pvt->pci_dev_16_0_fsb_ctlr->vendor, - pvt->pci_dev_16_0_fsb_ctlr->device); - debugf1("Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", - pci_name(pvt->pci_dev_16_1_fsb_addr_map), - pvt->pci_dev_16_1_fsb_addr_map->vendor, - pvt->pci_dev_16_1_fsb_addr_map->device); - debugf1("FSB Error Regs - PCI Bus ID: %s %x:%x\n", - pci_name(pvt->pci_dev_16_2_fsb_err_regs), - pvt->pci_dev_16_2_fsb_err_regs->vendor, - pvt->pci_dev_16_2_fsb_err_regs->device); + edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n", + pci_name(pvt->pci_dev_16_0_fsb_ctlr), + pvt->pci_dev_16_0_fsb_ctlr->vendor, + pvt->pci_dev_16_0_fsb_ctlr->device); + edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s %x:%x\n", + pci_name(pvt->pci_dev_16_1_fsb_addr_map), + pvt->pci_dev_16_1_fsb_addr_map->vendor, + pvt->pci_dev_16_1_fsb_addr_map->device); + edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s %x:%x\n", + pci_name(pvt->pci_dev_16_2_fsb_err_regs), + pvt->pci_dev_16_2_fsb_err_regs->vendor, + pvt->pci_dev_16_2_fsb_err_regs->device); pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0, @@ -1032,10 +1021,9 @@ static int __devinit i7300_init_one(struct pci_dev *pdev, if (rc == -EIO) return rc; - debugf0("MC: " __FILE__ ": %s(), pdev bus %u dev=0x%x fn=0x%x\n", - __func__, - pdev->bus->number, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n", + pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); /* We only are looking for func 0 of the set */ if (PCI_FUNC(pdev->devfn) != 0) @@ -1055,9 +1043,9 @@ static int __devinit i7300_init_one(struct pci_dev *pdev, if (mci == NULL) return -ENOMEM; - debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); + edac_dbg(0, "MC: mci = %p\n", mci); - mci->dev = &pdev->dev; /* record ptr to the generic device */ + mci->pdev = &pdev->dev; /* record ptr to the generic device */ pvt = mci->pvt_info; pvt->pci_dev_16_0_fsb_ctlr = pdev; /* Record this device in our private */ @@ -1088,19 +1076,16 @@ static int __devinit i7300_init_one(struct pci_dev *pdev, /* initialize the MC control structure 'csrows' table * with the mapping and control information */ if (i7300_get_mc_regs(mci)) { - debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" - " because i7300_init_csrows() returned nonzero " - "value\n"); + edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n"); mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ } else { - debugf1("MC: Enable error reporting now\n"); + edac_dbg(1, "MC: Enable error reporting now\n"); i7300_enable_error_reporting(mci); } /* add this new MC control structure to EDAC's list of MCs */ if (edac_mc_add_mc(mci)) { - debugf0("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ @@ -1142,7 +1127,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev) struct mem_ctl_info *mci; char *tmp; - debugf0(__FILE__ ": %s()\n", __func__); + edac_dbg(0, "\n"); if (i7300_pci) 
edac_pci_release_generic_ctl(i7300_pci); @@ -1189,7 +1174,7 @@ static int __init i7300_init(void) { int pci_rc; - debugf2("MC: " __FILE__ ": %s()\n", __func__); + edac_dbg(2, "\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -1204,7 +1189,7 @@ static int __init i7300_init(void) */ static void __exit i7300_exit(void) { - debugf2("MC: " __FILE__ ": %s()\n", __func__); + edac_dbg(2, "\n"); pci_unregister_driver(&i7300_driver); } diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index a499c7ed820a..3672101023bd 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -248,6 +248,8 @@ struct i7core_dev { }; struct i7core_pvt { + struct device *addrmatch_dev, *chancounts_dev; + struct pci_dev *pci_noncore; struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1]; struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1]; @@ -514,29 +516,28 @@ static int get_dimm_config(struct mem_ctl_info *mci) pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod); pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map); - debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n", - pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status, - pvt->info.max_dod, pvt->info.ch_map); + edac_dbg(0, "QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n", + pvt->i7core_dev->socket, pvt->info.mc_control, + pvt->info.mc_status, pvt->info.max_dod, pvt->info.ch_map); if (ECC_ENABLED(pvt)) { - debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4); + edac_dbg(0, "ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4); if (ECCx8(pvt)) mode = EDAC_S8ECD8ED; else mode = EDAC_S4ECD4ED; } else { - debugf0("ECC disabled\n"); + edac_dbg(0, "ECC disabled\n"); mode = EDAC_NONE; } /* FIXME: need to handle the error codes */ - debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked " - "x%x x 0x%x\n", - numdimms(pvt->info.max_dod), - numrank(pvt->info.max_dod >> 2), - numbank(pvt->info.max_dod >> 4), - numrow(pvt->info.max_dod >> 6), - numcol(pvt->info.max_dod >> 9)); + edac_dbg(0, "DOD Max limits: DIMMS: %d, %d-ranked, %d-banked x%x x 0x%x\n", + numdimms(pvt->info.max_dod), + numrank(pvt->info.max_dod >> 2), + numbank(pvt->info.max_dod >> 4), + numrow(pvt->info.max_dod >> 6), + numcol(pvt->info.max_dod >> 9)); for (i = 0; i < NUM_CHANS; i++) { u32 data, dimm_dod[3], value[8]; @@ -545,11 +546,11 @@ static int get_dimm_config(struct mem_ctl_info *mci) continue; if (!CH_ACTIVE(pvt, i)) { - debugf0("Channel %i is not active\n", i); + edac_dbg(0, "Channel %i is not active\n", i); continue; } if (CH_DISABLED(pvt, i)) { - debugf0("Channel %i is disabled\n", i); + edac_dbg(0, "Channel %i is disabled\n", i); continue; } @@ -580,15 +581,14 @@ static int get_dimm_config(struct mem_ctl_info *mci) pci_read_config_dword(pvt->pci_ch[i][1], MC_DOD_CH_DIMM2, &dimm_dod[2]); - debugf0("Ch%d phy rd%d, wr%d (0x%08x): " - "%s%s%s%cDIMMs\n", - i, - RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i), - data, - pvt->channel[i].is_3dimms_present ? "3DIMMS " : "", - pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "", - pvt->channel[i].has_4rank ? "HAS_4R " : "", - (data & REGISTERED_DIMM) ? 'R' : 'U'); + edac_dbg(0, "Ch%d phy rd%d, wr%d (0x%08x): %s%s%s%cDIMMs\n", + i, + RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i), + data, + pvt->channel[i].is_3dimms_present ? "3DIMMS " : "", + pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "", + pvt->channel[i].has_4rank ? "HAS_4R " : "", + (data & REGISTERED_DIMM) ? 
'R' : 'U'); for (j = 0; j < 3; j++) { u32 banks, ranks, rows, cols; @@ -607,11 +607,10 @@ static int get_dimm_config(struct mem_ctl_info *mci) /* DDR3 has 8 I/O banks */ size = (rows * cols * banks * ranks) >> (20 - 3); - debugf0("\tdimm %d %d Mb offset: %x, " - "bank: %d, rank: %d, row: %#x, col: %#x\n", - j, size, - RANKOFFSET(dimm_dod[j]), - banks, ranks, rows, cols); + edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n", + j, size, + RANKOFFSET(dimm_dod[j]), + banks, ranks, rows, cols); npages = MiB_TO_PAGES(size); @@ -647,12 +646,12 @@ static int get_dimm_config(struct mem_ctl_info *mci) pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]); pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]); pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]); - debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i); + edac_dbg(1, "\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i); for (j = 0; j < 8; j++) - debugf1("\t\t%#x\t%#x\t%#x\n", - (value[j] >> 27) & 0x1, - (value[j] >> 24) & 0x7, - (value[j] & ((1 << 24) - 1))); + edac_dbg(1, "\t\t%#x\t%#x\t%#x\n", + (value[j] >> 27) & 0x1, + (value[j] >> 24) & 0x7, + (value[j] & ((1 << 24) - 1))); } return 0; @@ -662,6 +661,8 @@ static int get_dimm_config(struct mem_ctl_info *mci) Error insertion routines ****************************************************************************/ +#define to_mci(k) container_of(k, struct mem_ctl_info, dev) + /* The i7core has independent error injection features per channel. However, to have a simpler code, we don't allow enabling error injection on more than one channel. @@ -691,9 +692,11 @@ static int disable_inject(const struct mem_ctl_info *mci) * bit 0 - refers to the lower 32-byte half cacheline * bit 1 - refers to the upper 32-byte half cacheline */ -static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci, +static ssize_t i7core_inject_section_store(struct device *dev, + struct device_attribute *mattr, const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); struct i7core_pvt *pvt = mci->pvt_info; unsigned long value; int rc; @@ -709,9 +712,11 @@ static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci, return count; } -static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci, - char *data) +static ssize_t i7core_inject_section_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); struct i7core_pvt *pvt = mci->pvt_info; return sprintf(data, "0x%08x\n", pvt->inject.section); } @@ -724,10 +729,12 @@ static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci, * bit 1 - inject ECC error * bit 2 - inject parity error */ -static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci, +static ssize_t i7core_inject_type_store(struct device *dev, + struct device_attribute *mattr, const char *data, size_t count) { - struct i7core_pvt *pvt = mci->pvt_info; + struct mem_ctl_info *mci = to_mci(dev); +struct i7core_pvt *pvt = mci->pvt_info; unsigned long value; int rc; @@ -742,10 +749,13 @@ static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci, return count; } -static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci, - char *data) +static ssize_t i7core_inject_type_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); struct i7core_pvt *pvt = mci->pvt_info; + return sprintf(data, "0x%08x\n", pvt->inject.type); } @@ -759,9 +769,11 @@ static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci, * 23:16 and 
31:24). Flipping bits in two symbol pairs will cause an * uncorrectable error to be injected. */ -static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci, - const char *data, size_t count) +static ssize_t i7core_inject_eccmask_store(struct device *dev, + struct device_attribute *mattr, + const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); struct i7core_pvt *pvt = mci->pvt_info; unsigned long value; int rc; @@ -777,10 +789,13 @@ static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci, return count; } -static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci, - char *data) +static ssize_t i7core_inject_eccmask_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); struct i7core_pvt *pvt = mci->pvt_info; + return sprintf(data, "0x%08x\n", pvt->inject.eccmask); } @@ -797,14 +812,16 @@ static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci, #define DECLARE_ADDR_MATCH(param, limit) \ static ssize_t i7core_inject_store_##param( \ - struct mem_ctl_info *mci, \ - const char *data, size_t count) \ + struct device *dev, \ + struct device_attribute *mattr, \ + const char *data, size_t count) \ { \ + struct mem_ctl_info *mci = to_mci(dev); \ struct i7core_pvt *pvt; \ long value; \ int rc; \ \ - debugf1("%s()\n", __func__); \ + edac_dbg(1, "\n"); \ pvt = mci->pvt_info; \ \ if (pvt->inject.enable) \ @@ -824,13 +841,15 @@ static ssize_t i7core_inject_store_##param( \ } \ \ static ssize_t i7core_inject_show_##param( \ - struct mem_ctl_info *mci, \ - char *data) \ + struct device *dev, \ + struct device_attribute *mattr, \ + char *data) \ { \ + struct mem_ctl_info *mci = to_mci(dev); \ struct i7core_pvt *pvt; \ \ pvt = mci->pvt_info; \ - debugf1("%s() pvt=%p\n", __func__, pvt); \ + edac_dbg(1, "pvt=%p\n", pvt); \ if (pvt->inject.param < 0) \ return sprintf(data, "any\n"); \ else \ @@ -838,14 +857,9 @@ static ssize_t i7core_inject_show_##param( \ } #define ATTR_ADDR_MATCH(param) \ - { \ - .attr = { \ - .name = #param, \ - .mode = (S_IRUGO | S_IWUSR) \ - }, \ - .show = i7core_inject_show_##param, \ - .store = i7core_inject_store_##param, \ - } + static DEVICE_ATTR(param, S_IRUGO | S_IWUSR, \ + i7core_inject_show_##param, \ + i7core_inject_store_##param) DECLARE_ADDR_MATCH(channel, 3); DECLARE_ADDR_MATCH(dimm, 3); @@ -854,14 +868,21 @@ DECLARE_ADDR_MATCH(bank, 32); DECLARE_ADDR_MATCH(page, 0x10000); DECLARE_ADDR_MATCH(col, 0x4000); +ATTR_ADDR_MATCH(channel); +ATTR_ADDR_MATCH(dimm); +ATTR_ADDR_MATCH(rank); +ATTR_ADDR_MATCH(bank); +ATTR_ADDR_MATCH(page); +ATTR_ADDR_MATCH(col); + static int write_and_test(struct pci_dev *dev, const int where, const u32 val) { u32 read; int count; - debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n", - dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), - where, val); + edac_dbg(0, "setting pci %02x:%02x.%x reg=%02x value=%08x\n", + dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), + where, val); for (count = 0; count < 10; count++) { if (count) @@ -899,9 +920,11 @@ static int write_and_test(struct pci_dev *dev, const int where, const u32 val) * is reliable enough to check if the MC is using the * three channels. However, this is not clear at the datasheet. 
*/ -static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci, - const char *data, size_t count) +static ssize_t i7core_inject_enable_store(struct device *dev, + struct device_attribute *mattr, + const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); struct i7core_pvt *pvt = mci->pvt_info; u32 injectmask; u64 mask = 0; @@ -994,17 +1017,18 @@ static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci, pci_write_config_dword(pvt->pci_noncore, MC_CFG_CONTROL, 8); - debugf0("Error inject addr match 0x%016llx, ecc 0x%08x," - " inject 0x%08x\n", - mask, pvt->inject.eccmask, injectmask); + edac_dbg(0, "Error inject addr match 0x%016llx, ecc 0x%08x, inject 0x%08x\n", + mask, pvt->inject.eccmask, injectmask); return count; } -static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci, - char *data) +static ssize_t i7core_inject_enable_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); struct i7core_pvt *pvt = mci->pvt_info; u32 injectmask; @@ -1014,7 +1038,7 @@ static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci, pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0], MC_CHANNEL_ERROR_INJECT, &injectmask); - debugf0("Inject error read: 0x%018x\n", injectmask); + edac_dbg(0, "Inject error read: 0x%018x\n", injectmask); if (injectmask & 0x0c) pvt->inject.enable = 1; @@ -1024,12 +1048,14 @@ static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci, #define DECLARE_COUNTER(param) \ static ssize_t i7core_show_counter_##param( \ - struct mem_ctl_info *mci, \ - char *data) \ + struct device *dev, \ + struct device_attribute *mattr, \ + char *data) \ { \ + struct mem_ctl_info *mci = to_mci(dev); \ struct i7core_pvt *pvt = mci->pvt_info; \ \ - debugf1("%s() \n", __func__); \ + edac_dbg(1, "\n"); \ if (!pvt->ce_count_available || (pvt->is_registered)) \ return sprintf(data, "data unavailable\n"); \ return sprintf(data, "%lu\n", \ @@ -1037,121 +1063,179 @@ static ssize_t i7core_show_counter_##param( \ } #define ATTR_COUNTER(param) \ - { \ - .attr = { \ - .name = __stringify(udimm##param), \ - .mode = (S_IRUGO | S_IWUSR) \ - }, \ - .show = i7core_show_counter_##param \ - } + static DEVICE_ATTR(udimm##param, S_IRUGO | S_IWUSR, \ + i7core_show_counter_##param, \ + NULL) DECLARE_COUNTER(0); DECLARE_COUNTER(1); DECLARE_COUNTER(2); +ATTR_COUNTER(0); +ATTR_COUNTER(1); +ATTR_COUNTER(2); + /* - * Sysfs struct + * inject_addrmatch device sysfs struct */ -static const struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = { - ATTR_ADDR_MATCH(channel), - ATTR_ADDR_MATCH(dimm), - ATTR_ADDR_MATCH(rank), - ATTR_ADDR_MATCH(bank), - ATTR_ADDR_MATCH(page), - ATTR_ADDR_MATCH(col), - { } /* End of list */ +static struct attribute *i7core_addrmatch_attrs[] = { + &dev_attr_channel.attr, + &dev_attr_dimm.attr, + &dev_attr_rank.attr, + &dev_attr_bank.attr, + &dev_attr_page.attr, + &dev_attr_col.attr, + NULL }; -static const struct mcidev_sysfs_group i7core_inject_addrmatch = { - .name = "inject_addrmatch", - .mcidev_attr = i7core_addrmatch_attrs, +static struct attribute_group addrmatch_grp = { + .attrs = i7core_addrmatch_attrs, }; -static const struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { - ATTR_COUNTER(0), - ATTR_COUNTER(1), - ATTR_COUNTER(2), - { .attr = { .name = NULL } } +static const struct attribute_group *addrmatch_groups[] = { + &addrmatch_grp, + NULL }; -static const struct mcidev_sysfs_group i7core_udimm_counters = { - .name = "all_channel_counts", - .mcidev_attr = 
i7core_udimm_counters_attrs, +static void addrmatch_release(struct device *device) +{ + edac_dbg(1, "Releasing device %s\n", dev_name(device)); + kfree(device); +} + +static struct device_type addrmatch_type = { + .groups = addrmatch_groups, + .release = addrmatch_release, }; -static const struct mcidev_sysfs_attribute i7core_sysfs_rdimm_attrs[] = { - { - .attr = { - .name = "inject_section", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = i7core_inject_section_show, - .store = i7core_inject_section_store, - }, { - .attr = { - .name = "inject_type", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = i7core_inject_type_show, - .store = i7core_inject_type_store, - }, { - .attr = { - .name = "inject_eccmask", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = i7core_inject_eccmask_show, - .store = i7core_inject_eccmask_store, - }, { - .grp = &i7core_inject_addrmatch, - }, { - .attr = { - .name = "inject_enable", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = i7core_inject_enable_show, - .store = i7core_inject_enable_store, - }, - { } /* End of list */ +/* + * all_channel_counts sysfs struct + */ + +static struct attribute *i7core_udimm_counters_attrs[] = { + &dev_attr_udimm0.attr, + &dev_attr_udimm1.attr, + &dev_attr_udimm2.attr, + NULL }; -static const struct mcidev_sysfs_attribute i7core_sysfs_udimm_attrs[] = { - { - .attr = { - .name = "inject_section", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = i7core_inject_section_show, - .store = i7core_inject_section_store, - }, { - .attr = { - .name = "inject_type", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = i7core_inject_type_show, - .store = i7core_inject_type_store, - }, { - .attr = { - .name = "inject_eccmask", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = i7core_inject_eccmask_show, - .store = i7core_inject_eccmask_store, - }, { - .grp = &i7core_inject_addrmatch, - }, { - .attr = { - .name = "inject_enable", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = i7core_inject_enable_show, - .store = i7core_inject_enable_store, - }, { - .grp = &i7core_udimm_counters, - }, - { } /* End of list */ +static struct attribute_group all_channel_counts_grp = { + .attrs = i7core_udimm_counters_attrs, }; +static const struct attribute_group *all_channel_counts_groups[] = { + &all_channel_counts_grp, + NULL +}; + +static void all_channel_counts_release(struct device *device) +{ + edac_dbg(1, "Releasing device %s\n", dev_name(device)); + kfree(device); +} + +static struct device_type all_channel_counts_type = { + .groups = all_channel_counts_groups, + .release = all_channel_counts_release, +}; + +/* + * inject sysfs attributes + */ + +static DEVICE_ATTR(inject_section, S_IRUGO | S_IWUSR, + i7core_inject_section_show, i7core_inject_section_store); + +static DEVICE_ATTR(inject_type, S_IRUGO | S_IWUSR, + i7core_inject_type_show, i7core_inject_type_store); + + +static DEVICE_ATTR(inject_eccmask, S_IRUGO | S_IWUSR, + i7core_inject_eccmask_show, i7core_inject_eccmask_store); + +static DEVICE_ATTR(inject_enable, S_IRUGO | S_IWUSR, + i7core_inject_enable_show, i7core_inject_enable_store); + +static int i7core_create_sysfs_devices(struct mem_ctl_info *mci) +{ + struct i7core_pvt *pvt = mci->pvt_info; + int rc; + + rc = device_create_file(&mci->dev, &dev_attr_inject_section); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_inject_type); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_inject_eccmask); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_inject_enable); + if (rc < 0) + return rc; + + pvt->addrmatch_dev = 
kzalloc(sizeof(*pvt->addrmatch_dev), GFP_KERNEL); + if (!pvt->addrmatch_dev) + return rc; + + pvt->addrmatch_dev->type = &addrmatch_type; + pvt->addrmatch_dev->bus = mci->dev.bus; + device_initialize(pvt->addrmatch_dev); + pvt->addrmatch_dev->parent = &mci->dev; + dev_set_name(pvt->addrmatch_dev, "inject_addrmatch"); + dev_set_drvdata(pvt->addrmatch_dev, mci); + + edac_dbg(1, "creating %s\n", dev_name(pvt->addrmatch_dev)); + + rc = device_add(pvt->addrmatch_dev); + if (rc < 0) + return rc; + + if (!pvt->is_registered) { + pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev), + GFP_KERNEL); + if (!pvt->chancounts_dev) { + put_device(pvt->addrmatch_dev); + device_del(pvt->addrmatch_dev); + return rc; + } + + pvt->chancounts_dev->type = &all_channel_counts_type; + pvt->chancounts_dev->bus = mci->dev.bus; + device_initialize(pvt->chancounts_dev); + pvt->chancounts_dev->parent = &mci->dev; + dev_set_name(pvt->chancounts_dev, "all_channel_counts"); + dev_set_drvdata(pvt->chancounts_dev, mci); + + edac_dbg(1, "creating %s\n", dev_name(pvt->chancounts_dev)); + + rc = device_add(pvt->chancounts_dev); + if (rc < 0) + return rc; + } + return 0; +} + +static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci) +{ + struct i7core_pvt *pvt = mci->pvt_info; + + edac_dbg(1, "\n"); + + device_remove_file(&mci->dev, &dev_attr_inject_section); + device_remove_file(&mci->dev, &dev_attr_inject_type); + device_remove_file(&mci->dev, &dev_attr_inject_eccmask); + device_remove_file(&mci->dev, &dev_attr_inject_enable); + + if (!pvt->is_registered) { + put_device(pvt->chancounts_dev); + device_del(pvt->chancounts_dev); + } + put_device(pvt->addrmatch_dev); + device_del(pvt->addrmatch_dev); +} + /**************************************************************************** Device initialization routines: put/get, init/exit ****************************************************************************/ @@ -1164,14 +1248,14 @@ static void i7core_put_devices(struct i7core_dev *i7core_dev) { int i; - debugf0(__FILE__ ": %s()\n", __func__); + edac_dbg(0, "\n"); for (i = 0; i < i7core_dev->n_devs; i++) { struct pci_dev *pdev = i7core_dev->pdev[i]; if (!pdev) continue; - debugf0("Removing dev %02x:%02x.%d\n", - pdev->bus->number, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + edac_dbg(0, "Removing dev %02x:%02x.%d\n", + pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); pci_dev_put(pdev); } } @@ -1214,12 +1298,12 @@ static unsigned i7core_pci_lastbus(void) while ((b = pci_find_next_bus(b)) != NULL) { bus = b->number; - debugf0("Found bus %d\n", bus); + edac_dbg(0, "Found bus %d\n", bus); if (bus > last_bus) last_bus = bus; } - debugf0("Last bus %d\n", last_bus); + edac_dbg(0, "Last bus %d\n", last_bus); return last_bus; } @@ -1326,10 +1410,10 @@ static int i7core_get_onedevice(struct pci_dev **prev, return -ENODEV; } - debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n", - socket, bus, dev_descr->dev, - dev_descr->func, - PCI_VENDOR_ID_INTEL, dev_descr->dev_id); + edac_dbg(0, "Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n", + socket, bus, dev_descr->dev, + dev_descr->func, + PCI_VENDOR_ID_INTEL, dev_descr->dev_id); /* * As stated on drivers/pci/search.c, the reference count for @@ -1427,13 +1511,13 @@ static int mci_bind_devs(struct mem_ctl_info *mci, family = "unknown"; pvt->enable_scrub = false; } - debugf0("Detected a processor type %s\n", family); + edac_dbg(0, "Detected a processor type %s\n", family); } else goto error; - debugf0("Associated fn %d.%d, dev = %p, 
socket %d\n", - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), - pdev, i7core_dev->socket); + edac_dbg(0, "Associated fn %d.%d, dev = %p, socket %d\n", + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), + pdev, i7core_dev->socket); if (PCI_SLOT(pdev->devfn) == 3 && PCI_FUNC(pdev->devfn) == 2) @@ -1452,18 +1536,6 @@ error: /**************************************************************************** Error check routines ****************************************************************************/ -static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci, - const int chan, - const int dimm, - const int add) -{ - int i; - - for (i = 0; i < add; i++) { - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, - chan, dimm, -1, "error", "", NULL); - } -} static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci, const int chan, @@ -1502,12 +1574,17 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci, /*updated the edac core */ if (add0 != 0) - i7core_rdimm_update_errcount(mci, chan, 0, add0); + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add0, + 0, 0, 0, + chan, 0, -1, "error", ""); if (add1 != 0) - i7core_rdimm_update_errcount(mci, chan, 1, add1); + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add1, + 0, 0, 0, + chan, 1, -1, "error", ""); if (add2 != 0) - i7core_rdimm_update_errcount(mci, chan, 2, add2); - + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, add2, + 0, 0, 0, + chan, 2, -1, "error", ""); } static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci) @@ -1530,8 +1607,8 @@ static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci) pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5, &rcv[2][1]); for (i = 0 ; i < 3; i++) { - debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n", - (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]); + edac_dbg(3, "MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n", + (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]); /*if the channel has 3 dimms*/ if (pvt->channel[i].dimms > 2) { new0 = DIMM_BOT_COR_ERR(rcv[i][0]); @@ -1562,7 +1639,7 @@ static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci) int new0, new1, new2; if (!pvt->pci_mcr[4]) { - debugf0("%s MCR registers not found\n", __func__); + edac_dbg(0, "MCR registers not found\n"); return; } @@ -1626,7 +1703,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, const struct mce *m) { struct i7core_pvt *pvt = mci->pvt_info; - char *type, *optype, *err, msg[80]; + char *type, *optype, *err; enum hw_event_mc_err_type tp_event; unsigned long error = m->status & 0x1ff0000l; bool uncorrected_error = m->mcgstatus & 1ll << 61; @@ -1704,20 +1781,18 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci, err = "unknown"; } - snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype); - /* * Call the helper to output message * FIXME: what to do if core_err_cnt > 1? 
Currently, it generates * only one event */ if (uncorrected_error || !pvt->is_registered) - edac_mc_handle_error(tp_event, mci, + edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, syndrome, channel, dimm, -1, - err, msg, m); + err, optype); } /* @@ -2094,8 +2169,7 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev) struct i7core_pvt *pvt; if (unlikely(!mci || !mci->pvt_info)) { - debugf0("MC: " __FILE__ ": %s(): dev = %p\n", - __func__, &i7core_dev->pdev[0]->dev); + edac_dbg(0, "MC: dev = %p\n", &i7core_dev->pdev[0]->dev); i7core_printk(KERN_ERR, "Couldn't find mci handler\n"); return; @@ -2103,8 +2177,7 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev) pvt = mci->pvt_info; - debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", - __func__, mci, &i7core_dev->pdev[0]->dev); + edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev); /* Disable scrubrate setting */ if (pvt->enable_scrub) @@ -2114,9 +2187,10 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev) i7core_pci_ctl_release(pvt); /* Remove MC sysfs nodes */ - edac_mc_del_mc(mci->dev); + i7core_delete_sysfs_devices(mci); + edac_mc_del_mc(mci->pdev); - debugf1("%s: free mci struct\n", mci->ctl_name); + edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); kfree(mci->ctl_name); edac_mc_free(mci); i7core_dev->mci = NULL; @@ -2142,8 +2216,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) if (unlikely(!mci)) return -ENOMEM; - debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", - __func__, mci, &i7core_dev->pdev[0]->dev); + edac_dbg(0, "MC: mci = %p, dev = %p\n", mci, &i7core_dev->pdev[0]->dev); pvt = mci->pvt_info; memset(pvt, 0, sizeof(*pvt)); @@ -2172,15 +2245,11 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) if (unlikely(rc < 0)) goto fail0; - if (pvt->is_registered) - mci->mc_driver_sysfs_attributes = i7core_sysfs_rdimm_attrs; - else - mci->mc_driver_sysfs_attributes = i7core_sysfs_udimm_attrs; /* Get dimm basic config */ get_dimm_config(mci); /* record ptr to the generic device */ - mci->dev = &i7core_dev->pdev[0]->dev; + mci->pdev = &i7core_dev->pdev[0]->dev; /* Set the function pointer to an actual operation function */ mci->edac_check = i7core_check_error; @@ -2190,8 +2259,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) /* add this new MC control structure to EDAC's list of MCs */ if (unlikely(edac_mc_add_mc(mci))) { - debugf0("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); /* FIXME: perhaps some code should go here that disables error * reporting if we just enabled it */ @@ -2199,6 +2267,12 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) rc = -EINVAL; goto fail0; } + if (i7core_create_sysfs_devices(mci)) { + edac_dbg(0, "MC: failed to create sysfs nodes\n"); + edac_mc_del_mc(mci->pdev); + rc = -EINVAL; + goto fail0; + } /* Default error mask is any memory */ pvt->inject.channel = 0; @@ -2298,7 +2372,7 @@ static void __devexit i7core_remove(struct pci_dev *pdev) { struct i7core_dev *i7core_dev; - debugf0(__FILE__ ": %s()\n", __func__); + edac_dbg(0, "\n"); /* * we have a trouble here: pdev value for removal will be wrong, since @@ -2347,7 +2421,7 @@ static int __init i7core_init(void) { int pci_rc; - debugf2("MC: " __FILE__ ": %s()\n", __func__); + edac_dbg(2, "\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -2374,7 +2448,7 @@ static int __init 
i7core_init(void) */ static void __exit i7core_exit(void) { - debugf2("MC: " __FILE__ ": %s()\n", __func__); + edac_dbg(2, "\n"); pci_unregister_driver(&i7core_driver); mce_unregister_decode_chain(&i7_mce_dec); } diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c index 52072c28a8a6..90f303db5d1d 100644 --- a/drivers/edac/i82443bxgx_edac.c +++ b/drivers/edac/i82443bxgx_edac.c @@ -124,7 +124,7 @@ static void i82443bxgx_edacmc_get_error_info(struct mem_ctl_info *mci, *info) { struct pci_dev *pdev; - pdev = to_pci_dev(mci->dev); + pdev = to_pci_dev(mci->pdev); pci_read_config_dword(pdev, I82443BXGX_EAP, &info->eap); if (info->eap & I82443BXGX_EAP_OFFSET_SBE) /* Clear error to allow next error to be reported [p.61] */ @@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci, if (info->eap & I82443BXGX_EAP_OFFSET_SBE) { error_found = 1; if (handle_errors) - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, pageoffset, 0, edac_mc_find_csrow_by_page(mci, page), - 0, -1, mci->ctl_name, "", NULL); + 0, -1, mci->ctl_name, ""); } if (info->eap & I82443BXGX_EAP_OFFSET_MBE) { error_found = 1; if (handle_errors) - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, page, pageoffset, 0, edac_mc_find_csrow_by_page(mci, page), - 0, -1, mci->ctl_name, "", NULL); + 0, -1, mci->ctl_name, ""); } return error_found; @@ -178,7 +178,7 @@ static void i82443bxgx_edacmc_check(struct mem_ctl_info *mci) { struct i82443bxgx_edacmc_error_info info; - debugf1("MC%d: %s: %s()\n", mci->mc_idx, __FILE__, __func__); + edac_dbg(1, "MC%d\n", mci->mc_idx); i82443bxgx_edacmc_get_error_info(mci, &info); i82443bxgx_edacmc_process_error_info(mci, &info, 1); } @@ -197,18 +197,17 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci, pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); row_high_limit_last = 0; for (index = 0; index < mci->nr_csrows; index++) { - csrow = &mci->csrows[index]; - dimm = csrow->channels[0].dimm; + csrow = mci->csrows[index]; + dimm = csrow->channels[0]->dimm; pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); - debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n", - mci->mc_idx, __FILE__, __func__, index, drbar); + edac_dbg(1, "MC%d: Row=%d DRB = %#0x\n", + mci->mc_idx, index, drbar); row_high_limit = ((u32) drbar << 23); /* find the DRAM Chip Select Base address and mask */ - debugf1("MC%d: %s: %s() Row=%d, " - "Boundary Address=%#0x, Last = %#0x\n", - mci->mc_idx, __FILE__, __func__, index, row_high_limit, - row_high_limit_last); + edac_dbg(1, "MC%d: Row=%d, Boundary Address=%#0x, Last = %#0x\n", + mci->mc_idx, index, row_high_limit, + row_high_limit_last); /* 440GX goes to 2GB, represented with a DRB of 0. */ if (row_high_limit_last && !row_high_limit) @@ -241,7 +240,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) enum mem_type mtype; enum edac_type edac_mode; - debugf0("MC: %s: %s()\n", __FILE__, __func__); + edac_dbg(0, "MC:\n"); /* Something is really hosed if PCI config space reads from * the MC aren't working. 
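For reference, the converted drivers in these hunks all move to the same reporting call: edac_mc_handle_error() now takes an explicit error count as its third argument, the free-form detail string becomes the last parameter, and the trailing mce pointer is gone. A minimal, illustrative sketch of a correctable-error report in that style (function name and csrow/channel values are placeholders, not taken from the patch):

/* Hedged sketch of the reporting convention used throughout these hunks:
 * (type, mci, error_count, page, offset, syndrome,
 *  top_layer, mid_layer, low_layer, msg, other_detail). */
#include "edac_core.h"	/* header used by the in-tree EDAC drivers */

static void report_one_ce(struct mem_ctl_info *mci, unsigned long page)
{
	int row = edac_mc_find_csrow_by_page(mci, page);	/* as in the i82443bxgx hunks */

	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,	/* one event */
			     page, 0, 0,			/* page, offset in page, syndrome */
			     row, 0, -1,			/* csrow, channel, no third layer */
			     "single-bit error", "");		/* msg, other_detail */
}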
@@ -259,8 +258,8 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) if (mci == NULL) return -ENOMEM; - debugf0("MC: %s: %s(): mci = %p\n", __FILE__, __func__, mci); - mci->dev = &pdev->dev; + edac_dbg(0, "MC: mci = %p\n", mci); + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_EDO | MEM_FLAG_SDR | MEM_FLAG_RDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; pci_read_config_byte(pdev, I82443BXGX_DRAMC, &dramc); @@ -275,8 +274,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) mtype = MEM_RDR; break; default: - debugf0("Unknown/reserved DRAM type value " - "in DRAMC register!\n"); + edac_dbg(0, "Unknown/reserved DRAM type value in DRAMC register!\n"); mtype = -MEM_UNKNOWN; } @@ -305,8 +303,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) edac_mode = EDAC_SECDED; break; default: - debugf0("%s(): Unknown/reserved ECC state " - "in NBXCFG register!\n", __func__); + edac_dbg(0, "Unknown/reserved ECC state in NBXCFG register!\n"); edac_mode = EDAC_UNKNOWN; break; } @@ -330,7 +327,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) mci->ctl_page_to_phys = NULL; if (edac_mc_add_mc(mci)) { - debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "failed edac_mc_add_mc()\n"); goto fail; } @@ -345,7 +342,7 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) __func__); } - debugf3("MC: %s: %s(): success\n", __FILE__, __func__); + edac_dbg(3, "MC: success\n"); return 0; fail: @@ -361,7 +358,7 @@ static int __devinit i82443bxgx_edacmc_init_one(struct pci_dev *pdev, { int rc; - debugf0("MC: %s: %s()\n", __FILE__, __func__); + edac_dbg(0, "MC:\n"); /* don't need to call pci_enable_device() */ rc = i82443bxgx_edacmc_probe1(pdev, ent->driver_data); @@ -376,7 +373,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0("%s: %s()\n", __FILE__, __func__); + edac_dbg(0, "\n"); if (i82443bxgx_pci) edac_pci_release_generic_ctl(i82443bxgx_pci); @@ -428,7 +425,7 @@ static int __init i82443bxgx_edacmc_init(void) id = &i82443bxgx_pci_tbl[i]; } if (!mci_pdev) { - debugf0("i82443bxgx pci_get_device fail\n"); + edac_dbg(0, "i82443bxgx pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } @@ -436,7 +433,7 @@ static int __init i82443bxgx_edacmc_init(void) pci_rc = i82443bxgx_edacmc_init_one(mci_pdev, i82443bxgx_pci_tbl); if (pci_rc < 0) { - debugf0("i82443bxgx init fail\n"); + edac_dbg(0, "i82443bxgx init fail\n"); pci_rc = -ENODEV; goto fail1; } diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c index 08045059d10b..1faa74971513 100644 --- a/drivers/edac/i82860_edac.c +++ b/drivers/edac/i82860_edac.c @@ -67,7 +67,7 @@ static void i82860_get_error_info(struct mem_ctl_info *mci, { struct pci_dev *pdev; - pdev = to_pci_dev(mci->dev); + pdev = to_pci_dev(mci->pdev); /* * This is a mess because there is no atomic way to read all the @@ -109,25 +109,25 @@ static int i82860_process_error_info(struct mem_ctl_info *mci, return 1; if ((info->errsts ^ info->errsts2) & 0x0003) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, - -1, -1, -1, "UE overwrote CE", "", NULL); + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, + -1, -1, -1, "UE overwrote CE", ""); info->errsts = info->errsts2; } info->eap >>= PAGE_SHIFT; row = edac_mc_find_csrow_by_page(mci, info->eap); - dimm = mci->csrows[row].channels[0].dimm; + dimm = mci->csrows[row]->channels[0]->dimm; if (info->errsts & 
0x0002) - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, info->eap, 0, 0, dimm->location[0], dimm->location[1], -1, - "i82860 UE", "", NULL); + "i82860 UE", ""); else - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, info->eap, 0, info->derrsyn, dimm->location[0], dimm->location[1], -1, - "i82860 CE", "", NULL); + "i82860 CE", ""); return 1; } @@ -136,7 +136,7 @@ static void i82860_check(struct mem_ctl_info *mci) { struct i82860_error_info info; - debugf1("MC%d: %s()\n", mci->mc_idx, __func__); + edac_dbg(1, "MC%d\n", mci->mc_idx); i82860_get_error_info(mci, &info); i82860_process_error_info(mci, &info, 1); } @@ -161,14 +161,13 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev) * in all eight rows. */ for (index = 0; index < mci->nr_csrows; index++) { - csrow = &mci->csrows[index]; - dimm = csrow->channels[0].dimm; + csrow = mci->csrows[index]; + dimm = csrow->channels[0]->dimm; pci_read_config_word(pdev, I82860_GBA + index * 2, &value); cumul_size = (value & I82860_GBA_MASK) << (I82860_GBA_SHIFT - PAGE_SHIFT); - debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, - cumul_size); + edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); if (cumul_size == last_cumul_size) continue; /* not populated */ @@ -210,8 +209,8 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) if (!mci) return -ENOMEM; - debugf3("%s(): init mci\n", __func__); - mci->dev = &pdev->dev; + edac_dbg(3, "init mci\n"); + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; /* I"m not sure about this but I think that all RDRAM is SECDED */ @@ -229,7 +228,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) * type of memory controller. The ID is therefore hardcoded to 0. 
*/ if (edac_mc_add_mc(mci)) { - debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "failed edac_mc_add_mc()\n"); goto fail; } @@ -245,7 +244,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) } /* get this far and it's successful */ - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); return 0; @@ -260,7 +259,7 @@ static int __devinit i82860_init_one(struct pci_dev *pdev, { int rc; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); i82860_printk(KERN_INFO, "i82860 init one\n"); if (pci_enable_device(pdev) < 0) @@ -278,7 +277,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); if (i82860_pci) edac_pci_release_generic_ctl(i82860_pci); @@ -311,7 +310,7 @@ static int __init i82860_init(void) { int pci_rc; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -324,7 +323,7 @@ static int __init i82860_init(void) PCI_DEVICE_ID_INTEL_82860_0, NULL); if (mci_pdev == NULL) { - debugf0("860 pci_get_device fail\n"); + edac_dbg(0, "860 pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } @@ -332,7 +331,7 @@ static int __init i82860_init(void) pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); if (pci_rc < 0) { - debugf0("860 init fail\n"); + edac_dbg(0, "860 init fail\n"); pci_rc = -ENODEV; goto fail1; } @@ -352,7 +351,7 @@ fail0: static void __exit i82860_exit(void) { - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); pci_unregister_driver(&i82860_driver); diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index b613e31c16e5..3e416b1a6b53 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c @@ -189,7 +189,7 @@ static void i82875p_get_error_info(struct mem_ctl_info *mci, { struct pci_dev *pdev; - pdev = to_pci_dev(mci->dev); + pdev = to_pci_dev(mci->pdev); /* * This is a mess because there is no atomic way to read all the @@ -227,7 +227,7 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci, { int row, multi_chan; - multi_chan = mci->csrows[0].nr_channels - 1; + multi_chan = mci->csrows[0]->nr_channels - 1; if (!(info->errsts & 0x0081)) return 0; @@ -236,9 +236,9 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci, return 1; if ((info->errsts ^ info->errsts2) & 0x0081) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, - "UE overwrote CE", "", NULL); + "UE overwrote CE", ""); info->errsts = info->errsts2; } @@ -246,15 +246,15 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci, row = edac_mc_find_csrow_by_page(mci, info->eap); if (info->errsts & 0x0080) - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, info->eap, 0, 0, row, -1, -1, - "i82875p UE", "", NULL); + "i82875p UE", ""); else - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, info->eap, 0, info->derrsyn, row, multi_chan ? 
(info->des & 0x1) : 0, - -1, "i82875p CE", "", NULL); + -1, "i82875p CE", ""); return 1; } @@ -263,7 +263,7 @@ static void i82875p_check(struct mem_ctl_info *mci) { struct i82875p_error_info info; - debugf1("MC%d: %s()\n", mci->mc_idx, __func__); + edac_dbg(1, "MC%d\n", mci->mc_idx); i82875p_get_error_info(mci, &info); i82875p_process_error_info(mci, &info, 1); } @@ -367,12 +367,11 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci, */ for (index = 0; index < mci->nr_csrows; index++) { - csrow = &mci->csrows[index]; + csrow = mci->csrows[index]; value = readb(ovrfl_window + I82875P_DRB + index); cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); - debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, - cumul_size); + edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); if (cumul_size == last_cumul_size) continue; /* not populated */ @@ -382,7 +381,7 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci, last_cumul_size = cumul_size; for (j = 0; j < nr_chans; j++) { - dimm = csrow->channels[j].dimm; + dimm = csrow->channels[j]->dimm; dimm->nr_pages = nr_pages / nr_chans; dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ @@ -405,7 +404,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) u32 nr_chans; struct i82875p_error_info discard; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); @@ -426,11 +425,8 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) goto fail0; } - /* Keeps mci available after edac_mc_del_mc() till edac_mc_free() */ - kobject_get(&mci->edac_mci_kobj); - - debugf3("%s(): init mci\n", __func__); - mci->dev = &pdev->dev; + edac_dbg(3, "init mci\n"); + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_UNKNOWN; @@ -440,7 +436,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) mci->dev_name = pci_name(pdev); mci->edac_check = i82875p_check; mci->ctl_page_to_phys = NULL; - debugf3("%s(): init pvt\n", __func__); + edac_dbg(3, "init pvt\n"); pvt = (struct i82875p_pvt *)mci->pvt_info; pvt->ovrfl_pdev = ovrfl_pdev; pvt->ovrfl_window = ovrfl_window; @@ -451,7 +447,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) * type of memory controller. The ID is therefore hardcoded to 0. 
*/ if (edac_mc_add_mc(mci)) { - debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "failed edac_mc_add_mc()\n"); goto fail1; } @@ -467,11 +463,10 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) } /* get this far and it's successful */ - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); return 0; fail1: - kobject_put(&mci->edac_mci_kobj); edac_mc_free(mci); fail0: @@ -489,7 +484,7 @@ static int __devinit i82875p_init_one(struct pci_dev *pdev, { int rc; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); i82875p_printk(KERN_INFO, "i82875p init one\n"); if (pci_enable_device(pdev) < 0) @@ -508,7 +503,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev) struct mem_ctl_info *mci; struct i82875p_pvt *pvt = NULL; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); if (i82875p_pci) edac_pci_release_generic_ctl(i82875p_pci); @@ -554,7 +549,7 @@ static int __init i82875p_init(void) { int pci_rc; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -569,7 +564,7 @@ static int __init i82875p_init(void) PCI_DEVICE_ID_INTEL_82875_0, NULL); if (!mci_pdev) { - debugf0("875p pci_get_device fail\n"); + edac_dbg(0, "875p pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } @@ -577,7 +572,7 @@ static int __init i82875p_init(void) pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); if (pci_rc < 0) { - debugf0("875p init fail\n"); + edac_dbg(0, "875p init fail\n"); pci_rc = -ENODEV; goto fail1; } @@ -597,7 +592,7 @@ fail0: static void __exit i82875p_exit(void) { - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); i82875p_remove_one(mci_pdev); pci_dev_put(mci_pdev); diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c index 433332c7cdba..069e26c11c4f 100644 --- a/drivers/edac/i82975x_edac.c +++ b/drivers/edac/i82975x_edac.c @@ -241,7 +241,7 @@ static void i82975x_get_error_info(struct mem_ctl_info *mci, { struct pci_dev *pdev; - pdev = to_pci_dev(mci->dev); + pdev = to_pci_dev(mci->pdev); /* * This is a mess because there is no atomic way to read all the @@ -288,8 +288,8 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci, return 1; if ((info->errsts ^ info->errsts2) & 0x0003) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, - -1, -1, -1, "UE overwrote CE", "", NULL); + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, + -1, -1, -1, "UE overwrote CE", ""); info->errsts = info->errsts2; } @@ -308,21 +308,21 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci, (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page); return 0; } - chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1; + chan = (mci->csrows[row]->nr_channels == 1) ? 0 : info->eap & 1; offst = info->eap & ((1 << PAGE_SHIFT) - - (1 << mci->csrows[row].channels[chan].dimm->grain)); + (1 << mci->csrows[row]->channels[chan]->dimm->grain)); if (info->errsts & 0x0002) - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, page, offst, 0, row, -1, -1, - "i82975x UE", "", NULL); + "i82975x UE", ""); else - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, offst, info->derrsyn, row, chan ? 
chan : 0, -1, - "i82975x CE", "", NULL); + "i82975x CE", ""); return 1; } @@ -331,7 +331,7 @@ static void i82975x_check(struct mem_ctl_info *mci) { struct i82975x_error_info info; - debugf1("MC%d: %s()\n", mci->mc_idx, __func__); + edac_dbg(1, "MC%d\n", mci->mc_idx); i82975x_get_error_info(mci, &info); i82975x_process_error_info(mci, &info, 1); } @@ -394,7 +394,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, */ for (index = 0; index < mci->nr_csrows; index++) { - csrow = &mci->csrows[index]; + csrow = mci->csrows[index]; value = readb(mch_window + I82975X_DRB + index + ((index >= 4) ? 0x80 : 0)); @@ -406,8 +406,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, */ if (csrow->nr_channels > 1) cumul_size <<= 1; - debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, - cumul_size); + edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size); nr_pages = cumul_size - last_cumul_size; if (!nr_pages) @@ -421,10 +420,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, */ dtype = i82975x_dram_type(mch_window, index); for (chan = 0; chan < csrow->nr_channels; chan++) { - dimm = mci->csrows[index].channels[chan].dimm; + dimm = mci->csrows[index]->channels[chan]->dimm; dimm->nr_pages = nr_pages / csrow->nr_channels; - strncpy(csrow->channels[chan].dimm->label, + strncpy(csrow->channels[chan]->dimm->label, labels[(index >> 1) + (chan * 2)], EDAC_MC_LABEL_LEN); dimm->grain = 1 << 7; /* 128Byte cache-line resolution */ @@ -489,11 +488,11 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) u8 c1drb[4]; #endif - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); pci_read_config_dword(pdev, I82975X_MCHBAR, &mchbar); if (!(mchbar & 1)) { - debugf3("%s(): failed, MCHBAR disabled!\n", __func__); + edac_dbg(3, "failed, MCHBAR disabled!\n"); goto fail0; } mchbar &= 0xffffc000; /* bits 31:14 used for 16K window */ @@ -558,8 +557,8 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) goto fail1; } - debugf3("%s(): init mci\n", __func__); - mci->dev = &pdev->dev; + edac_dbg(3, "init mci\n"); + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; @@ -569,7 +568,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) mci->dev_name = pci_name(pdev); mci->edac_check = i82975x_check; mci->ctl_page_to_phys = NULL; - debugf3("%s(): init pvt\n", __func__); + edac_dbg(3, "init pvt\n"); pvt = (struct i82975x_pvt *) mci->pvt_info; pvt->mch_window = mch_window; i82975x_init_csrows(mci, pdev, mch_window); @@ -578,12 +577,12 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) /* finalize this instance of memory controller with edac core */ if (edac_mc_add_mc(mci)) { - debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "failed edac_mc_add_mc()\n"); goto fail2; } /* get this far and it's successful */ - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); return 0; fail2: @@ -601,7 +600,7 @@ static int __devinit i82975x_init_one(struct pci_dev *pdev, { int rc; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); if (pci_enable_device(pdev) < 0) return -EIO; @@ -619,7 +618,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev) struct mem_ctl_info *mci; struct i82975x_pvt *pvt; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); mci = edac_mc_del_mc(&pdev->dev); if (mci == NULL) @@ -655,7 +654,7 @@ static int __init i82975x_init(void) { int pci_rc; - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); 
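The i82860/i82875p/i82975x hunks above also reflect the core data-structure change: mci->csrows[] and csrow->channels[] are now arrays of pointers, and the generic device pointer moved from mci->dev to mci->pdev. A small illustrative walk over that layout (the function itself is hypothetical, only the field accesses mirror the patch):

/* Illustrative only: totals the pages behind each DIMM using the
 * pointer-based csrow/channel layout these drivers now dereference. */
static unsigned long count_dimm_pages(struct mem_ctl_info *mci)
{
	unsigned long total = 0;
	int row, chan;

	for (row = 0; row < mci->nr_csrows; row++) {
		struct csrow_info *csrow = mci->csrows[row];	/* pointer, not &array[row] */

		for (chan = 0; chan < csrow->nr_channels; chan++)
			total += csrow->channels[chan]->dimm->nr_pages;
	}

	edac_dbg(2, "%s: %lu pages\n", dev_name(mci->pdev), total);
	return total;
}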
/* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -669,7 +668,7 @@ static int __init i82975x_init(void) PCI_DEVICE_ID_INTEL_82975_0, NULL); if (!mci_pdev) { - debugf0("i82975x pci_get_device fail\n"); + edac_dbg(0, "i82975x pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } @@ -677,7 +676,7 @@ static int __init i82975x_init(void) pci_rc = i82975x_init_one(mci_pdev, i82975x_pci_tbl); if (pci_rc < 0) { - debugf0("i82975x init fail\n"); + edac_dbg(0, "i82975x init fail\n"); pci_rc = -ENODEV; goto fail1; } @@ -697,7 +696,7 @@ fail0: static void __exit i82975x_exit(void) { - debugf3("%s()\n", __func__); + edac_dbg(3, "\n"); pci_unregister_driver(&i82975x_driver); diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 0e374625f6f8..a1e791ec25d3 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -49,34 +49,45 @@ static u32 orig_hid1[2]; /************************ MC SYSFS parts ***********************************/ -static ssize_t mpc85xx_mc_inject_data_hi_show(struct mem_ctl_info *mci, +#define to_mci(k) container_of(k, struct mem_ctl_info, dev) + +static ssize_t mpc85xx_mc_inject_data_hi_show(struct device *dev, + struct device_attribute *mattr, char *data) { + struct mem_ctl_info *mci = to_mci(dev); struct mpc85xx_mc_pdata *pdata = mci->pvt_info; return sprintf(data, "0x%08x", in_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI)); } -static ssize_t mpc85xx_mc_inject_data_lo_show(struct mem_ctl_info *mci, +static ssize_t mpc85xx_mc_inject_data_lo_show(struct device *dev, + struct device_attribute *mattr, char *data) { + struct mem_ctl_info *mci = to_mci(dev); struct mpc85xx_mc_pdata *pdata = mci->pvt_info; return sprintf(data, "0x%08x", in_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO)); } -static ssize_t mpc85xx_mc_inject_ctrl_show(struct mem_ctl_info *mci, char *data) +static ssize_t mpc85xx_mc_inject_ctrl_show(struct device *dev, + struct device_attribute *mattr, + char *data) { + struct mem_ctl_info *mci = to_mci(dev); struct mpc85xx_mc_pdata *pdata = mci->pvt_info; return sprintf(data, "0x%08x", in_be32(pdata->mc_vbase + MPC85XX_MC_ECC_ERR_INJECT)); } -static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci, +static ssize_t mpc85xx_mc_inject_data_hi_store(struct device *dev, + struct device_attribute *mattr, const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); struct mpc85xx_mc_pdata *pdata = mci->pvt_info; if (isdigit(*data)) { out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_HI, @@ -86,9 +97,11 @@ static ssize_t mpc85xx_mc_inject_data_hi_store(struct mem_ctl_info *mci, return 0; } -static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci, +static ssize_t mpc85xx_mc_inject_data_lo_store(struct device *dev, + struct device_attribute *mattr, const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); struct mpc85xx_mc_pdata *pdata = mci->pvt_info; if (isdigit(*data)) { out_be32(pdata->mc_vbase + MPC85XX_MC_DATA_ERR_INJECT_LO, @@ -98,9 +111,11 @@ static ssize_t mpc85xx_mc_inject_data_lo_store(struct mem_ctl_info *mci, return 0; } -static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci, - const char *data, size_t count) +static ssize_t mpc85xx_mc_inject_ctrl_store(struct device *dev, + struct device_attribute *mattr, + const char *data, size_t count) { + struct mem_ctl_info *mci = to_mci(dev); struct mpc85xx_mc_pdata *pdata = mci->pvt_info; if (isdigit(*data)) { out_be32(pdata->mc_vbase + 
MPC85XX_MC_ECC_ERR_INJECT, @@ -110,38 +125,35 @@ static ssize_t mpc85xx_mc_inject_ctrl_store(struct mem_ctl_info *mci, return 0; } -static struct mcidev_sysfs_attribute mpc85xx_mc_sysfs_attributes[] = { - { - .attr = { - .name = "inject_data_hi", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = mpc85xx_mc_inject_data_hi_show, - .store = mpc85xx_mc_inject_data_hi_store}, - { - .attr = { - .name = "inject_data_lo", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = mpc85xx_mc_inject_data_lo_show, - .store = mpc85xx_mc_inject_data_lo_store}, - { - .attr = { - .name = "inject_ctrl", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = mpc85xx_mc_inject_ctrl_show, - .store = mpc85xx_mc_inject_ctrl_store}, +DEVICE_ATTR(inject_data_hi, S_IRUGO | S_IWUSR, + mpc85xx_mc_inject_data_hi_show, mpc85xx_mc_inject_data_hi_store); +DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR, + mpc85xx_mc_inject_data_lo_show, mpc85xx_mc_inject_data_lo_store); +DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR, + mpc85xx_mc_inject_ctrl_show, mpc85xx_mc_inject_ctrl_store); - /* End of list */ - { - .attr = {.name = NULL} - } -}; +static int mpc85xx_create_sysfs_attributes(struct mem_ctl_info *mci) +{ + int rc; + + rc = device_create_file(&mci->dev, &dev_attr_inject_data_hi); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_inject_data_lo); + if (rc < 0) + return rc; + rc = device_create_file(&mci->dev, &dev_attr_inject_ctrl); + if (rc < 0) + return rc; -static void mpc85xx_set_mc_sysfs_attributes(struct mem_ctl_info *mci) + return 0; +} + +static void mpc85xx_remove_sysfs_attributes(struct mem_ctl_info *mci) { - mci->mc_driver_sysfs_attributes = mpc85xx_mc_sysfs_attributes; + device_remove_file(&mci->dev, &dev_attr_inject_data_hi); + device_remove_file(&mci->dev, &dev_attr_inject_data_lo); + device_remove_file(&mci->dev, &dev_attr_inject_ctrl); } /**************************** PCI Err device ***************************/ @@ -268,7 +280,7 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *op) out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_DR, ~0); if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { - debugf3("%s(): failed edac_pci_add_device()\n", __func__); + edac_dbg(3, "failed edac_pci_add_device()\n"); goto err; } @@ -291,7 +303,7 @@ static int __devinit mpc85xx_pci_err_probe(struct platform_device *op) } devres_remove_group(&op->dev, mpc85xx_pci_err_probe); - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); printk(KERN_INFO EDAC_MOD_STR " PCI err registered\n"); return 0; @@ -309,7 +321,7 @@ static int mpc85xx_pci_err_remove(struct platform_device *op) struct edac_pci_ctl_info *pci = dev_get_drvdata(&op->dev); struct mpc85xx_pci_pdata *pdata = pci->pvt_info; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); out_be32(pdata->pci_vbase + MPC85XX_PCI_ERR_CAP_DR, orig_pci_err_cap_dr); @@ -570,7 +582,7 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op) pdata->edac_idx = edac_dev_idx++; if (edac_device_add_device(edac_dev) > 0) { - debugf3("%s(): failed edac_device_add_device()\n", __func__); + edac_dbg(3, "failed edac_device_add_device()\n"); goto err; } @@ -598,7 +610,7 @@ static int __devinit mpc85xx_l2_err_probe(struct platform_device *op) devres_remove_group(&op->dev, mpc85xx_l2_err_probe); - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); printk(KERN_INFO EDAC_MOD_STR " L2 err registered\n"); return 0; @@ -616,7 +628,7 @@ static int mpc85xx_l2_err_remove(struct platform_device *op) struct edac_device_ctl_info *edac_dev = dev_get_drvdata(&op->dev); 
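The mpc85xx hunks above drop the EDAC-private struct mcidev_sysfs_attribute table in favour of ordinary device attributes: each show/store callback now receives the struct device embedded in struct mem_ctl_info, recovers the controller with container_of(), and the files are created with device_create_file() once the controller is registered and removed again on the way out. A stripped-down sketch of the same pattern for one hypothetical attribute (the inject_demo name and the demo_*_sysfs_attributes() helpers are illustrative, not from the patch):

    #include <linux/device.h>
    #include <linux/edac.h>

    #define to_mci(k) container_of(k, struct mem_ctl_info, dev)

    static ssize_t inject_demo_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
    {
            struct mem_ctl_info *mci = to_mci(dev);  /* dev is &mci->dev */

            return sprintf(buf, "%s\n", mci->ctl_name);
    }
    static DEVICE_ATTR(inject_demo, S_IRUGO, inject_demo_show, NULL);

    /* Create the file after edac_mc_add_mc() has registered &mci->dev. */
    static int demo_create_sysfs_attributes(struct mem_ctl_info *mci)
    {
            return device_create_file(&mci->dev, &dev_attr_inject_demo);
    }

    /* Mirror image for the remove path, before edac_mc_del_mc(). */
    static void demo_remove_sysfs_attributes(struct mem_ctl_info *mci)
    {
            device_remove_file(&mci->dev, &dev_attr_inject_demo);
    }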
struct mpc85xx_l2_pdata *pdata = edac_dev->pvt_info; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); if (edac_op_state == EDAC_OPSTATE_INT) { out_be32(pdata->l2_vbase + MPC85XX_L2_ERRINTEN, 0); @@ -813,7 +825,7 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci) pfn = err_addr >> PAGE_SHIFT; for (row_index = 0; row_index < mci->nr_csrows; row_index++) { - csrow = &mci->csrows[row_index]; + csrow = mci->csrows[row_index]; if ((pfn >= csrow->first_page) && (pfn <= csrow->last_page)) break; } @@ -854,16 +866,16 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci) mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n"); if (err_detect & DDR_EDE_SBE) - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, pfn, err_addr & ~PAGE_MASK, syndrome, row_index, 0, -1, - mci->ctl_name, "", NULL); + mci->ctl_name, ""); if (err_detect & DDR_EDE_MBE) - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, pfn, err_addr & ~PAGE_MASK, syndrome, row_index, 0, -1, - mci->ctl_name, "", NULL); + mci->ctl_name, ""); out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); } @@ -933,8 +945,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci) u32 start; u32 end; - csrow = &mci->csrows[index]; - dimm = csrow->channels[0].dimm; + csrow = mci->csrows[index]; + dimm = csrow->channels[0]->dimm; cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 + (index * MPC85XX_MC_CS_BNDS_OFS)); @@ -990,9 +1002,9 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op) pdata = mci->pvt_info; pdata->name = "mpc85xx_mc_err"; pdata->irq = NO_IRQ; - mci->dev = &op->dev; + mci->pdev = &op->dev; pdata->edac_idx = edac_mc_idx++; - dev_set_drvdata(mci->dev, mci); + dev_set_drvdata(mci->pdev, mci); mci->ctl_name = pdata->name; mci->dev_name = pdata->name; @@ -1026,7 +1038,7 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op) goto err; } - debugf3("%s(): init mci\n", __func__); + edac_dbg(3, "init mci\n"); mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_RDDR2 | MEM_FLAG_DDR | MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; @@ -1041,8 +1053,6 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op) mci->scrub_mode = SCRUB_SW_SRC; - mpc85xx_set_mc_sysfs_attributes(mci); - mpc85xx_init_csrows(mci); /* store the original error disable bits */ @@ -1054,7 +1064,13 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op) out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, ~0); if (edac_mc_add_mc(mci)) { - debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "failed edac_mc_add_mc()\n"); + goto err; + } + + if (mpc85xx_create_sysfs_attributes(mci)) { + edac_mc_del_mc(mci->pdev); + edac_dbg(3, "failed edac_mc_add_mc()\n"); goto err; } @@ -1088,7 +1104,7 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op) } devres_remove_group(&op->dev, mpc85xx_mc_err_probe); - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); printk(KERN_INFO EDAC_MOD_STR " MC err registered\n"); return 0; @@ -1106,7 +1122,7 @@ static int mpc85xx_mc_err_remove(struct platform_device *op) struct mem_ctl_info *mci = dev_get_drvdata(&op->dev); struct mpc85xx_mc_pdata *pdata = mci->pvt_info; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); if (edac_op_state == EDAC_OPSTATE_INT) { out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_INT_EN, 0); @@ -1117,6 +1133,7 @@ static int mpc85xx_mc_err_remove(struct 
platform_device *op) orig_ddr_err_disable); out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_SBE, orig_ddr_err_sbe); + mpc85xx_remove_sysfs_attributes(mci); edac_mc_del_mc(&op->dev); edac_mc_free(mci); return 0; diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c index b0bb5a3d2527..2b315c2edc3c 100644 --- a/drivers/edac/mv64x60_edac.c +++ b/drivers/edac/mv64x60_edac.c @@ -169,7 +169,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev) MV64X60_PCIx_ERR_MASK_VAL); if (edac_pci_add_device(pci, pdata->edac_idx) > 0) { - debugf3("%s(): failed edac_pci_add_device()\n", __func__); + edac_dbg(3, "failed edac_pci_add_device()\n"); goto err; } @@ -194,7 +194,7 @@ static int __devinit mv64x60_pci_err_probe(struct platform_device *pdev) devres_remove_group(&pdev->dev, mv64x60_pci_err_probe); /* get this far and it's successful */ - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); return 0; @@ -210,7 +210,7 @@ static int mv64x60_pci_err_remove(struct platform_device *pdev) { struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev); - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); edac_pci_del_device(&pdev->dev); @@ -336,7 +336,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev) pdata->edac_idx = edac_dev_idx++; if (edac_device_add_device(edac_dev) > 0) { - debugf3("%s(): failed edac_device_add_device()\n", __func__); + edac_dbg(3, "failed edac_device_add_device()\n"); goto err; } @@ -363,7 +363,7 @@ static int __devinit mv64x60_sram_err_probe(struct platform_device *pdev) devres_remove_group(&pdev->dev, mv64x60_sram_err_probe); /* get this far and it's successful */ - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); return 0; @@ -379,7 +379,7 @@ static int mv64x60_sram_err_remove(struct platform_device *pdev) { struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); edac_device_del_device(&pdev->dev); edac_device_free_ctl_info(edac_dev); @@ -531,7 +531,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev) pdata->edac_idx = edac_dev_idx++; if (edac_device_add_device(edac_dev) > 0) { - debugf3("%s(): failed edac_device_add_device()\n", __func__); + edac_dbg(3, "failed edac_device_add_device()\n"); goto err; } @@ -558,7 +558,7 @@ static int __devinit mv64x60_cpu_err_probe(struct platform_device *pdev) devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe); /* get this far and it's successful */ - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); return 0; @@ -574,7 +574,7 @@ static int mv64x60_cpu_err_remove(struct platform_device *pdev) { struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev); - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); edac_device_del_device(&pdev->dev); edac_device_free_ctl_info(edac_dev); @@ -611,17 +611,17 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci) /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */ if (!(reg & 0x1)) - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, err_addr >> PAGE_SHIFT, err_addr & PAGE_MASK, syndrome, 0, 0, -1, - mci->ctl_name, "", NULL); + mci->ctl_name, ""); else /* 2 bit error, UE */ - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, err_addr >> PAGE_SHIFT, err_addr & PAGE_MASK, 0, 0, 0, -1, - mci->ctl_name, "", NULL); + mci->ctl_name, ""); /* clear the error */ out_le32(pdata->mc_vbase + 
MV64X60_SDRAM_ERR_ADDR, 0); @@ -670,8 +670,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci, ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); - csrow = &mci->csrows[0]; - dimm = csrow->channels[0].dimm; + csrow = mci->csrows[0]; + dimm = csrow->channels[0]->dimm; dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT; dimm->grain = 8; @@ -724,7 +724,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev) } pdata = mci->pvt_info; - mci->dev = &pdev->dev; + mci->pdev = &pdev->dev; platform_set_drvdata(pdev, mci); pdata->name = "mv64x60_mc_err"; pdata->irq = NO_IRQ; @@ -766,7 +766,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev) goto err2; } - debugf3("%s(): init mci\n", __func__); + edac_dbg(3, "init mci\n"); mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; @@ -790,7 +790,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev) out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL, ctl); if (edac_mc_add_mc(mci)) { - debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "failed edac_mc_add_mc()\n"); goto err; } @@ -815,7 +815,7 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev) } /* get this far and it's successful */ - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); return 0; @@ -831,7 +831,7 @@ static int mv64x60_mc_err_remove(struct platform_device *pdev) { struct mem_ctl_info *mci = platform_get_drvdata(pdev); - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); edac_mc_del_mc(&pdev->dev); edac_mc_free(mci); diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c index b095a906a994..2d35b78ada3c 100644 --- a/drivers/edac/pasemi_edac.c +++ b/drivers/edac/pasemi_edac.c @@ -74,7 +74,7 @@ static int system_mmc_id; static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci) { - struct pci_dev *pdev = to_pci_dev(mci->dev); + struct pci_dev *pdev = to_pci_dev(mci->pdev); u32 tmp; pci_read_config_dword(pdev, MCDEBUG_ERRSTA, @@ -95,7 +95,7 @@ static u32 pasemi_edac_get_error_info(struct mem_ctl_info *mci) static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta) { - struct pci_dev *pdev = to_pci_dev(mci->dev); + struct pci_dev *pdev = to_pci_dev(mci->pdev); u32 errlog1a; u32 cs; @@ -110,16 +110,16 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta) /* uncorrectable/multi-bit errors */ if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS | MCDEBUG_ERRSTA_RFL_STATUS)) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, - mci->csrows[cs].first_page, 0, 0, - cs, 0, -1, mci->ctl_name, "", NULL); + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, + mci->csrows[cs]->first_page, 0, 0, + cs, 0, -1, mci->ctl_name, ""); } /* correctable/single-bit errors */ if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, - mci->csrows[cs].first_page, 0, 0, - cs, 0, -1, mci->ctl_name, "", NULL); + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, + mci->csrows[cs]->first_page, 0, 0, + cs, 0, -1, mci->ctl_name, ""); } static void pasemi_edac_check(struct mem_ctl_info *mci) @@ -141,8 +141,8 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci, int index; for (index = 0; index < mci->nr_csrows; index++) { - csrow = &mci->csrows[index]; - dimm = csrow->channels[0].dimm; + csrow = mci->csrows[index]; + dimm = csrow->channels[0]->dimm; pci_read_config_dword(pdev, MCDRAM_RANKCFG + (index * 
12), @@ -225,7 +225,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev, MCCFG_ERRCOR_ECC_GEN_EN | MCCFG_ERRCOR_ECC_CRR_EN; - mci->dev = &pdev->dev; + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR | MEM_FLAG_RDDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; mci->edac_cap = (errcor & MCCFG_ERRCOR_ECC_GEN_EN) ? diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index f3f9fed06ad7..bf0957635991 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c @@ -727,10 +727,10 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci, for (row = 0; row < mci->nr_csrows; row++) if (ppc4xx_edac_check_bank_error(status, row)) - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, row, 0, -1, - message, "", NULL); + message, ""); } /** @@ -758,10 +758,10 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci, for (row = 0; row < mci->nr_csrows; row++) if (ppc4xx_edac_check_bank_error(status, row)) - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, page, offset, 0, row, 0, -1, - message, "", NULL); + message, ""); } /** @@ -1027,9 +1027,9 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci, /* Initial driver pointers and private data */ - mci->dev = &op->dev; + mci->pdev = &op->dev; - dev_set_drvdata(mci->dev, mci); + dev_set_drvdata(mci->pdev, mci); pdata = mci->pvt_info; @@ -1334,7 +1334,7 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op) return 0; fail1: - edac_mc_del_mc(mci->dev); + edac_mc_del_mc(mci->pdev); fail: edac_mc_free(mci); @@ -1368,7 +1368,7 @@ ppc4xx_edac_remove(struct platform_device *op) dcr_unmap(pdata->dcr_host, SDRAM_DCR_RESOURCE_LEN); - edac_mc_del_mc(mci->dev); + edac_mc_del_mc(mci->pdev); edac_mc_free(mci); return 0; diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c index e1cacd164f31..f854debd5533 100644 --- a/drivers/edac/r82600_edac.c +++ b/drivers/edac/r82600_edac.c @@ -140,7 +140,7 @@ static void r82600_get_error_info(struct mem_ctl_info *mci, { struct pci_dev *pdev; - pdev = to_pci_dev(mci->dev); + pdev = to_pci_dev(mci->pdev); pci_read_config_dword(pdev, R82600_EAP, &info->eapr); if (info->eapr & BIT(0)) @@ -179,11 +179,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci, error_found = 1; if (handle_errors) - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, page, 0, syndrome, edac_mc_find_csrow_by_page(mci, page), 0, -1, - mci->ctl_name, "", NULL); + mci->ctl_name, ""); } if (info->eapr & BIT(1)) { /* UE? 
*/ @@ -191,11 +191,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci, if (handle_errors) /* 82600 doesn't give enough info */ - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, page, 0, 0, edac_mc_find_csrow_by_page(mci, page), 0, -1, - mci->ctl_name, "", NULL); + mci->ctl_name, ""); } return error_found; @@ -205,7 +205,7 @@ static void r82600_check(struct mem_ctl_info *mci) { struct r82600_error_info info; - debugf1("MC%d: %s()\n", mci->mc_idx, __func__); + edac_dbg(1, "MC%d\n", mci->mc_idx); r82600_get_error_info(mci, &info); r82600_process_error_info(mci, &info, 1); } @@ -230,19 +230,19 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, row_high_limit_last = 0; for (index = 0; index < mci->nr_csrows; index++) { - csrow = &mci->csrows[index]; - dimm = csrow->channels[0].dimm; + csrow = mci->csrows[index]; + dimm = csrow->channels[0]->dimm; /* find the DRAM Chip Select Base address and mask */ pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); - debugf1("%s() Row=%d DRBA = %#0x\n", __func__, index, drbar); + edac_dbg(1, "Row=%d DRBA = %#0x\n", index, drbar); row_high_limit = ((u32) drbar << 24); /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ - debugf1("%s() Row=%d, Boundary Address=%#0x, Last = %#0x\n", - __func__, index, row_high_limit, row_high_limit_last); + edac_dbg(1, "Row=%d, Boundary Address=%#0x, Last = %#0x\n", + index, row_high_limit, row_high_limit_last); /* Empty row [p.57] */ if (row_high_limit == row_high_limit_last) @@ -277,14 +277,13 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) u32 sdram_refresh_rate; struct r82600_error_info discard; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); pci_read_config_dword(pdev, R82600_EAP, &eapr); scrub_disabled = eapr & BIT(31); sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); - debugf2("%s(): sdram refresh rate = %#0x\n", __func__, - sdram_refresh_rate); - debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); + edac_dbg(2, "sdram refresh rate = %#0x\n", sdram_refresh_rate); + edac_dbg(2, "DRAMC register = %#0x\n", dramcr); layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; layers[0].size = R82600_NR_CSROWS; layers[0].is_virt_csrow = true; @@ -295,8 +294,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) if (mci == NULL) return -ENOMEM; - debugf0("%s(): mci = %p\n", __func__, mci); - mci->dev = &pdev->dev; + edac_dbg(0, "mci = %p\n", mci); + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; /* FIXME try to work out if the chip leads have been used for COM2 @@ -311,8 +310,8 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) if (ecc_enabled(dramcr)) { if (scrub_disabled) - debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " - "%#0x\n", __func__, mci, eapr); + edac_dbg(3, "mci = %p - Scrubbing disabled! EAP: %#0x\n", + mci, eapr); } else mci->edac_cap = EDAC_FLAG_NONE; @@ -329,15 +328,14 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) * type of memory controller. The ID is therefore hardcoded to 0. 
*/ if (edac_mc_add_mc(mci)) { - debugf3("%s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "failed edac_mc_add_mc()\n"); goto fail; } /* get this far and it's successful */ if (disable_hardware_scrub) { - debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n", - __func__); + edac_dbg(3, "Disabling Hardware Scrub (scrub on error)\n"); pci_write_bits32(pdev, R82600_EAP, BIT(31), BIT(31)); } @@ -352,7 +350,7 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) __func__); } - debugf3("%s(): success\n", __func__); + edac_dbg(3, "success\n"); return 0; fail: @@ -364,7 +362,7 @@ fail: static int __devinit r82600_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); /* don't need to call pci_enable_device() */ return r82600_probe1(pdev, ent->driver_data); @@ -374,7 +372,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); if (r82600_pci) edac_pci_release_generic_ctl(r82600_pci); diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 36ad17e79d61..f3b1f9fafa4b 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -381,8 +381,8 @@ static inline int numrank(u32 mtr) int ranks = (1 << RANK_CNT_BITS(mtr)); if (ranks > 4) { - debugf0("Invalid number of ranks: %d (max = 4) raw value = %x (%04x)", - ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr); + edac_dbg(0, "Invalid number of ranks: %d (max = 4) raw value = %x (%04x)\n", + ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr); return -EINVAL; } @@ -394,8 +394,8 @@ static inline int numrow(u32 mtr) int rows = (RANK_WIDTH_BITS(mtr) + 12); if (rows < 13 || rows > 18) { - debugf0("Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)", - rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr); + edac_dbg(0, "Invalid number of rows: %d (should be between 14 and 17) raw value = %x (%04x)\n", + rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr); return -EINVAL; } @@ -407,8 +407,8 @@ static inline int numcol(u32 mtr) int cols = (COL_WIDTH_BITS(mtr) + 10); if (cols > 12) { - debugf0("Invalid number of cols: %d (max = 4) raw value = %x (%04x)", - cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr); + edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n", + cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr); return -EINVAL; } @@ -475,8 +475,8 @@ static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot, if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot && PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) { - debugf1("Associated %02x.%02x.%d with %p\n", - bus, slot, func, sbridge_dev->pdev[i]); + edac_dbg(1, "Associated %02x.%02x.%d with %p\n", + bus, slot, func, sbridge_dev->pdev[i]); return sbridge_dev->pdev[i]; } } @@ -523,45 +523,45 @@ static int get_dimm_config(struct mem_ctl_info *mci) pci_read_config_dword(pvt->pci_br, SAD_CONTROL, ®); pvt->sbridge_dev->node_id = NODE_ID(reg); - debugf0("mc#%d: Node ID: %d, source ID: %d\n", - pvt->sbridge_dev->mc, - pvt->sbridge_dev->node_id, - pvt->sbridge_dev->source_id); + edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n", + pvt->sbridge_dev->mc, + pvt->sbridge_dev->node_id, + pvt->sbridge_dev->source_id); pci_read_config_dword(pvt->pci_ras, RASENABLES, ®); if (IS_MIRROR_ENABLED(reg)) { - debugf0("Memory mirror is enabled\n"); + edac_dbg(0, "Memory mirror is enabled\n"); pvt->is_mirrored = true; } else { - debugf0("Memory mirror is disabled\n"); + edac_dbg(0, "Memory mirror is disabled\n"); 
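Another pattern that repeats across the drivers above (i82975x, mpc85xx, mv64x60, pasemi, r82600) is the change from &mci->csrows[i] and csrow->channels[j].dimm to mci->csrows[i] and csrow->channels[j]->dimm: csrows and channels are now arrays of pointers to separately allocated objects instead of embedded arrays. The per-driver init_csrows() loops therefore all take roughly this shape (demo_init_csrows() and the particular grain/type values are illustrative only):

    #include <linux/edac.h>

    static void demo_init_csrows(struct mem_ctl_info *mci, unsigned long nr_pages)
    {
            int row, chan;

            for (row = 0; row < mci->nr_csrows; row++) {
                    /* pointer dereference, no longer &mci->csrows[row] */
                    struct csrow_info *csrow = mci->csrows[row];

                    for (chan = 0; chan < csrow->nr_channels; chan++) {
                            struct dimm_info *dimm = csrow->channels[chan]->dimm;

                            dimm->nr_pages = nr_pages / csrow->nr_channels;
                            dimm->grain = 8;            /* example value */
                            dimm->mtype = MEM_DDR2;     /* example value */
                            dimm->dtype = DEV_UNKNOWN;
                            dimm->edac_mode = EDAC_SECDED;
                    }
            }
    }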
pvt->is_mirrored = false; } pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr); if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) { - debugf0("Lockstep is enabled\n"); + edac_dbg(0, "Lockstep is enabled\n"); mode = EDAC_S8ECD8ED; pvt->is_lockstep = true; } else { - debugf0("Lockstep is disabled\n"); + edac_dbg(0, "Lockstep is disabled\n"); mode = EDAC_S4ECD4ED; pvt->is_lockstep = false; } if (IS_CLOSE_PG(pvt->info.mcmtr)) { - debugf0("address map is on closed page mode\n"); + edac_dbg(0, "address map is on closed page mode\n"); pvt->is_close_pg = true; } else { - debugf0("address map is on open page mode\n"); + edac_dbg(0, "address map is on open page mode\n"); pvt->is_close_pg = false; } pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, ®); if (IS_RDIMM_ENABLED(reg)) { /* FIXME: Can also be LRDIMM */ - debugf0("Memory is registered\n"); + edac_dbg(0, "Memory is registered\n"); mtype = MEM_RDDR3; } else { - debugf0("Memory is unregistered\n"); + edac_dbg(0, "Memory is unregistered\n"); mtype = MEM_DDR3; } @@ -576,7 +576,7 @@ static int get_dimm_config(struct mem_ctl_info *mci) i, j, 0); pci_read_config_dword(pvt->pci_tad[i], mtr_regs[j], &mtr); - debugf4("Channel #%d MTR%d = %x\n", i, j, mtr); + edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr); if (IS_DIMM_PRESENT(mtr)) { pvt->channel[i].dimms++; @@ -588,10 +588,10 @@ static int get_dimm_config(struct mem_ctl_info *mci) size = (rows * cols * banks * ranks) >> (20 - 3); npages = MiB_TO_PAGES(size); - debugf0("mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", - pvt->sbridge_dev->mc, i, j, - size, npages, - banks, ranks, rows, cols); + edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", + pvt->sbridge_dev->mc, i, j, + size, npages, + banks, ranks, rows, cols); dimm->nr_pages = npages; dimm->grain = 32; @@ -629,8 +629,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci) tmp_mb = (1 + pvt->tolm) >> 20; mb = div_u64_rem(tmp_mb, 1000, &kb); - debugf0("TOLM: %u.%03u GB (0x%016Lx)\n", - mb, kb, (u64)pvt->tolm); + edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n", mb, kb, (u64)pvt->tolm); /* Address range is already 45:25 */ pci_read_config_dword(pvt->pci_sad1, TOHM, @@ -639,8 +638,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci) tmp_mb = (1 + pvt->tohm) >> 20; mb = div_u64_rem(tmp_mb, 1000, &kb); - debugf0("TOHM: %u.%03u GB (0x%016Lx)", - mb, kb, (u64)pvt->tohm); + edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)", mb, kb, (u64)pvt->tohm); /* * Step 2) Get SAD range and SAD Interleave list @@ -663,13 +661,13 @@ static void get_memory_layout(const struct mem_ctl_info *mci) tmp_mb = (limit + 1) >> 20; mb = div_u64_rem(tmp_mb, 1000, &kb); - debugf0("SAD#%d %s up to %u.%03u GB (0x%016Lx) %s reg=0x%08x\n", - n_sads, - get_dram_attr(reg), - mb, kb, - ((u64)tmp_mb) << 20L, - INTERLEAVE_MODE(reg) ? "Interleave: 8:6" : "Interleave: [8:6]XOR[18:16]", - reg); + edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n", + n_sads, + get_dram_attr(reg), + mb, kb, + ((u64)tmp_mb) << 20L, + INTERLEAVE_MODE(reg) ? 
"8:6" : "[8:6]XOR[18:16]", + reg); prv = limit; pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads], @@ -679,8 +677,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci) if (j > 0 && sad_interl == sad_pkg(reg, j)) break; - debugf0("SAD#%d, interleave #%d: %d\n", - n_sads, j, sad_pkg(reg, j)); + edac_dbg(0, "SAD#%d, interleave #%d: %d\n", + n_sads, j, sad_pkg(reg, j)); } } @@ -697,16 +695,16 @@ static void get_memory_layout(const struct mem_ctl_info *mci) tmp_mb = (limit + 1) >> 20; mb = div_u64_rem(tmp_mb, 1000, &kb); - debugf0("TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n", - n_tads, mb, kb, - ((u64)tmp_mb) << 20L, - (u32)TAD_SOCK(reg), - (u32)TAD_CH(reg), - (u32)TAD_TGT0(reg), - (u32)TAD_TGT1(reg), - (u32)TAD_TGT2(reg), - (u32)TAD_TGT3(reg), - reg); + edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n", + n_tads, mb, kb, + ((u64)tmp_mb) << 20L, + (u32)TAD_SOCK(reg), + (u32)TAD_CH(reg), + (u32)TAD_TGT0(reg), + (u32)TAD_TGT1(reg), + (u32)TAD_TGT2(reg), + (u32)TAD_TGT3(reg), + reg); prv = limit; } @@ -722,11 +720,11 @@ static void get_memory_layout(const struct mem_ctl_info *mci) ®); tmp_mb = TAD_OFFSET(reg) >> 20; mb = div_u64_rem(tmp_mb, 1000, &kb); - debugf0("TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n", - i, j, - mb, kb, - ((u64)tmp_mb) << 20L, - reg); + edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n", + i, j, + mb, kb, + ((u64)tmp_mb) << 20L, + reg); } } @@ -747,12 +745,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci) tmp_mb = RIR_LIMIT(reg) >> 20; rir_way = 1 << RIR_WAY(reg); mb = div_u64_rem(tmp_mb, 1000, &kb); - debugf0("CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n", - i, j, - mb, kb, - ((u64)tmp_mb) << 20L, - rir_way, - reg); + edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n", + i, j, + mb, kb, + ((u64)tmp_mb) << 20L, + rir_way, + reg); for (k = 0; k < rir_way; k++) { pci_read_config_dword(pvt->pci_tad[i], @@ -761,12 +759,12 @@ static void get_memory_layout(const struct mem_ctl_info *mci) tmp_mb = RIR_OFFSET(reg) << 6; mb = div_u64_rem(tmp_mb, 1000, &kb); - debugf0("CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", - i, j, k, - mb, kb, - ((u64)tmp_mb) << 20L, - (u32)RIR_RNK_TGT(reg), - reg); + edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", + i, j, k, + mb, kb, + ((u64)tmp_mb) << 20L, + (u32)RIR_RNK_TGT(reg), + reg); } } } @@ -853,16 +851,16 @@ static int get_memory_error_data(struct mem_ctl_info *mci, if (sad_way > 0 && sad_interl == sad_pkg(reg, sad_way)) break; sad_interleave[sad_way] = sad_pkg(reg, sad_way); - debugf0("SAD interleave #%d: %d\n", - sad_way, sad_interleave[sad_way]); + edac_dbg(0, "SAD interleave #%d: %d\n", + sad_way, sad_interleave[sad_way]); } - debugf0("mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", - pvt->sbridge_dev->mc, - n_sads, - addr, - limit, - sad_way + 7, - interleave_mode ? "" : "XOR[18:16]"); + edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n", + pvt->sbridge_dev->mc, + n_sads, + addr, + limit, + sad_way + 7, + interleave_mode ? 
"" : "XOR[18:16]"); if (interleave_mode) idx = ((addr >> 6) ^ (addr >> 16)) & 7; else @@ -884,8 +882,8 @@ static int get_memory_error_data(struct mem_ctl_info *mci, return -EINVAL; } *socket = sad_interleave[idx]; - debugf0("SAD interleave index: %d (wayness %d) = CPU socket %d\n", - idx, sad_way, *socket); + edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n", + idx, sad_way, *socket); /* * Move to the proper node structure, in order to access the @@ -972,16 +970,16 @@ static int get_memory_error_data(struct mem_ctl_info *mci, offset = TAD_OFFSET(tad_offset); - debugf0("TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n", - n_tads, - addr, - limit, - (u32)TAD_SOCK(reg), - ch_way, - offset, - idx, - base_ch, - *channel_mask); + edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n", + n_tads, + addr, + limit, + (u32)TAD_SOCK(reg), + ch_way, + offset, + idx, + base_ch, + *channel_mask); /* Calculate channel address */ /* Remove the TAD offset */ @@ -1017,11 +1015,11 @@ static int get_memory_error_data(struct mem_ctl_info *mci, limit = RIR_LIMIT(reg); mb = div_u64_rem(limit >> 20, 1000, &kb); - debugf0("RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n", - n_rir, - mb, kb, - limit, - 1 << RIR_WAY(reg)); + edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n", + n_rir, + mb, kb, + limit, + 1 << RIR_WAY(reg)); if (ch_addr <= limit) break; } @@ -1042,12 +1040,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci, ®); *rank = RIR_RNK_TGT(reg); - debugf0("RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", - n_rir, - ch_addr, - limit, - rir_way, - idx); + edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", + n_rir, + ch_addr, + limit, + rir_way, + idx); return 0; } @@ -1064,14 +1062,14 @@ static void sbridge_put_devices(struct sbridge_dev *sbridge_dev) { int i; - debugf0(__FILE__ ": %s()\n", __func__); + edac_dbg(0, "\n"); for (i = 0; i < sbridge_dev->n_devs; i++) { struct pci_dev *pdev = sbridge_dev->pdev[i]; if (!pdev) continue; - debugf0("Removing dev %02x:%02x.%d\n", - pdev->bus->number, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); + edac_dbg(0, "Removing dev %02x:%02x.%d\n", + pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); pci_dev_put(pdev); } } @@ -1177,10 +1175,9 @@ static int sbridge_get_onedevice(struct pci_dev **prev, return -ENODEV; } - debugf0("Detected dev %02x:%d.%d PCI ID %04x:%04x\n", - bus, dev_descr->dev, - dev_descr->func, - PCI_VENDOR_ID_INTEL, dev_descr->dev_id); + edac_dbg(0, "Detected dev %02x:%d.%d PCI ID %04x:%04x\n", + bus, dev_descr->dev, dev_descr->func, + PCI_VENDOR_ID_INTEL, dev_descr->dev_id); /* * As stated on drivers/pci/search.c, the reference count for @@ -1297,10 +1294,10 @@ static int mci_bind_devs(struct mem_ctl_info *mci, goto error; } - debugf0("Associated PCI %02x.%02d.%d with dev = %p\n", - sbridge_dev->bus, - PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), - pdev); + edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n", + sbridge_dev->bus, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), + pdev); } /* Check if everything were registered */ @@ -1435,8 +1432,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, * to the group of dimm's where the error may be happening. 
*/ snprintf(msg, sizeof(msg), - "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d", - core_err_cnt, + "%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d", overflow ? " OVERFLOW" : "", (uncorrected_error && recoverable) ? " recoverable" : "", area_type, @@ -1445,20 +1441,20 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, channel_mask, rank); - debugf0("%s", msg); + edac_dbg(0, "%s\n", msg); /* FIXME: need support for channel mask */ /* Call the helper to output message */ - edac_mc_handle_error(tp_event, mci, + edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0, channel, dimm, -1, - optype, msg, m); + optype, msg); return; err_parsing: - edac_mc_handle_error(tp_event, mci, 0, 0, 0, + edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, - msg, "", m); + msg, ""); } @@ -1592,8 +1588,7 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) struct sbridge_pvt *pvt; if (unlikely(!mci || !mci->pvt_info)) { - debugf0("MC: " __FILE__ ": %s(): dev = %p\n", - __func__, &sbridge_dev->pdev[0]->dev); + edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev); sbridge_printk(KERN_ERR, "Couldn't find mci handler\n"); return; @@ -1601,13 +1596,13 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev) pvt = mci->pvt_info; - debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", - __func__, mci, &sbridge_dev->pdev[0]->dev); + edac_dbg(0, "MC: mci = %p, dev = %p\n", + mci, &sbridge_dev->pdev[0]->dev); /* Remove MC sysfs nodes */ - edac_mc_del_mc(mci->dev); + edac_mc_del_mc(mci->pdev); - debugf1("%s: free mci struct\n", mci->ctl_name); + edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); kfree(mci->ctl_name); edac_mc_free(mci); sbridge_dev->mci = NULL; @@ -1638,8 +1633,8 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) if (unlikely(!mci)) return -ENOMEM; - debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", - __func__, mci, &sbridge_dev->pdev[0]->dev); + edac_dbg(0, "MC: mci = %p, dev = %p\n", + mci, &sbridge_dev->pdev[0]->dev); pvt = mci->pvt_info; memset(pvt, 0, sizeof(*pvt)); @@ -1670,12 +1665,11 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) get_memory_layout(mci); /* record ptr to the generic device */ - mci->dev = &sbridge_dev->pdev[0]->dev; + mci->pdev = &sbridge_dev->pdev[0]->dev; /* add this new MC control structure to EDAC's list of MCs */ if (unlikely(edac_mc_add_mc(mci))) { - debugf0("MC: " __FILE__ - ": %s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); rc = -EINVAL; goto fail0; } @@ -1722,7 +1716,8 @@ static int __devinit sbridge_probe(struct pci_dev *pdev, mc = 0; list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { - debugf0("Registering MC#%d (%d of %d)\n", mc, mc + 1, num_mc); + edac_dbg(0, "Registering MC#%d (%d of %d)\n", + mc, mc + 1, num_mc); sbridge_dev->mc = mc++; rc = sbridge_register_mci(sbridge_dev); if (unlikely(rc < 0)) @@ -1752,7 +1747,7 @@ static void __devexit sbridge_remove(struct pci_dev *pdev) { struct sbridge_dev *sbridge_dev; - debugf0(__FILE__ ": %s()\n", __func__); + edac_dbg(0, "\n"); /* * we have a trouble here: pdev value for removal will be wrong, since @@ -1801,7 +1796,7 @@ static int __init sbridge_init(void) { int pci_rc; - debugf2("MC: " __FILE__ ": %s()\n", __func__); + edac_dbg(2, "\n"); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); @@ -1825,7 +1820,7 @@ static int __init 
sbridge_init(void) */ static void __exit sbridge_exit(void) { - debugf2("MC: " __FILE__ ": %s()\n", __func__); + edac_dbg(2, "\n"); pci_unregister_driver(&sbridge_driver); mce_unregister_decode_chain(&sbridge_mce_dec); } diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c index 7bb4614730db..1e904b7b79a0 100644 --- a/drivers/edac/tile_edac.c +++ b/drivers/edac/tile_edac.c @@ -69,12 +69,12 @@ static void tile_edac_check(struct mem_ctl_info *mci) /* Check if the current error count is different from the saved one. */ if (mem_error.sbe_count != priv->ce_count) { - dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node); + dev_dbg(mci->pdev, "ECC CE err on node %d\n", priv->node); priv->ce_count = mem_error.sbe_count; - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, 0, 0, -1, - mci->ctl_name, "", NULL); + mci->ctl_name, ""); } } @@ -84,10 +84,10 @@ static void tile_edac_check(struct mem_ctl_info *mci) */ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci) { - struct csrow_info *csrow = &mci->csrows[0]; + struct csrow_info *csrow = mci->csrows[0]; struct tile_edac_priv *priv = mci->pvt_info; struct mshim_mem_info mem_info; - struct dimm_info *dimm = csrow->channels[0].dimm; + struct dimm_info *dimm = csrow->channels[0]->dimm; if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info, sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) != @@ -149,7 +149,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev) priv->node = pdev->id; priv->hv_devhdl = hv_devhdl; - mci->dev = &pdev->dev; + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_SECDED; diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c index 1ac7962d63ea..08a992693e62 100644 --- a/drivers/edac/x38_edac.c +++ b/drivers/edac/x38_edac.c @@ -103,10 +103,10 @@ static int how_many_channel(struct pci_dev *pdev) pci_read_config_byte(pdev, X38_CAPID0 + 8, &capid0_8b); if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ - debugf0("In single channel mode.\n"); + edac_dbg(0, "In single channel mode\n"); x38_channel_num = 1; } else { - debugf0("In dual channel mode.\n"); + edac_dbg(0, "In dual channel mode\n"); x38_channel_num = 2; } @@ -151,7 +151,7 @@ static void x38_clear_error_info(struct mem_ctl_info *mci) { struct pci_dev *pdev; - pdev = to_pci_dev(mci->dev); + pdev = to_pci_dev(mci->pdev); /* * Clear any error bits. 
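The x38 and tile hunks repeat the last recurring rename, mci->dev becoming mci->pdev: with the sysfs rework, struct mem_ctl_info carries its own embedded struct device (the mc<N> node that the attribute conversion above hangs files off), so the pointer to the parent PCI or platform device needs its own field. A rough sketch of the resulting relationship (the layout comment is simplified and the demo_* helpers are illustrative, not from the patch):

    #include <linux/edac.h>
    #include <linux/pci.h>

    /*
     * Simplified view of the two fields involved:
     *
     *     struct mem_ctl_info {
     *             struct device dev;    // the edac mc<N> sysfs device
     *             struct device *pdev;  // parent: &pci_dev->dev or &platform_device->dev
     *             ...
     *     };
     */
    static void demo_bind_parent(struct mem_ctl_info *mci, struct pci_dev *pci)
    {
            mci->pdev = &pci->dev;          /* was: mci->dev = &pci->dev */
    }

    static struct pci_dev *demo_parent_pci(struct mem_ctl_info *mci)
    {
            return to_pci_dev(mci->pdev);   /* was: to_pci_dev(mci->dev) */
    }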
@@ -172,7 +172,7 @@ static void x38_get_and_clear_error_info(struct mem_ctl_info *mci, struct pci_dev *pdev; void __iomem *window = mci->pvt_info; - pdev = to_pci_dev(mci->dev); + pdev = to_pci_dev(mci->pdev); /* * This is a mess because there is no atomic way to read all the @@ -215,26 +215,26 @@ static void x38_process_error_info(struct mem_ctl_info *mci, return; if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, - "UE overwrote CE", "", NULL); + "UE overwrote CE", ""); info->errsts = info->errsts2; } for (channel = 0; channel < x38_channel_num; channel++) { log = info->eccerrlog[channel]; if (log & X38_ECCERRLOG_UE) { - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, eccerrlog_row(channel, log), -1, -1, - "x38 UE", "", NULL); + "x38 UE", ""); } else if (log & X38_ECCERRLOG_CE) { - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, eccerrlog_syndrome(log), eccerrlog_row(channel, log), -1, -1, - "x38 CE", "", NULL); + "x38 CE", ""); } } } @@ -243,7 +243,7 @@ static void x38_check(struct mem_ctl_info *mci) { struct x38_error_info info; - debugf1("MC%d: %s()\n", mci->mc_idx, __func__); + edac_dbg(1, "MC%d\n", mci->mc_idx); x38_get_and_clear_error_info(mci, &info); x38_process_error_info(mci, &info); } @@ -331,7 +331,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx) bool stacked; void __iomem *window; - debugf0("MC: %s()\n", __func__); + edac_dbg(0, "MC:\n"); window = x38_map_mchbar(pdev); if (!window) @@ -352,9 +352,9 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx) if (!mci) return -ENOMEM; - debugf3("MC: %s(): init mci\n", __func__); + edac_dbg(3, "MC: init mci\n"); - mci->dev = &pdev->dev; + mci->pdev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_SECDED; @@ -378,7 +378,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx) */ for (i = 0; i < mci->nr_csrows; i++) { unsigned long nr_pages; - struct csrow_info *csrow = &mci->csrows[i]; + struct csrow_info *csrow = mci->csrows[i]; nr_pages = drb_to_nr_pages(drbs, stacked, i / X38_RANKS_PER_CHANNEL, @@ -388,7 +388,7 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx) continue; for (j = 0; j < x38_channel_num; j++) { - struct dimm_info *dimm = csrow->channels[j].dimm; + struct dimm_info *dimm = csrow->channels[j]->dimm; dimm->nr_pages = nr_pages / x38_channel_num; dimm->grain = nr_pages << PAGE_SHIFT; @@ -402,12 +402,12 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx) rc = -ENODEV; if (edac_mc_add_mc(mci)) { - debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); + edac_dbg(3, "MC: failed edac_mc_add_mc()\n"); goto fail; } /* get this far and it's successful */ - debugf3("MC: %s(): success\n", __func__); + edac_dbg(3, "MC: success\n"); return 0; fail: @@ -423,7 +423,7 @@ static int __devinit x38_init_one(struct pci_dev *pdev, { int rc; - debugf0("MC: %s()\n", __func__); + edac_dbg(0, "MC:\n"); if (pci_enable_device(pdev) < 0) return -EIO; @@ -439,7 +439,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; - debugf0("%s()\n", __func__); + edac_dbg(0, "\n"); mci = edac_mc_del_mc(&pdev->dev); if (!mci) @@ -472,7 +472,7 @@ static int __init x38_init(void) { int pci_rc; - debugf3("MC: %s()\n", __func__); + edac_dbg(3, "MC:\n"); /* Ensure that the OPSTATE is 
set correctly for POLL or NMI */ opstate_init(); @@ -486,14 +486,14 @@ static int __init x38_init(void) mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X38_HB, NULL); if (!mci_pdev) { - debugf0("x38 pci_get_device fail\n"); + edac_dbg(0, "x38 pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } pci_rc = x38_init_one(mci_pdev, x38_pci_tbl); if (pci_rc < 0) { - debugf0("x38 init fail\n"); + edac_dbg(0, "x38 init fail\n"); pci_rc = -ENODEV; goto fail1; } @@ -513,7 +513,7 @@ fail0: static void __exit x38_exit(void) { - debugf3("MC: %s()\n", __func__); + edac_dbg(3, "MC:\n"); pci_unregister_driver(&x38_driver); if (!x38_registered) { diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 4d460ef87161..7a05fd24d68b 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -398,6 +398,14 @@ static ssize_t guid_show(struct device *dev, return ret; } +static ssize_t is_local_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fw_device *device = fw_device(dev); + + return sprintf(buf, "%u\n", device->is_local); +} + static int units_sprintf(char *buf, const u32 *directory) { struct fw_csr_iterator ci; @@ -447,6 +455,7 @@ static ssize_t units_show(struct device *dev, static struct device_attribute fw_device_attributes[] = { __ATTR_RO(config_rom), __ATTR_RO(guid), + __ATTR_RO(is_local), __ATTR_RO(units), __ATTR_NULL, }; diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index 8382e27e9a27..38c0aa60b2cb 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c @@ -146,7 +146,7 @@ EXPORT_SYMBOL(fw_iso_buffer_destroy); /* Convert DMA address to offset into virtually contiguous buffer. */ size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed) { - int i; + size_t i; dma_addr_t address; ssize_t offset; diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 780708dc6e25..87d6f2d2f02d 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -525,9 +525,10 @@ const struct fw_address_region fw_high_memory_region = { .start = 0x000100000000ULL, .end = 0xffffe0000000ULL, }; EXPORT_SYMBOL(fw_high_memory_region); -#if 0 -const struct fw_address_region fw_low_memory_region = +static const struct fw_address_region low_memory_region = { .start = 0x000000000000ULL, .end = 0x000100000000ULL, }; + +#if 0 const struct fw_address_region fw_private_region = { .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, }; const struct fw_address_region fw_csr_region = @@ -1198,6 +1199,23 @@ static struct fw_address_handler registers = { .address_callback = handle_registers, }; +static void handle_low_memory(struct fw_card *card, struct fw_request *request, + int tcode, int destination, int source, int generation, + unsigned long long offset, void *payload, size_t length, + void *callback_data) +{ + /* + * This catches requests not handled by the physical DMA unit, + * i.e., wrong transaction types or unauthorized source nodes. 
+ */ + fw_send_response(card, request, RCODE_TYPE_ERROR); +} + +static struct fw_address_handler low_memory = { + .length = 0x000100000000ULL, + .address_callback = handle_low_memory, +}; + MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); MODULE_DESCRIPTION("Core IEEE1394 transaction logic"); MODULE_LICENSE("GPL"); @@ -1259,6 +1277,7 @@ static int __init fw_core_init(void) fw_core_add_address_handler(&topology_map, &topology_map_region); fw_core_add_address_handler(®isters, ®isters_region); + fw_core_add_address_handler(&low_memory, &low_memory_region); fw_core_add_descriptor(&vendor_id_descriptor); fw_core_add_descriptor(&model_id_descriptor); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index c1af05e834b6..c788dbdaf3bc 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -191,6 +191,7 @@ struct fw_ohci { unsigned quirks; unsigned int pri_req_max; u32 bus_time; + bool bus_time_running; bool is_root; bool csr_state_setclear_abdicate; int n_ir; @@ -1726,6 +1727,13 @@ static u32 update_bus_time(struct fw_ohci *ohci) { u32 cycle_time_seconds = get_cycle_time(ohci) >> 25; + if (unlikely(!ohci->bus_time_running)) { + reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds); + ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) | + (cycle_time_seconds & 0x40); + ohci->bus_time_running = true; + } + if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) ohci->bus_time += 0x40; @@ -2213,7 +2221,7 @@ static int ohci_enable(struct fw_card *card, { struct fw_ohci *ohci = fw_ohci(card); struct pci_dev *dev = to_pci_dev(card->device); - u32 lps, seconds, version, irqs; + u32 lps, version, irqs; int i, ret; if (software_reset(ohci)) { @@ -2269,9 +2277,12 @@ static int ohci_enable(struct fw_card *card, (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | (200 << 16)); - seconds = lower_32_bits(get_seconds()); - reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25); - ohci->bus_time = seconds & ~0x3f; + ohci->bus_time_running = false; + + for (i = 0; i < 32; i++) + if (ohci->ir_context_support & (1 << i)) + reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i), + IR_CONTEXT_MULTI_CHANNEL_MODE); version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; if (version >= OHCI_VERSION_1_1) { @@ -2369,7 +2380,6 @@ static int ohci_enable(struct fw_card *card, OHCI1394_postedWriteErr | OHCI1394_selfIDComplete | OHCI1394_regAccessFail | - OHCI1394_cycle64Seconds | OHCI1394_cycleInconsistent | OHCI1394_unrecoverableError | OHCI1394_cycleTooLong | @@ -2658,7 +2668,8 @@ static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) case CSR_BUS_TIME: spin_lock_irqsave(&ohci->lock, flags); - ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f); + ohci->bus_time = (update_bus_time(ohci) & 0x40) | + (value & ~0x7f); spin_unlock_irqrestore(&ohci->lock, flags); break; @@ -3539,6 +3550,13 @@ static int __devinit pci_probe(struct pci_dev *dev, INIT_WORK(&ohci->bus_reset_work, bus_reset_work); + if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) || + pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) { + dev_err(&dev->dev, "invalid MMIO resource\n"); + err = -ENXIO; + goto fail_disable; + } + err = pci_request_region(dev, 0, ohci_driver_name); if (err) { dev_err(&dev->dev, "MMIO resource unavailable\n"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c index bf791fa0e50d..d9568198c300 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c @@ 
-196,7 +196,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector, return ret; } -struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector) +static struct drm_encoder *exynos_drm_best_encoder( + struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct exynos_drm_connector *exynos_connector = diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index eaf630dc5dba..84dd099eae3b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -33,7 +33,6 @@ #include "exynos_drm_fbdev.h" static LIST_HEAD(exynos_drm_subdrv_list); -static struct drm_device *drm_dev; static int exynos_drm_subdrv_probe(struct drm_device *dev, struct exynos_drm_subdrv *subdrv) @@ -120,8 +119,6 @@ int exynos_drm_device_register(struct drm_device *dev) if (!dev) return -EINVAL; - drm_dev = dev; - list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) { subdrv->drm_dev = dev; err = exynos_drm_subdrv_probe(dev, subdrv); @@ -149,8 +146,6 @@ int exynos_drm_device_unregister(struct drm_device *dev) list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) exynos_drm_subdrv_remove(dev, subdrv); - drm_dev = NULL; - return 0; } EXPORT_SYMBOL_GPL(exynos_drm_device_unregister); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 32a34c85899b..abb1e2f8227f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -29,21 +29,23 @@ #include "drmP.h" #include "drm_crtc_helper.h" -#include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" -#include "exynos_drm_fb.h" #include "exynos_drm_encoder.h" -#include "exynos_drm_gem.h" +#include "exynos_drm_plane.h" #define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\ drm_crtc) +enum exynos_crtc_mode { + CRTC_MODE_NORMAL, /* normal mode */ + CRTC_MODE_BLANK, /* The private plane of crtc is blank */ +}; + /* * Exynos specific crtc structure. * * @drm_crtc: crtc object. - * @overlay: contain information common to display controller and hdmi and - * contents of this overlay object would be copied to sub driver size. + * @drm_plane: pointer of private plane object for this crtc * @pipe: a crtc index created at load() with a new crtc object creation * and the crtc object would be set to private->crtc array * to get a crtc object corresponding to this pipe from private->crtc @@ -52,115 +54,16 @@ * we can refer to the crtc to current hardware interrupt occured through * this pipe value. 
* @dpms: store the crtc dpms value + * @mode: store the crtc mode value */ struct exynos_drm_crtc { struct drm_crtc drm_crtc; - struct exynos_drm_overlay overlay; + struct drm_plane *plane; unsigned int pipe; unsigned int dpms; + enum exynos_crtc_mode mode; }; -static void exynos_drm_crtc_apply(struct drm_crtc *crtc) -{ - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - struct exynos_drm_overlay *overlay = &exynos_crtc->overlay; - - exynos_drm_fn_encoder(crtc, overlay, - exynos_drm_encoder_crtc_mode_set); - exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, - exynos_drm_encoder_crtc_commit); -} - -int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, - struct drm_framebuffer *fb, - struct drm_display_mode *mode, - struct exynos_drm_crtc_pos *pos) -{ - struct exynos_drm_gem_buf *buffer; - unsigned int actual_w; - unsigned int actual_h; - int nr = exynos_drm_format_num_buffers(fb->pixel_format); - int i; - - for (i = 0; i < nr; i++) { - buffer = exynos_drm_fb_buffer(fb, i); - if (!buffer) { - DRM_LOG_KMS("buffer is null\n"); - return -EFAULT; - } - - overlay->dma_addr[i] = buffer->dma_addr; - overlay->vaddr[i] = buffer->kvaddr; - - DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n", - i, (unsigned long)overlay->vaddr[i], - (unsigned long)overlay->dma_addr[i]); - } - - actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w); - actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h); - - /* set drm framebuffer data. */ - overlay->fb_x = pos->fb_x; - overlay->fb_y = pos->fb_y; - overlay->fb_width = fb->width; - overlay->fb_height = fb->height; - overlay->src_width = pos->src_w; - overlay->src_height = pos->src_h; - overlay->bpp = fb->bits_per_pixel; - overlay->pitch = fb->pitches[0]; - overlay->pixel_format = fb->pixel_format; - - /* set overlay range to be displayed. */ - overlay->crtc_x = pos->crtc_x; - overlay->crtc_y = pos->crtc_y; - overlay->crtc_width = actual_w; - overlay->crtc_height = actual_h; - - /* set drm mode data. */ - overlay->mode_width = mode->hdisplay; - overlay->mode_height = mode->vdisplay; - overlay->refresh = mode->vrefresh; - overlay->scan_flag = mode->flags; - - DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)", - overlay->crtc_x, overlay->crtc_y, - overlay->crtc_width, overlay->crtc_height); - - return 0; -} - -static int exynos_drm_crtc_update(struct drm_crtc *crtc) -{ - struct exynos_drm_crtc *exynos_crtc; - struct exynos_drm_overlay *overlay; - struct exynos_drm_crtc_pos pos; - struct drm_display_mode *mode = &crtc->mode; - struct drm_framebuffer *fb = crtc->fb; - - if (!mode || !fb) - return -EINVAL; - - exynos_crtc = to_exynos_crtc(crtc); - overlay = &exynos_crtc->overlay; - - memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos)); - - /* it means the offset of framebuffer to be displayed. */ - pos.fb_x = crtc->x; - pos.fb_y = crtc->y; - - /* OSD position to be displayed. 
*/ - pos.crtc_x = 0; - pos.crtc_y = 0; - pos.crtc_w = fb->width - crtc->x; - pos.crtc_h = fb->height - crtc->y; - pos.src_w = pos.crtc_w; - pos.src_h = pos.crtc_h; - - return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos); -} - static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) { struct drm_device *dev = crtc->dev; @@ -175,23 +78,8 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode) mutex_lock(&dev->struct_mutex); - switch (mode) { - case DRM_MODE_DPMS_ON: - exynos_drm_fn_encoder(crtc, &mode, - exynos_drm_encoder_crtc_dpms); - exynos_crtc->dpms = mode; - break; - case DRM_MODE_DPMS_STANDBY: - case DRM_MODE_DPMS_SUSPEND: - case DRM_MODE_DPMS_OFF: - exynos_drm_fn_encoder(crtc, &mode, - exynos_drm_encoder_crtc_dpms); - exynos_crtc->dpms = mode; - break; - default: - DRM_ERROR("unspecified mode %d\n", mode); - break; - } + exynos_drm_fn_encoder(crtc, &mode, exynos_drm_encoder_crtc_dpms); + exynos_crtc->dpms = mode; mutex_unlock(&dev->struct_mutex); } @@ -209,30 +97,8 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc) DRM_DEBUG_KMS("%s\n", __FILE__); - /* - * when set_crtc is requested from user or at booting time, - * crtc->commit would be called without dpms call so if dpms is - * no power on then crtc->dpms should be called - * with DRM_MODE_DPMS_ON for the hardware power to be on. - */ - if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) { - int mode = DRM_MODE_DPMS_ON; - - /* - * enable hardware(power on) to all encoders hdmi connected - * to current crtc. - */ - exynos_drm_crtc_dpms(crtc, mode); - /* - * enable dma to all encoders connected to current crtc and - * lcd panel. - */ - exynos_drm_fn_encoder(crtc, &mode, - exynos_drm_encoder_dpms_from_crtc); - } - - exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe, - exynos_drm_encoder_crtc_commit); + exynos_plane_commit(exynos_crtc->plane); + exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_ON); } static bool @@ -251,31 +117,61 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + struct drm_plane *plane = exynos_crtc->plane; + unsigned int crtc_w; + unsigned int crtc_h; + int pipe = exynos_crtc->pipe; + int ret; + DRM_DEBUG_KMS("%s\n", __FILE__); + exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON); + /* * copy the mode data adjusted by mode_fixup() into crtc->mode * so that hardware can be seet to proper mode. 
*/ memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode)); - return exynos_drm_crtc_update(crtc); + crtc_w = crtc->fb->width - x; + crtc_h = crtc->fb->height - y; + + ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h, + x, y, crtc_w, crtc_h); + if (ret) + return ret; + + plane->crtc = crtc; + plane->fb = crtc->fb; + + exynos_drm_fn_encoder(crtc, &pipe, exynos_drm_encoder_crtc_pipe); + + return 0; } static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + struct drm_plane *plane = exynos_crtc->plane; + unsigned int crtc_w; + unsigned int crtc_h; int ret; DRM_DEBUG_KMS("%s\n", __FILE__); - ret = exynos_drm_crtc_update(crtc); + crtc_w = crtc->fb->width - x; + crtc_h = crtc->fb->height - y; + + ret = exynos_plane_mode_set(plane, crtc, crtc->fb, 0, 0, crtc_w, crtc_h, + x, y, crtc_w, crtc_h); if (ret) return ret; - exynos_drm_crtc_apply(crtc); + exynos_drm_crtc_commit(crtc); - return ret; + return 0; } static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc) @@ -284,6 +180,16 @@ static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc) /* drm framework doesn't check NULL */ } +static void exynos_drm_crtc_disable(struct drm_crtc *crtc) +{ + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + + DRM_DEBUG_KMS("%s\n", __FILE__); + + exynos_plane_dpms(exynos_crtc->plane, DRM_MODE_DPMS_OFF); + exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); +} + static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { .dpms = exynos_drm_crtc_dpms, .prepare = exynos_drm_crtc_prepare, @@ -292,6 +198,7 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { .mode_set = exynos_drm_crtc_mode_set, .mode_set_base = exynos_drm_crtc_mode_set_base, .load_lut = exynos_drm_crtc_load_lut, + .disable = exynos_drm_crtc_disable, }; static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, @@ -327,7 +234,8 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, &dev_priv->pageflip_event_list); crtc->fb = fb; - ret = exynos_drm_crtc_update(crtc); + ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, + NULL); if (ret) { crtc->fb = old_fb; drm_vblank_put(dev, exynos_crtc->pipe); @@ -335,14 +243,6 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc, goto out; } - - /* - * the values related to a buffer of the drm framebuffer - * to be applied should be set at here. because these values - * first, are set to shadow registers and then to - * real registers at vsync front porch period. 
- */ - exynos_drm_crtc_apply(crtc); } out: mutex_unlock(&dev->struct_mutex); @@ -362,18 +262,73 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) kfree(exynos_crtc); } +static int exynos_drm_crtc_set_property(struct drm_crtc *crtc, + struct drm_property *property, + uint64_t val) +{ + struct drm_device *dev = crtc->dev; + struct exynos_drm_private *dev_priv = dev->dev_private; + struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + + DRM_DEBUG_KMS("%s\n", __func__); + + if (property == dev_priv->crtc_mode_property) { + enum exynos_crtc_mode mode = val; + + if (mode == exynos_crtc->mode) + return 0; + + exynos_crtc->mode = mode; + + switch (mode) { + case CRTC_MODE_NORMAL: + exynos_drm_crtc_commit(crtc); + break; + case CRTC_MODE_BLANK: + exynos_plane_dpms(exynos_crtc->plane, + DRM_MODE_DPMS_OFF); + break; + default: + break; + } + + return 0; + } + + return -EINVAL; +} + static struct drm_crtc_funcs exynos_crtc_funcs = { .set_config = drm_crtc_helper_set_config, .page_flip = exynos_drm_crtc_page_flip, .destroy = exynos_drm_crtc_destroy, + .set_property = exynos_drm_crtc_set_property, +}; + +static const struct drm_prop_enum_list mode_names[] = { + { CRTC_MODE_NORMAL, "normal" }, + { CRTC_MODE_BLANK, "blank" }, }; -struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev, - struct drm_crtc *crtc) +static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc) { - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct exynos_drm_private *dev_priv = dev->dev_private; + struct drm_property *prop; - return &exynos_crtc->overlay; + DRM_DEBUG_KMS("%s\n", __func__); + + prop = dev_priv->crtc_mode_property; + if (!prop) { + prop = drm_property_create_enum(dev, 0, "mode", mode_names, + ARRAY_SIZE(mode_names)); + if (!prop) + return; + + dev_priv->crtc_mode_property = prop; + } + + drm_object_attach_property(&crtc->base, prop, 0); } int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr) @@ -392,7 +347,12 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr) exynos_crtc->pipe = nr; exynos_crtc->dpms = DRM_MODE_DPMS_OFF; - exynos_crtc->overlay.zpos = DEFAULT_ZPOS; + exynos_crtc->plane = exynos_plane_init(dev, 1 << nr, true); + if (!exynos_crtc->plane) { + kfree(exynos_crtc); + return -ENOMEM; + } + crtc = &exynos_crtc->drm_crtc; private->crtc[nr] = crtc; @@ -400,6 +360,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr) drm_crtc_init(dev, crtc, &exynos_crtc_funcs); drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs); + exynos_drm_crtc_attach_mode_property(crtc); + return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index 16b8e2195a0d..6bae8d8c250e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h @@ -29,39 +29,8 @@ #ifndef _EXYNOS_DRM_CRTC_H_ #define _EXYNOS_DRM_CRTC_H_ -struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev, - struct drm_crtc *crtc); int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr); int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); -/* - * Exynos specific crtc postion structure. - * - * @fb_x: offset x on a framebuffer to be displyed - * - the unit is screen coordinates. - * @fb_y: offset y on a framebuffer to be displayed - * - the unit is screen coordinates. 
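The new CRTC "mode" property above ("normal"/"blank") follows the stock DRM property pattern: create the enum once, cache it in the driver private, attach it to each CRTC, and act on the value in the .set_property callback. A hedged sketch of that pattern with illustrative names (my_priv, my_mode_names and my_crtc_attach_mode_property are assumptions, not symbols from this patch):

struct my_priv {
        struct drm_property *crtc_mode_property;
};

static const struct drm_prop_enum_list my_mode_names[] = {
        { 0, "normal" },
        { 1, "blank" },
};

static void my_crtc_attach_mode_property(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct my_priv *priv = dev->dev_private;
        struct drm_property *prop = priv->crtc_mode_property;

        /* the property object is shared by all CRTCs, so create it only once */
        if (!prop) {
                prop = drm_property_create_enum(dev, 0, "mode", my_mode_names,
                                                ARRAY_SIZE(my_mode_names));
                if (!prop)
                        return;
                priv->crtc_mode_property = prop;
        }

        /* initial value 0 == "normal" */
        drm_object_attach_property(&crtc->base, prop, 0);
}

Userspace changes the value through the generic property ioctls; the driver only sees its .set_property callback, as in the hunk above.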
- * @src_w: width of source area to be displayed from a framebuffer. - * @src_h: height of source area to be displayed from a framebuffer. - * @crtc_x: offset x on hardware screen. - * @crtc_y: offset y on hardware screen. - * @crtc_w: width of hardware screen. - * @crtc_h: height of hardware screen. - */ -struct exynos_drm_crtc_pos { - unsigned int fb_x; - unsigned int fb_y; - unsigned int src_w; - unsigned int src_h; - unsigned int crtc_x; - unsigned int crtc_y; - unsigned int crtc_w; - unsigned int crtc_h; -}; - -int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay, - struct drm_framebuffer *fb, - struct drm_display_mode *mode, - struct exynos_drm_crtc_pos *pos); #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index 274909271c36..613bf8a5d9b2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -25,6 +25,7 @@ #include "drmP.h" #include "drm.h" +#include "exynos_drm.h" #include "exynos_drm_drv.h" #include "exynos_drm_gem.h" @@ -86,6 +87,10 @@ static struct sg_table * npages = buf->size / buf->page_size; sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size); + if (!sgt) { + DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n"); + goto err_unlock; + } nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir); DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n", @@ -186,7 +191,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, struct exynos_drm_gem_obj *exynos_gem_obj; struct exynos_drm_gem_buf *buffer; struct page *page; - int ret, i = 0; + int ret; DRM_DEBUG_PRIME("%s\n", __FILE__); @@ -210,7 +215,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sgt)) { + if (IS_ERR_OR_NULL(sgt)) { ret = PTR_ERR(sgt); goto err_buf_detach; } @@ -236,13 +241,25 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, } sgl = sgt->sgl; - buffer->dma_addr = sg_dma_address(sgl); - while (i < sgt->nents) { - buffer->pages[i] = sg_page(sgl); - buffer->size += sg_dma_len(sgl); - sgl = sg_next(sgl); - i++; + if (sgt->nents == 1) { + buffer->dma_addr = sg_dma_address(sgt->sgl); + buffer->size = sg_dma_len(sgt->sgl); + + /* always physically continuous memory if sgt->nents is 1. 
*/ + exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; + } else { + unsigned int i = 0; + + buffer->dma_addr = sg_dma_address(sgl); + while (i < sgt->nents) { + buffer->pages[i] = sg_page(sgl); + buffer->size += sg_dma_len(sgl); + sgl = sg_next(sgl); + i++; + } + + exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; } exynos_gem_obj->buffer = buffer; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index d6de2e07fa03..ebacec6f1e48 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -85,8 +85,11 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags) } for (nr = 0; nr < MAX_PLANE; nr++) { - ret = exynos_plane_init(dev, nr); - if (ret) + struct drm_plane *plane; + unsigned int possible_crtcs = (1 << MAX_CRTC) - 1; + + plane = exynos_plane_init(dev, possible_crtcs, false); + if (!plane) goto err_crtc; } @@ -221,8 +224,6 @@ static struct drm_ioctl_desc exynos_ioctls[] = { exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH), DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl, DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl, - DRM_UNLOCKED | DRM_AUTH), DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH), DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index 277653d5fda0..e22704b249d7 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -59,12 +59,14 @@ enum exynos_drm_output_type { * * @mode_set: copy drm overlay info to hw specific overlay info. * @commit: apply hardware specific overlay data to registers. + * @enable: enable hardware specific overlay. * @disable: disable hardware specific overlay. */ struct exynos_drm_overlay_ops { void (*mode_set)(struct device *subdrv_dev, struct exynos_drm_overlay *overlay); void (*commit)(struct device *subdrv_dev, int zpos); + void (*enable)(struct device *subdrv_dev, int zpos); void (*disable)(struct device *subdrv_dev, int zpos); }; @@ -235,6 +237,8 @@ struct exynos_drm_private { * this array is used to be aware of which crtc did it request vblank. 
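In the dma-buf import path above, a mapped sg_table with a single entry is treated as one physically contiguous chunk (EXYNOS_BO_CONTIG), while longer tables are walked entry by entry and flagged non-contiguous. A hedged sketch of that walk; struct my_buf is an illustrative stand-in for the driver's buffer object and is assumed zero-initialised:

struct my_buf {
        dma_addr_t dma_addr;
        unsigned long size;
        struct page **pages;
        bool contig;
};

static void my_import_sgt(struct my_buf *buf, struct sg_table *sgt)
{
        struct scatterlist *sgl = sgt->sgl;
        unsigned int i;

        /* the device address always starts at the first mapped entry */
        buf->dma_addr = sg_dma_address(sgl);

        if (sgt->nents == 1) {
                /* one mapped entry: the exporter handed over one contiguous chunk */
                buf->size = sg_dma_len(sgl);
                buf->contig = true;
                return;
        }

        /* otherwise collect the backing pages and sum the mapped lengths */
        for (i = 0; i < sgt->nents; i++) {
                buf->pages[i] = sg_page(sgl);
                buf->size += sg_dma_len(sgl);
                sgl = sg_next(sgl);
        }
        buf->contig = false;
}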
*/ struct drm_crtc *crtc[MAX_CRTC]; + struct drm_property *plane_zpos_property; + struct drm_property *crtc_mode_property; }; /* diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c index 4a13a747f5d4..2c037cd7d2d4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c @@ -30,7 +30,6 @@ #include "drm_crtc_helper.h" #include "exynos_drm_drv.h" -#include "exynos_drm_crtc.h" #include "exynos_drm_encoder.h" #define to_exynos_encoder(x) container_of(x, struct exynos_drm_encoder,\ @@ -136,21 +135,16 @@ static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder, struct drm_connector *connector; struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); struct exynos_drm_manager_ops *manager_ops = manager->ops; - struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; - struct exynos_drm_overlay *overlay = get_exynos_drm_overlay(dev, - encoder->crtc); DRM_DEBUG_KMS("%s\n", __FILE__); + exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_ON); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { + if (connector->encoder == encoder) if (manager_ops && manager_ops->mode_set) manager_ops->mode_set(manager->dev, adjusted_mode); - - if (overlay_ops && overlay_ops->mode_set) - overlay_ops->mode_set(manager->dev, overlay); - } } } @@ -310,8 +304,8 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data) struct exynos_drm_manager_ops *manager_ops = manager->ops; int crtc = *(int *)data; - if (manager->pipe == -1) - manager->pipe = crtc; + if (manager->pipe != crtc) + return; if (manager_ops->enable_vblank) manager_ops->enable_vblank(manager->dev); @@ -324,34 +318,41 @@ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data) struct exynos_drm_manager_ops *manager_ops = manager->ops; int crtc = *(int *)data; - if (manager->pipe == -1) - manager->pipe = crtc; + if (manager->pipe != crtc) + return; if (manager_ops->disable_vblank) manager_ops->disable_vblank(manager->dev); } -void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder, - void *data) +void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data) { - struct exynos_drm_manager *manager = - to_exynos_encoder(encoder)->manager; - struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; - int zpos = DEFAULT_ZPOS; + struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); + struct exynos_drm_manager *manager = exynos_encoder->manager; + struct exynos_drm_manager_ops *manager_ops = manager->ops; + int mode = *(int *)data; - if (data) - zpos = *(int *)data; + DRM_DEBUG_KMS("%s\n", __FILE__); - if (overlay_ops && overlay_ops->commit) - overlay_ops->commit(manager->dev, zpos); + if (manager_ops && manager_ops->dpms) + manager_ops->dpms(manager->dev, mode); + + /* + * if this condition is ok then it means that the crtc is already + * detached from encoder and last function for detaching is properly + * done, so clear pipe from manager to prevent repeated call. 
+ */ + if (mode > DRM_MODE_DPMS_ON) { + if (!encoder->crtc) + manager->pipe = -1; + } } -void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data) +void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; - int crtc = *(int *)data; - int zpos = DEFAULT_ZPOS; + int pipe = *(int *)data; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -359,76 +360,62 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data) * when crtc is detached from encoder, this pipe is used * to select manager operation */ - manager->pipe = crtc; - - exynos_drm_encoder_crtc_plane_commit(encoder, &zpos); + manager->pipe = pipe; } -void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data) +void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data) { - struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); - int mode = *(int *)data; + struct exynos_drm_manager *manager = + to_exynos_encoder(encoder)->manager; + struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; + struct exynos_drm_overlay *overlay = data; DRM_DEBUG_KMS("%s\n", __FILE__); - exynos_drm_encoder_dpms(encoder, mode); - - exynos_encoder->dpms = mode; + if (overlay_ops && overlay_ops->mode_set) + overlay_ops->mode_set(manager->dev, overlay); } -void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data) +void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data) { - struct drm_device *dev = encoder->dev; - struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); - struct exynos_drm_manager *manager = exynos_encoder->manager; - struct exynos_drm_manager_ops *manager_ops = manager->ops; - struct drm_connector *connector; - int mode = *(int *)data; + struct exynos_drm_manager *manager = + to_exynos_encoder(encoder)->manager; + struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; + int zpos = DEFAULT_ZPOS; DRM_DEBUG_KMS("%s\n", __FILE__); - if (manager_ops && manager_ops->dpms) - manager_ops->dpms(manager->dev, mode); - - /* - * set current dpms mode to the connector connected to - * current encoder. connector->dpms would be checked - * at drm_helper_connector_dpms() - */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - if (connector->encoder == encoder) - connector->dpms = mode; + if (data) + zpos = *(int *)data; - /* - * if this condition is ok then it means that the crtc is already - * detached from encoder and last function for detaching is properly - * done, so clear pipe from manager to prevent repeated call. 
- */ - if (mode > DRM_MODE_DPMS_ON) { - if (!encoder->crtc) - manager->pipe = -1; - } + if (overlay_ops && overlay_ops->commit) + overlay_ops->commit(manager->dev, zpos); } -void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data) +void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; - struct exynos_drm_overlay *overlay = data; + int zpos = DEFAULT_ZPOS; - if (overlay_ops && overlay_ops->mode_set) - overlay_ops->mode_set(manager->dev, overlay); + DRM_DEBUG_KMS("%s\n", __FILE__); + + if (data) + zpos = *(int *)data; + + if (overlay_ops && overlay_ops->enable) + overlay_ops->enable(manager->dev, zpos); } -void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data) +void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data) { struct exynos_drm_manager *manager = to_exynos_encoder(encoder)->manager; struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops; int zpos = DEFAULT_ZPOS; - DRM_DEBUG_KMS("\n"); + DRM_DEBUG_KMS("%s\n", __FILE__); if (data) zpos = *(int *)data; diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h index eb7d2316847e..6470d9ddf5a1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h +++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h @@ -40,13 +40,11 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data, void (*fn)(struct drm_encoder *, void *)); void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data); void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data); -void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder, - void *data); -void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data); -void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, - void *data); void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data); -void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data); -void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data); +void exynos_drm_encoder_crtc_pipe(struct drm_encoder *encoder, void *data); +void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data); +void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data); +void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data); +void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data); #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 29fdbfeb43cb..a68d2b313f03 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -78,7 +78,6 @@ struct fimd_context { struct drm_crtc *crtc; struct clk *bus_clk; struct clk *lcd_clk; - struct resource *regs_res; void __iomem *regs; struct fimd_win_data win_data[WINDOWS_NR]; unsigned int clkdiv; @@ -813,7 +812,7 @@ static int __devinit fimd_probe(struct platform_device *pdev) return -EINVAL; } - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; @@ -838,33 +837,26 @@ static int __devinit fimd_probe(struct platform_device *pdev) goto err_clk; } - ctx->regs_res = request_mem_region(res->start, resource_size(res), - dev_name(dev)); - if (!ctx->regs_res) { - dev_err(dev, "failed to claim register 
region\n"); - ret = -ENOENT; - goto err_clk; - } - - ctx->regs = ioremap(res->start, resource_size(res)); + ctx->regs = devm_request_and_ioremap(&pdev->dev, res); if (!ctx->regs) { dev_err(dev, "failed to map registers\n"); ret = -ENXIO; - goto err_req_region_io; + goto err_clk; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(dev, "irq request failed.\n"); - goto err_req_region_irq; + goto err_clk; } ctx->irq = res->start; - ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx); - if (ret < 0) { + ret = devm_request_irq(&pdev->dev, ctx->irq, fimd_irq_handler, + 0, "drm_fimd", ctx); + if (ret) { dev_err(dev, "irq request failed.\n"); - goto err_req_irq; + goto err_clk; } ctx->vidcon0 = pdata->vidcon0; @@ -899,14 +891,6 @@ static int __devinit fimd_probe(struct platform_device *pdev) return 0; -err_req_irq: -err_req_region_irq: - iounmap(ctx->regs); - -err_req_region_io: - release_resource(ctx->regs_res); - kfree(ctx->regs_res); - err_clk: clk_disable(ctx->lcd_clk); clk_put(ctx->lcd_clk); @@ -916,7 +900,6 @@ err_bus_clk: clk_put(ctx->bus_clk); err_clk_get: - kfree(ctx); return ret; } @@ -944,13 +927,6 @@ out: clk_put(ctx->lcd_clk); clk_put(ctx->bus_clk); - iounmap(ctx->regs); - release_resource(ctx->regs_res); - kfree(ctx->regs_res); - free_irq(ctx->irq, ctx); - - kfree(ctx); - return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 5c8b683029ea..f9efde40c097 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -99,25 +99,17 @@ out: struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask) { - struct inode *inode; - struct address_space *mapping; struct page *p, **pages; int i, npages; - /* This is the shared memory object that backs the GEM resource */ - inode = obj->filp->f_path.dentry->d_inode; - mapping = inode->i_mapping; - npages = obj->size >> PAGE_SHIFT; pages = drm_malloc_ab(npages, sizeof(struct page *)); if (pages == NULL) return ERR_PTR(-ENOMEM); - gfpmask |= mapping_gfp_mask(mapping); - for (i = 0; i < npages; i++) { - p = shmem_read_mapping_page_gfp(mapping, i, gfpmask); + p = alloc_page(gfpmask); if (IS_ERR(p)) goto fail; pages[i] = p; @@ -126,31 +118,22 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, return pages; fail: - while (i--) - page_cache_release(pages[i]); + while (--i) + __free_page(pages[i]); drm_free_large(pages); return ERR_PTR(PTR_ERR(p)); } static void exynos_gem_put_pages(struct drm_gem_object *obj, - struct page **pages, - bool dirty, bool accessed) + struct page **pages) { - int i, npages; + int npages; npages = obj->size >> PAGE_SHIFT; - for (i = 0; i < npages; i++) { - if (dirty) - set_page_dirty(pages[i]); - - if (accessed) - mark_page_accessed(pages[i]); - - /* Undo the reference we took when populating the table */ - page_cache_release(pages[i]); - } + while (--npages >= 0) + __free_page(pages[npages]); drm_free_large(pages); } @@ -189,7 +172,7 @@ static int exynos_drm_gem_get_pages(struct drm_gem_object *obj) return -EINVAL; } - pages = exynos_gem_get_pages(obj, GFP_KERNEL); + pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE); if (IS_ERR(pages)) { DRM_ERROR("failed to get pages.\n"); return PTR_ERR(pages); @@ -230,7 +213,7 @@ err1: kfree(buf->sgt); buf->sgt = NULL; err: - exynos_gem_put_pages(obj, pages, true, false); + exynos_gem_put_pages(obj, pages); return ret; } @@ -248,7 +231,7 @@ static void exynos_drm_gem_put_pages(struct drm_gem_object *obj) 
kfree(buf->sgt); buf->sgt = NULL; - exynos_gem_put_pages(obj, buf->pages, true, false); + exynos_gem_put_pages(obj, buf->pages); buf->pages = NULL; /* add some codes for UNCACHED type here. TODO */ @@ -291,11 +274,21 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) if (!buf->pages) return; + /* + * do not release memory region from exporter. + * + * the region will be released by exporter + * once dmabuf's refcount becomes 0. + */ + if (obj->import_attach) + goto out; + if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) exynos_drm_gem_put_pages(obj); else exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf); +out: exynos_drm_fini_buf(obj->dev, buf); exynos_gem_obj->buffer = NULL; @@ -668,7 +661,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, * with DRM_IOCTL_MODE_CREATE_DUMB command. */ - args->pitch = args->width * args->bpp >> 3; + args->pitch = args->width * ((args->bpp + 7) / 8); args->size = PAGE_ALIGN(args->pitch * args->height); exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size); diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 14d038b6cb02..085b2a5d5f70 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -63,7 +63,8 @@ struct exynos_drm_gem_buf { * by user request or at framebuffer creation. * continuous memory region allocated by user request * or at framebuffer creation. - * @size: total memory size to physically non-continuous memory region. + * @size: size requested from user, in bytes and this size is aligned + * in page unit. * @flags: indicate memory type to allocated buffer and cache attruibute. * * P.S. this object would be transfered to user as kms_bo.handle so diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index c4c6525d4653..b89829e5043a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -12,9 +12,12 @@ #include "drmP.h" #include "exynos_drm.h" -#include "exynos_drm_crtc.h" #include "exynos_drm_drv.h" #include "exynos_drm_encoder.h" +#include "exynos_drm_fb.h" +#include "exynos_drm_gem.h" + +#define to_exynos_plane(x) container_of(x, struct exynos_plane, base) struct exynos_plane { struct drm_plane base; @@ -30,6 +33,108 @@ static const uint32_t formats[] = { DRM_FORMAT_NV12MT, }; +int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct exynos_plane *exynos_plane = to_exynos_plane(plane); + struct exynos_drm_overlay *overlay = &exynos_plane->overlay; + unsigned int actual_w; + unsigned int actual_h; + int nr; + int i; + + DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); + + nr = exynos_drm_format_num_buffers(fb->pixel_format); + for (i = 0; i < nr; i++) { + struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i); + + if (!buffer) { + DRM_LOG_KMS("buffer is null\n"); + return -EFAULT; + } + + overlay->dma_addr[i] = buffer->dma_addr; + overlay->vaddr[i] = buffer->kvaddr; + + DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n", + i, (unsigned long)overlay->vaddr[i], + (unsigned long)overlay->dma_addr[i]); + } + + actual_w = min((unsigned)(crtc->mode.hdisplay - crtc_x), crtc_w); + actual_h = min((unsigned)(crtc->mode.vdisplay - crtc_y), crtc_h); + + /* set drm framebuffer data. 
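The dumb_create change above replaces "width * bpp >> 3" with a per-pixel round-up, so depths that do not fill whole bytes still get a pitch large enough for byte-aligned pixels. A standalone illustration with a hypothetical 12 bpp format:

#include <stdio.h>

int main(void)
{
        unsigned int width = 1366, bpp = 12;

        unsigned int old_pitch = width * bpp >> 3;         /* total bits -> bytes, truncated */
        unsigned int new_pitch = width * ((bpp + 7) / 8);  /* whole bytes per pixel */

        printf("old %u, new %u\n", old_pitch, new_pitch);  /* old 2049, new 2732 */
        return 0;
}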
*/ + overlay->fb_x = src_x; + overlay->fb_y = src_y; + overlay->fb_width = fb->width; + overlay->fb_height = fb->height; + overlay->src_width = src_w; + overlay->src_height = src_h; + overlay->bpp = fb->bits_per_pixel; + overlay->pitch = fb->pitches[0]; + overlay->pixel_format = fb->pixel_format; + + /* set overlay range to be displayed. */ + overlay->crtc_x = crtc_x; + overlay->crtc_y = crtc_y; + overlay->crtc_width = actual_w; + overlay->crtc_height = actual_h; + + /* set drm mode data. */ + overlay->mode_width = crtc->mode.hdisplay; + overlay->mode_height = crtc->mode.vdisplay; + overlay->refresh = crtc->mode.vrefresh; + overlay->scan_flag = crtc->mode.flags; + + DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)", + overlay->crtc_x, overlay->crtc_y, + overlay->crtc_width, overlay->crtc_height); + + exynos_drm_fn_encoder(crtc, overlay, exynos_drm_encoder_plane_mode_set); + + return 0; +} + +void exynos_plane_commit(struct drm_plane *plane) +{ + struct exynos_plane *exynos_plane = to_exynos_plane(plane); + struct exynos_drm_overlay *overlay = &exynos_plane->overlay; + + exynos_drm_fn_encoder(plane->crtc, &overlay->zpos, + exynos_drm_encoder_plane_commit); +} + +void exynos_plane_dpms(struct drm_plane *plane, int mode) +{ + struct exynos_plane *exynos_plane = to_exynos_plane(plane); + struct exynos_drm_overlay *overlay = &exynos_plane->overlay; + + DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); + + if (mode == DRM_MODE_DPMS_ON) { + if (exynos_plane->enabled) + return; + + exynos_drm_fn_encoder(plane->crtc, &overlay->zpos, + exynos_drm_encoder_plane_enable); + + exynos_plane->enabled = true; + } else { + if (!exynos_plane->enabled) + return; + + exynos_drm_fn_encoder(plane->crtc, &overlay->zpos, + exynos_drm_encoder_plane_disable); + + exynos_plane->enabled = false; + } +} + static int exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, @@ -37,64 +142,37 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { - struct exynos_plane *exynos_plane = - container_of(plane, struct exynos_plane, base); - struct exynos_drm_overlay *overlay = &exynos_plane->overlay; - struct exynos_drm_crtc_pos pos; int ret; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); - memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos)); - pos.crtc_x = crtc_x; - pos.crtc_y = crtc_y; - pos.crtc_w = crtc_w; - pos.crtc_h = crtc_h; - - /* considering 16.16 fixed point of source values */ - pos.fb_x = src_x >> 16; - pos.fb_y = src_y >> 16; - pos.src_w = src_w >> 16; - pos.src_h = src_h >> 16; - - ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos); + ret = exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, + crtc_w, crtc_h, src_x >> 16, src_y >> 16, + src_w >> 16, src_h >> 16); if (ret < 0) return ret; - exynos_drm_fn_encoder(crtc, overlay, - exynos_drm_encoder_crtc_mode_set); - exynos_drm_fn_encoder(crtc, &overlay->zpos, - exynos_drm_encoder_crtc_plane_commit); + plane->crtc = crtc; + plane->fb = crtc->fb; - exynos_plane->enabled = true; + exynos_plane_commit(plane); + exynos_plane_dpms(plane, DRM_MODE_DPMS_ON); return 0; } static int exynos_disable_plane(struct drm_plane *plane) { - struct exynos_plane *exynos_plane = - container_of(plane, struct exynos_plane, base); - struct exynos_drm_overlay *overlay = &exynos_plane->overlay; - DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); - if (!exynos_plane->enabled) - return 0; - - 
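exynos_plane_mode_set() above clips the requested destination rectangle against the active mode (actual_w/actual_h) before filling in the overlay data. A minimal standalone illustration of that clipping rule; the numbers are hypothetical:

#include <stdio.h>

static unsigned int clip(unsigned int mode_len, unsigned int offset,
                         unsigned int requested)
{
        /* never display more pixels than remain between the plane
         * offset and the edge of the active mode */
        unsigned int room = mode_len - offset;

        return requested < room ? requested : room;
}

int main(void)
{
        /* 1920x1080 mode, plane placed at (800, 600) asking for 1280x720 */
        printf("actual_w = %u\n", clip(1920, 800, 1280)); /* 1120 */
        printf("actual_h = %u\n", clip(1080, 600, 720));  /* 480 */
        return 0;
}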
exynos_drm_fn_encoder(plane->crtc, &overlay->zpos, - exynos_drm_encoder_crtc_disable); - - exynos_plane->enabled = false; - exynos_plane->overlay.zpos = DEFAULT_ZPOS; + exynos_plane_dpms(plane, DRM_MODE_DPMS_OFF); return 0; } static void exynos_plane_destroy(struct drm_plane *plane) { - struct exynos_plane *exynos_plane = - container_of(plane, struct exynos_plane, base); + struct exynos_plane *exynos_plane = to_exynos_plane(plane); DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); @@ -103,69 +181,79 @@ static void exynos_plane_destroy(struct drm_plane *plane) kfree(exynos_plane); } +static int exynos_plane_set_property(struct drm_plane *plane, + struct drm_property *property, + uint64_t val) +{ + struct drm_device *dev = plane->dev; + struct exynos_plane *exynos_plane = to_exynos_plane(plane); + struct exynos_drm_private *dev_priv = dev->dev_private; + + DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); + + if (property == dev_priv->plane_zpos_property) { + exynos_plane->overlay.zpos = val; + return 0; + } + + return -EINVAL; +} + static struct drm_plane_funcs exynos_plane_funcs = { .update_plane = exynos_update_plane, .disable_plane = exynos_disable_plane, .destroy = exynos_plane_destroy, + .set_property = exynos_plane_set_property, }; -int exynos_plane_init(struct drm_device *dev, unsigned int nr) +static void exynos_plane_attach_zpos_property(struct drm_plane *plane) { - struct exynos_plane *exynos_plane; - uint32_t possible_crtcs; + struct drm_device *dev = plane->dev; + struct exynos_drm_private *dev_priv = dev->dev_private; + struct drm_property *prop; - exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); - if (!exynos_plane) - return -ENOMEM; + DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); - /* all CRTCs are available */ - possible_crtcs = (1 << MAX_CRTC) - 1; + prop = dev_priv->plane_zpos_property; + if (!prop) { + prop = drm_property_create_range(dev, 0, "zpos", 0, + MAX_PLANE - 1); + if (!prop) + return; - exynos_plane->overlay.zpos = DEFAULT_ZPOS; + dev_priv->plane_zpos_property = prop; + } - return drm_plane_init(dev, &exynos_plane->base, possible_crtcs, - &exynos_plane_funcs, formats, ARRAY_SIZE(formats), - false); + drm_object_attach_property(&plane->base, prop, 0); } -int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +struct drm_plane *exynos_plane_init(struct drm_device *dev, + unsigned int possible_crtcs, bool priv) { - struct drm_exynos_plane_set_zpos *zpos_req = data; - struct drm_mode_object *obj; - struct drm_plane *plane; struct exynos_plane *exynos_plane; - int ret = 0; + int err; DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) { - if (zpos_req->zpos != DEFAULT_ZPOS) { - DRM_ERROR("zpos not within limits\n"); - return -EINVAL; - } + exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL); + if (!exynos_plane) { + DRM_ERROR("failed to allocate plane\n"); + return NULL; } - mutex_lock(&dev->mode_config.mutex); - - obj = drm_mode_object_find(dev, zpos_req->plane_id, - DRM_MODE_OBJECT_PLANE); - if (!obj) { - DRM_DEBUG_KMS("Unknown plane ID %d\n", - zpos_req->plane_id); - ret = -EINVAL; - goto out; + err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs, + &exynos_plane_funcs, formats, ARRAY_SIZE(formats), + priv); + if (err) { + DRM_ERROR("failed to initialize plane\n"); + kfree(exynos_plane); + return NULL; } - plane = obj_to_plane(obj); - exynos_plane = 
container_of(plane, struct exynos_plane, base); - - exynos_plane->overlay.zpos = zpos_req->zpos; + if (priv) + exynos_plane->overlay.zpos = DEFAULT_ZPOS; + else + exynos_plane_attach_zpos_property(&exynos_plane->base); -out: - mutex_unlock(&dev->mode_config.mutex); - return ret; + return &exynos_plane->base; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h index 16b71f8217e7..88312458580d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.h +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h @@ -9,6 +9,12 @@ * */ -int exynos_plane_init(struct drm_device *dev, unsigned int nr); -int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); +int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h); +void exynos_plane_commit(struct drm_plane *plane); +void exynos_plane_dpms(struct drm_plane *plane, int mode); +struct drm_plane *exynos_plane_init(struct drm_device *dev, + unsigned int possible_crtcs, bool priv); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 7b9c153dceb6..bb1550c4dd57 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -85,8 +85,6 @@ static const char fake_edid_info[] = { 0x00, 0x00, 0x00, 0x06 }; -static void vidi_fake_vblank_handler(struct work_struct *work); - static bool vidi_display_is_connected(struct device *dev) { struct vidi_context *ctx = get_vidi_context(dev); @@ -531,6 +529,16 @@ static int vidi_store_connection(struct device *dev, if (ctx->connected > 1) return -EINVAL; + /* use fake edid data for test. */ + if (!ctx->raw_edid) + ctx->raw_edid = (struct edid *)fake_edid_info; + + /* if raw_edid isn't same as fake data then it can't be tested. */ + if (ctx->raw_edid != (struct edid *)fake_edid_info) { + DRM_DEBUG_KMS("edid data is not fake data.\n"); + return -EINVAL; + } + DRM_DEBUG_KMS("requested connection.\n"); drm_helper_hpd_irq_event(ctx->subdrv.drm_dev); @@ -549,6 +557,8 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, struct exynos_drm_manager *manager; struct exynos_drm_display_ops *display_ops; struct drm_exynos_vidi_connection *vidi = data; + struct edid *raw_edid; + int edid_len; DRM_DEBUG_KMS("%s\n", __FILE__); @@ -557,11 +567,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, return -EINVAL; } - if (!vidi->edid) { - DRM_DEBUG_KMS("edid data is null.\n"); - return -EINVAL; - } - if (vidi->connection > 1) { DRM_DEBUG_KMS("connection should be 0 or 1.\n"); return -EINVAL; @@ -588,8 +593,30 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, return -EINVAL; } - if (vidi->connection) - ctx->raw_edid = (struct edid *)vidi->edid; + if (vidi->connection) { + if (!vidi->edid) { + DRM_DEBUG_KMS("edid data is null.\n"); + return -EINVAL; + } + raw_edid = (struct edid *)(uint32_t)vidi->edid; + edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; + ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL); + if (!ctx->raw_edid) { + DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); + return -ENOMEM; + } + memcpy(ctx->raw_edid, raw_edid, edid_len); + } else { + /* + * with connection = 0, free raw_edid + * only if raw edid data isn't same as fake data. 
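With the EXYNOS_PLANE_SET_ZPOS ioctl gone, z-ordering is exposed through a standard range property instead. A hedged sketch of that replacement with illustrative names (my_plane, my_plane_priv); the property value is simply stored and picked up on the next commit:

struct my_plane {
        struct drm_plane base;
        int zpos;
};

struct my_plane_priv {
        struct drm_property *zpos_property;
};

static void my_plane_attach_zpos_property(struct drm_plane *plane, int nplanes)
{
        struct my_plane_priv *priv = plane->dev->dev_private;
        struct drm_property *prop = priv->zpos_property;

        if (!prop) {
                /* shared property object, valid values 0 .. nplanes - 1 */
                prop = drm_property_create_range(plane->dev, 0, "zpos",
                                                 0, nplanes - 1);
                if (!prop)
                        return;
                priv->zpos_property = prop;
        }

        drm_object_attach_property(&plane->base, prop, 0);
}

static int my_plane_set_property(struct drm_plane *plane,
                                 struct drm_property *property, uint64_t val)
{
        struct my_plane_priv *priv = plane->dev->dev_private;
        struct my_plane *p = container_of(plane, struct my_plane, base);

        if (property == priv->zpos_property) {
                p->zpos = val;
                return 0;
        }

        return -EINVAL;
}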
+ */ + if (ctx->raw_edid && ctx->raw_edid != + (struct edid *)fake_edid_info) { + kfree(ctx->raw_edid); + ctx->raw_edid = NULL; + } + } ctx->connected = vidi->connection; drm_helper_hpd_irq_event(ctx->subdrv.drm_dev); @@ -614,9 +641,6 @@ static int __devinit vidi_probe(struct platform_device *pdev) INIT_WORK(&ctx->work, vidi_fake_vblank_handler); - /* for test */ - ctx->raw_edid = (struct edid *)fake_edid_info; - subdrv = &ctx->subdrv; subdrv->dev = dev; subdrv->manager = &vidi_manager; @@ -644,6 +668,11 @@ static int __devexit vidi_remove(struct platform_device *pdev) exynos_drm_subdrv_unregister(&ctx->subdrv); + if (ctx->raw_edid != (struct edid *)fake_edid_info) { + kfree(ctx->raw_edid); + ctx->raw_edid = NULL; + } + kfree(ctx); return 0; diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 066bde3f19c4..409e2ec1207c 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -63,7 +63,6 @@ struct hdmi_context { bool dvi_mode; struct mutex hdmi_mutex; - struct resource *regs_res; void __iomem *regs; unsigned int external_irq; unsigned int internal_irq; @@ -2280,16 +2279,17 @@ static int __devinit hdmi_probe(struct platform_device *pdev) return -EINVAL; } - drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL); + drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx), + GFP_KERNEL); if (!drm_hdmi_ctx) { DRM_ERROR("failed to allocate common hdmi context.\n"); return -ENOMEM; } - hdata = kzalloc(sizeof(struct hdmi_context), GFP_KERNEL); + hdata = devm_kzalloc(&pdev->dev, sizeof(struct hdmi_context), + GFP_KERNEL); if (!hdata) { DRM_ERROR("out of memory\n"); - kfree(drm_hdmi_ctx); return -ENOMEM; } @@ -2318,26 +2318,18 @@ static int __devinit hdmi_probe(struct platform_device *pdev) goto err_resource; } - hdata->regs_res = request_mem_region(res->start, resource_size(res), - dev_name(dev)); - if (!hdata->regs_res) { - DRM_ERROR("failed to claim register region\n"); - ret = -ENOENT; - goto err_resource; - } - - hdata->regs = ioremap(res->start, resource_size(res)); + hdata->regs = devm_request_and_ioremap(&pdev->dev, res); if (!hdata->regs) { DRM_ERROR("failed to map registers\n"); ret = -ENXIO; - goto err_req_region; + goto err_resource; } /* DDC i2c driver */ if (i2c_add_driver(&ddc_driver)) { DRM_ERROR("failed to register ddc i2c driver\n"); ret = -ENOENT; - goto err_iomap; + goto err_resource; } hdata->ddc_port = hdmi_ddc; @@ -2398,16 +2390,9 @@ err_hdmiphy: i2c_del_driver(&hdmiphy_driver); err_ddc: i2c_del_driver(&ddc_driver); -err_iomap: - iounmap(hdata->regs); -err_req_region: - release_mem_region(hdata->regs_res->start, - resource_size(hdata->regs_res)); err_resource: hdmi_resources_cleanup(hdata); err_data: - kfree(hdata); - kfree(drm_hdmi_ctx); return ret; } @@ -2425,18 +2410,11 @@ static int __devexit hdmi_remove(struct platform_device *pdev) hdmi_resources_cleanup(hdata); - iounmap(hdata->regs); - - release_mem_region(hdata->regs_res->start, - resource_size(hdata->regs_res)); - /* hdmiphy i2c driver */ i2c_del_driver(&hdmiphy_driver); /* DDC i2c driver */ i2c_del_driver(&ddc_driver); - kfree(hdata); - return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index e2147a2ddcec..30fcc12f81dd 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -956,7 +956,8 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx, clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi); - 
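The vidi connection ioctl above now duplicates the EDID passed in from userspace instead of keeping a pointer to it, sizing the copy from the extension count in the base block (EDID_LENGTH is the standard 128-byte block size). A hedged sketch of that sizing; my_dup_edid is an illustrative helper, not a function from this patch:

static struct edid *my_dup_edid(const struct edid *raw)
{
        /* one base block plus one 128-byte block per extension */
        int len = (1 + raw->extensions) * EDID_LENGTH;

        /* kmemdup() returns NULL on allocation failure */
        return kmemdup(raw, len, GFP_KERNEL);
}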
mixer_res->mixer_regs = ioremap(res->start, resource_size(res)); + mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); if (mixer_res->mixer_regs == NULL) { dev_err(dev, "register mapping failed.\n"); ret = -ENXIO; @@ -967,38 +968,34 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx, if (res == NULL) { dev_err(dev, "get memory resource failed.\n"); ret = -ENXIO; - goto fail_mixer_regs; + goto fail; } - mixer_res->vp_regs = ioremap(res->start, resource_size(res)); + mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); if (mixer_res->vp_regs == NULL) { dev_err(dev, "register mapping failed.\n"); ret = -ENXIO; - goto fail_mixer_regs; + goto fail; } res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq"); if (res == NULL) { dev_err(dev, "get interrupt resource failed.\n"); ret = -ENXIO; - goto fail_vp_regs; + goto fail; } - ret = request_irq(res->start, mixer_irq_handler, 0, "drm_mixer", ctx); + ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler, + 0, "drm_mixer", ctx); if (ret) { dev_err(dev, "request interrupt failed.\n"); - goto fail_vp_regs; + goto fail; } mixer_res->irq = res->start; return 0; -fail_vp_regs: - iounmap(mixer_res->vp_regs); - -fail_mixer_regs: - iounmap(mixer_res->mixer_regs); - fail: if (!IS_ERR_OR_NULL(mixer_res->sclk_dac)) clk_put(mixer_res->sclk_dac); @@ -1013,16 +1010,6 @@ fail: return ret; } -static void mixer_resources_cleanup(struct mixer_context *ctx) -{ - struct mixer_resources *res = &ctx->mixer_res; - - free_irq(res->irq, ctx); - - iounmap(res->vp_regs); - iounmap(res->mixer_regs); -} - static int __devinit mixer_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1032,16 +1019,16 @@ static int __devinit mixer_probe(struct platform_device *pdev) dev_info(dev, "probe start\n"); - drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL); + drm_hdmi_ctx = devm_kzalloc(&pdev->dev, sizeof(*drm_hdmi_ctx), + GFP_KERNEL); if (!drm_hdmi_ctx) { DRM_ERROR("failed to allocate common hdmi context.\n"); return -ENOMEM; } - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) { DRM_ERROR("failed to alloc mixer context.\n"); - kfree(drm_hdmi_ctx); return -ENOMEM; } @@ -1072,17 +1059,10 @@ fail: static int mixer_remove(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct exynos_drm_hdmi_context *drm_hdmi_ctx = - platform_get_drvdata(pdev); - struct mixer_context *ctx = drm_hdmi_ctx->ctx; - - dev_info(dev, "remove successful\n"); + dev_info(&pdev->dev, "remove successful\n"); pm_runtime_disable(&pdev->dev); - mixer_resources_cleanup(ctx); - return 0; } diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c index 563c02904ddf..23ab3c496b05 100644 --- a/drivers/hwmon/acpi_power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c @@ -927,6 +927,8 @@ static int acpi_power_meter_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP + static int acpi_power_meter_resume(struct device *dev) { struct acpi_power_meter_resource *resource; @@ -944,6 +946,8 @@ static int acpi_power_meter_resume(struct device *dev) return 0; } +#endif /* CONFIG_PM_SLEEP */ + static SIMPLE_DEV_PM_OPS(acpi_power_meter_pm, NULL, acpi_power_meter_resume); static struct acpi_driver acpi_power_meter_driver = { diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 4d937a18fadb..282708860517 100644 --- a/drivers/hwmon/applesmc.c +++ 
b/drivers/hwmon/applesmc.c @@ -55,9 +55,9 @@ /* wait up to 32 ms for a status change. */ #define APPLESMC_MIN_WAIT 0x0010 +#define APPLESMC_RETRY_WAIT 0x0100 #define APPLESMC_MAX_WAIT 0x8000 -#define APPLESMC_STATUS_MASK 0x0f #define APPLESMC_READ_CMD 0x10 #define APPLESMC_WRITE_CMD 0x11 #define APPLESMC_GET_KEY_BY_INDEX_CMD 0x12 @@ -162,51 +162,68 @@ static unsigned int key_at_index; static struct workqueue_struct *applesmc_led_wq; /* - * __wait_status - Wait up to 32ms for the status port to get a certain value - * (masked with 0x0f), returning zero if the value is obtained. Callers must + * wait_read - Wait for a byte to appear on SMC port. Callers must * hold applesmc_lock. */ -static int __wait_status(u8 val) +static int wait_read(void) { + u8 status; int us; - - val = val & APPLESMC_STATUS_MASK; - for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) { udelay(us); - if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == val) + status = inb(APPLESMC_CMD_PORT); + /* read: wait for smc to settle */ + if (status & 0x01) return 0; } + pr_warn("wait_read() fail: 0x%02x\n", status); return -EIO; } /* - * special treatment of command port - on newer macbooks, it seems necessary - * to resend the command byte before polling the status again. Callers must - * hold applesmc_lock. + * send_byte - Write to SMC port, retrying when necessary. Callers + * must hold applesmc_lock. */ -static int send_command(u8 cmd) +static int send_byte(u8 cmd, u16 port) { + u8 status; int us; + + outb(cmd, port); for (us = APPLESMC_MIN_WAIT; us < APPLESMC_MAX_WAIT; us <<= 1) { - outb(cmd, APPLESMC_CMD_PORT); udelay(us); - if ((inb(APPLESMC_CMD_PORT) & APPLESMC_STATUS_MASK) == 0x0c) + status = inb(APPLESMC_CMD_PORT); + /* write: wait for smc to settle */ + if (status & 0x02) + continue; + /* ready: cmd accepted, return */ + if (status & 0x04) return 0; + /* timeout: give up */ + if (us << 1 == APPLESMC_MAX_WAIT) + break; + /* busy: long wait and resend */ + udelay(APPLESMC_RETRY_WAIT); + outb(cmd, port); } + + pr_warn("send_byte(0x%02x, 0x%04x) fail: 0x%02x\n", cmd, port, status); return -EIO; } +static int send_command(u8 cmd) +{ + return send_byte(cmd, APPLESMC_CMD_PORT); +} + static int send_argument(const char *key) { int i; - for (i = 0; i < 4; i++) { - outb(key[i], APPLESMC_DATA_PORT); - if (__wait_status(0x04)) + for (i = 0; i < 4; i++) + if (send_byte(key[i], APPLESMC_DATA_PORT)) return -EIO; - } return 0; } @@ -219,11 +236,14 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) return -EIO; } - outb(len, APPLESMC_DATA_PORT); + if (send_byte(len, APPLESMC_DATA_PORT)) { + pr_warn("%.4s: read len fail\n", key); + return -EIO; + } for (i = 0; i < len; i++) { - if (__wait_status(0x05)) { - pr_warn("%.4s: read data fail\n", key); + if (wait_read()) { + pr_warn("%.4s: read data[%d] fail\n", key, i); return -EIO; } buffer[i] = inb(APPLESMC_DATA_PORT); @@ -241,14 +261,16 @@ static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len) return -EIO; } - outb(len, APPLESMC_DATA_PORT); + if (send_byte(len, APPLESMC_DATA_PORT)) { + pr_warn("%.4s: write len fail\n", key); + return -EIO; + } for (i = 0; i < len; i++) { - if (__wait_status(0x04)) { + if (send_byte(buffer[i], APPLESMC_DATA_PORT)) { pr_warn("%s: write data fail\n", key); return -EIO; } - outb(buffer[i], APPLESMC_DATA_PORT); } return 0; diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 637c51c11b44..faa16f80db9c 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -793,7 +793,7 @@ static 
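The applesmc rework above waits on explicit status bits (data-ready, busy and accepted, per the comments in the patch) with an exponentially growing delay between polls, roughly 32 ms worst case. A hedged standalone sketch of just the backoff loop; smc_ready() stands in for reading the status port:

#include <stdio.h>

#define MIN_WAIT_US 0x0010
#define MAX_WAIT_US 0x8000

/* stand-in for inb(APPLESMC_CMD_PORT) & 0x01 */
static int smc_ready(void)
{
        return 0;
}

static int wait_ready(void)
{
        int us;

        /* 16us, 32us, ... 16384us: about 32ms of total delay before giving up */
        for (us = MIN_WAIT_US; us < MAX_WAIT_US; us <<= 1) {
                /* the driver calls udelay(us) here before re-reading the port */
                if (smc_ready())
                        return 0;
        }

        return -1; /* the driver returns -EIO */
}

int main(void)
{
        printf("wait_ready() = %d\n", wait_ready());
        return 0;
}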
struct notifier_block coretemp_cpu_notifier __refdata = { .notifier_call = coretemp_cpu_callback, }; -static const struct x86_cpu_id coretemp_ids[] = { +static const struct x86_cpu_id __initconst coretemp_ids[] = { { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM }, {} }; diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index e72ba5d2a824..e21e43c13156 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -57,7 +57,7 @@ static const unsigned short normal_i2c[] = { #define JC42_CFG_EVENT_LOCK (1 << 7) #define JC42_CFG_SHUTDOWN (1 << 8) #define JC42_CFG_HYST_SHIFT 9 -#define JC42_CFG_HYST_MASK 0x03 +#define JC42_CFG_HYST_MASK (0x03 << 9) /* Capabilities */ #define JC42_CAP_RANGE (1 << 2) @@ -287,8 +287,8 @@ static ssize_t show_temp_crit_hyst(struct device *dev, return PTR_ERR(data); temp = jc42_temp_from_reg(data->temp_crit); - hyst = jc42_hysteresis[(data->config >> JC42_CFG_HYST_SHIFT) - & JC42_CFG_HYST_MASK]; + hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK) + >> JC42_CFG_HYST_SHIFT]; return sprintf(buf, "%d\n", temp - hyst); } @@ -302,8 +302,8 @@ static ssize_t show_temp_max_hyst(struct device *dev, return PTR_ERR(data); temp = jc42_temp_from_reg(data->temp_max); - hyst = jc42_hysteresis[(data->config >> JC42_CFG_HYST_SHIFT) - & JC42_CFG_HYST_MASK]; + hyst = jc42_hysteresis[(data->config & JC42_CFG_HYST_MASK) + >> JC42_CFG_HYST_SHIFT]; return sprintf(buf, "%d\n", temp - hyst); } @@ -362,8 +362,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev, } mutex_lock(&data->update_lock); - data->config = (data->config - & ~(JC42_CFG_HYST_MASK << JC42_CFG_HYST_SHIFT)) + data->config = (data->config & ~JC42_CFG_HYST_MASK) | (hyst << JC42_CFG_HYST_SHIFT); err = i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, data->config); @@ -535,9 +534,16 @@ static int jc42_remove(struct i2c_client *client) struct jc42_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &jc42_group); - if (data->config != data->orig_config) - i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, - data->orig_config); + + /* Restore original configuration except hysteresis */ + if ((data->config & ~JC42_CFG_HYST_MASK) != + (data->orig_config & ~JC42_CFG_HYST_MASK)) { + int config; + + config = (data->orig_config & ~JC42_CFG_HYST_MASK) + | (data->config & JC42_CFG_HYST_MASK); + i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, config); + } return 0; } diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c index 8689664ef03c..ee4ebc198a94 100644 --- a/drivers/hwmon/via-cputemp.c +++ b/drivers/hwmon/via-cputemp.c @@ -309,7 +309,7 @@ static struct notifier_block via_cputemp_cpu_notifier __refdata = { .notifier_call = via_cputemp_cpu_callback, }; -static const struct x86_cpu_id cputemp_ids[] = { +static const struct x86_cpu_id __initconst cputemp_ids[] = { { X86_VENDOR_CENTAUR, 6, 0xa, }, /* C7 A */ { X86_VENDOR_CENTAUR, 6, 0xd, }, /* C7 D */ { X86_VENDOR_CENTAUR, 6, 0xf, }, /* Nano */ diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 2e7530a4e7b8..b4aaa1bd6728 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -462,7 +462,7 @@ config I2C_MPC config I2C_MV64XXX tristate "Marvell mv64xxx I2C Controller" - depends on (MV64X60 || PLAT_ORION) && EXPERIMENTAL + depends on (MV64X60 || PLAT_ORION) help If you say yes to this option, support will be included for the built-in I2C interface on the Marvell 64xxx line of host bridges. 
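The jc42 change above redefines JC42_CFG_HYST_MASK as an in-place mask (bits 10:9), so the field is extracted with "mask, then shift" and, in jc42_remove(), the user-set hysteresis bits can be preserved while the rest of the original configuration is restored. A standalone illustration of that field handling, with hypothetical register values:

#include <stdio.h>

#define HYST_SHIFT 9
#define HYST_MASK  (0x03 << 9)  /* hysteresis field, bits 10:9, in place */

int main(void)
{
        unsigned int orig_config = 0x001d; /* config found at probe time */
        unsigned int cur_config  = 0x061d; /* user later set hysteresis index 3 */

        /* extract: mask in place, then shift down */
        unsigned int hyst = (cur_config & HYST_MASK) >> HYST_SHIFT;

        /* on remove: restore everything except the hysteresis field */
        unsigned int restored = (orig_config & ~HYST_MASK) |
                                (cur_config & HYST_MASK);

        printf("hyst index %u, restored config 0x%04x\n", hyst, restored);
        return 0;
}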
@@ -483,10 +483,11 @@ config I2C_MXS config I2C_NOMADIK tristate "ST-Ericsson Nomadik/Ux500 I2C Controller" - depends on PLAT_NOMADIK + depends on ARM_AMBA help If you say yes to this option, support will be included for the - I2C interface from ST-Ericsson's Nomadik and Ux500 architectures. + I2C interface from ST-Ericsson's Nomadik and Ux500 architectures, + as well as the STA2X11 PCIe I/O HUB. config I2C_NUC900 tristate "NUC900 I2C Driver" diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index 1679deef9c89..e24484beef07 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -279,30 +279,31 @@ static int __devexit at91_i2c_remove(struct platform_device *pdev) /* NOTE: could save a few mA by keeping clock off outside of at91_xfer... */ -static int at91_i2c_suspend(struct platform_device *pdev, pm_message_t mesg) +static int at91_i2c_suspend(struct device *dev) { clk_disable(twi_clk); return 0; } -static int at91_i2c_resume(struct platform_device *pdev) +static int at91_i2c_resume(struct device *dev) { return clk_enable(twi_clk); } +static SIMPLE_DEV_PM_OPS(at91_i2c_pm, at91_i2c_suspend, at91_i2c_resume); +#define AT91_I2C_PM (&at91_i2c_pm) + #else -#define at91_i2c_suspend NULL -#define at91_i2c_resume NULL +#define AT91_I2C_PM NULL #endif static struct platform_driver at91_i2c_driver = { .probe = at91_i2c_probe, .remove = __devexit_p(at91_i2c_remove), - .suspend = at91_i2c_suspend, - .resume = at91_i2c_resume, .driver = { .name = "at91_i2c", .owner = THIS_MODULE, + .pm = AT91_I2C_PM, }, }; diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c index cdb59e5b23f7..0cf780fd6ef1 100644 --- a/drivers/i2c/busses/i2c-bfin-twi.c +++ b/drivers/i2c/busses/i2c-bfin-twi.c @@ -25,6 +25,7 @@ #include <asm/blackfin.h> #include <asm/portmux.h> #include <asm/irq.h> +#include <asm/bfin_twi.h> /* SMBus mode*/ #define TWI_I2C_MODE_STANDARD 1 @@ -32,56 +33,6 @@ #define TWI_I2C_MODE_COMBINED 3 #define TWI_I2C_MODE_REPEAT 4 -struct bfin_twi_iface { - int irq; - spinlock_t lock; - char read_write; - u8 command; - u8 *transPtr; - int readNum; - int writeNum; - int cur_mode; - int manual_stop; - int result; - struct i2c_adapter adap; - struct completion complete; - struct i2c_msg *pmsg; - int msg_num; - int cur_msg; - u16 saved_clkdiv; - u16 saved_control; - void __iomem *regs_base; -}; - - -#define DEFINE_TWI_REG(reg, off) \ -static inline u16 read_##reg(struct bfin_twi_iface *iface) \ - { return bfin_read16(iface->regs_base + (off)); } \ -static inline void write_##reg(struct bfin_twi_iface *iface, u16 v) \ - { bfin_write16(iface->regs_base + (off), v); } - -DEFINE_TWI_REG(CLKDIV, 0x00) -DEFINE_TWI_REG(CONTROL, 0x04) -DEFINE_TWI_REG(SLAVE_CTL, 0x08) -DEFINE_TWI_REG(SLAVE_STAT, 0x0C) -DEFINE_TWI_REG(SLAVE_ADDR, 0x10) -DEFINE_TWI_REG(MASTER_CTL, 0x14) -DEFINE_TWI_REG(MASTER_STAT, 0x18) -DEFINE_TWI_REG(MASTER_ADDR, 0x1C) -DEFINE_TWI_REG(INT_STAT, 0x20) -DEFINE_TWI_REG(INT_MASK, 0x24) -DEFINE_TWI_REG(FIFO_CTL, 0x28) -DEFINE_TWI_REG(FIFO_STAT, 0x2C) -DEFINE_TWI_REG(XMT_DATA8, 0x80) -DEFINE_TWI_REG(XMT_DATA16, 0x84) -DEFINE_TWI_REG(RCV_DATA8, 0x88) -DEFINE_TWI_REG(RCV_DATA16, 0x8C) - -static const u16 pin_req[2][3] = { - {P_TWI0_SCL, P_TWI0_SDA, 0}, - {P_TWI1_SCL, P_TWI1_SDA, 0}, -}; - static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface, unsigned short twi_int_status) { @@ -99,7 +50,7 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface, */ else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) 
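i2c-at91 above (and i2c-bfin-twi below) drop the legacy platform_driver .suspend/.resume hooks in favour of dev_pm_ops built with SIMPLE_DEV_PM_OPS. A hedged sketch of the conversion for a generic clock-gating driver; my_i2c, my_probe, my_remove and the "my-i2c" name are illustrative assumptions:

struct my_i2c {
        struct clk *clk;
};

#ifdef CONFIG_PM_SLEEP
static int my_i2c_suspend(struct device *dev)
{
        struct my_i2c *i2c = dev_get_drvdata(dev);

        clk_disable(i2c->clk);
        return 0;
}

static int my_i2c_resume(struct device *dev)
{
        struct my_i2c *i2c = dev_get_drvdata(dev);

        return clk_enable(i2c->clk);
}
#endif

/* expands to a struct dev_pm_ops; the callbacks are only referenced
 * when CONFIG_PM_SLEEP is enabled */
static SIMPLE_DEV_PM_OPS(my_i2c_pm, my_i2c_suspend, my_i2c_resume);

static struct platform_driver my_i2c_driver = {
        .probe  = my_probe,
        .remove = my_remove,
        .driver = {
                .name  = "my-i2c",
                .owner = THIS_MODULE,
                .pm    = &my_i2c_pm,
        },
};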
write_MASTER_CTL(iface, - read_MASTER_CTL(iface) | MDIR | RSTART); + read_MASTER_CTL(iface) | MDIR); else if (iface->manual_stop) write_MASTER_CTL(iface, read_MASTER_CTL(iface) | STOP); @@ -107,10 +58,10 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface, iface->cur_msg + 1 < iface->msg_num) { if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD) write_MASTER_CTL(iface, - read_MASTER_CTL(iface) | RSTART | MDIR); + read_MASTER_CTL(iface) | MDIR); else write_MASTER_CTL(iface, - (read_MASTER_CTL(iface) | RSTART) & ~MDIR); + read_MASTER_CTL(iface) & ~MDIR); } } if (twi_int_status & RCVSERV) { @@ -130,17 +81,25 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface, } iface->transPtr++; iface->readNum--; - } else if (iface->manual_stop) { - write_MASTER_CTL(iface, - read_MASTER_CTL(iface) | STOP); - } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && - iface->cur_msg + 1 < iface->msg_num) { - if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD) - write_MASTER_CTL(iface, - read_MASTER_CTL(iface) | RSTART | MDIR); - else + } + + if (iface->readNum == 0) { + if (iface->manual_stop) { + /* Temporary workaround to avoid possible bus stall - + * Flush FIFO before issuing the STOP condition + */ + read_RCV_DATA16(iface); write_MASTER_CTL(iface, - (read_MASTER_CTL(iface) | RSTART) & ~MDIR); + read_MASTER_CTL(iface) | STOP); + } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && + iface->cur_msg + 1 < iface->msg_num) { + if (iface->pmsg[iface->cur_msg + 1].flags & I2C_M_RD) + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) | MDIR); + else + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) & ~MDIR); + } } } if (twi_int_status & MERR) { @@ -193,7 +152,8 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface, return; } if (twi_int_status & MCOMP) { - if ((read_MASTER_CTL(iface) & MEN) == 0 && + if (twi_int_status & (XMTSERV | RCVSERV) && + (read_MASTER_CTL(iface) & MEN) == 0 && (iface->cur_mode == TWI_I2C_MODE_REPEAT || iface->cur_mode == TWI_I2C_MODE_COMBINED)) { iface->result = -1; @@ -221,7 +181,7 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface, write_MASTER_CTL(iface, read_MASTER_CTL(iface) & ~RSTART); } else if (iface->cur_mode == TWI_I2C_MODE_REPEAT && - iface->cur_msg+1 < iface->msg_num) { + iface->cur_msg + 1 < iface->msg_num) { iface->cur_msg++; iface->transPtr = iface->pmsg[iface->cur_msg].buf; iface->writeNum = iface->readNum = @@ -241,27 +201,29 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface, } } - if (iface->pmsg[iface->cur_msg].len <= 255) - write_MASTER_CTL(iface, + if (iface->pmsg[iface->cur_msg].len <= 255) { + write_MASTER_CTL(iface, (read_MASTER_CTL(iface) & (~(0xff << 6))) | - (iface->pmsg[iface->cur_msg].len << 6)); - else { + (iface->pmsg[iface->cur_msg].len << 6)); + iface->manual_stop = 0; + } else { write_MASTER_CTL(iface, (read_MASTER_CTL(iface) | (0xff << 6))); iface->manual_stop = 1; } - /* remove restart bit and enable master receive */ - write_MASTER_CTL(iface, - read_MASTER_CTL(iface) & ~RSTART); + /* remove restart bit before last message */ + if (iface->cur_msg + 1 == iface->msg_num) + write_MASTER_CTL(iface, + read_MASTER_CTL(iface) & ~RSTART); } else { iface->result = 1; write_INT_MASK(iface, 0); write_MASTER_CTL(iface, 0); } + complete(&iface->complete); } - complete(&iface->complete); } /* Interrupt handler */ @@ -298,8 +260,8 @@ static int bfin_twi_do_master_xfer(struct i2c_adapter *adap, if (!(read_CONTROL(iface) & TWI_ENA)) return -ENXIO; - while (read_MASTER_STAT(iface) 
& BUSBUSY) - yield(); + if (read_MASTER_STAT(iface) & BUSBUSY) + return -EAGAIN; iface->pmsg = msgs; iface->msg_num = num; @@ -311,7 +273,8 @@ static int bfin_twi_do_master_xfer(struct i2c_adapter *adap, return -EINVAL; } - iface->cur_mode = TWI_I2C_MODE_REPEAT; + if (iface->msg_num > 1) + iface->cur_mode = TWI_I2C_MODE_REPEAT; iface->manual_stop = 0; iface->transPtr = pmsg->buf; iface->writeNum = iface->readNum = pmsg->len; @@ -356,6 +319,7 @@ static int bfin_twi_do_master_xfer(struct i2c_adapter *adap, /* Master enable */ write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | + (iface->msg_num > 1 ? RSTART : 0) | ((iface->read_write == I2C_SMBUS_READ) ? MDIR : 0) | ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ > 100) ? FAST : 0)); SSYNC(); @@ -398,8 +362,8 @@ int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr, if (!(read_CONTROL(iface) & TWI_ENA)) return -ENXIO; - while (read_MASTER_STAT(iface) & BUSBUSY) - yield(); + if (read_MASTER_STAT(iface) & BUSBUSY) + return -EAGAIN; iface->writeNum = 0; iface->readNum = 0; @@ -520,7 +484,7 @@ int bfin_twi_do_smbus_xfer(struct i2c_adapter *adap, u16 addr, else write_MASTER_CTL(iface, 0x1 << 6); /* Master enable */ - write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | + write_MASTER_CTL(iface, read_MASTER_CTL(iface) | MEN | RSTART | ((CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ>100) ? FAST : 0)); break; default: @@ -611,9 +575,9 @@ static struct i2c_algorithm bfin_twi_algorithm = { .functionality = bfin_twi_functionality, }; -static int i2c_bfin_twi_suspend(struct platform_device *pdev, pm_message_t state) +static int i2c_bfin_twi_suspend(struct device *dev) { - struct bfin_twi_iface *iface = platform_get_drvdata(pdev); + struct bfin_twi_iface *iface = dev_get_drvdata(dev); iface->saved_clkdiv = read_CLKDIV(iface); iface->saved_control = read_CONTROL(iface); @@ -626,14 +590,14 @@ static int i2c_bfin_twi_suspend(struct platform_device *pdev, pm_message_t state return 0; } -static int i2c_bfin_twi_resume(struct platform_device *pdev) +static int i2c_bfin_twi_resume(struct device *dev) { - struct bfin_twi_iface *iface = platform_get_drvdata(pdev); + struct bfin_twi_iface *iface = dev_get_drvdata(dev); int rc = request_irq(iface->irq, bfin_twi_interrupt_entry, - 0, pdev->name, iface); + 0, to_platform_device(dev)->name, iface); if (rc) { - dev_err(&pdev->dev, "Can't get IRQ %d !\n", iface->irq); + dev_err(dev, "Can't get IRQ %d !\n", iface->irq); return -ENODEV; } @@ -646,6 +610,9 @@ static int i2c_bfin_twi_resume(struct platform_device *pdev) return 0; } +static SIMPLE_DEV_PM_OPS(i2c_bfin_twi_pm, + i2c_bfin_twi_suspend, i2c_bfin_twi_resume); + static int i2c_bfin_twi_probe(struct platform_device *pdev) { struct bfin_twi_iface *iface; @@ -695,7 +662,8 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev) p_adap->timeout = 5 * HZ; p_adap->retries = 3; - rc = peripheral_request_list(pin_req[pdev->id], "i2c-bfin-twi"); + rc = peripheral_request_list((unsigned short *)pdev->dev.platform_data, + "i2c-bfin-twi"); if (rc) { dev_err(&pdev->dev, "Can't setup pin mux!\n"); goto out_error_pin_mux; @@ -742,7 +710,7 @@ out_error_add_adapter: free_irq(iface->irq, iface); out_error_req_irq: out_error_no_irq: - peripheral_free_list(pin_req[pdev->id]); + peripheral_free_list((unsigned short *)pdev->dev.platform_data); out_error_pin_mux: iounmap(iface->regs_base); out_error_ioremap: @@ -760,7 +728,7 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev) i2c_del_adapter(&(iface->adap)); free_irq(iface->irq, iface); - peripheral_free_list(pin_req[pdev->id]); + 
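Editor's note: the i2c-bfin-twi hunks above stop busy-waiting with yield() when the bus is busy and return -EAGAIN to the I2C core instead. The sketch below illustrates how such a return value is typically retried by the caller, bounded by the adapter's retries and timeout fields; it mirrors the core's retry behaviour but is not part of this patch, and the function name is hypothetical.

/*
 * Hedged sketch: retrying a transfer that fails with -EAGAIN.  The real
 * retry loop lives inside the i2c core (i2c_transfer()); this is an
 * illustration of the idea only.
 */
#include <linux/i2c.h>
#include <linux/jiffies.h>

static int example_xfer_with_retry(struct i2c_adapter *adap,
                                   struct i2c_msg *msgs, int num)
{
        unsigned long orig_jiffies = jiffies;
        int ret, try;

        for (ret = 0, try = 0; try <= adap->retries; try++) {
                ret = adap->algo->master_xfer(adap, msgs, num);
                if (ret != -EAGAIN)
                        break;                  /* done, or a hard error */
                if (time_after(jiffies, orig_jiffies + adap->timeout))
                        break;                  /* give up eventually */
        }
        return ret;
}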
peripheral_free_list((unsigned short *)pdev->dev.platform_data); iounmap(iface->regs_base); kfree(iface); @@ -770,11 +738,10 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev) static struct platform_driver i2c_bfin_twi_driver = { .probe = i2c_bfin_twi_probe, .remove = i2c_bfin_twi_remove, - .suspend = i2c_bfin_twi_suspend, - .resume = i2c_bfin_twi_resume, .driver = { .name = "i2c-bfin-twi", .owner = THIS_MODULE, + .pm = &i2c_bfin_twi_pm, }, }; diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 370031ac8200..0722f869465c 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -117,10 +117,8 @@ static u16 __initdata i2c_clk_div[50][2] = { struct imx_i2c_struct { struct i2c_adapter adapter; - struct resource *res; struct clk *clk; void __iomem *base; - int irq; wait_queue_head_t queue; unsigned long i2csr; unsigned int disable_delay; @@ -472,9 +470,8 @@ static int __init i2c_imx_probe(struct platform_device *pdev) struct imxi2c_platform_data *pdata = pdev->dev.platform_data; struct pinctrl *pinctrl; void __iomem *base; - resource_size_t res_size; - int irq, bitrate; - int ret; + int irq, ret; + u32 bitrate; dev_dbg(&pdev->dev, "<%s>\n", __func__); @@ -489,25 +486,15 @@ static int __init i2c_imx_probe(struct platform_device *pdev) return -ENOENT; } - res_size = resource_size(res); - - if (!request_mem_region(res->start, res_size, DRIVER_NAME)) { - dev_err(&pdev->dev, "request_mem_region failed\n"); + base = devm_request_and_ioremap(&pdev->dev, res); + if (!base) return -EBUSY; - } - - base = ioremap(res->start, res_size); - if (!base) { - dev_err(&pdev->dev, "ioremap failed\n"); - ret = -EIO; - goto fail1; - } - i2c_imx = kzalloc(sizeof(struct imx_i2c_struct), GFP_KERNEL); + i2c_imx = devm_kzalloc(&pdev->dev, sizeof(struct imx_i2c_struct), + GFP_KERNEL); if (!i2c_imx) { dev_err(&pdev->dev, "can't allocate interface\n"); - ret = -ENOMEM; - goto fail2; + return -ENOMEM; } /* Setup i2c_imx driver structure */ @@ -517,29 +504,27 @@ static int __init i2c_imx_probe(struct platform_device *pdev) i2c_imx->adapter.dev.parent = &pdev->dev; i2c_imx->adapter.nr = pdev->id; i2c_imx->adapter.dev.of_node = pdev->dev.of_node; - i2c_imx->irq = irq; i2c_imx->base = base; - i2c_imx->res = res; pinctrl = devm_pinctrl_get_select_default(&pdev->dev); if (IS_ERR(pinctrl)) { - ret = PTR_ERR(pinctrl); - goto fail3; + dev_err(&pdev->dev, "can't get/select pinctrl\n"); + return PTR_ERR(pinctrl); } /* Get I2C clock */ - i2c_imx->clk = clk_get(&pdev->dev, "i2c_clk"); + i2c_imx->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(i2c_imx->clk)) { - ret = PTR_ERR(i2c_imx->clk); dev_err(&pdev->dev, "can't get I2C clock\n"); - goto fail3; + return PTR_ERR(i2c_imx->clk); } /* Request IRQ */ - ret = request_irq(i2c_imx->irq, i2c_imx_isr, 0, pdev->name, i2c_imx); + ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, + pdev->name, i2c_imx); if (ret) { - dev_err(&pdev->dev, "can't claim irq %d\n", i2c_imx->irq); - goto fail4; + dev_err(&pdev->dev, "can't claim irq %d\n", irq); + return ret; } /* Init queue */ @@ -564,7 +549,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev) ret = i2c_add_numbered_adapter(&i2c_imx->adapter); if (ret < 0) { dev_err(&pdev->dev, "registration failed\n"); - goto fail5; + return ret; } of_i2c_register_devices(&i2c_imx->adapter); @@ -572,28 +557,16 @@ static int __init i2c_imx_probe(struct platform_device *pdev) /* Set up platform driver data */ platform_set_drvdata(pdev, i2c_imx); - dev_dbg(&i2c_imx->adapter.dev, "claimed 
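Editor's note: the i2c-imx probe above moves every allocation to the device-managed devm_* helpers, which is why the fail1..fail5 unwind labels disappear further down. Below is a minimal sketch of that pattern for a hypothetical "foo" driver; the devm_* calls are the same ones the patch uses, everything else is made up for illustration.

/*
 * Sketch of a devm_*-based probe: all resources are bound to &pdev->dev,
 * so every error path is a plain return and the driver core releases the
 * memory, mapping, clock and IRQ automatically on failure or unbind.
 */
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>

struct foo_priv {
        void __iomem *base;
        struct clk *clk;
};

static irqreturn_t foo_isr(int irq, void *data)
{
        return IRQ_HANDLED;                     /* placeholder handler */
}

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct foo_priv *priv;
        int irq, ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base = devm_request_and_ioremap(&pdev->dev, res);
        if (!priv->base)
                return -EBUSY;

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(priv->clk))
                return PTR_ERR(priv->clk);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(&pdev->dev, irq, foo_isr, 0,
                               dev_name(&pdev->dev), priv);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, priv);
        return 0;                               /* nothing to unwind */
}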
irq %d\n", i2c_imx->irq); + dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq); dev_dbg(&i2c_imx->adapter.dev, "device resources from 0x%x to 0x%x\n", - i2c_imx->res->start, i2c_imx->res->end); - dev_dbg(&i2c_imx->adapter.dev, "allocated %d bytes at 0x%x \n", - res_size, i2c_imx->res->start); + res->start, res->end); + dev_dbg(&i2c_imx->adapter.dev, "allocated %d bytes at 0x%x\n", + resource_size(res), res->start); dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n", i2c_imx->adapter.name); dev_dbg(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); return 0; /* Return OK */ - -fail5: - free_irq(i2c_imx->irq, i2c_imx); -fail4: - clk_put(i2c_imx->clk); -fail3: - kfree(i2c_imx); -fail2: - iounmap(base); -fail1: - release_mem_region(res->start, resource_size(res)); - return ret; /* Return error number */ } static int __exit i2c_imx_remove(struct platform_device *pdev) @@ -605,20 +578,12 @@ static int __exit i2c_imx_remove(struct platform_device *pdev) i2c_del_adapter(&i2c_imx->adapter); platform_set_drvdata(pdev, NULL); - /* free interrupt */ - free_irq(i2c_imx->irq, i2c_imx); - /* setup chip registers to defaults */ writeb(0, i2c_imx->base + IMX_I2C_IADR); writeb(0, i2c_imx->base + IMX_I2C_IFDR); writeb(0, i2c_imx->base + IMX_I2C_I2CR); writeb(0, i2c_imx->base + IMX_I2C_I2SR); - clk_put(i2c_imx->clk); - - iounmap(i2c_imx->base); - release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res)); - kfree(i2c_imx); return 0; } diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 4f44a33017b0..2e9d56719e99 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -18,6 +18,11 @@ #include <linux/mv643xx_i2c.h> #include <linux/platform_device.h> #include <linux/io.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_i2c.h> +#include <linux/clk.h> +#include <linux/err.h> /* Register defines */ #define MV64XXX_I2C_REG_SLAVE_ADDR 0x00 @@ -98,6 +103,9 @@ struct mv64xxx_i2c_data { int rc; u32 freq_m; u32 freq_n; +#if defined(CONFIG_HAVE_CLK) + struct clk *clk; +#endif wait_queue_head_t waitq; spinlock_t lock; struct i2c_msg *msg; @@ -521,6 +529,82 @@ mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data) drv_data->reg_base_p = 0; } +#ifdef CONFIG_OF +static int __devinit +mv64xxx_calc_freq(const int tclk, const int n, const int m) +{ + return tclk / (10 * (m + 1) * (2 << n)); +} + +static bool __devinit +mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n, + u32 *best_m) +{ + int freq, delta, best_delta = INT_MAX; + int m, n; + + for (n = 0; n <= 7; n++) + for (m = 0; m <= 15; m++) { + freq = mv64xxx_calc_freq(tclk, n, m); + delta = req_freq - freq; + if (delta >= 0 && delta < best_delta) { + *best_m = m; + *best_n = n; + best_delta = delta; + } + if (best_delta == 0) + return true; + } + if (best_delta == INT_MAX) + return false; + return true; +} + +static int __devinit +mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, + struct device_node *np) +{ + u32 bus_freq, tclk; + int rc = 0; + + /* CLK is mandatory when using DT to describe the i2c bus. We + * need to know tclk in order to calculate bus clock + * factors. 
+ */ +#if !defined(CONFIG_HAVE_CLK) + /* Have OF but no CLK */ + return -ENODEV; +#else + if (IS_ERR(drv_data->clk)) { + rc = -ENODEV; + goto out; + } + tclk = clk_get_rate(drv_data->clk); + of_property_read_u32(np, "clock-frequency", &bus_freq); + if (!mv64xxx_find_baud_factors(bus_freq, tclk, + &drv_data->freq_n, &drv_data->freq_m)) { + rc = -EINVAL; + goto out; + } + drv_data->irq = irq_of_parse_and_map(np, 0); + + /* Its not yet defined how timeouts will be specified in device tree. + * So hard code the value to 1 second. + */ + drv_data->adapter.timeout = HZ; +out: + return rc; +#endif +} +#else /* CONFIG_OF */ +static int __devinit +mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, + struct device_node *np) +{ + return -ENODEV; +} +#endif /* CONFIG_OF */ + static int __devinit mv64xxx_i2c_probe(struct platform_device *pd) { @@ -528,7 +612,7 @@ mv64xxx_i2c_probe(struct platform_device *pd) struct mv64xxx_i2c_pdata *pdata = pd->dev.platform_data; int rc; - if ((pd->id != 0) || !pdata) + if ((!pdata && !pd->dev.of_node)) return -ENODEV; drv_data = kzalloc(sizeof(struct mv64xxx_i2c_data), GFP_KERNEL); @@ -546,19 +630,35 @@ mv64xxx_i2c_probe(struct platform_device *pd) init_waitqueue_head(&drv_data->waitq); spin_lock_init(&drv_data->lock); - drv_data->freq_m = pdata->freq_m; - drv_data->freq_n = pdata->freq_n; - drv_data->irq = platform_get_irq(pd, 0); +#if defined(CONFIG_HAVE_CLK) + /* Not all platforms have a clk */ + drv_data->clk = clk_get(&pd->dev, NULL); + if (!IS_ERR(drv_data->clk)) { + clk_prepare(drv_data->clk); + clk_enable(drv_data->clk); + } +#endif + if (pdata) { + drv_data->freq_m = pdata->freq_m; + drv_data->freq_n = pdata->freq_n; + drv_data->irq = platform_get_irq(pd, 0); + drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout); + } else if (pd->dev.of_node) { + rc = mv64xxx_of_config(drv_data, pd->dev.of_node); + if (rc) + goto exit_unmap_regs; + } if (drv_data->irq < 0) { rc = -ENXIO; goto exit_unmap_regs; } + drv_data->adapter.dev.parent = &pd->dev; drv_data->adapter.algo = &mv64xxx_i2c_algo; drv_data->adapter.owner = THIS_MODULE; drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; - drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout); drv_data->adapter.nr = pd->id; + drv_data->adapter.dev.of_node = pd->dev.of_node; platform_set_drvdata(pd, drv_data); i2c_set_adapdata(&drv_data->adapter, drv_data); @@ -577,11 +677,20 @@ mv64xxx_i2c_probe(struct platform_device *pd) goto exit_free_irq; } + of_i2c_register_devices(&drv_data->adapter); + return 0; exit_free_irq: free_irq(drv_data->irq, drv_data); exit_unmap_regs: +#if defined(CONFIG_HAVE_CLK) + /* Not all platforms have a clk */ + if (!IS_ERR(drv_data->clk)) { + clk_disable(drv_data->clk); + clk_unprepare(drv_data->clk); + } +#endif mv64xxx_i2c_unmap_regs(drv_data); exit_kfree: kfree(drv_data); @@ -597,17 +706,31 @@ mv64xxx_i2c_remove(struct platform_device *dev) rc = i2c_del_adapter(&drv_data->adapter); free_irq(drv_data->irq, drv_data); mv64xxx_i2c_unmap_regs(drv_data); +#if defined(CONFIG_HAVE_CLK) + /* Not all platforms have a clk */ + if (!IS_ERR(drv_data->clk)) { + clk_disable(drv_data->clk); + clk_unprepare(drv_data->clk); + } +#endif kfree(drv_data); return rc; } +static const struct of_device_id mv64xxx_i2c_of_match_table[] __devinitdata = { + { .compatible = "marvell,mv64xxx-i2c", }, + {} +}; +MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); + static struct platform_driver mv64xxx_i2c_driver = { .probe = mv64xxx_i2c_probe, .remove = __devexit_p(mv64xxx_i2c_remove), .driver = { .owner 
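Editor's note: the mv64xxx device-tree path above derives freq_m/freq_n from the controller clock with freq = tclk / (10 * (m + 1) * (2 << n)). The standalone program below reproduces the same brute-force search for illustration; the 200 MHz tclk and 100 kHz target are assumed values, not taken from any particular board.

/*
 * Standalone illustration of the baud-factor search used by
 * mv64xxx_find_baud_factors(): n ranges 0..7, m ranges 0..15, and the
 * best candidate is the fastest rate that does not exceed the request.
 * Build with: cc -std=c99 -o baud baud.c
 */
#include <stdio.h>
#include <limits.h>

int main(void)
{
        const long tclk = 200000000;    /* assumed 200 MHz source clock */
        const long req  = 100000;       /* requested 100 kHz bus clock  */
        long best_delta = LONG_MAX, best_freq = 0;
        int best_m = 0, best_n = 0;

        for (int n = 0; n <= 7; n++)
                for (int m = 0; m <= 15; m++) {
                        long freq = tclk / (10L * (m + 1) * (2 << n));
                        long delta = req - freq;
                        if (delta >= 0 && delta < best_delta) {
                                best_delta = delta;
                                best_freq = freq;
                                best_m = m;
                                best_n = n;
                        }
                }

        /* with the assumed values this should report m=12, n=3 (~96 kHz) */
        printf("freq_m=%d freq_n=%d actual=%ld Hz\n",
               best_m, best_n, best_freq);
        return 0;
}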
= THIS_MODULE, .name = MV64XXX_I2C_CTLR_NAME, + .of_match_table = of_match_ptr(mv64xxx_i2c_of_match_table), }, }; diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 04eb441b6ce1..088c5c1ed17d 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -46,6 +46,10 @@ #define MXS_I2C_CTRL0_DIRECTION 0x00010000 #define MXS_I2C_CTRL0_XFER_COUNT(v) ((v) & 0x0000FFFF) +#define MXS_I2C_TIMING0 (0x10) +#define MXS_I2C_TIMING1 (0x20) +#define MXS_I2C_TIMING2 (0x30) + #define MXS_I2C_CTRL1 (0x40) #define MXS_I2C_CTRL1_SET (0x44) #define MXS_I2C_CTRL1_CLR (0x48) @@ -97,6 +101,35 @@ #define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \ MXS_I2C_CTRL0_MASTER_MODE) +struct mxs_i2c_speed_config { + uint32_t timing0; + uint32_t timing1; + uint32_t timing2; +}; + +/* + * Timing values for the default 24MHz clock supplied into the i2c block. + * + * The bus can operate at 95kHz or at 400kHz with the following timing + * register configurations. The 100kHz mode isn't present because it's + * values are not stated in the i.MX233/i.MX28 datasheet. The 95kHz mode + * shall be close enough replacement. Therefore when the bus is configured + * for 100kHz operation, 95kHz timing settings are actually loaded. + * + * For details, see i.MX233 [25.4.2 - 25.4.4] and i.MX28 [27.5.2 - 27.5.4]. + */ +static const struct mxs_i2c_speed_config mxs_i2c_95kHz_config = { + .timing0 = 0x00780030, + .timing1 = 0x00800030, + .timing2 = 0x00300030, +}; + +static const struct mxs_i2c_speed_config mxs_i2c_400kHz_config = { + .timing0 = 0x000f0007, + .timing1 = 0x001f000f, + .timing2 = 0x00300030, +}; + /** * struct mxs_i2c_dev - per device, private MXS-I2C data * @@ -112,11 +145,17 @@ struct mxs_i2c_dev { struct completion cmd_complete; u32 cmd_err; struct i2c_adapter adapter; + const struct mxs_i2c_speed_config *speed; }; static void mxs_i2c_reset(struct mxs_i2c_dev *i2c) { stmp_reset_block(i2c->regs); + + writel(i2c->speed->timing0, i2c->regs + MXS_I2C_TIMING0); + writel(i2c->speed->timing1, i2c->regs + MXS_I2C_TIMING1); + writel(i2c->speed->timing2, i2c->regs + MXS_I2C_TIMING2); + writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET); writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE, i2c->regs + MXS_I2C_QUEUECTRL_SET); @@ -193,7 +232,7 @@ static int mxs_i2c_wait_for_data(struct mxs_i2c_dev *i2c) static int mxs_i2c_finish_read(struct mxs_i2c_dev *i2c, u8 *buf, int len) { - u32 data; + u32 uninitialized_var(data); int i; for (i = 0; i < len; i++) { @@ -319,6 +358,28 @@ static const struct i2c_algorithm mxs_i2c_algo = { .functionality = mxs_i2c_func, }; +static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c) +{ + uint32_t speed; + struct device *dev = i2c->dev; + struct device_node *node = dev->of_node; + int ret; + + if (!node) + return -EINVAL; + + i2c->speed = &mxs_i2c_95kHz_config; + ret = of_property_read_u32(node, "clock-frequency", &speed); + if (ret) + dev_warn(dev, "No I2C speed selected, using 100kHz\n"); + else if (speed == 400000) + i2c->speed = &mxs_i2c_400kHz_config; + else if (speed != 100000) + dev_warn(dev, "Unsupported I2C speed selected, using 100kHz\n"); + + return 0; +} + static int __devinit mxs_i2c_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -358,6 +419,11 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev) return err; i2c->dev = dev; + + err = mxs_i2c_get_ofdata(i2c); + if (err) + return err; + platform_set_drvdata(pdev, i2c); /* Do reset to enforce correct startup after pinmuxing */ diff --git 
a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index a92440dbef07..5e6f1eed4f83 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -14,7 +14,8 @@ */ #include <linux/init.h> #include <linux/module.h> -#include <linux/platform_device.h> +#include <linux/amba/bus.h> +#include <linux/atomic.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/i2c.h> @@ -23,8 +24,7 @@ #include <linux/io.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> - -#include <plat/i2c.h> +#include <linux/platform_data/i2c-nomadik.h> #define DRIVER_NAME "nmk-i2c" @@ -136,7 +136,7 @@ struct i2c_nmk_client { /** * struct nmk_i2c_dev - private data structure of the controller. - * @pdev: parent platform device. + * @adev: parent amba device. * @adap: corresponding I2C adapter. * @irq: interrupt line for the controller. * @virtbase: virtual io memory area. @@ -150,7 +150,7 @@ struct i2c_nmk_client { * @busy: Busy doing transfer. */ struct nmk_i2c_dev { - struct platform_device *pdev; + struct amba_device *adev; struct i2c_adapter adap; int irq; void __iomem *virtbase; @@ -217,7 +217,7 @@ static int flush_i2c_fifo(struct nmk_i2c_dev *dev) } } - dev_err(&dev->pdev->dev, + dev_err(&dev->adev->dev, "flushing operation timed out giving up after %d attempts", LOOP_ATTEMPTS); @@ -276,15 +276,32 @@ exit: /** * load_i2c_mcr_reg() - load the MCR register * @dev: private data of controller + * @flags: message flags */ -static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *dev) +static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *dev, u16 flags) { u32 mcr = 0; + unsigned short slave_adr_3msb_bits; - /* 7-bit address transaction */ - mcr |= GEN_MASK(1, I2C_MCR_AM, 12); mcr |= GEN_MASK(dev->cli.slave_adr, I2C_MCR_A7, 1); + if (unlikely(flags & I2C_M_TEN)) { + /* 10-bit address transaction */ + mcr |= GEN_MASK(2, I2C_MCR_AM, 12); + /* + * Get the top 3 bits. + * EA10 represents extended address in MCR. This includes + * the extension (MSB bits) of the 7 bit address loaded + * in A7 + */ + slave_adr_3msb_bits = (dev->cli.slave_adr >> 7) & 0x7; + + mcr |= GEN_MASK(slave_adr_3msb_bits, I2C_MCR_EA10, 8); + } else { + /* 7-bit address transaction */ + mcr |= GEN_MASK(1, I2C_MCR_AM, 12); + } + /* start byte procedure not applied */ mcr |= GEN_MASK(0, I2C_MCR_SB, 11); @@ -364,7 +381,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev) * and high speed (up to 3.4 Mb/s) */ if (dev->cfg.sm > I2C_FREQ_MODE_FAST) { - dev_err(&dev->pdev->dev, + dev_err(&dev->adev->dev, "do not support this mode defaulting to std. mode\n"); brcr2 = i2c_clk/(100000 * 2) & 0xffff; writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR); @@ -381,19 +398,20 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev) /** * read_i2c() - Read from I2C client device * @dev: private data of I2C Driver + * @flags: message flags * * This function reads from i2c client device when controller is in * master mode. There is a completion timeout. If there is no transfer * before timeout error is returned. 
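Editor's note: load_i2c_mcr_reg() above now programs 10-bit targets by placing the low 7 address bits in the A7 field and the top 3 bits in the EA10 field. The small standalone snippet below illustrates that split; the address 0x2a5 is an arbitrary example.

/*
 * Illustration of the 10-bit address split performed in
 * load_i2c_mcr_reg(): low 7 bits -> A7, top 3 bits -> EA10.
 */
#include <stdio.h>

int main(void)
{
        unsigned short addr = 0x2a5;             /* example 10-bit address */
        unsigned short a7   = addr & 0x7f;       /* low 7 bits  -> A7      */
        unsigned short ea10 = (addr >> 7) & 0x7; /* top 3 bits  -> EA10    */

        printf("addr=0x%03x a7=0x%02x ea10=0x%x\n", addr, a7, ea10);
        /* prints: addr=0x2a5 a7=0x25 ea10=0x5 */
        return 0;
}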
*/ -static int read_i2c(struct nmk_i2c_dev *dev) +static int read_i2c(struct nmk_i2c_dev *dev, u16 flags) { u32 status = 0; u32 mcr; u32 irq_mask = 0; int timeout; - mcr = load_i2c_mcr_reg(dev); + mcr = load_i2c_mcr_reg(dev, flags); writel(mcr, dev->virtbase + I2C_MCR); /* load the current CR value */ @@ -423,7 +441,7 @@ static int read_i2c(struct nmk_i2c_dev *dev) &dev->xfer_complete, dev->adap.timeout); if (timeout < 0) { - dev_err(&dev->pdev->dev, + dev_err(&dev->adev->dev, "wait_for_completion_timeout " "returned %d waiting for event\n", timeout); status = timeout; @@ -431,7 +449,7 @@ static int read_i2c(struct nmk_i2c_dev *dev) if (timeout == 0) { /* Controller timed out */ - dev_err(&dev->pdev->dev, "read from slave 0x%x timed out\n", + dev_err(&dev->adev->dev, "read from slave 0x%x timed out\n", dev->cli.slave_adr); status = -ETIMEDOUT; } @@ -459,17 +477,18 @@ static void fill_tx_fifo(struct nmk_i2c_dev *dev, int no_bytes) /** * write_i2c() - Write data to I2C client. * @dev: private data of I2C Driver + * @flags: message flags * * This function writes data to I2C client */ -static int write_i2c(struct nmk_i2c_dev *dev) +static int write_i2c(struct nmk_i2c_dev *dev, u16 flags) { u32 status = 0; u32 mcr; u32 irq_mask = 0; int timeout; - mcr = load_i2c_mcr_reg(dev); + mcr = load_i2c_mcr_reg(dev, flags); writel(mcr, dev->virtbase + I2C_MCR); @@ -510,7 +529,7 @@ static int write_i2c(struct nmk_i2c_dev *dev) &dev->xfer_complete, dev->adap.timeout); if (timeout < 0) { - dev_err(&dev->pdev->dev, + dev_err(&dev->adev->dev, "wait_for_completion_timeout " "returned %d waiting for event\n", timeout); status = timeout; @@ -518,7 +537,7 @@ static int write_i2c(struct nmk_i2c_dev *dev) if (timeout == 0) { /* Controller timed out */ - dev_err(&dev->pdev->dev, "write to slave 0x%x timed out\n", + dev_err(&dev->adev->dev, "write to slave 0x%x timed out\n", dev->cli.slave_adr); status = -ETIMEDOUT; } @@ -538,11 +557,11 @@ static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags) if (flags & I2C_M_RD) { /* read operation */ dev->cli.operation = I2C_READ; - status = read_i2c(dev); + status = read_i2c(dev, flags); } else { /* write operation */ dev->cli.operation = I2C_WRITE; - status = write_i2c(dev); + status = write_i2c(dev, flags); } if (status || (dev->result)) { @@ -557,7 +576,7 @@ static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags) if (((i2c_sr >> 2) & 0x3) == 0x3) { /* get the abort cause */ cause = (i2c_sr >> 4) & 0x7; - dev_err(&dev->pdev->dev, "%s\n", + dev_err(&dev->adev->dev, "%s\n", cause >= ARRAY_SIZE(abort_causes) ? 
"unknown reason" : abort_causes[cause]); @@ -630,7 +649,7 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap, if (dev->regulator) regulator_enable(dev->regulator); - pm_runtime_get_sync(&dev->pdev->dev); + pm_runtime_get_sync(&dev->adev->dev); clk_enable(dev->clk); @@ -644,13 +663,6 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap, setup_i2c_controller(dev); for (i = 0; i < num_msgs; i++) { - if (unlikely(msgs[i].flags & I2C_M_TEN)) { - dev_err(&dev->pdev->dev, - "10 bit addressing not supported\n"); - - status = -EINVAL; - goto out; - } dev->cli.slave_adr = msgs[i].addr; dev->cli.buffer = msgs[i].buf; dev->cli.count = msgs[i].len; @@ -667,7 +679,7 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap, out: clk_disable(dev->clk); - pm_runtime_put_sync(&dev->pdev->dev); + pm_runtime_put_sync(&dev->adev->dev); if (dev->regulator) regulator_disable(dev->regulator); @@ -790,7 +802,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg) if (dev->cli.count) { dev->result = -EIO; - dev_err(&dev->pdev->dev, + dev_err(&dev->adev->dev, "%lu bytes still remain to be xfered\n", dev->cli.count); (void) init_hw(dev); @@ -834,7 +846,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg) dev->result = -EIO; (void) init_hw(dev); - dev_err(&dev->pdev->dev, "Tx Fifo Over run\n"); + dev_err(&dev->adev->dev, "Tx Fifo Over run\n"); complete(&dev->xfer_complete); break; @@ -847,10 +859,10 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg) case I2C_IT_RFSE: case I2C_IT_WTSR: case I2C_IT_STD: - dev_err(&dev->pdev->dev, "unhandled Interrupt\n"); + dev_err(&dev->adev->dev, "unhandled Interrupt\n"); break; default: - dev_err(&dev->pdev->dev, "spurious Interrupt..\n"); + dev_err(&dev->adev->dev, "spurious Interrupt..\n"); break; } @@ -861,8 +873,8 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg) #ifdef CONFIG_PM static int nmk_i2c_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev); + struct amba_device *adev = to_amba_device(dev); + struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev); if (nmk_i2c->busy) return -EBUSY; @@ -891,7 +903,7 @@ static const struct dev_pm_ops nmk_i2c_pm = { static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap) { - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR; } static const struct i2c_algorithm nmk_i2c_algo = { @@ -899,78 +911,69 @@ static const struct i2c_algorithm nmk_i2c_algo = { .functionality = nmk_i2c_functionality }; -static int __devinit nmk_i2c_probe(struct platform_device *pdev) +static atomic_t adapter_id = ATOMIC_INIT(0); + +static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) { int ret = 0; - struct resource *res; struct nmk_i2c_controller *pdata = - pdev->dev.platform_data; + adev->dev.platform_data; struct nmk_i2c_dev *dev; struct i2c_adapter *adap; + if (!pdata) { + dev_warn(&adev->dev, "no platform data\n"); + return -ENODEV; + } dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL); if (!dev) { - dev_err(&pdev->dev, "cannot allocate memory\n"); + dev_err(&adev->dev, "cannot allocate memory\n"); ret = -ENOMEM; goto err_no_mem; } dev->busy = false; - dev->pdev = pdev; - platform_set_drvdata(pdev, dev); + dev->adev = adev; + amba_set_drvdata(adev, dev); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENOENT; - goto err_no_resource; - } - - if (request_mem_region(res->start, resource_size(res), - DRIVER_NAME "I/O 
region") == NULL) { - ret = -EBUSY; - goto err_no_region; - } - - dev->virtbase = ioremap(res->start, resource_size(res)); + dev->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); if (!dev->virtbase) { ret = -ENOMEM; goto err_no_ioremap; } - dev->irq = platform_get_irq(pdev, 0); + dev->irq = adev->irq[0]; ret = request_irq(dev->irq, i2c_irq_handler, 0, DRIVER_NAME, dev); if (ret) { - dev_err(&pdev->dev, "cannot claim the irq %d\n", dev->irq); + dev_err(&adev->dev, "cannot claim the irq %d\n", dev->irq); goto err_irq; } - dev->regulator = regulator_get(&pdev->dev, "v-i2c"); + dev->regulator = regulator_get(&adev->dev, "v-i2c"); if (IS_ERR(dev->regulator)) { - dev_warn(&pdev->dev, "could not get i2c regulator\n"); + dev_warn(&adev->dev, "could not get i2c regulator\n"); dev->regulator = NULL; } - pm_suspend_ignore_children(&pdev->dev, true); - pm_runtime_enable(&pdev->dev); + pm_suspend_ignore_children(&adev->dev, true); - dev->clk = clk_get(&pdev->dev, NULL); + dev->clk = clk_get(&adev->dev, NULL); if (IS_ERR(dev->clk)) { - dev_err(&pdev->dev, "could not get i2c clock\n"); + dev_err(&adev->dev, "could not get i2c clock\n"); ret = PTR_ERR(dev->clk); goto err_no_clk; } adap = &dev->adap; - adap->dev.parent = &pdev->dev; + adap->dev.parent = &adev->dev; adap->owner = THIS_MODULE; adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adap->algo = &nmk_i2c_algo; adap->timeout = msecs_to_jiffies(pdata->timeout); + adap->nr = atomic_read(&adapter_id); snprintf(adap->name, sizeof(adap->name), - "Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start); - - /* fetch the controller id */ - adap->nr = pdev->id; + "Nomadik I2C%d at %pR", adap->nr, &adev->res); + atomic_inc(&adapter_id); /* fetch the controller configuration from machine */ dev->cfg.clk_freq = pdata->clk_freq; @@ -981,16 +984,18 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev) i2c_set_adapdata(adap, dev); - dev_info(&pdev->dev, + dev_info(&adev->dev, "initialize %s on virtual base %p\n", adap->name, dev->virtbase); ret = i2c_add_numbered_adapter(adap); if (ret) { - dev_err(&pdev->dev, "failed to add adapter\n"); + dev_err(&adev->dev, "failed to add adapter\n"); goto err_add_adap; } + pm_runtime_put(&adev->dev); + return 0; err_add_adap: @@ -998,25 +1003,21 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev) err_no_clk: if (dev->regulator) regulator_put(dev->regulator); - pm_runtime_disable(&pdev->dev); free_irq(dev->irq, dev); err_irq: iounmap(dev->virtbase); err_no_ioremap: - release_mem_region(res->start, resource_size(res)); - err_no_region: - platform_set_drvdata(pdev, NULL); - err_no_resource: + amba_set_drvdata(adev, NULL); kfree(dev); err_no_mem: return ret; } -static int __devexit nmk_i2c_remove(struct platform_device *pdev) +static int nmk_i2c_remove(struct amba_device *adev) { - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - struct nmk_i2c_dev *dev = platform_get_drvdata(pdev); + struct resource *res = &adev->res; + struct nmk_i2c_dev *dev = amba_get_drvdata(adev); i2c_del_adapter(&dev->adap); flush_i2c_fifo(dev); @@ -1031,31 +1032,46 @@ static int __devexit nmk_i2c_remove(struct platform_device *pdev) clk_put(dev->clk); if (dev->regulator) regulator_put(dev->regulator); - pm_runtime_disable(&pdev->dev); - platform_set_drvdata(pdev, NULL); + pm_runtime_disable(&adev->dev); + amba_set_drvdata(adev, NULL); kfree(dev); return 0; } -static struct platform_driver nmk_i2c_driver = { - .driver = { +static struct amba_id nmk_i2c_ids[] = { + { + .id = 0x00180024, + 
.mask = 0x00ffffff, + }, + { + .id = 0x00380024, + .mask = 0x00ffffff, + }, + {}, +}; + +MODULE_DEVICE_TABLE(amba, nmk_i2c_ids); + +static struct amba_driver nmk_i2c_driver = { + .drv = { .owner = THIS_MODULE, .name = DRIVER_NAME, .pm = &nmk_i2c_pm, }, + .id_table = nmk_i2c_ids, .probe = nmk_i2c_probe, - .remove = __devexit_p(nmk_i2c_remove), + .remove = nmk_i2c_remove, }; static int __init nmk_i2c_init(void) { - return platform_driver_register(&nmk_i2c_driver); + return amba_driver_register(&nmk_i2c_driver); } static void __exit nmk_i2c_exit(void) { - platform_driver_unregister(&nmk_i2c_driver); + amba_driver_unregister(&nmk_i2c_driver); } subsys_initcall(nmk_i2c_init); @@ -1064,4 +1080,3 @@ module_exit(nmk_i2c_exit); MODULE_AUTHOR("Sachin Verma, Srinidhi KASAGAR"); MODULE_DESCRIPTION("Nomadik/Ux500 I2C driver"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 75194c579b6d..bffd5501ac2d 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -10,40 +10,9 @@ */ /* - * Device tree configuration: - * - * Required properties: - * - compatible : "opencores,i2c-ocores" - * - reg : bus address start and address range size of device - * - interrupts : interrupt number - * - regstep : size of device registers in bytes - * - clock-frequency : frequency of bus clock in Hz - * - * Example: - * - * i2c0: ocores@a0000000 { - * compatible = "opencores,i2c-ocores"; - * reg = <0xa0000000 0x8>; - * interrupts = <10>; - * - * regstep = <1>; - * clock-frequency = <20000000>; - * - * -- Devices connected on this I2C bus get - * -- defined here; address- and size-cells - * -- apply to these child devices - * - * #address-cells = <1>; - * #size-cells = <0>; - * - * dummy@60 { - * compatible = "dummy"; - * reg = <60>; - * }; - * }; - * + * This driver can be used from the device tree, see + * Documentation/devicetree/bindings/i2c/ocore-i2c.txt */ - #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> @@ -56,10 +25,12 @@ #include <linux/slab.h> #include <linux/io.h> #include <linux/of_i2c.h> +#include <linux/log2.h> struct ocores_i2c { void __iomem *base; - int regstep; + u32 reg_shift; + u32 reg_io_width; wait_queue_head_t wait; struct i2c_adapter adap; struct i2c_msg *msg; @@ -102,12 +73,22 @@ struct ocores_i2c { static inline void oc_setreg(struct ocores_i2c *i2c, int reg, u8 value) { - iowrite8(value, i2c->base + reg * i2c->regstep); + if (i2c->reg_io_width == 4) + iowrite32(value, i2c->base + (reg << i2c->reg_shift)); + else if (i2c->reg_io_width == 2) + iowrite16(value, i2c->base + (reg << i2c->reg_shift)); + else + iowrite8(value, i2c->base + (reg << i2c->reg_shift)); } static inline u8 oc_getreg(struct ocores_i2c *i2c, int reg) { - return ioread8(i2c->base + reg * i2c->regstep); + if (i2c->reg_io_width == 4) + return ioread32(i2c->base + (reg << i2c->reg_shift)); + else if (i2c->reg_io_width == 2) + return ioread16(i2c->base + (reg << i2c->reg_shift)); + else + return ioread8(i2c->base + (reg << i2c->reg_shift)); } static void ocores_process(struct ocores_i2c *i2c) @@ -247,26 +228,35 @@ static struct i2c_adapter ocores_adapter = { }; #ifdef CONFIG_OF -static int ocores_i2c_of_probe(struct platform_device* pdev, - struct ocores_i2c* i2c) +static int ocores_i2c_of_probe(struct platform_device *pdev, + struct ocores_i2c *i2c) { - const __be32* val; - - val = of_get_property(pdev->dev.of_node, "regstep", NULL); - if (!val) { - dev_err(&pdev->dev, "Missing 
required parameter 'regstep'"); - return -ENODEV; + struct device_node *np = pdev->dev.of_node; + u32 val; + + if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) { + /* no 'reg-shift', check for deprecated 'regstep' */ + if (!of_property_read_u32(np, "regstep", &val)) { + if (!is_power_of_2(val)) { + dev_err(&pdev->dev, "invalid regstep %d\n", + val); + return -EINVAL; + } + i2c->reg_shift = ilog2(val); + dev_warn(&pdev->dev, + "regstep property deprecated, use reg-shift\n"); + } } - i2c->regstep = be32_to_cpup(val); - val = of_get_property(pdev->dev.of_node, "clock-frequency", NULL); - if (!val) { + if (of_property_read_u32(np, "clock-frequency", &val)) { dev_err(&pdev->dev, - "Missing required parameter 'clock-frequency'"); + "Missing required parameter 'clock-frequency'\n"); return -ENODEV; } - i2c->clock_khz = be32_to_cpup(val) / 1000; + i2c->clock_khz = val / 1000; + of_property_read_u32(pdev->dev.of_node, "reg-io-width", + &i2c->reg_io_width); return 0; } #else @@ -308,7 +298,8 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev) pdata = pdev->dev.platform_data; if (pdata) { - i2c->regstep = pdata->regstep; + i2c->reg_shift = pdata->reg_shift; + i2c->reg_io_width = pdata->reg_io_width; i2c->clock_khz = pdata->clock_khz; } else { ret = ocores_i2c_of_probe(pdev, i2c); @@ -316,6 +307,9 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev) return ret; } + if (i2c->reg_io_width == 0) + i2c->reg_io_width = 1; /* Set to default value */ + ocores_init(i2c); init_waitqueue_head(&i2c->wait); @@ -351,7 +345,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev) return 0; } -static int __devexit ocores_i2c_remove(struct platform_device* pdev) +static int __devexit ocores_i2c_remove(struct platform_device *pdev) { struct ocores_i2c *i2c = platform_get_drvdata(pdev); @@ -367,9 +361,9 @@ static int __devexit ocores_i2c_remove(struct platform_device* pdev) } #ifdef CONFIG_PM -static int ocores_i2c_suspend(struct platform_device *pdev, pm_message_t state) +static int ocores_i2c_suspend(struct device *dev) { - struct ocores_i2c *i2c = platform_get_drvdata(pdev); + struct ocores_i2c *i2c = dev_get_drvdata(dev); u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL); /* make sure the device is disabled */ @@ -378,17 +372,19 @@ static int ocores_i2c_suspend(struct platform_device *pdev, pm_message_t state) return 0; } -static int ocores_i2c_resume(struct platform_device *pdev) +static int ocores_i2c_resume(struct device *dev) { - struct ocores_i2c *i2c = platform_get_drvdata(pdev); + struct ocores_i2c *i2c = dev_get_drvdata(dev); ocores_init(i2c); return 0; } + +static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume); +#define OCORES_I2C_PM (&ocores_i2c_pm) #else -#define ocores_i2c_suspend NULL -#define ocores_i2c_resume NULL +#define OCORES_I2C_PM NULL #endif static struct of_device_id ocores_i2c_match[] = { @@ -400,12 +396,11 @@ MODULE_DEVICE_TABLE(of, ocores_i2c_match); static struct platform_driver ocores_i2c_driver = { .probe = ocores_i2c_probe, .remove = __devexit_p(ocores_i2c_remove), - .suspend = ocores_i2c_suspend, - .resume = ocores_i2c_resume, .driver = { .owner = THIS_MODULE, .name = "ocores-i2c", .of_match_table = ocores_i2c_match, + .pm = OCORES_I2C_PM, }, }; diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index c2148332de0f..6849635b268a 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -49,8 +49,8 @@ /* I2C controller revisions present on specific hardware */ 
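Editor's note: the ocores changes above replace the deprecated 'regstep' property (register stride in bytes) with 'reg-shift' (log2 of that stride), plus an optional 'reg-io-width', and the accessors now compute offsets as reg << reg_shift. The standalone snippet below illustrates the equivalence; regstep = 4 is just an example value.

/*
 * regstep -> reg-shift equivalence: a legacy regstep of 4 bytes maps to
 * reg-shift = ilog2(4) = 2, and register 'reg' lives at byte offset
 * (reg << reg_shift), exactly as oc_setreg()/oc_getreg() now compute it.
 */
#include <stdio.h>

int main(void)
{
        unsigned int regstep = 4;               /* legacy property value */
        unsigned int reg_shift = 0;

        while ((1u << (reg_shift + 1)) <= regstep)  /* poor man's ilog2() */
                reg_shift++;

        for (unsigned int reg = 0; reg < 4; reg++)
                printf("reg %u -> offset 0x%x\n", reg, reg << reg_shift);
        /* reg 0 -> 0x0, reg 1 -> 0x4, reg 2 -> 0x8, reg 3 -> 0xc */
        return 0;
}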
#define OMAP_I2C_REV_ON_2430 0x36 -#define OMAP_I2C_REV_ON_3430 0x3C -#define OMAP_I2C_REV_ON_3530_4430 0x40 +#define OMAP_I2C_REV_ON_3430_3530 0x3C +#define OMAP_I2C_REV_ON_3630_4430 0x40 /* timeout waiting for the controller to respond */ #define OMAP_I2C_TIMEOUT (msecs_to_jiffies(1000)) @@ -173,7 +173,7 @@ enum { /* Errata definitions */ #define I2C_OMAP_ERRATA_I207 (1 << 0) -#define I2C_OMAP3_1P153 (1 << 1) +#define I2C_OMAP_ERRATA_I462 (1 << 1) struct omap_i2c_dev { struct device *dev; @@ -269,47 +269,6 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg) (i2c_dev->regs[reg] << i2c_dev->reg_shift)); } -static void omap_i2c_unidle(struct omap_i2c_dev *dev) -{ - if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) { - omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); - omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate); - omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate); - omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, dev->sclhstate); - omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, dev->bufstate); - omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, dev->syscstate); - omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate); - omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); - } - - /* - * Don't write to this register if the IE state is 0 as it can - * cause deadlock. - */ - if (dev->iestate) - omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate); -} - -static void omap_i2c_idle(struct omap_i2c_dev *dev) -{ - u16 iv; - - dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG); - if (dev->dtrev == OMAP_I2C_IP_VERSION_2) - omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1); - else - omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0); - - if (dev->rev < OMAP_I2C_OMAP1_REV_2) { - iv = omap_i2c_read_reg(dev, OMAP_I2C_IV_REG); /* Read clears */ - } else { - omap_i2c_write_reg(dev, OMAP_I2C_STAT_REG, dev->iestate); - - /* Flush posted write */ - omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG); - } -} - static int omap_i2c_init(struct omap_i2c_dev *dev) { u16 psc = 0, scll = 0, sclh = 0, buf = 0; @@ -346,7 +305,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, SYSC_AUTOIDLE_MASK); - } else if (dev->rev >= OMAP_I2C_REV_ON_3430) { + } else if (dev->rev >= OMAP_I2C_REV_ON_3430_3530) { dev->syscstate = SYSC_AUTOIDLE_MASK; dev->syscstate |= SYSC_ENAWAKEUP_MASK; dev->syscstate |= (SYSC_IDLEMODE_SMART << @@ -468,11 +427,6 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) /* Take the I2C module out of reset: */ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); - dev->errata = 0; - - if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207) - dev->errata |= I2C_OMAP_ERRATA_I207; - /* Enable interrupts */ dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY | OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK | @@ -514,7 +468,7 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) { struct omap_i2c_dev *dev = i2c_get_adapdata(adap); - int r; + unsigned long timeout; u16 w; dev_dbg(dev->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n", @@ -536,7 +490,7 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, w |= OMAP_I2C_BUF_RXFIF_CLR | OMAP_I2C_BUF_TXFIF_CLR; omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, w); - init_completion(&dev->cmd_complete); + INIT_COMPLETION(dev->cmd_complete); dev->cmd_err = 0; w = OMAP_I2C_CON_EN | OMAP_I2C_CON_MST | OMAP_I2C_CON_STT; @@ -584,12 +538,10 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap, * REVISIT: We should abort the transfer on signals, but the bus goes * 
into arbitration and we're currently unable to recover from it. */ - r = wait_for_completion_timeout(&dev->cmd_complete, - OMAP_I2C_TIMEOUT); + timeout = wait_for_completion_timeout(&dev->cmd_complete, + OMAP_I2C_TIMEOUT); dev->buf_len = 0; - if (r < 0) - return r; - if (r == 0) { + if (timeout == 0) { dev_err(dev->dev, "controller timed out\n"); omap_i2c_init(dev); return -ETIMEDOUT; @@ -630,7 +582,9 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) int i; int r; - pm_runtime_get_sync(dev->dev); + r = pm_runtime_get_sync(dev->dev); + if (IS_ERR_VALUE(r)) + return r; r = omap_i2c_wait_for_bb(dev); if (r < 0) @@ -767,11 +721,11 @@ omap_i2c_omap1_isr(int this_irq, void *dev_id) #endif /* - * OMAP3430 Errata 1.153: When an XRDY/XDR is hit, wait for XUDF before writing + * OMAP3430 Errata i462: When an XRDY/XDR is hit, wait for XUDF before writing * data to DATA_REG. Otherwise some data bytes can be lost while transferring * them from the memory to the I2C interface. */ -static int errata_omap3_1p153(struct omap_i2c_dev *dev, u16 *stat, int *err) +static int errata_omap3_i462(struct omap_i2c_dev *dev, u16 *stat, int *err) { unsigned long timeout = 10000; @@ -779,7 +733,6 @@ static int errata_omap3_1p153(struct omap_i2c_dev *dev, u16 *stat, int *err) if (*stat & (OMAP_I2C_STAT_NACK | OMAP_I2C_STAT_AL)) { omap_i2c_ack_stat(dev, *stat & (OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); - *err |= OMAP_I2C_STAT_XUDF; return -ETIMEDOUT; } @@ -792,6 +745,7 @@ static int errata_omap3_1p153(struct omap_i2c_dev *dev, u16 *stat, int *err) return 0; } + *err |= OMAP_I2C_STAT_XUDF; return 0; } @@ -930,8 +884,8 @@ complete: break; } - if ((dev->errata & I2C_OMAP3_1P153) && - errata_omap3_1p153(dev, &stat, &err)) + if ((dev->errata & I2C_OMAP_ERRATA_I462) && + errata_omap3_i462(dev, &stat, &err)) goto complete; omap_i2c_write_reg(dev, OMAP_I2C_DATA_REG, w); @@ -1048,6 +1002,7 @@ omap_i2c_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, dev); + init_completion(&dev->cmd_complete); dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3; @@ -1057,12 +1012,19 @@ omap_i2c_probe(struct platform_device *pdev) dev->regs = (u8 *)reg_map_ip_v1; pm_runtime_enable(dev->dev); - pm_runtime_get_sync(dev->dev); + r = pm_runtime_get_sync(dev->dev); + if (IS_ERR_VALUE(r)) + goto err_free_mem; dev->rev = omap_i2c_read_reg(dev, OMAP_I2C_REV_REG) & 0xff; - if (dev->rev <= OMAP_I2C_REV_ON_3430) - dev->errata |= I2C_OMAP3_1P153; + dev->errata = 0; + + if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207) + dev->errata |= I2C_OMAP_ERRATA_I207; + + if (dev->rev <= OMAP_I2C_REV_ON_3430_3530) + dev->errata |= I2C_OMAP_ERRATA_I462; if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) { u16 s; @@ -1079,7 +1041,7 @@ omap_i2c_probe(struct platform_device *pdev) dev->fifo_size = (dev->fifo_size / 2); - if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) + if (dev->rev >= OMAP_I2C_REV_ON_3630_4430) dev->b_hw = 0; /* Disable hardware fixes */ else dev->b_hw = 1; /* Enable hardware fixes */ @@ -1095,7 +1057,7 @@ omap_i2c_probe(struct platform_device *pdev) isr = (dev->rev < OMAP_I2C_OMAP1_REV_2) ? 
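Editor's note: the omap probe and transfer paths above now check the return value of pm_runtime_get_sync() with IS_ERR_VALUE(), which flags values in the [-MAX_ERRNO, -1] range (viewed as unsigned long) as errors; pm_runtime_get_sync() returns 0 or 1 on success and a negative errno on failure. A hedged sketch of the same call pattern with a hypothetical helper, not code from this patch.

/*
 * Sketch of the pm_runtime_get_sync()/IS_ERR_VALUE() pattern used in
 * omap_i2c_xfer() and omap_i2c_probe(); "foo" is hypothetical.
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>

static int foo_do_io(struct device *dev)
{
        int r;

        r = pm_runtime_get_sync(dev);           /* may power the block up */
        if (IS_ERR_VALUE(r))
                return r;                       /* resume failed */

        /* ... access the hardware here ... */

        pm_runtime_put(dev);                    /* drop the usage count */
        return 0;
}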
omap_i2c_omap1_isr : omap_i2c_isr; - r = request_irq(dev->irq, isr, 0, pdev->name, dev); + r = request_irq(dev->irq, isr, IRQF_NO_SUSPEND, pdev->name, dev); if (r) { dev_err(dev->dev, "failure requesting irq %i\n", dev->irq); @@ -1105,8 +1067,6 @@ omap_i2c_probe(struct platform_device *pdev) dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id, dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed); - pm_runtime_put(dev->dev); - adap = &dev->adapter; i2c_set_adapdata(adap, dev); adap->owner = THIS_MODULE; @@ -1126,6 +1086,8 @@ omap_i2c_probe(struct platform_device *pdev) of_i2c_register_devices(adap); + pm_runtime_put(dev->dev); + return 0; err_free_irq: @@ -1134,6 +1096,7 @@ err_unuse_clocks: omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); pm_runtime_put(dev->dev); iounmap(dev->base); + pm_runtime_disable(&pdev->dev); err_free_mem: platform_set_drvdata(pdev, NULL); kfree(dev); @@ -1143,17 +1106,23 @@ err_release_region: return r; } -static int -omap_i2c_remove(struct platform_device *pdev) +static int __devexit omap_i2c_remove(struct platform_device *pdev) { struct omap_i2c_dev *dev = platform_get_drvdata(pdev); struct resource *mem; + int ret; platform_set_drvdata(pdev, NULL); free_irq(dev->irq, dev); i2c_del_adapter(&dev->adapter); + ret = pm_runtime_get_sync(&pdev->dev); + if (IS_ERR_VALUE(ret)) + return ret; + omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); iounmap(dev->base); kfree(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1161,13 +1130,26 @@ omap_i2c_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM #ifdef CONFIG_PM_RUNTIME static int omap_i2c_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct omap_i2c_dev *_dev = platform_get_drvdata(pdev); + u16 iv; + + _dev->iestate = omap_i2c_read_reg(_dev, OMAP_I2C_IE_REG); + + omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, 0); - omap_i2c_idle(_dev); + if (_dev->rev < OMAP_I2C_OMAP1_REV_2) { + iv = omap_i2c_read_reg(_dev, OMAP_I2C_IV_REG); /* Read clears */ + } else { + omap_i2c_write_reg(_dev, OMAP_I2C_STAT_REG, _dev->iestate); + + /* Flush posted write */ + omap_i2c_read_reg(_dev, OMAP_I2C_STAT_REG); + } return 0; } @@ -1177,23 +1159,40 @@ static int omap_i2c_runtime_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct omap_i2c_dev *_dev = platform_get_drvdata(pdev); - omap_i2c_unidle(_dev); + if (_dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) { + omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, 0); + omap_i2c_write_reg(_dev, OMAP_I2C_PSC_REG, _dev->pscstate); + omap_i2c_write_reg(_dev, OMAP_I2C_SCLL_REG, _dev->scllstate); + omap_i2c_write_reg(_dev, OMAP_I2C_SCLH_REG, _dev->sclhstate); + omap_i2c_write_reg(_dev, OMAP_I2C_BUF_REG, _dev->bufstate); + omap_i2c_write_reg(_dev, OMAP_I2C_SYSC_REG, _dev->syscstate); + omap_i2c_write_reg(_dev, OMAP_I2C_WE_REG, _dev->westate); + omap_i2c_write_reg(_dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN); + } + + /* + * Don't write to this register if the IE state is 0 as it can + * cause deadlock. 
+ */ + if (_dev->iestate) + omap_i2c_write_reg(_dev, OMAP_I2C_IE_REG, _dev->iestate); return 0; } +#endif /* CONFIG_PM_RUNTIME */ static struct dev_pm_ops omap_i2c_pm_ops = { - .runtime_suspend = omap_i2c_runtime_suspend, - .runtime_resume = omap_i2c_runtime_resume, + SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, + omap_i2c_runtime_resume, NULL) }; #define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) #else #define OMAP_I2C_PM_OPS NULL -#endif +#endif /* CONFIG_PM */ static struct platform_driver omap_i2c_driver = { .probe = omap_i2c_probe, - .remove = omap_i2c_remove, + .remove = __devexit_p(omap_i2c_remove), .driver = { .name = "omap_i2c", .owner = THIS_MODULE, diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 99389d2eae51..5d54416770b0 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -587,25 +587,27 @@ static struct i2c_algorithm pnx_algorithm = { }; #ifdef CONFIG_PM -static int i2c_pnx_controller_suspend(struct platform_device *pdev, - pm_message_t state) +static int i2c_pnx_controller_suspend(struct device *dev) { - struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); + struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); clk_disable(alg_data->clk); return 0; } -static int i2c_pnx_controller_resume(struct platform_device *pdev) +static int i2c_pnx_controller_resume(struct device *dev) { - struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); + struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); return clk_enable(alg_data->clk); } + +static SIMPLE_DEV_PM_OPS(i2c_pnx_pm, + i2c_pnx_controller_suspend, i2c_pnx_controller_resume); +#define PNX_I2C_PM (&i2c_pnx_pm) #else -#define i2c_pnx_controller_suspend NULL -#define i2c_pnx_controller_resume NULL +#define PNX_I2C_PM NULL #endif static int __devinit i2c_pnx_probe(struct platform_device *pdev) @@ -783,11 +785,10 @@ static struct platform_driver i2c_pnx_driver = { .name = "pnx-i2c", .owner = THIS_MODULE, .of_match_table = of_match_ptr(i2c_pnx_of_match), + .pm = PNX_I2C_PM, }, .probe = i2c_pnx_probe, .remove = __devexit_p(i2c_pnx_remove), - .suspend = i2c_pnx_controller_suspend, - .resume = i2c_pnx_controller_resume, }; static int __init i2c_adap_pnx_init(void) diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c index 93709fbe30eb..d8515be00b98 100644 --- a/drivers/i2c/busses/i2c-puv3.c +++ b/drivers/i2c/busses/i2c-puv3.c @@ -254,7 +254,7 @@ static int __devexit puv3_i2c_remove(struct platform_device *pdev) } #ifdef CONFIG_PM -static int puv3_i2c_suspend(struct platform_device *dev, pm_message_t state) +static int puv3_i2c_suspend(struct device *dev) { int poll_count; /* Disable the IIC */ @@ -267,23 +267,20 @@ static int puv3_i2c_suspend(struct platform_device *dev, pm_message_t state) return 0; } -static int puv3_i2c_resume(struct platform_device *dev) -{ - return 0 ; -} +static SIMPLE_DEV_PM_OPS(puv3_i2c_pm, puv3_i2c_suspend, NULL); +#define PUV3_I2C_PM (&puv3_i2c_pm) + #else -#define puv3_i2c_suspend NULL -#define puv3_i2c_resume NULL +#define PUV3_I2C_PM NULL #endif static struct platform_driver puv3_i2c_driver = { .probe = puv3_i2c_probe, .remove = __devexit_p(puv3_i2c_remove), - .suspend = puv3_i2c_suspend, - .resume = puv3_i2c_resume, .driver = { .name = "PKUnity-v3-I2C", .owner = THIS_MODULE, + .pm = PUV3_I2C_PM, } }; diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 01959154572d..5ae3b0236bd3 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -122,7 
+122,7 @@ static inline unsigned int s3c24xx_get_device_quirks(struct platform_device *pde { if (pdev->dev.of_node) { const struct of_device_id *match; - match = of_match_node(&s3c24xx_i2c_match, pdev->dev.of_node); + match = of_match_node(s3c24xx_i2c_match, pdev->dev.of_node); return (unsigned int)match->data; } @@ -609,7 +609,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap, if (ret != -EAGAIN) { clk_disable(i2c->clk); - pm_runtime_put_sync(&adap->dev); + pm_runtime_put(&adap->dev); return ret; } @@ -619,7 +619,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap, } clk_disable(i2c->clk); - pm_runtime_put_sync(&adap->dev); + pm_runtime_put(&adap->dev); return -EREMOTEIO; } diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index 4d44af181f37..580a0c04cb42 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007-2009 ST-Ericsson AB + * Copyright (C) 2007-2012 ST-Ericsson AB * License terms: GNU General Public License (GPL) version 2 * ST DDC I2C master mode driver, used in e.g. U300 series platforms. * Author: Linus Walleij <linus.walleij@stericsson.com> @@ -139,8 +139,6 @@ module_param(scl_frequency, uint, 0644); * struct stu300_dev - the stu300 driver state holder * @pdev: parent platform device * @adapter: corresponding I2C adapter - * @phybase: location of I/O area in memory - * @physize: size of I/O area in memory * @clk: hardware block clock * @irq: assigned interrupt line * @cmd_issue_lock: this locks the following cmd_ variables @@ -155,8 +153,6 @@ module_param(scl_frequency, uint, 0644); struct stu300_dev { struct platform_device *pdev; struct i2c_adapter adapter; - resource_size_t phybase; - resource_size_t physize; void __iomem *virtbase; struct clk *clk; int irq; @@ -873,64 +869,44 @@ stu300_probe(struct platform_device *pdev) int ret = 0; char clk_name[] = "I2C0"; - dev = kzalloc(sizeof(struct stu300_dev), GFP_KERNEL); + dev = devm_kzalloc(&pdev->dev, sizeof(struct stu300_dev), GFP_KERNEL); if (!dev) { dev_err(&pdev->dev, "could not allocate device struct\n"); - ret = -ENOMEM; - goto err_no_devmem; + return -ENOMEM; } bus_nr = pdev->id; clk_name[3] += (char)bus_nr; - dev->clk = clk_get(&pdev->dev, clk_name); + dev->clk = devm_clk_get(&pdev->dev, clk_name); if (IS_ERR(dev->clk)) { - ret = PTR_ERR(dev->clk); dev_err(&pdev->dev, "could not retrieve i2c bus clock\n"); - goto err_no_clk; + return PTR_ERR(dev->clk); } dev->pdev = pdev; - platform_set_drvdata(pdev, dev); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENOENT; - goto err_no_resource; - } - - dev->phybase = res->start; - dev->physize = resource_size(res); - - if (request_mem_region(dev->phybase, dev->physize, - NAME " I/O Area") == NULL) { - ret = -EBUSY; - goto err_no_ioregion; - } + if (!res) + return -ENOENT; - dev->virtbase = ioremap(dev->phybase, dev->physize); + dev->virtbase = devm_request_and_ioremap(&pdev->dev, res); dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual " "base %p\n", bus_nr, dev->virtbase); - if (!dev->virtbase) { - ret = -ENOMEM; - goto err_no_ioremap; - } + if (!dev->virtbase) + return -ENOMEM; dev->irq = platform_get_irq(pdev, 0); - if (request_irq(dev->irq, stu300_irh, 0, - NAME, dev)) { - ret = -EIO; - goto err_no_irq; - } + ret = devm_request_irq(&pdev->dev, dev->irq, stu300_irh, 0, NAME, dev); + if (ret < 0) + return ret; dev->speed = scl_frequency; - clk_enable(dev->clk); + clk_prepare_enable(dev->clk); ret = stu300_init_hw(dev); 
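Editor's note: stu300 above (and tegra further down) switch to clk_prepare_enable(), which is clk_prepare() followed by clk_enable() as required by the common clock framework; the balanced teardown helper is clk_disable_unprepare(). A short sketch of the pairing with a hypothetical helper.

/* Sketch of balanced common-clock usage; not code from this patch. */
#include <linux/clk.h>

static int foo_touch_hw(struct clk *clk)
{
        int ret = clk_prepare_enable(clk);      /* prepare (may sleep) + enable */

        if (ret)
                return ret;

        /* ... program registers while the clock runs ... */

        clk_disable_unprepare(clk);             /* undo both steps */
        return 0;
}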
clk_disable(dev->clk); - if (ret != 0) { dev_err(&dev->pdev->dev, "error initializing hardware.\n"); - goto err_init_hw; + return -EIO; } /* IRQ event handling initialization */ @@ -952,57 +928,43 @@ stu300_probe(struct platform_device *pdev) /* i2c device drivers may be active on return from add_adapter() */ ret = i2c_add_numbered_adapter(adap); if (ret) { - dev_err(&dev->pdev->dev, "failure adding ST Micro DDC " + dev_err(&pdev->dev, "failure adding ST Micro DDC " "I2C adapter\n"); - goto err_add_adapter; + return ret; } - return 0; - err_add_adapter: - err_init_hw: - free_irq(dev->irq, dev); - err_no_irq: - iounmap(dev->virtbase); - err_no_ioremap: - release_mem_region(dev->phybase, dev->physize); - err_no_ioregion: - platform_set_drvdata(pdev, NULL); - err_no_resource: - clk_put(dev->clk); - err_no_clk: - kfree(dev); - err_no_devmem: - dev_err(&pdev->dev, "failed to add " NAME " adapter: %d\n", - pdev->id); - return ret; + platform_set_drvdata(pdev, dev); + return 0; } #ifdef CONFIG_PM -static int stu300_suspend(struct platform_device *pdev, pm_message_t state) +static int stu300_suspend(struct device *device) { - struct stu300_dev *dev = platform_get_drvdata(pdev); + struct stu300_dev *dev = dev_get_drvdata(device); /* Turn off everything */ stu300_wr8(0x00, dev->virtbase + I2C_CR); return 0; } -static int stu300_resume(struct platform_device *pdev) +static int stu300_resume(struct device *device) { int ret = 0; - struct stu300_dev *dev = platform_get_drvdata(pdev); + struct stu300_dev *dev = dev_get_drvdata(device); clk_enable(dev->clk); ret = stu300_init_hw(dev); clk_disable(dev->clk); if (ret != 0) - dev_err(&pdev->dev, "error re-initializing hardware.\n"); + dev_err(device, "error re-initializing hardware.\n"); return ret; } + +static SIMPLE_DEV_PM_OPS(stu300_pm, stu300_suspend, stu300_resume); +#define STU300_I2C_PM (&stu300_pm) #else -#define stu300_suspend NULL -#define stu300_resume NULL +#define STU300_I2C_PM NULL #endif static int __exit @@ -1013,12 +975,7 @@ stu300_remove(struct platform_device *pdev) i2c_del_adapter(&dev->adapter); /* Turn off everything */ stu300_wr8(0x00, dev->virtbase + I2C_CR); - free_irq(dev->irq, dev); - iounmap(dev->virtbase); - release_mem_region(dev->phybase, dev->physize); - clk_put(dev->clk); platform_set_drvdata(pdev, NULL); - kfree(dev); return 0; } @@ -1026,10 +983,9 @@ static struct platform_driver stu300_i2c_driver = { .driver = { .name = NAME, .owner = THIS_MODULE, + .pm = STU300_I2C_PM, }, .remove = __exit_p(stu300_remove), - .suspend = stu300_suspend, - .resume = stu300_resume, }; diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 3da7ee3eb505..66eb53fac202 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -97,8 +97,21 @@ #define I2C_HEADER_10BIT_ADDR (1<<18) #define I2C_HEADER_IE_ENABLE (1<<17) #define I2C_HEADER_REPEAT_START (1<<16) +#define I2C_HEADER_CONTINUE_XFER (1<<15) #define I2C_HEADER_MASTER_ADDR_SHIFT 12 #define I2C_HEADER_SLAVE_ADDR_SHIFT 1 +/* + * msg_end_type: The bus control which need to be send at end of transfer. + * @MSG_END_STOP: Send stop pulse at end of transfer. + * @MSG_END_REPEAT_START: Send repeat start at end of transfer. + * @MSG_END_CONTINUE: The following on message is coming and so do not send + * stop or repeat start. 
+ */ +enum msg_end_type { + MSG_END_STOP, + MSG_END_REPEAT_START, + MSG_END_CONTINUE, +}; /** * struct tegra_i2c_dev - per device i2c context @@ -106,7 +119,6 @@ * @adapter: core i2c layer adapter information * @clk: clock reference for i2c controller * @i2c_clk: clock reference for i2c bus - * @iomem: memory resource for registers * @base: ioremapped registers cookie * @cont_id: i2c controller id, used for for packet header * @irq: irq number of transfer complete interrupt @@ -124,7 +136,6 @@ struct tegra_i2c_dev { struct i2c_adapter adapter; struct clk *clk; struct clk *i2c_clk; - struct resource *iomem; void __iomem *base; int cont_id; int irq; @@ -165,6 +176,10 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg) { writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); + + /* Read back register to make sure that register writes completed */ + if (reg != I2C_TX_FIFO) + readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); } static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg) @@ -449,7 +464,7 @@ err: } static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, - struct i2c_msg *msg, int stop) + struct i2c_msg *msg, enum msg_end_type end_state) { u32 packet_header; u32 int_mask; @@ -476,7 +491,9 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO); packet_header = I2C_HEADER_IE_ENABLE; - if (!stop) + if (end_state == MSG_END_CONTINUE) + packet_header |= I2C_HEADER_CONTINUE_XFER; + else if (end_state == MSG_END_REPEAT_START) packet_header |= I2C_HEADER_REPEAT_START; if (msg->flags & I2C_M_TEN) { packet_header |= msg->addr; @@ -548,8 +565,14 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], clk_prepare_enable(i2c_dev->clk); for (i = 0; i < num; i++) { - int stop = (i == (num - 1)) ? 
1 : 0; - ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop); + enum msg_end_type end_type = MSG_END_STOP; + if (i < (num - 1)) { + if (msgs[i + 1].flags & I2C_M_NOSTART) + end_type = MSG_END_CONTINUE; + else + end_type = MSG_END_REPEAT_START; + } + ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], end_type); if (ret) break; } @@ -559,7 +582,8 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], static u32 tegra_i2c_func(struct i2c_adapter *adap) { - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR; + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR | + I2C_FUNC_PROTOCOL_MANGLING | I2C_FUNC_NOSTART; } static const struct i2c_algorithm tegra_i2c_algo = { @@ -572,7 +596,6 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev) struct tegra_i2c_dev *i2c_dev; struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data; struct resource *res; - struct resource *iomem; struct clk *clk; struct clk *i2c_clk; const unsigned int *prop; @@ -585,50 +608,41 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev) dev_err(&pdev->dev, "no mem resource\n"); return -EINVAL; } - iomem = request_mem_region(res->start, resource_size(res), pdev->name); - if (!iomem) { - dev_err(&pdev->dev, "I2C region already claimed\n"); - return -EBUSY; - } - base = ioremap(iomem->start, resource_size(iomem)); + base = devm_request_and_ioremap(&pdev->dev, res); if (!base) { - dev_err(&pdev->dev, "Cannot ioremap I2C region\n"); - return -ENOMEM; + dev_err(&pdev->dev, "Cannot request/ioremap I2C registers\n"); + return -EADDRNOTAVAIL; } res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(&pdev->dev, "no irq resource\n"); - ret = -EINVAL; - goto err_iounmap; + return -EINVAL; } irq = res->start; - clk = clk_get(&pdev->dev, NULL); + clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { dev_err(&pdev->dev, "missing controller clock"); - ret = PTR_ERR(clk); - goto err_release_region; + return PTR_ERR(clk); } - i2c_clk = clk_get(&pdev->dev, "i2c"); + i2c_clk = devm_clk_get(&pdev->dev, "i2c"); if (IS_ERR(i2c_clk)) { dev_err(&pdev->dev, "missing bus clock"); - ret = PTR_ERR(i2c_clk); - goto err_clk_put; + return PTR_ERR(i2c_clk); } - i2c_dev = kzalloc(sizeof(struct tegra_i2c_dev), GFP_KERNEL); + i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) { - ret = -ENOMEM; - goto err_i2c_clk_put; + dev_err(&pdev->dev, "Could not allocate struct tegra_i2c_dev"); + return -ENOMEM; } i2c_dev->base = base; i2c_dev->clk = clk; i2c_dev->i2c_clk = i2c_clk; - i2c_dev->iomem = iomem; i2c_dev->adapter.algo = &tegra_i2c_algo; i2c_dev->irq = irq; i2c_dev->cont_id = pdev->id; @@ -657,13 +671,14 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev) ret = tegra_i2c_init(i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize i2c controller"); - goto err_free; + return ret; } - ret = request_irq(i2c_dev->irq, tegra_i2c_isr, 0, pdev->name, i2c_dev); + ret = devm_request_irq(&pdev->dev, i2c_dev->irq, + tegra_i2c_isr, 0, pdev->name, i2c_dev); if (ret) { dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); - goto err_free; + return ret; } clk_prepare_enable(i2c_dev->i2c_clk); @@ -681,45 +696,26 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev) ret = i2c_add_numbered_adapter(&i2c_dev->adapter); if (ret) { dev_err(&pdev->dev, "Failed to add I2C adapter\n"); - goto err_free_irq; + clk_disable_unprepare(i2c_dev->i2c_clk); + return ret; } of_i2c_register_devices(&i2c_dev->adapter); 
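[Editor's aside, not part of the patch] With the hunks above, tegra-i2c now distinguishes three ways a message can end: a STOP, a repeated START, or no bus condition at all when the following message carries I2C_M_NOSTART, and it advertises I2C_FUNC_NOSTART accordingly. A sketch of how an I2C client driver might exercise those endings; the device, register and function names are invented:

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/i2c.h>

	/* Editor's sketch, not from the kernel tree. */

	/* Write a register address, then read it back across a repeated START. */
	static int bar_read_reg(struct i2c_client *client, u8 reg, u8 *val)
	{
		struct i2c_msg msgs[] = {
			{ .addr = client->addr, .flags = 0,        .len = 1, .buf = &reg },
			{ .addr = client->addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
		};
		int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

		if (ret < 0)
			return ret;
		return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
	}

	/*
	 * Send a register address and a payload as two messages glued together:
	 * the second message has I2C_M_NOSTART, so no START or address byte is
	 * re-inserted between them. Only adapters reporting I2C_FUNC_NOSTART
	 * (as tegra-i2c now does) can honour this.
	 */
	static int bar_write_reg(struct i2c_client *client, u8 reg, u8 *data, u16 len)
	{
		struct i2c_msg msgs[] = {
			{ .addr = client->addr, .flags = 0,             .len = 1,   .buf = &reg },
			{ .addr = client->addr, .flags = I2C_M_NOSTART, .len = len, .buf = data },
		};
		int ret;

		if (!i2c_check_functionality(client->adapter, I2C_FUNC_NOSTART))
			return -EOPNOTSUPP;

		ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
		if (ret < 0)
			return ret;
		return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
	}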
return 0; -err_free_irq: - free_irq(i2c_dev->irq, i2c_dev); -err_free: - kfree(i2c_dev); -err_i2c_clk_put: - clk_put(i2c_clk); -err_clk_put: - clk_put(clk); -err_release_region: - release_mem_region(iomem->start, resource_size(iomem)); -err_iounmap: - iounmap(base); - return ret; } static int __devexit tegra_i2c_remove(struct platform_device *pdev) { struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); i2c_del_adapter(&i2c_dev->adapter); - free_irq(i2c_dev->irq, i2c_dev); - clk_put(i2c_dev->i2c_clk); - clk_put(i2c_dev->clk); - release_mem_region(i2c_dev->iomem->start, - resource_size(i2c_dev->iomem)); - iounmap(i2c_dev->base); - kfree(i2c_dev); return 0; } #ifdef CONFIG_PM -static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state) +static int tegra_i2c_suspend(struct device *dev) { - struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); i2c_lock_adapter(&i2c_dev->adapter); i2c_dev->is_suspended = true; @@ -728,9 +724,9 @@ static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state) return 0; } -static int tegra_i2c_resume(struct platform_device *pdev) +static int tegra_i2c_resume(struct device *dev) { - struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); int ret; i2c_lock_adapter(&i2c_dev->adapter); @@ -748,6 +744,11 @@ static int tegra_i2c_resume(struct platform_device *pdev) return 0; } + +static SIMPLE_DEV_PM_OPS(tegra_i2c_pm, tegra_i2c_suspend, tegra_i2c_resume); +#define TEGRA_I2C_PM (&tegra_i2c_pm) +#else +#define TEGRA_I2C_PM NULL #endif #if defined(CONFIG_OF) @@ -758,21 +759,16 @@ static const struct of_device_id tegra_i2c_of_match[] __devinitconst = { {}, }; MODULE_DEVICE_TABLE(of, tegra_i2c_of_match); -#else -#define tegra_i2c_of_match NULL #endif static struct platform_driver tegra_i2c_driver = { .probe = tegra_i2c_probe, .remove = __devexit_p(tegra_i2c_remove), -#ifdef CONFIG_PM - .suspend = tegra_i2c_suspend, - .resume = tegra_i2c_resume, -#endif .driver = { .name = "tegra-i2c", .owner = THIS_MODULE, - .of_match_table = tegra_i2c_of_match, + .of_match_table = of_match_ptr(tegra_i2c_of_match), + .pm = TEGRA_I2C_PM, }, }; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index d5b390f75c9a..14eaecea2b70 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -40,11 +40,27 @@ * Note that newer firmware allows querying device for maximum useable * coordinates. */ +#define XMIN 0 +#define XMAX 6143 +#define YMIN 0 +#define YMAX 6143 #define XMIN_NOMINAL 1472 #define XMAX_NOMINAL 5472 #define YMIN_NOMINAL 1408 #define YMAX_NOMINAL 4448 +/* Size in bits of absolute position values reported by the hardware */ +#define ABS_POS_BITS 13 + +/* + * Any position values from the hardware above the following limits are + * treated as "wrapped around negative" values that have been truncated to + * the 13-bit reporting range of the hardware. These are just reasonable + * guesses and can be adjusted if hardware is found that operates outside + * of these parameters. + */ +#define X_MAX_POSITIVE (((1 << ABS_POS_BITS) + XMAX) / 2) +#define Y_MAX_POSITIVE (((1 << ABS_POS_BITS) + YMAX) / 2) /***************************************************************************** * Stuff we need even when we do not want native Synaptics support @@ -588,6 +604,12 @@ static int synaptics_parse_hw_state(const unsigned char buf[], hw->right = (buf[0] & 0x02) ? 
1 : 0; } + /* Convert wrap-around values to negative */ + if (hw->x > X_MAX_POSITIVE) + hw->x -= 1 << ABS_POS_BITS; + if (hw->y > Y_MAX_POSITIVE) + hw->y -= 1 << ABS_POS_BITS; + return 0; } diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 6533f44be5bd..002041975de9 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -464,7 +464,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom) t = (data[6] << 2) | ((data[7] >> 6) & 3); if ((features->type >= INTUOS4S && features->type <= INTUOS4L) || (features->type >= INTUOS5S && features->type <= INTUOS5L) || - features->type == WACOM_21UX2 || features->type == WACOM_24HD) { + (features->type >= WACOM_21UX2 && features->type <= WACOM_24HD)) { t = (t << 1) | (data[1] & 1); } input_report_abs(input, ABS_PRESSURE, t); @@ -614,7 +614,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) input_report_abs(input, ABS_MISC, 0); } } else { - if (features->type == WACOM_21UX2) { + if (features->type == WACOM_21UX2 || features->type == WACOM_22HD) { input_report_key(input, BTN_0, (data[5] & 0x01)); input_report_key(input, BTN_1, (data[6] & 0x01)); input_report_key(input, BTN_2, (data[6] & 0x02)); @@ -633,6 +633,12 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) input_report_key(input, BTN_Z, (data[8] & 0x20)); input_report_key(input, BTN_BASE, (data[8] & 0x40)); input_report_key(input, BTN_BASE2, (data[8] & 0x80)); + + if (features->type == WACOM_22HD) { + input_report_key(input, KEY_PROG1, data[9] & 0x01); + input_report_key(input, KEY_PROG2, data[9] & 0x02); + input_report_key(input, KEY_PROG3, data[9] & 0x04); + } } else { input_report_key(input, BTN_0, (data[5] & 0x01)); input_report_key(input, BTN_1, (data[5] & 0x02)); @@ -1231,6 +1237,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) case CINTIQ: case WACOM_BEE: case WACOM_21UX2: + case WACOM_22HD: case WACOM_24HD: sync = wacom_intuos_irq(wacom_wac); break; @@ -1432,6 +1439,12 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, wacom_setup_cintiq(wacom_wac); break; + case WACOM_22HD: + __set_bit(KEY_PROG1, input_dev->keybit); + __set_bit(KEY_PROG2, input_dev->keybit); + __set_bit(KEY_PROG3, input_dev->keybit); + /* fall through */ + case WACOM_21UX2: __set_bit(BTN_A, input_dev->keybit); __set_bit(BTN_B, input_dev->keybit); @@ -1858,6 +1871,9 @@ static const struct wacom_features wacom_features_0xF0 = static const struct wacom_features wacom_features_0xCC = { "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87200, 65600, 2047, 63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; +static const struct wacom_features wacom_features_0xFA = + { "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, + 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x90 = { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -2075,6 +2091,7 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xEF) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, + { USB_DEVICE_WACOM(0xFA) }, { USB_DEVICE_LENOVO(0x6004) }, { } }; diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h index bd5d37b28714..96c185cc301e 100644 --- a/drivers/input/tablet/wacom_wac.h +++ b/drivers/input/tablet/wacom_wac.h @@ -73,8 +73,9 @@ enum { INTUOS5S, INTUOS5, INTUOS5L, - WACOM_24HD, WACOM_21UX2, + WACOM_22HD, + WACOM_24HD, CINTIQ, WACOM_BEE, WACOM_MO, diff --git 
a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 73bd2f6b82ec..1ba232cbc09d 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -472,6 +472,19 @@ config TOUCHSCREEN_PENMOUNT To compile this driver as a module, choose M here: the module will be called penmount. +config TOUCHSCREEN_EDT_FT5X06 + tristate "EDT FocalTech FT5x06 I2C Touchscreen support" + depends on I2C + help + Say Y here if you have an EDT "Polytouch" touchscreen based + on the FocalTech FT5x06 family of controllers connected to + your system. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called edt-ft5x06. + config TOUCHSCREEN_MIGOR tristate "Renesas MIGO-R touchscreen" depends on SH_MIGOR && I2C diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 5920c60f999d..178eb128d90f 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_TOUCHSCREEN_CYTTSP_SPI) += cyttsp_spi.o obj-$(CONFIG_TOUCHSCREEN_DA9034) += da9034-ts.o obj-$(CONFIG_TOUCHSCREEN_DA9052) += da9052_tsi.o obj-$(CONFIG_TOUCHSCREEN_DYNAPRO) += dynapro.o +obj-$(CONFIG_TOUCHSCREEN_EDT_FT5X06) += edt-ft5x06.o obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c new file mode 100644 index 000000000000..9afc777a40a7 --- /dev/null +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -0,0 +1,898 @@ +/* + * Copyright (C) 2012 Simon Budig, <simon.budig@kernelconcepts.de> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* + * This is a driver for the EDT "Polytouch" family of touch controllers + * based on the FocalTech FT5x06 line of chips. 
+ * + * Development of this driver has been sponsored by Glyn: + * http://www.glyn.com/Products/Displays + */ + +#include <linux/module.h> +#include <linux/ratelimit.h> +#include <linux/interrupt.h> +#include <linux/input.h> +#include <linux/i2c.h> +#include <linux/uaccess.h> +#include <linux/delay.h> +#include <linux/debugfs.h> +#include <linux/slab.h> +#include <linux/gpio.h> +#include <linux/input/mt.h> +#include <linux/input/edt-ft5x06.h> + +#define MAX_SUPPORT_POINTS 5 + +#define WORK_REGISTER_THRESHOLD 0x00 +#define WORK_REGISTER_REPORT_RATE 0x08 +#define WORK_REGISTER_GAIN 0x30 +#define WORK_REGISTER_OFFSET 0x31 +#define WORK_REGISTER_NUM_X 0x33 +#define WORK_REGISTER_NUM_Y 0x34 + +#define WORK_REGISTER_OPMODE 0x3c +#define FACTORY_REGISTER_OPMODE 0x01 + +#define TOUCH_EVENT_DOWN 0x00 +#define TOUCH_EVENT_UP 0x01 +#define TOUCH_EVENT_ON 0x02 +#define TOUCH_EVENT_RESERVED 0x03 + +#define EDT_NAME_LEN 23 +#define EDT_SWITCH_MODE_RETRIES 10 +#define EDT_SWITCH_MODE_DELAY 5 /* msec */ +#define EDT_RAW_DATA_RETRIES 100 +#define EDT_RAW_DATA_DELAY 1 /* msec */ + +struct edt_ft5x06_ts_data { + struct i2c_client *client; + struct input_dev *input; + u16 num_x; + u16 num_y; + +#if defined(CONFIG_DEBUG_FS) + struct dentry *debug_dir; + u8 *raw_buffer; + size_t raw_bufsize; +#endif + + struct mutex mutex; + bool factory_mode; + int threshold; + int gain; + int offset; + int report_rate; + + char name[EDT_NAME_LEN]; +}; + +static int edt_ft5x06_ts_readwrite(struct i2c_client *client, + u16 wr_len, u8 *wr_buf, + u16 rd_len, u8 *rd_buf) +{ + struct i2c_msg wrmsg[2]; + int i = 0; + int ret; + + if (wr_len) { + wrmsg[i].addr = client->addr; + wrmsg[i].flags = 0; + wrmsg[i].len = wr_len; + wrmsg[i].buf = wr_buf; + i++; + } + if (rd_len) { + wrmsg[i].addr = client->addr; + wrmsg[i].flags = I2C_M_RD; + wrmsg[i].len = rd_len; + wrmsg[i].buf = rd_buf; + i++; + } + + ret = i2c_transfer(client->adapter, wrmsg, i); + if (ret < 0) + return ret; + if (ret != i) + return -EIO; + + return 0; +} + +static bool edt_ft5x06_ts_check_crc(struct edt_ft5x06_ts_data *tsdata, + u8 *buf, int buflen) +{ + int i; + u8 crc = 0; + + for (i = 0; i < buflen - 1; i++) + crc ^= buf[i]; + + if (crc != buf[buflen-1]) { + dev_err_ratelimited(&tsdata->client->dev, + "crc error: 0x%02x expected, got 0x%02x\n", + crc, buf[buflen-1]); + return false; + } + + return true; +} + +static irqreturn_t edt_ft5x06_ts_isr(int irq, void *dev_id) +{ + struct edt_ft5x06_ts_data *tsdata = dev_id; + struct device *dev = &tsdata->client->dev; + u8 cmd = 0xf9; + u8 rdbuf[26]; + int i, type, x, y, id; + int error; + + memset(rdbuf, 0, sizeof(rdbuf)); + + error = edt_ft5x06_ts_readwrite(tsdata->client, + sizeof(cmd), &cmd, + sizeof(rdbuf), rdbuf); + if (error) { + dev_err_ratelimited(dev, "Unable to fetch data, error: %d\n", + error); + goto out; + } + + if (rdbuf[0] != 0xaa || rdbuf[1] != 0xaa || rdbuf[2] != 26) { + dev_err_ratelimited(dev, "Unexpected header: %02x%02x%02x!\n", + rdbuf[0], rdbuf[1], rdbuf[2]); + goto out; + } + + if (!edt_ft5x06_ts_check_crc(tsdata, rdbuf, 26)) + goto out; + + for (i = 0; i < MAX_SUPPORT_POINTS; i++) { + u8 *buf = &rdbuf[i * 4 + 5]; + bool down; + + type = buf[0] >> 6; + /* ignore Reserved events */ + if (type == TOUCH_EVENT_RESERVED) + continue; + + x = ((buf[0] << 8) | buf[1]) & 0x0fff; + y = ((buf[2] << 8) | buf[3]) & 0x0fff; + id = (buf[2] >> 4) & 0x0f; + down = (type != TOUCH_EVENT_UP); + + input_mt_slot(tsdata->input, id); + input_mt_report_slot_state(tsdata->input, MT_TOOL_FINGER, down); + + if (!down) + 
continue; + + input_report_abs(tsdata->input, ABS_MT_POSITION_X, x); + input_report_abs(tsdata->input, ABS_MT_POSITION_Y, y); + } + + input_mt_report_pointer_emulation(tsdata->input, true); + input_sync(tsdata->input); + +out: + return IRQ_HANDLED; +} + +static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata, + u8 addr, u8 value) +{ + u8 wrbuf[4]; + + wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc; + wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f; + wrbuf[2] = value; + wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2]; + + return edt_ft5x06_ts_readwrite(tsdata->client, 4, wrbuf, 0, NULL); +} + +static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata, + u8 addr) +{ + u8 wrbuf[2], rdbuf[2]; + int error; + + wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc; + wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f; + wrbuf[1] |= tsdata->factory_mode ? 0x80 : 0x40; + + error = edt_ft5x06_ts_readwrite(tsdata->client, 2, wrbuf, 2, rdbuf); + if (error) + return error; + + if ((wrbuf[0] ^ wrbuf[1] ^ rdbuf[0]) != rdbuf[1]) { + dev_err(&tsdata->client->dev, + "crc error: 0x%02x expected, got 0x%02x\n", + wrbuf[0] ^ wrbuf[1] ^ rdbuf[0], rdbuf[1]); + return -EIO; + } + + return rdbuf[0]; +} + +struct edt_ft5x06_attribute { + struct device_attribute dattr; + size_t field_offset; + u8 limit_low; + u8 limit_high; + u8 addr; +}; + +#define EDT_ATTR(_field, _mode, _addr, _limit_low, _limit_high) \ + struct edt_ft5x06_attribute edt_ft5x06_attr_##_field = { \ + .dattr = __ATTR(_field, _mode, \ + edt_ft5x06_setting_show, \ + edt_ft5x06_setting_store), \ + .field_offset = \ + offsetof(struct edt_ft5x06_ts_data, _field), \ + .limit_low = _limit_low, \ + .limit_high = _limit_high, \ + .addr = _addr, \ + } + +static ssize_t edt_ft5x06_setting_show(struct device *dev, + struct device_attribute *dattr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client); + struct edt_ft5x06_attribute *attr = + container_of(dattr, struct edt_ft5x06_attribute, dattr); + u8 *field = (u8 *)((char *)tsdata + attr->field_offset); + int val; + size_t count = 0; + int error = 0; + + mutex_lock(&tsdata->mutex); + + if (tsdata->factory_mode) { + error = -EIO; + goto out; + } + + val = edt_ft5x06_register_read(tsdata, attr->addr); + if (val < 0) { + error = val; + dev_err(&tsdata->client->dev, + "Failed to fetch attribute %s, error %d\n", + dattr->attr.name, error); + goto out; + } + + if (val != *field) { + dev_warn(&tsdata->client->dev, + "%s: read (%d) and stored value (%d) differ\n", + dattr->attr.name, val, *field); + *field = val; + } + + count = scnprintf(buf, PAGE_SIZE, "%d\n", val); +out: + mutex_unlock(&tsdata->mutex); + return error ?: count; +} + +static ssize_t edt_ft5x06_setting_store(struct device *dev, + struct device_attribute *dattr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client); + struct edt_ft5x06_attribute *attr = + container_of(dattr, struct edt_ft5x06_attribute, dattr); + u8 *field = (u8 *)((char *)tsdata + attr->field_offset); + unsigned int val; + int error; + + mutex_lock(&tsdata->mutex); + + if (tsdata->factory_mode) { + error = -EIO; + goto out; + } + + error = kstrtouint(buf, 0, &val); + if (error) + goto out; + + if (val < attr->limit_low || val > attr->limit_high) { + error = -ERANGE; + goto out; + } + + error = edt_ft5x06_register_write(tsdata, attr->addr, val); + if (error) { + 
dev_err(&tsdata->client->dev, + "Failed to update attribute %s, error: %d\n", + dattr->attr.name, error); + goto out; + } + + *field = val; + +out: + mutex_unlock(&tsdata->mutex); + return error ?: count; +} + +static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN, 0, 31); +static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET, 0, 31); +static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, + WORK_REGISTER_THRESHOLD, 20, 80); +static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, + WORK_REGISTER_REPORT_RATE, 3, 14); + +static struct attribute *edt_ft5x06_attrs[] = { + &edt_ft5x06_attr_gain.dattr.attr, + &edt_ft5x06_attr_offset.dattr.attr, + &edt_ft5x06_attr_threshold.dattr.attr, + &edt_ft5x06_attr_report_rate.dattr.attr, + NULL +}; + +static const struct attribute_group edt_ft5x06_attr_group = { + .attrs = edt_ft5x06_attrs, +}; + +#ifdef CONFIG_DEBUG_FS +static int edt_ft5x06_factory_mode(struct edt_ft5x06_ts_data *tsdata) +{ + struct i2c_client *client = tsdata->client; + int retries = EDT_SWITCH_MODE_RETRIES; + int ret; + int error; + + disable_irq(client->irq); + + if (!tsdata->raw_buffer) { + tsdata->raw_bufsize = tsdata->num_x * tsdata->num_y * + sizeof(u16); + tsdata->raw_buffer = kzalloc(tsdata->raw_bufsize, GFP_KERNEL); + if (!tsdata->raw_buffer) { + error = -ENOMEM; + goto err_out; + } + } + + /* mode register is 0x3c when in the work mode */ + error = edt_ft5x06_register_write(tsdata, WORK_REGISTER_OPMODE, 0x03); + if (error) { + dev_err(&client->dev, + "failed to switch to factory mode, error %d\n", error); + goto err_out; + } + + tsdata->factory_mode = true; + do { + mdelay(EDT_SWITCH_MODE_DELAY); + /* mode register is 0x01 when in factory mode */ + ret = edt_ft5x06_register_read(tsdata, FACTORY_REGISTER_OPMODE); + if (ret == 0x03) + break; + } while (--retries > 0); + + if (retries == 0) { + dev_err(&client->dev, "not in factory mode after %dms.\n", + EDT_SWITCH_MODE_RETRIES * EDT_SWITCH_MODE_DELAY); + error = -EIO; + goto err_out; + } + + return 0; + +err_out: + kfree(tsdata->raw_buffer); + tsdata->raw_buffer = NULL; + tsdata->factory_mode = false; + enable_irq(client->irq); + + return error; +} + +static int edt_ft5x06_work_mode(struct edt_ft5x06_ts_data *tsdata) +{ + struct i2c_client *client = tsdata->client; + int retries = EDT_SWITCH_MODE_RETRIES; + int ret; + int error; + + /* mode register is 0x01 when in the factory mode */ + error = edt_ft5x06_register_write(tsdata, FACTORY_REGISTER_OPMODE, 0x1); + if (error) { + dev_err(&client->dev, + "failed to switch to work mode, error: %d\n", error); + return error; + } + + tsdata->factory_mode = false; + + do { + mdelay(EDT_SWITCH_MODE_DELAY); + /* mode register is 0x01 when in factory mode */ + ret = edt_ft5x06_register_read(tsdata, WORK_REGISTER_OPMODE); + if (ret == 0x01) + break; + } while (--retries > 0); + + if (retries == 0) { + dev_err(&client->dev, "not in work mode after %dms.\n", + EDT_SWITCH_MODE_RETRIES * EDT_SWITCH_MODE_DELAY); + tsdata->factory_mode = true; + return -EIO; + } + + if (tsdata->raw_buffer) + kfree(tsdata->raw_buffer); + tsdata->raw_buffer = NULL; + + /* restore parameters */ + edt_ft5x06_register_write(tsdata, WORK_REGISTER_THRESHOLD, + tsdata->threshold); + edt_ft5x06_register_write(tsdata, WORK_REGISTER_GAIN, + tsdata->gain); + edt_ft5x06_register_write(tsdata, WORK_REGISTER_OFFSET, + tsdata->offset); + edt_ft5x06_register_write(tsdata, WORK_REGISTER_REPORT_RATE, + tsdata->report_rate); + + enable_irq(client->irq); + + return 0; +} + +static int edt_ft5x06_debugfs_mode_get(void *data, u64 *mode) 
+{ + struct edt_ft5x06_ts_data *tsdata = data; + + *mode = tsdata->factory_mode; + + return 0; +}; + +static int edt_ft5x06_debugfs_mode_set(void *data, u64 mode) +{ + struct edt_ft5x06_ts_data *tsdata = data; + int retval = 0; + + if (mode > 1) + return -ERANGE; + + mutex_lock(&tsdata->mutex); + + if (mode != tsdata->factory_mode) { + retval = mode ? edt_ft5x06_factory_mode(tsdata) : + edt_ft5x06_work_mode(tsdata); + } + + mutex_unlock(&tsdata->mutex); + + return retval; +}; + +DEFINE_SIMPLE_ATTRIBUTE(debugfs_mode_fops, edt_ft5x06_debugfs_mode_get, + edt_ft5x06_debugfs_mode_set, "%llu\n"); + +static int edt_ft5x06_debugfs_raw_data_open(struct inode *inode, + struct file *file) +{ + file->private_data = inode->i_private; + + return 0; +} + +static ssize_t edt_ft5x06_debugfs_raw_data_read(struct file *file, + char __user *buf, size_t count, loff_t *off) +{ + struct edt_ft5x06_ts_data *tsdata = file->private_data; + struct i2c_client *client = tsdata->client; + int retries = EDT_RAW_DATA_RETRIES; + int val, i, error; + size_t read = 0; + int colbytes; + char wrbuf[3]; + u8 *rdbuf; + + if (*off < 0 || *off >= tsdata->raw_bufsize) + return 0; + + mutex_lock(&tsdata->mutex); + + if (!tsdata->factory_mode || !tsdata->raw_buffer) { + error = -EIO; + goto out; + } + + error = edt_ft5x06_register_write(tsdata, 0x08, 0x01); + if (error) { + dev_dbg(&client->dev, + "failed to write 0x08 register, error %d\n", error); + goto out; + } + + do { + msleep(EDT_RAW_DATA_DELAY); + val = edt_ft5x06_register_read(tsdata, 0x08); + if (val < 1) + break; + } while (--retries > 0); + + if (val < 0) { + error = val; + dev_dbg(&client->dev, + "failed to read 0x08 register, error %d\n", error); + goto out; + } + + if (retries == 0) { + dev_dbg(&client->dev, + "timed out waiting for register to settle\n"); + error = -ETIMEDOUT; + goto out; + } + + rdbuf = tsdata->raw_buffer; + colbytes = tsdata->num_y * sizeof(u16); + + wrbuf[0] = 0xf5; + wrbuf[1] = 0x0e; + for (i = 0; i < tsdata->num_x; i++) { + wrbuf[2] = i; /* column index */ + error = edt_ft5x06_ts_readwrite(tsdata->client, + sizeof(wrbuf), wrbuf, + colbytes, rdbuf); + if (error) + goto out; + + rdbuf += colbytes; + } + + read = min_t(size_t, count, tsdata->raw_bufsize - *off); + error = copy_to_user(buf, tsdata->raw_buffer + *off, read); + if (!error) + *off += read; +out: + mutex_unlock(&tsdata->mutex); + return error ?: read; +}; + + +static const struct file_operations debugfs_raw_data_fops = { + .open = edt_ft5x06_debugfs_raw_data_open, + .read = edt_ft5x06_debugfs_raw_data_read, +}; + +static void __devinit +edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata, + const char *debugfs_name) +{ + tsdata->debug_dir = debugfs_create_dir(debugfs_name, NULL); + if (!tsdata->debug_dir) + return; + + debugfs_create_u16("num_x", S_IRUSR, tsdata->debug_dir, &tsdata->num_x); + debugfs_create_u16("num_y", S_IRUSR, tsdata->debug_dir, &tsdata->num_y); + + debugfs_create_file("mode", S_IRUSR | S_IWUSR, + tsdata->debug_dir, tsdata, &debugfs_mode_fops); + debugfs_create_file("raw_data", S_IRUSR, + tsdata->debug_dir, tsdata, &debugfs_raw_data_fops); +} + +static void __devexit +edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) +{ + if (tsdata->debug_dir) + debugfs_remove_recursive(tsdata->debug_dir); +} + +#else + +static inline void +edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata, + const char *debugfs_name) +{ +} + +static inline void +edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) +{ +} + +#endif /* 
CONFIG_DEBUGFS */ + + + +static int __devinit edt_ft5x06_ts_reset(struct i2c_client *client, + int reset_pin) +{ + int error; + + if (gpio_is_valid(reset_pin)) { + /* this pulls reset down, enabling the low active reset */ + error = gpio_request_one(reset_pin, GPIOF_OUT_INIT_LOW, + "edt-ft5x06 reset"); + if (error) { + dev_err(&client->dev, + "Failed to request GPIO %d as reset pin, error %d\n", + reset_pin, error); + return error; + } + + mdelay(50); + gpio_set_value(reset_pin, 1); + mdelay(100); + } + + return 0; +} + +static int __devinit edt_ft5x06_ts_identify(struct i2c_client *client, + char *model_name, + char *fw_version) +{ + u8 rdbuf[EDT_NAME_LEN]; + char *p; + int error; + + error = edt_ft5x06_ts_readwrite(client, 1, "\xbb", + EDT_NAME_LEN - 1, rdbuf); + if (error) + return error; + + /* remove last '$' end marker */ + rdbuf[EDT_NAME_LEN - 1] = '\0'; + if (rdbuf[EDT_NAME_LEN - 2] == '$') + rdbuf[EDT_NAME_LEN - 2] = '\0'; + + /* look for Model/Version separator */ + p = strchr(rdbuf, '*'); + if (p) + *p++ = '\0'; + + strlcpy(model_name, rdbuf + 1, EDT_NAME_LEN); + strlcpy(fw_version, p ? p : "", EDT_NAME_LEN); + + return 0; +} + +#define EDT_ATTR_CHECKSET(name, reg) \ + if (pdata->name >= edt_ft5x06_attr_##name.limit_low && \ + pdata->name <= edt_ft5x06_attr_##name.limit_high) \ + edt_ft5x06_register_write(tsdata, reg, pdata->name) + +static void __devinit +edt_ft5x06_ts_get_defaults(struct edt_ft5x06_ts_data *tsdata, + const struct edt_ft5x06_platform_data *pdata) +{ + if (!pdata->use_parameters) + return; + + /* pick up defaults from the platform data */ + EDT_ATTR_CHECKSET(threshold, WORK_REGISTER_THRESHOLD); + EDT_ATTR_CHECKSET(gain, WORK_REGISTER_GAIN); + EDT_ATTR_CHECKSET(offset, WORK_REGISTER_OFFSET); + EDT_ATTR_CHECKSET(report_rate, WORK_REGISTER_REPORT_RATE); +} + +static void __devinit +edt_ft5x06_ts_get_parameters(struct edt_ft5x06_ts_data *tsdata) +{ + tsdata->threshold = edt_ft5x06_register_read(tsdata, + WORK_REGISTER_THRESHOLD); + tsdata->gain = edt_ft5x06_register_read(tsdata, WORK_REGISTER_GAIN); + tsdata->offset = edt_ft5x06_register_read(tsdata, WORK_REGISTER_OFFSET); + tsdata->report_rate = edt_ft5x06_register_read(tsdata, + WORK_REGISTER_REPORT_RATE); + tsdata->num_x = edt_ft5x06_register_read(tsdata, WORK_REGISTER_NUM_X); + tsdata->num_y = edt_ft5x06_register_read(tsdata, WORK_REGISTER_NUM_Y); +} + +static int __devinit edt_ft5x06_ts_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + const struct edt_ft5x06_platform_data *pdata = + client->dev.platform_data; + struct edt_ft5x06_ts_data *tsdata; + struct input_dev *input; + int error; + char fw_version[EDT_NAME_LEN]; + + dev_dbg(&client->dev, "probing for EDT FT5x06 I2C\n"); + + if (!pdata) { + dev_err(&client->dev, "no platform data?\n"); + return -EINVAL; + } + + error = edt_ft5x06_ts_reset(client, pdata->reset_pin); + if (error) + return error; + + if (gpio_is_valid(pdata->irq_pin)) { + error = gpio_request_one(pdata->irq_pin, + GPIOF_IN, "edt-ft5x06 irq"); + if (error) { + dev_err(&client->dev, + "Failed to request GPIO %d, error %d\n", + pdata->irq_pin, error); + return error; + } + } + + tsdata = kzalloc(sizeof(*tsdata), GFP_KERNEL); + input = input_allocate_device(); + if (!tsdata || !input) { + dev_err(&client->dev, "failed to allocate driver data.\n"); + error = -ENOMEM; + goto err_free_mem; + } + + mutex_init(&tsdata->mutex); + tsdata->client = client; + tsdata->input = input; + tsdata->factory_mode = false; + + error = edt_ft5x06_ts_identify(client, tsdata->name, fw_version); + 
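[Editor's aside, not part of the patch] The edt-ft5x06 interrupt handler above reports contacts with the slotted multi-touch "type B" protocol. A condensed sketch of that reporting sequence, assuming the per-contact data has already been parsed into a hypothetical array:

	#include <linux/input.h>
	#include <linux/input/mt.h>

	/* Editor's sketch, not from the kernel tree; names are illustrative. */
	struct touch_point {
		int slot;		/* contact id assigned by the hardware */
		int x, y;
		bool down;
	};

	/*
	 * Report one frame of contacts. The input device must have been set up
	 * with input_mt_init_slots() and ABS_MT_POSITION_X/Y ranges beforehand,
	 * as the probe function above does.
	 */
	static void report_frame(struct input_dev *input,
				 const struct touch_point *pt, int count)
	{
		int i;

		for (i = 0; i < count; i++) {
			input_mt_slot(input, pt[i].slot);
			input_mt_report_slot_state(input, MT_TOOL_FINGER, pt[i].down);
			if (!pt[i].down)
				continue;
			input_report_abs(input, ABS_MT_POSITION_X, pt[i].x);
			input_report_abs(input, ABS_MT_POSITION_Y, pt[i].y);
		}

		/* Synthesize legacy single-touch events for old userspace. */
		input_mt_report_pointer_emulation(input, true);
		input_sync(input);
	}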
if (error) { + dev_err(&client->dev, "touchscreen probe failed\n"); + goto err_free_mem; + } + + edt_ft5x06_ts_get_defaults(tsdata, pdata); + edt_ft5x06_ts_get_parameters(tsdata); + + dev_dbg(&client->dev, + "Model \"%s\", Rev. \"%s\", %dx%d sensors\n", + tsdata->name, fw_version, tsdata->num_x, tsdata->num_y); + + input->name = tsdata->name; + input->id.bustype = BUS_I2C; + input->dev.parent = &client->dev; + + __set_bit(EV_SYN, input->evbit); + __set_bit(EV_KEY, input->evbit); + __set_bit(EV_ABS, input->evbit); + __set_bit(BTN_TOUCH, input->keybit); + input_set_abs_params(input, ABS_X, 0, tsdata->num_x * 64 - 1, 0, 0); + input_set_abs_params(input, ABS_Y, 0, tsdata->num_y * 64 - 1, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_X, + 0, tsdata->num_x * 64 - 1, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, + 0, tsdata->num_y * 64 - 1, 0, 0); + error = input_mt_init_slots(input, MAX_SUPPORT_POINTS); + if (error) { + dev_err(&client->dev, "Unable to init MT slots.\n"); + goto err_free_mem; + } + + input_set_drvdata(input, tsdata); + i2c_set_clientdata(client, tsdata); + + error = request_threaded_irq(client->irq, NULL, edt_ft5x06_ts_isr, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + client->name, tsdata); + if (error) { + dev_err(&client->dev, "Unable to request touchscreen IRQ.\n"); + goto err_free_mem; + } + + error = sysfs_create_group(&client->dev.kobj, &edt_ft5x06_attr_group); + if (error) + goto err_free_irq; + + error = input_register_device(input); + if (error) + goto err_remove_attrs; + + edt_ft5x06_ts_prepare_debugfs(tsdata, dev_driver_string(&client->dev)); + device_init_wakeup(&client->dev, 1); + + dev_dbg(&client->dev, + "EDT FT5x06 initialized: IRQ pin %d, Reset pin %d.\n", + pdata->irq_pin, pdata->reset_pin); + + return 0; + +err_remove_attrs: + sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group); +err_free_irq: + free_irq(client->irq, tsdata); +err_free_mem: + input_free_device(input); + kfree(tsdata); + + if (gpio_is_valid(pdata->irq_pin)) + gpio_free(pdata->irq_pin); + + return error; +} + +static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client) +{ + const struct edt_ft5x06_platform_data *pdata = + dev_get_platdata(&client->dev); + struct edt_ft5x06_ts_data *tsdata = i2c_get_clientdata(client); + + edt_ft5x06_ts_teardown_debugfs(tsdata); + sysfs_remove_group(&client->dev.kobj, &edt_ft5x06_attr_group); + + free_irq(client->irq, tsdata); + input_unregister_device(tsdata->input); + + if (gpio_is_valid(pdata->irq_pin)) + gpio_free(pdata->irq_pin); + if (gpio_is_valid(pdata->reset_pin)) + gpio_free(pdata->reset_pin); + + kfree(tsdata->raw_buffer); + kfree(tsdata); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int edt_ft5x06_ts_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(client->irq); + + return 0; +} + +static int edt_ft5x06_ts_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + + if (device_may_wakeup(dev)) + disable_irq_wake(client->irq); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(edt_ft5x06_ts_pm_ops, + edt_ft5x06_ts_suspend, edt_ft5x06_ts_resume); + +static const struct i2c_device_id edt_ft5x06_ts_id[] = { + { "edt-ft5x06", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, edt_ft5x06_ts_id); + +static struct i2c_driver edt_ft5x06_ts_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "edt_ft5x06", + .pm = &edt_ft5x06_ts_pm_ops, + }, + .id_table = edt_ft5x06_ts_id, + .probe = edt_ft5x06_ts_probe, + .remove = 
__devexit_p(edt_ft5x06_ts_remove), +}; + +module_i2c_driver(edt_ft5x06_ts_driver); + +MODULE_AUTHOR("Simon Budig <simon.budig@kernelconcepts.de>"); +MODULE_DESCRIPTION("EDT FT5x06 I2C Touchscreen Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 10f122a3a856..1eee45b69b71 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -260,15 +260,6 @@ config DM_DEBUG_BLOCK_STACK_TRACING If unsure, say N. -config DM_DEBUG_SPACE_MAPS - boolean "Extra validation for thin provisioning space maps" - depends on DM_THIN_PROVISIONING - ---help--- - Enable this for messages that may help debug problems with the - space maps used by thin provisioning. - - If unsure, say N. - config DM_MIRROR tristate "Mirror target" depends on BLK_DEV_DM diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 3f06df59fd82..664743d6a6cd 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -42,21 +42,21 @@ struct convert_context { unsigned int offset_out; unsigned int idx_in; unsigned int idx_out; - sector_t sector; - atomic_t pending; + sector_t cc_sector; + atomic_t cc_pending; }; /* * per bio private data */ struct dm_crypt_io { - struct dm_target *target; + struct crypt_config *cc; struct bio *base_bio; struct work_struct work; struct convert_context ctx; - atomic_t pending; + atomic_t io_pending; int error; sector_t sector; struct dm_crypt_io *base_io; @@ -109,9 +109,6 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; */ struct crypt_cpu { struct ablkcipher_request *req; - /* ESSIV: struct crypto_cipher *essiv_tfm */ - void *iv_private; - struct crypto_ablkcipher *tfms[0]; }; /* @@ -151,6 +148,10 @@ struct crypt_config { * per_cpu_ptr() only. */ struct crypt_cpu __percpu *cpu; + + /* ESSIV: struct crypto_cipher *essiv_tfm */ + void *iv_private; + struct crypto_ablkcipher **tfms; unsigned tfms_count; /* @@ -193,7 +194,7 @@ static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) */ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc) { - return __this_cpu_ptr(cc->cpu)->tfms[0]; + return cc->tfms[0]; } /* @@ -258,7 +259,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc) struct hash_desc desc; struct scatterlist sg; struct crypto_cipher *essiv_tfm; - int err, cpu; + int err; sg_init_one(&sg, cc->key, cc->key_size); desc.tfm = essiv->hash_tfm; @@ -268,14 +269,12 @@ static int crypt_iv_essiv_init(struct crypt_config *cc) if (err) return err; - for_each_possible_cpu(cpu) { - essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private, + essiv_tfm = cc->iv_private; - err = crypto_cipher_setkey(essiv_tfm, essiv->salt, - crypto_hash_digestsize(essiv->hash_tfm)); - if (err) - return err; - } + err = crypto_cipher_setkey(essiv_tfm, essiv->salt, + crypto_hash_digestsize(essiv->hash_tfm)); + if (err) + return err; return 0; } @@ -286,16 +285,14 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc) struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm); struct crypto_cipher *essiv_tfm; - int cpu, r, err = 0; + int r, err = 0; memset(essiv->salt, 0, salt_size); - for_each_possible_cpu(cpu) { - essiv_tfm = per_cpu_ptr(cc->cpu, cpu)->iv_private; - r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); - if (r) - err = r; - } + essiv_tfm = cc->iv_private; + r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size); + if (r) + err = r; return err; } @@ -335,8 +332,6 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc, static void 
crypt_iv_essiv_dtr(struct crypt_config *cc) { - int cpu; - struct crypt_cpu *cpu_cc; struct crypto_cipher *essiv_tfm; struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv; @@ -346,15 +341,12 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc) kzfree(essiv->salt); essiv->salt = NULL; - for_each_possible_cpu(cpu) { - cpu_cc = per_cpu_ptr(cc->cpu, cpu); - essiv_tfm = cpu_cc->iv_private; + essiv_tfm = cc->iv_private; - if (essiv_tfm) - crypto_free_cipher(essiv_tfm); + if (essiv_tfm) + crypto_free_cipher(essiv_tfm); - cpu_cc->iv_private = NULL; - } + cc->iv_private = NULL; } static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, @@ -363,7 +355,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, struct crypto_cipher *essiv_tfm = NULL; struct crypto_hash *hash_tfm = NULL; u8 *salt = NULL; - int err, cpu; + int err; if (!opts) { ti->error = "Digest algorithm missing for ESSIV mode"; @@ -388,15 +380,13 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti, cc->iv_gen_private.essiv.salt = salt; cc->iv_gen_private.essiv.hash_tfm = hash_tfm; - for_each_possible_cpu(cpu) { - essiv_tfm = setup_essiv_cpu(cc, ti, salt, - crypto_hash_digestsize(hash_tfm)); - if (IS_ERR(essiv_tfm)) { - crypt_iv_essiv_dtr(cc); - return PTR_ERR(essiv_tfm); - } - per_cpu_ptr(cc->cpu, cpu)->iv_private = essiv_tfm; + essiv_tfm = setup_essiv_cpu(cc, ti, salt, + crypto_hash_digestsize(hash_tfm)); + if (IS_ERR(essiv_tfm)) { + crypt_iv_essiv_dtr(cc); + return PTR_ERR(essiv_tfm); } + cc->iv_private = essiv_tfm; return 0; @@ -410,7 +400,7 @@ bad: static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, struct dm_crypt_request *dmreq) { - struct crypto_cipher *essiv_tfm = this_crypt_config(cc)->iv_private; + struct crypto_cipher *essiv_tfm = cc->iv_private; memset(iv, 0, cc->iv_size); *(__le64 *)iv = cpu_to_le64(dmreq->iv_sector); @@ -664,7 +654,7 @@ static void crypt_convert_init(struct crypt_config *cc, ctx->offset_out = 0; ctx->idx_in = bio_in ? bio_in->bi_idx : 0; ctx->idx_out = bio_out ? 
bio_out->bi_idx : 0; - ctx->sector = sector + cc->iv_offset; + ctx->cc_sector = sector + cc->iv_offset; init_completion(&ctx->restart); } @@ -695,12 +685,12 @@ static int crypt_convert_block(struct crypt_config *cc, struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); struct dm_crypt_request *dmreq; u8 *iv; - int r = 0; + int r; dmreq = dmreq_of_req(cc, req); iv = iv_of_dmreq(cc, dmreq); - dmreq->iv_sector = ctx->sector; + dmreq->iv_sector = ctx->cc_sector; dmreq->ctx = ctx; sg_init_table(&dmreq->sg_in, 1); sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, @@ -749,12 +739,12 @@ static void crypt_alloc_req(struct crypt_config *cc, struct convert_context *ctx) { struct crypt_cpu *this_cc = this_crypt_config(cc); - unsigned key_index = ctx->sector & (cc->tfms_count - 1); + unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); if (!this_cc->req) this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); - ablkcipher_request_set_tfm(this_cc->req, this_cc->tfms[key_index]); + ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); ablkcipher_request_set_callback(this_cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); @@ -769,14 +759,14 @@ static int crypt_convert(struct crypt_config *cc, struct crypt_cpu *this_cc = this_crypt_config(cc); int r; - atomic_set(&ctx->pending, 1); + atomic_set(&ctx->cc_pending, 1); while(ctx->idx_in < ctx->bio_in->bi_vcnt && ctx->idx_out < ctx->bio_out->bi_vcnt) { crypt_alloc_req(cc, ctx); - atomic_inc(&ctx->pending); + atomic_inc(&ctx->cc_pending); r = crypt_convert_block(cc, ctx, this_cc->req); @@ -788,19 +778,19 @@ static int crypt_convert(struct crypt_config *cc, /* fall through*/ case -EINPROGRESS: this_cc->req = NULL; - ctx->sector++; + ctx->cc_sector++; continue; /* sync */ case 0: - atomic_dec(&ctx->pending); - ctx->sector++; + atomic_dec(&ctx->cc_pending); + ctx->cc_sector++; cond_resched(); continue; /* error */ default: - atomic_dec(&ctx->pending); + atomic_dec(&ctx->cc_pending); return r; } } @@ -811,7 +801,7 @@ static int crypt_convert(struct crypt_config *cc, static void dm_crypt_bio_destructor(struct bio *bio) { struct dm_crypt_io *io = bio->bi_private; - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; bio_free(bio, cc->bs); } @@ -825,7 +815,7 @@ static void dm_crypt_bio_destructor(struct bio *bio) static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, unsigned *out_of_pages) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; struct bio *clone; unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; @@ -884,26 +874,25 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) } } -static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti, +static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, struct bio *bio, sector_t sector) { - struct crypt_config *cc = ti->private; struct dm_crypt_io *io; io = mempool_alloc(cc->io_pool, GFP_NOIO); - io->target = ti; + io->cc = cc; io->base_bio = bio; io->sector = sector; io->error = 0; io->base_io = NULL; - atomic_set(&io->pending, 0); + atomic_set(&io->io_pending, 0); return io; } static void crypt_inc_pending(struct dm_crypt_io *io) { - atomic_inc(&io->pending); + atomic_inc(&io->io_pending); } /* @@ -913,12 +902,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io) */ static void crypt_dec_pending(struct dm_crypt_io *io) { - 
struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; struct bio *base_bio = io->base_bio; struct dm_crypt_io *base_io = io->base_io; int error = io->error; - if (!atomic_dec_and_test(&io->pending)) + if (!atomic_dec_and_test(&io->io_pending)) return; mempool_free(io, cc->io_pool); @@ -952,7 +941,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io) static void crypt_endio(struct bio *clone, int error) { struct dm_crypt_io *io = clone->bi_private; - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; unsigned rw = bio_data_dir(clone); if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error)) @@ -979,7 +968,7 @@ static void crypt_endio(struct bio *clone, int error) static void clone_init(struct dm_crypt_io *io, struct bio *clone) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; clone->bi_private = io; clone->bi_end_io = crypt_endio; @@ -990,7 +979,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone) static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; struct bio *base_bio = io->base_bio; struct bio *clone; @@ -1038,7 +1027,7 @@ static void kcryptd_io(struct work_struct *work) static void kcryptd_queue_io(struct dm_crypt_io *io) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; INIT_WORK(&io->work, kcryptd_io); queue_work(cc->io_queue, &io->work); @@ -1047,7 +1036,7 @@ static void kcryptd_queue_io(struct dm_crypt_io *io) static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) { struct bio *clone = io->ctx.bio_out; - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; if (unlikely(io->error < 0)) { crypt_free_buffer_pages(cc, clone); @@ -1069,7 +1058,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; struct bio *clone; struct dm_crypt_io *new_io; int crypt_finished; @@ -1107,7 +1096,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) if (r < 0) io->error = -EIO; - crypt_finished = atomic_dec_and_test(&io->ctx.pending); + crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); /* Encryption was already finished, submit io now */ if (crypt_finished) { @@ -1135,7 +1124,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) * between fragments, so switch to a new dm_crypt_io structure. 
*/ if (unlikely(!crypt_finished && remaining)) { - new_io = crypt_io_alloc(io->target, io->base_bio, + new_io = crypt_io_alloc(io->cc, io->base_bio, sector); crypt_inc_pending(new_io); crypt_convert_init(cc, &new_io->ctx, NULL, @@ -1169,7 +1158,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io) static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; int r = 0; crypt_inc_pending(io); @@ -1181,7 +1170,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) if (r < 0) io->error = -EIO; - if (atomic_dec_and_test(&io->ctx.pending)) + if (atomic_dec_and_test(&io->ctx.cc_pending)) kcryptd_crypt_read_done(io); crypt_dec_pending(io); @@ -1193,7 +1182,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, struct dm_crypt_request *dmreq = async_req->data; struct convert_context *ctx = dmreq->ctx; struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; if (error == -EINPROGRESS) { complete(&ctx->restart); @@ -1208,7 +1197,7 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); - if (!atomic_dec_and_test(&ctx->pending)) + if (!atomic_dec_and_test(&ctx->cc_pending)) return; if (bio_data_dir(io->base_bio) == READ) @@ -1229,7 +1218,7 @@ static void kcryptd_crypt(struct work_struct *work) static void kcryptd_queue_crypt(struct dm_crypt_io *io) { - struct crypt_config *cc = io->target->private; + struct crypt_config *cc = io->cc; INIT_WORK(&io->work, kcryptd_crypt); queue_work(cc->crypt_queue, &io->work); @@ -1241,7 +1230,6 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io) static int crypt_decode_key(u8 *key, char *hex, unsigned int size) { char buffer[3]; - char *endp; unsigned int i; buffer[2] = '\0'; @@ -1250,9 +1238,7 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size) buffer[0] = *hex++; buffer[1] = *hex++; - key[i] = (u8)simple_strtoul(buffer, &endp, 16); - - if (endp != &buffer[2]) + if (kstrtou8(buffer, 16, &key[i])) return -EINVAL; } @@ -1276,29 +1262,38 @@ static void crypt_encode_key(char *hex, u8 *key, unsigned int size) } } -static void crypt_free_tfms(struct crypt_config *cc, int cpu) +static void crypt_free_tfms(struct crypt_config *cc) { - struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu); unsigned i; + if (!cc->tfms) + return; + for (i = 0; i < cc->tfms_count; i++) - if (cpu_cc->tfms[i] && !IS_ERR(cpu_cc->tfms[i])) { - crypto_free_ablkcipher(cpu_cc->tfms[i]); - cpu_cc->tfms[i] = NULL; + if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) { + crypto_free_ablkcipher(cc->tfms[i]); + cc->tfms[i] = NULL; } + + kfree(cc->tfms); + cc->tfms = NULL; } -static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode) +static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode) { - struct crypt_cpu *cpu_cc = per_cpu_ptr(cc->cpu, cpu); unsigned i; int err; + cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *), + GFP_KERNEL); + if (!cc->tfms) + return -ENOMEM; + for (i = 0; i < cc->tfms_count; i++) { - cpu_cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0); - if (IS_ERR(cpu_cc->tfms[i])) { - err = PTR_ERR(cpu_cc->tfms[i]); - crypt_free_tfms(cc, cpu); + cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0); + if (IS_ERR(cc->tfms[i])) { + err = PTR_ERR(cc->tfms[i]); + crypt_free_tfms(cc); return err; } } @@ -1309,15 +1304,14 @@ 
static int crypt_alloc_tfms(struct crypt_config *cc, int cpu, char *ciphermode) static int crypt_setkey_allcpus(struct crypt_config *cc) { unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count); - int cpu, err = 0, i, r; - - for_each_possible_cpu(cpu) { - for (i = 0; i < cc->tfms_count; i++) { - r = crypto_ablkcipher_setkey(per_cpu_ptr(cc->cpu, cpu)->tfms[i], - cc->key + (i * subkey_size), subkey_size); - if (r) - err = r; - } + int err = 0, i, r; + + for (i = 0; i < cc->tfms_count; i++) { + r = crypto_ablkcipher_setkey(cc->tfms[i], + cc->key + (i * subkey_size), + subkey_size); + if (r) + err = r; } return err; @@ -1379,9 +1373,10 @@ static void crypt_dtr(struct dm_target *ti) cpu_cc = per_cpu_ptr(cc->cpu, cpu); if (cpu_cc->req) mempool_free(cpu_cc->req, cc->req_pool); - crypt_free_tfms(cc, cpu); } + crypt_free_tfms(cc); + if (cc->bs) bioset_free(cc->bs); @@ -1414,7 +1409,7 @@ static int crypt_ctr_cipher(struct dm_target *ti, struct crypt_config *cc = ti->private; char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount; char *cipher_api = NULL; - int cpu, ret = -EINVAL; + int ret = -EINVAL; char dummy; /* Convert to crypto api definition? */ @@ -1455,8 +1450,7 @@ static int crypt_ctr_cipher(struct dm_target *ti, if (tmp) DMWARN("Ignoring unexpected additional cipher options"); - cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)) + - cc->tfms_count * sizeof(*(cc->cpu->tfms)), + cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)), __alignof__(struct crypt_cpu)); if (!cc->cpu) { ti->error = "Cannot allocate per cpu state"; @@ -1489,12 +1483,10 @@ static int crypt_ctr_cipher(struct dm_target *ti, } /* Allocate cipher */ - for_each_possible_cpu(cpu) { - ret = crypt_alloc_tfms(cc, cpu, cipher_api); - if (ret < 0) { - ti->error = "Error allocating crypto tfm"; - goto bad; - } + ret = crypt_alloc_tfms(cc, cipher_api); + if (ret < 0) { + ti->error = "Error allocating crypto tfm"; + goto bad; } /* Initialize and set key */ @@ -1702,7 +1694,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ti->num_flush_requests = 1; - ti->discard_zeroes_data_unsupported = 1; + ti->discard_zeroes_data_unsupported = true; return 0; @@ -1715,7 +1707,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, union map_info *map_context) { struct dm_crypt_io *io; - struct crypt_config *cc; + struct crypt_config *cc = ti->private; /* * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues. 
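[Editor's aside, not part of the patch] Besides folding the per-CPU cipher state into struct crypt_config, the dm-crypt changes above switch crypt_decode_key() from simple_strtoul() to kstrtou8(), which validates each two-character chunk and drops the old endp bookkeeping. A standalone sketch of the same idea, with invented names:

	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	/* Editor's sketch, not from the kernel tree. Decode "00112233..." into bytes. */
	static int hex_key_decode(u8 *key, const char *hex, unsigned int size)
	{
		char buf[3];
		unsigned int i;

		if (strlen(hex) != 2 * size)
			return -EINVAL;

		buf[2] = '\0';
		for (i = 0; i < size; i++) {
			buf[0] = hex[2 * i];
			buf[1] = hex[2 * i + 1];
			/* Fails on non-hex characters and on overflow. */
			if (kstrtou8(buf, 16, &key[i]))
				return -EINVAL;
		}

		return 0;
	}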
@@ -1723,14 +1715,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, * - for REQ_DISCARD caller must use flush if IO ordering matters */ if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { - cc = ti->private; bio->bi_bdev = cc->dev->bdev; if (bio_sectors(bio)) bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); return DM_MAPIO_REMAPPED; } - io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector)); + io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector)); if (bio_data_dir(io->base_bio) == READ) { if (kcryptd_io_read(io, GFP_NOWAIT)) @@ -1742,7 +1733,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, } static int crypt_status(struct dm_target *ti, status_type_t type, - char *result, unsigned int maxlen) + unsigned status_flags, char *result, unsigned maxlen) { struct crypt_config *cc = ti->private; unsigned int sz = 0; diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 2dc22dddb2ae..f53846f9ab50 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -295,7 +295,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio, } static int delay_status(struct dm_target *ti, status_type_t type, - char *result, unsigned maxlen) + unsigned status_flags, char *result, unsigned maxlen) { struct delay_c *dc = ti->private; int sz = 0; diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index aa70f7d43a1a..ebaa4f803eec 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -142,24 +142,19 @@ EXPORT_SYMBOL(dm_exception_store_type_unregister); static int set_chunk_size(struct dm_exception_store *store, const char *chunk_size_arg, char **error) { - unsigned long chunk_size_ulong; - char *value; + unsigned chunk_size; - chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10); - if (*chunk_size_arg == '\0' || *value != '\0' || - chunk_size_ulong > UINT_MAX) { + if (kstrtouint(chunk_size_arg, 10, &chunk_size)) { *error = "Invalid chunk size"; return -EINVAL; } - if (!chunk_size_ulong) { + if (!chunk_size) { store->chunk_size = store->chunk_mask = store->chunk_shift = 0; return 0; } - return dm_exception_store_set_chunk_size(store, - (unsigned) chunk_size_ulong, - error); + return dm_exception_store_set_chunk_size(store, chunk_size, error); } int dm_exception_store_set_chunk_size(struct dm_exception_store *store, diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index ac49c01f1a44..cc15543a6ad7 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -333,7 +333,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, } static int flakey_status(struct dm_target *ti, status_type_t type, - char *result, unsigned int maxlen) + unsigned status_flags, char *result, unsigned maxlen) { unsigned sz = 0; struct flakey_c *fc = ti->private; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index a1a3e6df17b8..afd95986d099 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1054,6 +1054,7 @@ static void retrieve_status(struct dm_table *table, char *outbuf, *outptr; status_type_t type; size_t remaining, len, used = 0; + unsigned status_flags = 0; outptr = outbuf = get_result_buffer(param, param_size, &len); @@ -1090,7 +1091,9 @@ static void retrieve_status(struct dm_table *table, /* Get the status/table string from the target driver */ if (ti->type->status) { - if (ti->type->status(ti, type, outptr, remaining)) { + if (param->flags & DM_NOFLUSH_FLAG) + status_flags |= DM_STATUS_NOFLUSH_FLAG; + if 
(ti->type->status(ti, type, status_flags, outptr, remaining)) { param->flags |= DM_BUFFER_FULL_FLAG; break; } diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 3639eeab6042..1bf19a93eef0 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -96,7 +96,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio, } static int linear_status(struct dm_target *ti, status_type_t type, - char *result, unsigned int maxlen) + unsigned status_flags, char *result, unsigned maxlen) { struct linear_c *lc = (struct linear_c *) ti->private; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 638dae048b4f..d8abb90a6c2f 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -85,6 +85,7 @@ struct multipath { unsigned queue_io:1; /* Must we queue all I/O? */ unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */ unsigned saved_queue_if_no_path:1; /* Saved state during suspension */ + unsigned retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */ unsigned pg_init_retries; /* Number of times to retry pg_init */ unsigned pg_init_count; /* Number of times pg_init called */ @@ -568,6 +569,8 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps int r; struct pgpath *p; struct multipath *m = ti->private; + struct request_queue *q = NULL; + const char *attached_handler_name; /* we need at least a path arg */ if (as->argc < 1) { @@ -586,13 +589,37 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps goto bad; } - if (m->hw_handler_name) { - struct request_queue *q = bdev_get_queue(p->path.dev->bdev); + if (m->retain_attached_hw_handler || m->hw_handler_name) + q = bdev_get_queue(p->path.dev->bdev); + + if (m->retain_attached_hw_handler) { + attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); + if (attached_handler_name) { + /* + * Reset hw_handler_name to match the attached handler + * and clear any hw_handler_params associated with the + * ignored handler. + * + * NB. This modifies the table line to show the actual + * handler instead of the original table passed in. + */ + kfree(m->hw_handler_name); + m->hw_handler_name = attached_handler_name; + + kfree(m->hw_handler_params); + m->hw_handler_params = NULL; + } + } + if (m->hw_handler_name) { + /* + * Increments scsi_dh reference, even when using an + * already-attached handler. + */ r = scsi_dh_attach(q, m->hw_handler_name); if (r == -EBUSY) { /* - * Already attached to different hw_handler, + * Already attached to different hw_handler: * try to reattach with correct one. 
*/ scsi_dh_detach(q); @@ -760,7 +787,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) const char *arg_name; static struct dm_arg _args[] = { - {0, 5, "invalid number of feature args"}, + {0, 6, "invalid number of feature args"}, {1, 50, "pg_init_retries must be between 1 and 50"}, {0, 60000, "pg_init_delay_msecs must be between 0 and 60000"}, }; @@ -781,6 +808,11 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) continue; } + if (!strcasecmp(arg_name, "retain_attached_hw_handler")) { + m->retain_attached_hw_handler = 1; + continue; + } + if (!strcasecmp(arg_name, "pg_init_retries") && (argc >= 1)) { r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error); @@ -1346,7 +1378,7 @@ static void multipath_resume(struct dm_target *ti) * num_paths num_selector_args [path_dev [selector_args]* ]+ ]+ */ static int multipath_status(struct dm_target *ti, status_type_t type, - char *result, unsigned int maxlen) + unsigned status_flags, char *result, unsigned maxlen) { int sz = 0; unsigned long flags; @@ -1364,13 +1396,16 @@ static int multipath_status(struct dm_target *ti, status_type_t type, else { DMEMIT("%u ", m->queue_if_no_path + (m->pg_init_retries > 0) * 2 + - (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2); + (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 + + m->retain_attached_hw_handler); if (m->queue_if_no_path) DMEMIT("queue_if_no_path "); if (m->pg_init_retries) DMEMIT("pg_init_retries %u ", m->pg_init_retries); if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); + if (m->retain_attached_hw_handler) + DMEMIT("retain_attached_hw_handler "); } if (!m->hw_handler_name || type == STATUSTYPE_INFO) @@ -1656,7 +1691,7 @@ out: *---------------------------------------------------------------*/ static struct target_type multipath_target = { .name = "multipath", - .version = {1, 4, 0}, + .version = {1, 5, 0}, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 017c34d78d61..f2f29c526544 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -101,20 +101,12 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra { unsigned i; struct raid_set *rs; - sector_t sectors_per_dev; if (raid_devs <= raid_type->parity_devs) { ti->error = "Insufficient number of devices"; return ERR_PTR(-EINVAL); } - sectors_per_dev = ti->len; - if ((raid_type->level > 1) && - sector_div(sectors_per_dev, (raid_devs - raid_type->parity_devs))) { - ti->error = "Target length not divisible by number of data devices"; - return ERR_PTR(-EINVAL); - } - rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL); if (!rs) { ti->error = "Cannot allocate raid context"; @@ -128,7 +120,6 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra rs->md.raid_disks = raid_devs; rs->md.level = raid_type->level; rs->md.new_level = rs->md.level; - rs->md.dev_sectors = sectors_per_dev; rs->md.layout = raid_type->algorithm; rs->md.new_layout = rs->md.layout; rs->md.delta_disks = 0; @@ -143,6 +134,7 @@ static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *ra * rs->md.external * rs->md.chunk_sectors * rs->md.new_chunk_sectors + * rs->md.dev_sectors */ return rs; @@ -353,6 +345,8 @@ static int parse_raid_params(struct raid_set *rs, char **argv, { unsigned i, rebuild_cnt = 0; unsigned long value, region_size = 0; + sector_t sectors_per_dev = 
rs->ti->len; + sector_t max_io_len; char *key; /* @@ -429,13 +423,28 @@ static int parse_raid_params(struct raid_set *rs, char **argv, if (!strcasecmp(key, "rebuild")) { rebuild_cnt++; - if (((rs->raid_type->level != 1) && - (rebuild_cnt > rs->raid_type->parity_devs)) || - ((rs->raid_type->level == 1) && - (rebuild_cnt > (rs->md.raid_disks - 1)))) { - rs->ti->error = "Too many rebuild devices specified for given RAID type"; + + switch (rs->raid_type->level) { + case 1: + if (rebuild_cnt >= rs->md.raid_disks) { + rs->ti->error = "Too many rebuild devices specified"; + return -EINVAL; + } + break; + case 4: + case 5: + case 6: + if (rebuild_cnt > rs->raid_type->parity_devs) { + rs->ti->error = "Too many rebuild devices specified for given RAID type"; + return -EINVAL; + } + break; + default: + DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name); + rs->ti->error = "Rebuild not supported for this RAID type"; return -EINVAL; } + if (value > rs->md.raid_disks) { rs->ti->error = "Invalid rebuild index given"; return -EINVAL; @@ -522,14 +531,19 @@ static int parse_raid_params(struct raid_set *rs, char **argv, return -EINVAL; if (rs->md.chunk_sectors) - rs->ti->split_io = rs->md.chunk_sectors; + max_io_len = rs->md.chunk_sectors; else - rs->ti->split_io = region_size; + max_io_len = region_size; - if (rs->md.chunk_sectors) - rs->ti->split_io = rs->md.chunk_sectors; - else - rs->ti->split_io = region_size; + if (dm_set_target_max_io_len(rs->ti, max_io_len)) + return -EINVAL; + + if ((rs->raid_type->level > 1) && + sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) { + rs->ti->error = "Target length not divisible by number of data devices"; + return -EINVAL; + } + rs->md.dev_sectors = sectors_per_dev; /* Assume there are no metadata devices until the drives are parsed */ rs->md.persistent = 0; @@ -1067,7 +1081,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_c } static int raid_status(struct dm_target *ti, status_type_t type, - char *result, unsigned maxlen) + unsigned status_flags, char *result, unsigned maxlen) { struct raid_set *rs = ti->private; unsigned raid_param_cnt = 1; /* at least 1 for chunksize */ diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index b58b7a33914a..bc5ddba8045b 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1081,10 +1081,14 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ti->private = ms; - ti->split_io = dm_rh_get_region_size(ms->rh); + + r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh)); + if (r) + goto err_free_context; + ti->num_flush_requests = 1; ti->num_discard_requests = 1; - ti->discard_zeroes_data_unsupported = 1; + ti->discard_zeroes_data_unsupported = true; ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0); @@ -1363,7 +1367,7 @@ static char device_status_char(struct mirror *m) static int mirror_status(struct dm_target *ti, status_type_t type, - char *result, unsigned int maxlen) + unsigned status_flags, char *result, unsigned maxlen) { unsigned int m, sz = 0; struct mirror_set *ms = (struct mirror_set *) ti->private; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 6f758870fc19..a143921feaf6 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -691,7 +691,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new) * Return a minimum chunk size of all snapshots that have the specified origin. 
* Return zero if the origin has no snapshots. */ -static sector_t __minimum_chunk_size(struct origin *o) +static uint32_t __minimum_chunk_size(struct origin *o) { struct dm_snapshot *snap; unsigned chunk_size = 0; @@ -701,7 +701,7 @@ static sector_t __minimum_chunk_size(struct origin *o) chunk_size = min_not_zero(chunk_size, snap->store->chunk_size); - return chunk_size; + return (uint32_t) chunk_size; } /* @@ -1172,7 +1172,10 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) ti->error = "Chunk size not set"; goto bad_read_metadata; } - ti->split_io = s->store->chunk_size; + + r = dm_set_target_max_io_len(ti, s->store->chunk_size); + if (r) + goto bad_read_metadata; return 0; @@ -1239,7 +1242,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src, snap_dest->store->snap = snap_dest; snap_src->store->snap = snap_src; - snap_dest->ti->split_io = snap_dest->store->chunk_size; + snap_dest->ti->max_io_len = snap_dest->store->chunk_size; snap_dest->valid = snap_src->valid; /* @@ -1817,9 +1820,9 @@ static void snapshot_resume(struct dm_target *ti) up_write(&s->lock); } -static sector_t get_origin_minimum_chunksize(struct block_device *bdev) +static uint32_t get_origin_minimum_chunksize(struct block_device *bdev) { - sector_t min_chunksize; + uint32_t min_chunksize; down_read(&_origins_lock); min_chunksize = __minimum_chunk_size(__lookup_origin(bdev)); @@ -1838,15 +1841,15 @@ static void snapshot_merge_resume(struct dm_target *ti) snapshot_resume(ti); /* - * snapshot-merge acts as an origin, so set ti->split_io + * snapshot-merge acts as an origin, so set ti->max_io_len */ - ti->split_io = get_origin_minimum_chunksize(s->origin->bdev); + ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev); start_merge(s); } static int snapshot_status(struct dm_target *ti, status_type_t type, - char *result, unsigned int maxlen) + unsigned status_flags, char *result, unsigned maxlen) { unsigned sz = 0; struct dm_snapshot *snap = ti->private; @@ -2073,12 +2076,12 @@ static int origin_write_extent(struct dm_snapshot *merging_snap, struct origin *o; /* - * The origin's __minimum_chunk_size() got stored in split_io + * The origin's __minimum_chunk_size() got stored in max_io_len * by snapshot_merge_resume(). */ down_read(&_origins_lock); o = __lookup_origin(merging_snap->origin->bdev); - for (n = 0; n < size; n += merging_snap->ti->split_io) + for (n = 0; n < size; n += merging_snap->ti->max_io_len) if (__origin_write(&o->snapshots, sector + n, NULL) == DM_MAPIO_SUBMITTED) must_wait = 1; @@ -2138,18 +2141,18 @@ static int origin_map(struct dm_target *ti, struct bio *bio, } /* - * Set the target "split_io" field to the minimum of all the snapshots' + * Set the target "max_io_len" field to the minimum of all the snapshots' * chunk sizes. 
*/ static void origin_resume(struct dm_target *ti) { struct dm_dev *dev = ti->private; - ti->split_io = get_origin_minimum_chunksize(dev->bdev); + ti->max_io_len = get_origin_minimum_chunksize(dev->bdev); } -static int origin_status(struct dm_target *ti, status_type_t type, char *result, - unsigned int maxlen) +static int origin_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { struct dm_dev *dev = ti->private; @@ -2176,7 +2179,6 @@ static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm, return max_size; bvm->bi_bdev = dev->bdev; - bvm->bi_sector = bvm->bi_sector; return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); } diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 35c94ff24ad5..a087bf2a8d66 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -26,14 +26,12 @@ struct stripe { struct stripe_c { uint32_t stripes; int stripes_shift; - sector_t stripes_mask; /* The size of this target / num. stripes */ sector_t stripe_width; - /* stripe chunk size */ - uint32_t chunk_shift; - sector_t chunk_mask; + uint32_t chunk_size; + int chunk_size_shift; /* Needed for handling events */ struct dm_target *ti; @@ -91,7 +89,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc, /* * Construct a striped mapping. - * <number of stripes> <chunk size (2^^n)> [<dev_path> <offset>]+ + * <number of stripes> <chunk size> [<dev_path> <offset>]+ */ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) { @@ -99,7 +97,6 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) sector_t width; uint32_t stripes; uint32_t chunk_size; - char *end; int r; unsigned int i; @@ -108,34 +105,23 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) return -EINVAL; } - stripes = simple_strtoul(argv[0], &end, 10); - if (!stripes || *end) { + if (kstrtouint(argv[0], 10, &stripes) || !stripes) { ti->error = "Invalid stripe count"; return -EINVAL; } - chunk_size = simple_strtoul(argv[1], &end, 10); - if (*end) { + if (kstrtouint(argv[1], 10, &chunk_size) || !chunk_size) { ti->error = "Invalid chunk_size"; return -EINVAL; } - /* - * chunk_size is a power of two - */ - if (!is_power_of_2(chunk_size) || - (chunk_size < (PAGE_SIZE >> SECTOR_SHIFT))) { - ti->error = "Invalid chunk size"; - return -EINVAL; - } - - if (ti->len & (chunk_size - 1)) { + width = ti->len; + if (sector_div(width, chunk_size)) { ti->error = "Target length not divisible by " "chunk size"; return -EINVAL; } - width = ti->len; if (sector_div(width, stripes)) { ti->error = "Target length not divisible by " "number of stripes"; @@ -167,17 +153,21 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (stripes & (stripes - 1)) sc->stripes_shift = -1; - else { - sc->stripes_shift = ffs(stripes) - 1; - sc->stripes_mask = ((sector_t) stripes) - 1; - } + else + sc->stripes_shift = __ffs(stripes); + + r = dm_set_target_max_io_len(ti, chunk_size); + if (r) + return r; - ti->split_io = chunk_size; ti->num_flush_requests = stripes; ti->num_discard_requests = stripes; - sc->chunk_shift = ffs(chunk_size) - 1; - sc->chunk_mask = ((sector_t) chunk_size) - 1; + sc->chunk_size = chunk_size; + if (chunk_size & (chunk_size - 1)) + sc->chunk_size_shift = -1; + else + sc->chunk_size_shift = __ffs(chunk_size); /* * Get the stripe destinations. 
@@ -216,17 +206,29 @@ static void stripe_dtr(struct dm_target *ti) static void stripe_map_sector(struct stripe_c *sc, sector_t sector, uint32_t *stripe, sector_t *result) { - sector_t offset = dm_target_offset(sc->ti, sector); - sector_t chunk = offset >> sc->chunk_shift; + sector_t chunk = dm_target_offset(sc->ti, sector); + sector_t chunk_offset; + + if (sc->chunk_size_shift < 0) + chunk_offset = sector_div(chunk, sc->chunk_size); + else { + chunk_offset = chunk & (sc->chunk_size - 1); + chunk >>= sc->chunk_size_shift; + } if (sc->stripes_shift < 0) *stripe = sector_div(chunk, sc->stripes); else { - *stripe = chunk & sc->stripes_mask; + *stripe = chunk & (sc->stripes - 1); chunk >>= sc->stripes_shift; } - *result = (chunk << sc->chunk_shift) | (offset & sc->chunk_mask); + if (sc->chunk_size_shift < 0) + chunk *= sc->chunk_size; + else + chunk <<= sc->chunk_size_shift; + + *result = chunk + chunk_offset; } static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector, @@ -237,9 +239,16 @@ static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector, stripe_map_sector(sc, sector, &stripe, result); if (stripe == target_stripe) return; - *result &= ~sc->chunk_mask; /* round down */ + + /* round down */ + sector = *result; + if (sc->chunk_size_shift < 0) + *result -= sector_div(sector, sc->chunk_size); + else + *result = sector & ~(sector_t)(sc->chunk_size - 1); + if (target_stripe < stripe) - *result += sc->chunk_mask + 1; /* next chunk */ + *result += sc->chunk_size; /* next chunk */ } static int stripe_map_discard(struct stripe_c *sc, struct bio *bio, @@ -302,8 +311,8 @@ static int stripe_map(struct dm_target *ti, struct bio *bio, * */ -static int stripe_status(struct dm_target *ti, - status_type_t type, char *result, unsigned int maxlen) +static int stripe_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen) { struct stripe_c *sc = (struct stripe_c *) ti->private; char buffer[sc->stripes + 1]; @@ -324,7 +333,7 @@ static int stripe_status(struct dm_target *ti, case STATUSTYPE_TABLE: DMEMIT("%d %llu", sc->stripes, - (unsigned long long)sc->chunk_mask + 1); + (unsigned long long)sc->chunk_size); for (i = 0; i < sc->stripes; i++) DMEMIT(" %s %llu", sc->stripe[i].dev->name, (unsigned long long)sc->stripe[i].physical_start); @@ -391,7 +400,7 @@ static void stripe_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct stripe_c *sc = ti->private; - unsigned chunk_size = (sc->chunk_mask + 1) << 9; + unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT; blk_limits_io_min(limits, chunk_size); blk_limits_io_opt(limits, chunk_size * sc->stripes); @@ -419,7 +428,7 @@ static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm, static struct target_type stripe_target = { .name = "striped", - .version = {1, 4, 0}, + .version = {1, 5, 0}, .module = THIS_MODULE, .ctr = stripe_ctr, .dtr = stripe_dtr, diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 2e227fbf1622..f90069029aae 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1319,6 +1319,9 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) if (!ti->num_flush_requests) continue; + if (ti->flush_supported) + return 1; + if (ti->type->iterate_devices && ti->type->iterate_devices(ti, device_flush_capable, &flush)) return 1; diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 3e2907f0bc46..693e149e9727 100644 --- a/drivers/md/dm-thin-metadata.c +++ 
b/drivers/md/dm-thin-metadata.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Red Hat, Inc. + * Copyright (C) 2011-2012 Red Hat, Inc. * * This file is released under the GPL. */ @@ -80,6 +80,12 @@ #define THIN_METADATA_CACHE_SIZE 64 #define SECTOR_TO_BLOCK_SHIFT 3 +/* + * 3 for btree insert + + * 2 for btree lookup used within space map + */ +#define THIN_MAX_CONCURRENT_LOCKS 5 + /* This should be plenty */ #define SPACE_MAP_ROOT_SIZE 128 @@ -172,13 +178,20 @@ struct dm_pool_metadata { struct rw_semaphore root_lock; uint32_t time; - int need_commit; dm_block_t root; dm_block_t details_root; struct list_head thin_devices; uint64_t trans_id; unsigned long flags; sector_t data_block_size; + bool read_only:1; + + /* + * Set if a transaction has to be aborted but the attempt to roll back + * to the previous (good) transaction failed. The only pool metadata + * operation possible in this state is the closing of the device. + */ + bool fail_io:1; }; struct dm_thin_device { @@ -187,7 +200,8 @@ struct dm_thin_device { dm_thin_id id; int open_count; - int changed; + bool changed:1; + bool aborted_with_changes:1; uint64_t mapped_blocks; uint64_t transaction_id; uint32_t creation_time; @@ -338,7 +352,21 @@ static int subtree_equal(void *context, void *value1_le, void *value2_le) /*----------------------------------------------------------------*/ -static int superblock_all_zeroes(struct dm_block_manager *bm, int *result) +static int superblock_lock_zero(struct dm_pool_metadata *pmd, + struct dm_block **sblock) +{ + return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION, + &sb_validator, sblock); +} + +static int superblock_lock(struct dm_pool_metadata *pmd, + struct dm_block **sblock) +{ + return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, + &sb_validator, sblock); +} + +static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result) { int r; unsigned i; @@ -365,72 +393,9 @@ static int superblock_all_zeroes(struct dm_block_manager *bm, int *result) return dm_bm_unlock(b); } -static int init_pmd(struct dm_pool_metadata *pmd, - struct dm_block_manager *bm, - dm_block_t nr_blocks, int create) +static void __setup_btree_details(struct dm_pool_metadata *pmd) { - int r; - struct dm_space_map *sm, *data_sm; - struct dm_transaction_manager *tm; - struct dm_block *sblock; - - if (create) { - r = dm_tm_create_with_sm(bm, THIN_SUPERBLOCK_LOCATION, - &sb_validator, &tm, &sm, &sblock); - if (r < 0) { - DMERR("tm_create_with_sm failed"); - return r; - } - - data_sm = dm_sm_disk_create(tm, nr_blocks); - if (IS_ERR(data_sm)) { - DMERR("sm_disk_create failed"); - dm_tm_unlock(tm, sblock); - r = PTR_ERR(data_sm); - goto bad; - } - } else { - struct thin_disk_superblock *disk_super = NULL; - size_t space_map_root_offset = - offsetof(struct thin_disk_superblock, metadata_space_map_root); - - r = dm_tm_open_with_sm(bm, THIN_SUPERBLOCK_LOCATION, - &sb_validator, space_map_root_offset, - SPACE_MAP_ROOT_SIZE, &tm, &sm, &sblock); - if (r < 0) { - DMERR("tm_open_with_sm failed"); - return r; - } - - disk_super = dm_block_data(sblock); - data_sm = dm_sm_disk_open(tm, disk_super->data_space_map_root, - sizeof(disk_super->data_space_map_root)); - if (IS_ERR(data_sm)) { - DMERR("sm_disk_open failed"); - r = PTR_ERR(data_sm); - goto bad; - } - } - - - r = dm_tm_unlock(tm, sblock); - if (r < 0) { - DMERR("couldn't unlock superblock"); - goto bad_data_sm; - } - - pmd->bm = bm; - pmd->metadata_sm = sm; - pmd->data_sm = data_sm; - pmd->tm = tm; - pmd->nb_tm = dm_tm_create_non_blocking_clone(tm); - if 
(!pmd->nb_tm) { - DMERR("could not create clone tm"); - r = -ENOMEM; - goto bad_data_sm; - } - - pmd->info.tm = tm; + pmd->info.tm = pmd->tm; pmd->info.levels = 2; pmd->info.value_type.context = pmd->data_sm; pmd->info.value_type.size = sizeof(__le64); @@ -441,7 +406,7 @@ static int init_pmd(struct dm_pool_metadata *pmd, memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info)); pmd->nb_info.tm = pmd->nb_tm; - pmd->tl_info.tm = tm; + pmd->tl_info.tm = pmd->tm; pmd->tl_info.levels = 1; pmd->tl_info.value_type.context = &pmd->info; pmd->tl_info.value_type.size = sizeof(__le64); @@ -449,7 +414,7 @@ static int init_pmd(struct dm_pool_metadata *pmd, pmd->tl_info.value_type.dec = subtree_dec; pmd->tl_info.value_type.equal = subtree_equal; - pmd->bl_info.tm = tm; + pmd->bl_info.tm = pmd->tm; pmd->bl_info.levels = 1; pmd->bl_info.value_type.context = pmd->data_sm; pmd->bl_info.value_type.size = sizeof(__le64); @@ -457,48 +422,266 @@ static int init_pmd(struct dm_pool_metadata *pmd, pmd->bl_info.value_type.dec = data_block_dec; pmd->bl_info.value_type.equal = data_block_equal; - pmd->details_info.tm = tm; + pmd->details_info.tm = pmd->tm; pmd->details_info.levels = 1; pmd->details_info.value_type.context = NULL; pmd->details_info.value_type.size = sizeof(struct disk_device_details); pmd->details_info.value_type.inc = NULL; pmd->details_info.value_type.dec = NULL; pmd->details_info.value_type.equal = NULL; +} - pmd->root = 0; +static int __write_initial_superblock(struct dm_pool_metadata *pmd) +{ + int r; + struct dm_block *sblock; + size_t metadata_len, data_len; + struct thin_disk_superblock *disk_super; + sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT; - init_rwsem(&pmd->root_lock); - pmd->time = 0; - pmd->need_commit = 0; - pmd->details_root = 0; - pmd->trans_id = 0; - pmd->flags = 0; - INIT_LIST_HEAD(&pmd->thin_devices); + if (bdev_size > THIN_METADATA_MAX_SECTORS) + bdev_size = THIN_METADATA_MAX_SECTORS; + + r = dm_sm_root_size(pmd->metadata_sm, &metadata_len); + if (r < 0) + return r; + + r = dm_sm_root_size(pmd->data_sm, &data_len); + if (r < 0) + return r; + + r = dm_sm_commit(pmd->data_sm); + if (r < 0) + return r; + + r = dm_tm_pre_commit(pmd->tm); + if (r < 0) + return r; + + r = superblock_lock_zero(pmd, &sblock); + if (r) + return r; + + disk_super = dm_block_data(sblock); + disk_super->flags = 0; + memset(disk_super->uuid, 0, sizeof(disk_super->uuid)); + disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC); + disk_super->version = cpu_to_le32(THIN_VERSION); + disk_super->time = 0; + disk_super->trans_id = 0; + disk_super->held_root = 0; + + r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root, + metadata_len); + if (r < 0) + goto bad_locked; + + r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root, + data_len); + if (r < 0) + goto bad_locked; + + disk_super->data_mapping_root = cpu_to_le64(pmd->root); + disk_super->device_details_root = cpu_to_le64(pmd->details_root); + disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); + disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT); + disk_super->data_block_size = cpu_to_le32(pmd->data_block_size); + + return dm_tm_commit(pmd->tm, sblock); + +bad_locked: + dm_bm_unlock(sblock); + return r; +} + +static int __format_metadata(struct dm_pool_metadata *pmd) +{ + int r; + + r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION, + &pmd->tm, &pmd->metadata_sm); + if (r < 0) { + DMERR("tm_create_with_sm failed"); + return 
r; + } + + pmd->data_sm = dm_sm_disk_create(pmd->tm, 0); + if (IS_ERR(pmd->data_sm)) { + DMERR("sm_disk_create failed"); + r = PTR_ERR(pmd->data_sm); + goto bad_cleanup_tm; + } + + pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm); + if (!pmd->nb_tm) { + DMERR("could not create non-blocking clone tm"); + r = -ENOMEM; + goto bad_cleanup_data_sm; + } + + __setup_btree_details(pmd); + + r = dm_btree_empty(&pmd->info, &pmd->root); + if (r < 0) + goto bad_cleanup_nb_tm; + + r = dm_btree_empty(&pmd->details_info, &pmd->details_root); + if (r < 0) { + DMERR("couldn't create devices root"); + goto bad_cleanup_nb_tm; + } + + r = __write_initial_superblock(pmd); + if (r) + goto bad_cleanup_nb_tm; return 0; -bad_data_sm: - dm_sm_destroy(data_sm); -bad: - dm_tm_destroy(tm); - dm_sm_destroy(sm); +bad_cleanup_nb_tm: + dm_tm_destroy(pmd->nb_tm); +bad_cleanup_data_sm: + dm_sm_destroy(pmd->data_sm); +bad_cleanup_tm: + dm_tm_destroy(pmd->tm); + dm_sm_destroy(pmd->metadata_sm); + + return r; +} + +static int __check_incompat_features(struct thin_disk_superblock *disk_super, + struct dm_pool_metadata *pmd) +{ + uint32_t features; + + features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP; + if (features) { + DMERR("could not access metadata due to unsupported optional features (%lx).", + (unsigned long)features); + return -EINVAL; + } + + /* + * Check for read-only metadata to skip the following RDWR checks. + */ + if (get_disk_ro(pmd->bdev->bd_disk)) + return 0; + + features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP; + if (features) { + DMERR("could not access metadata RDWR due to unsupported optional features (%lx).", + (unsigned long)features); + return -EINVAL; + } + + return 0; +} + +static int __open_metadata(struct dm_pool_metadata *pmd) +{ + int r; + struct dm_block *sblock; + struct thin_disk_superblock *disk_super; + + r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, + &sb_validator, &sblock); + if (r < 0) { + DMERR("couldn't read superblock"); + return r; + } + + disk_super = dm_block_data(sblock); + + r = __check_incompat_features(disk_super, pmd); + if (r < 0) + goto bad_unlock_sblock; + + r = dm_tm_open_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION, + disk_super->metadata_space_map_root, + sizeof(disk_super->metadata_space_map_root), + &pmd->tm, &pmd->metadata_sm); + if (r < 0) { + DMERR("tm_open_with_sm failed"); + goto bad_unlock_sblock; + } + + pmd->data_sm = dm_sm_disk_open(pmd->tm, disk_super->data_space_map_root, + sizeof(disk_super->data_space_map_root)); + if (IS_ERR(pmd->data_sm)) { + DMERR("sm_disk_open failed"); + r = PTR_ERR(pmd->data_sm); + goto bad_cleanup_tm; + } + + pmd->nb_tm = dm_tm_create_non_blocking_clone(pmd->tm); + if (!pmd->nb_tm) { + DMERR("could not create non-blocking clone tm"); + r = -ENOMEM; + goto bad_cleanup_data_sm; + } + + __setup_btree_details(pmd); + return dm_bm_unlock(sblock); + +bad_cleanup_data_sm: + dm_sm_destroy(pmd->data_sm); +bad_cleanup_tm: + dm_tm_destroy(pmd->tm); + dm_sm_destroy(pmd->metadata_sm); +bad_unlock_sblock: + dm_bm_unlock(sblock); + + return r; +} + +static int __open_or_format_metadata(struct dm_pool_metadata *pmd, bool format_device) +{ + int r, unformatted; + + r = __superblock_all_zeroes(pmd->bm, &unformatted); + if (r) + return r; + + if (unformatted) + return format_device ? 
__format_metadata(pmd) : -EPERM; + + return __open_metadata(pmd); +} + +static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool format_device) +{ + int r; + + pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE, + THIN_METADATA_CACHE_SIZE, + THIN_MAX_CONCURRENT_LOCKS); + if (IS_ERR(pmd->bm)) { + DMERR("could not create block manager"); + return PTR_ERR(pmd->bm); + } + + r = __open_or_format_metadata(pmd, format_device); + if (r) + dm_block_manager_destroy(pmd->bm); return r; } +static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd) +{ + dm_sm_destroy(pmd->data_sm); + dm_sm_destroy(pmd->metadata_sm); + dm_tm_destroy(pmd->nb_tm); + dm_tm_destroy(pmd->tm); + dm_block_manager_destroy(pmd->bm); +} + static int __begin_transaction(struct dm_pool_metadata *pmd) { int r; - u32 features; struct thin_disk_superblock *disk_super; struct dm_block *sblock; /* - * __maybe_commit_transaction() resets these - */ - WARN_ON(pmd->need_commit); - - /* * We re-read the superblock every time. Shouldn't need to do this * really. */ @@ -515,32 +698,8 @@ static int __begin_transaction(struct dm_pool_metadata *pmd) pmd->flags = le32_to_cpu(disk_super->flags); pmd->data_block_size = le32_to_cpu(disk_super->data_block_size); - features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP; - if (features) { - DMERR("could not access metadata due to " - "unsupported optional features (%lx).", - (unsigned long)features); - r = -EINVAL; - goto out; - } - - /* - * Check for read-only metadata to skip the following RDWR checks. - */ - if (get_disk_ro(pmd->bdev->bd_disk)) - goto out; - - features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP; - if (features) { - DMERR("could not access metadata RDWR due to " - "unsupported optional features (%lx).", - (unsigned long)features); - r = -EINVAL; - } - -out: dm_bm_unlock(sblock); - return r; + return 0; } static int __write_changed_details(struct dm_pool_metadata *pmd) @@ -573,8 +732,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd) list_del(&td->list); kfree(td); } - - pmd->need_commit = 1; } return 0; @@ -582,9 +739,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd) static int __commit_transaction(struct dm_pool_metadata *pmd) { - /* - * FIXME: Associated pool should be made read-only on failure. 
- */ int r; size_t metadata_len, data_len; struct thin_disk_superblock *disk_super; @@ -597,31 +751,27 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) r = __write_changed_details(pmd); if (r < 0) - goto out; - - if (!pmd->need_commit) - goto out; + return r; r = dm_sm_commit(pmd->data_sm); if (r < 0) - goto out; + return r; r = dm_tm_pre_commit(pmd->tm); if (r < 0) - goto out; + return r; r = dm_sm_root_size(pmd->metadata_sm, &metadata_len); if (r < 0) - goto out; + return r; r = dm_sm_root_size(pmd->data_sm, &data_len); if (r < 0) - goto out; + return r; - r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, - &sb_validator, &sblock); + r = superblock_lock(pmd, &sblock); if (r) - goto out; + return r; disk_super = dm_block_data(sblock); disk_super->time = cpu_to_le32(pmd->time); @@ -640,12 +790,7 @@ static int __commit_transaction(struct dm_pool_metadata *pmd) if (r < 0) goto out_locked; - r = dm_tm_commit(pmd->tm, sblock); - if (!r) - pmd->need_commit = 0; - -out: - return r; + return dm_tm_commit(pmd->tm, sblock); out_locked: dm_bm_unlock(sblock); @@ -653,15 +798,11 @@ out_locked: } struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, - sector_t data_block_size) + sector_t data_block_size, + bool format_device) { int r; - struct thin_disk_superblock *disk_super; struct dm_pool_metadata *pmd; - sector_t bdev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; - struct dm_block_manager *bm; - int create; - struct dm_block *sblock; pmd = kmalloc(sizeof(*pmd), GFP_KERNEL); if (!pmd) { @@ -669,90 +810,28 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, return ERR_PTR(-ENOMEM); } - /* - * Max hex locks: - * 3 for btree insert + - * 2 for btree lookup used within space map - */ - bm = dm_block_manager_create(bdev, THIN_METADATA_BLOCK_SIZE, - THIN_METADATA_CACHE_SIZE, 5); - if (!bm) { - DMERR("could not create block manager"); - kfree(pmd); - return ERR_PTR(-ENOMEM); - } - - r = superblock_all_zeroes(bm, &create); - if (r) { - dm_block_manager_destroy(bm); - kfree(pmd); - return ERR_PTR(r); - } - + init_rwsem(&pmd->root_lock); + pmd->time = 0; + INIT_LIST_HEAD(&pmd->thin_devices); + pmd->read_only = false; + pmd->fail_io = false; + pmd->bdev = bdev; + pmd->data_block_size = data_block_size; - r = init_pmd(pmd, bm, 0, create); + r = __create_persistent_data_objects(pmd, format_device); if (r) { - dm_block_manager_destroy(bm); kfree(pmd); return ERR_PTR(r); } - pmd->bdev = bdev; - - if (!create) { - r = __begin_transaction(pmd); - if (r < 0) - goto bad; - return pmd; - } - - /* - * Create. 
- */ - r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, - &sb_validator, &sblock); - if (r) - goto bad; - - if (bdev_size > THIN_METADATA_MAX_SECTORS) - bdev_size = THIN_METADATA_MAX_SECTORS; - - disk_super = dm_block_data(sblock); - disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC); - disk_super->version = cpu_to_le32(THIN_VERSION); - disk_super->time = 0; - disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); - disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT); - disk_super->data_block_size = cpu_to_le32(data_block_size); - - r = dm_bm_unlock(sblock); - if (r < 0) - goto bad; - - r = dm_btree_empty(&pmd->info, &pmd->root); - if (r < 0) - goto bad; - - r = dm_btree_empty(&pmd->details_info, &pmd->details_root); - if (r < 0) { - DMERR("couldn't create devices root"); - goto bad; - } - pmd->flags = 0; - pmd->need_commit = 1; - r = dm_pool_commit_metadata(pmd); + r = __begin_transaction(pmd); if (r < 0) { - DMERR("%s: dm_pool_commit_metadata() failed, error = %d", - __func__, r); - goto bad; + if (dm_pool_metadata_close(pmd) < 0) + DMWARN("%s: dm_pool_metadata_close() failed.", __func__); + return ERR_PTR(r); } return pmd; - -bad: - if (dm_pool_metadata_close(pmd) < 0) - DMWARN("%s: dm_pool_metadata_close() failed.", __func__); - return ERR_PTR(r); } int dm_pool_metadata_close(struct dm_pool_metadata *pmd) @@ -778,18 +857,17 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) return -EBUSY; } - r = __commit_transaction(pmd); - if (r < 0) - DMWARN("%s: __commit_transaction() failed, error = %d", - __func__, r); + if (!pmd->read_only && !pmd->fail_io) { + r = __commit_transaction(pmd); + if (r < 0) + DMWARN("%s: __commit_transaction() failed, error = %d", + __func__, r); + } - dm_tm_destroy(pmd->tm); - dm_tm_destroy(pmd->nb_tm); - dm_block_manager_destroy(pmd->bm); - dm_sm_destroy(pmd->metadata_sm); - dm_sm_destroy(pmd->data_sm); - kfree(pmd); + if (!pmd->fail_io) + __destroy_persistent_data_objects(pmd); + kfree(pmd); return 0; } @@ -850,6 +928,7 @@ static int __open_device(struct dm_pool_metadata *pmd, (*td)->id = dev; (*td)->open_count = 1; (*td)->changed = changed; + (*td)->aborted_with_changes = false; (*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks); (*td)->transaction_id = le64_to_cpu(details_le.transaction_id); (*td)->creation_time = le32_to_cpu(details_le.creation_time); @@ -911,10 +990,11 @@ static int __create_thin(struct dm_pool_metadata *pmd, int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev) { - int r; + int r = -EINVAL; down_write(&pmd->root_lock); - r = __create_thin(pmd, dev); + if (!pmd->fail_io) + r = __create_thin(pmd, dev); up_write(&pmd->root_lock); return r; @@ -1001,10 +1081,11 @@ int dm_pool_create_snap(struct dm_pool_metadata *pmd, dm_thin_id dev, dm_thin_id origin) { - int r; + int r = -EINVAL; down_write(&pmd->root_lock); - r = __create_snap(pmd, dev, origin); + if (!pmd->fail_io) + r = __create_snap(pmd, dev, origin); up_write(&pmd->root_lock); return r; @@ -1037,18 +1118,17 @@ static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev) if (r) return r; - pmd->need_commit = 1; - return 0; } int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev) { - int r; + int r = -EINVAL; down_write(&pmd->root_lock); - r = __delete_device(pmd, dev); + if (!pmd->fail_io) + r = __delete_device(pmd, dev); up_write(&pmd->root_lock); return r; @@ -1058,28 +1138,40 @@ int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata 
*pmd, uint64_t current_id, uint64_t new_id) { + int r = -EINVAL; + down_write(&pmd->root_lock); + + if (pmd->fail_io) + goto out; + if (pmd->trans_id != current_id) { - up_write(&pmd->root_lock); DMERR("mismatched transaction id"); - return -EINVAL; + goto out; } pmd->trans_id = new_id; - pmd->need_commit = 1; + r = 0; + +out: up_write(&pmd->root_lock); - return 0; + return r; } int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd, uint64_t *result) { + int r = -EINVAL; + down_read(&pmd->root_lock); - *result = pmd->trans_id; + if (!pmd->fail_io) { + *result = pmd->trans_id; + r = 0; + } up_read(&pmd->root_lock); - return 0; + return r; } static int __reserve_metadata_snap(struct dm_pool_metadata *pmd) @@ -1108,8 +1200,6 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd) dm_tm_dec(pmd->tm, held_root); dm_tm_unlock(pmd->tm, copy); - pmd->need_commit = 1; - return -EBUSY; } @@ -1131,29 +1221,25 @@ static int __reserve_metadata_snap(struct dm_pool_metadata *pmd) /* * Write the held root into the superblock. */ - r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, - &sb_validator, &sblock); + r = superblock_lock(pmd, &sblock); if (r) { dm_tm_dec(pmd->tm, held_root); - pmd->need_commit = 1; return r; } disk_super = dm_block_data(sblock); disk_super->held_root = cpu_to_le64(held_root); dm_bm_unlock(sblock); - - pmd->need_commit = 1; - return 0; } int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd) { - int r; + int r = -EINVAL; down_write(&pmd->root_lock); - r = __reserve_metadata_snap(pmd); + if (!pmd->fail_io) + r = __reserve_metadata_snap(pmd); up_write(&pmd->root_lock); return r; @@ -1166,15 +1252,13 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd) struct dm_block *sblock, *copy; dm_block_t held_root; - r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, - &sb_validator, &sblock); + r = superblock_lock(pmd, &sblock); if (r) return r; disk_super = dm_block_data(sblock); held_root = le64_to_cpu(disk_super->held_root); disk_super->held_root = cpu_to_le64(0); - pmd->need_commit = 1; dm_bm_unlock(sblock); @@ -1197,10 +1281,11 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd) int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd) { - int r; + int r = -EINVAL; down_write(&pmd->root_lock); - r = __release_metadata_snap(pmd); + if (!pmd->fail_io) + r = __release_metadata_snap(pmd); up_write(&pmd->root_lock); return r; @@ -1227,10 +1312,11 @@ static int __get_metadata_snap(struct dm_pool_metadata *pmd, int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd, dm_block_t *result) { - int r; + int r = -EINVAL; down_read(&pmd->root_lock); - r = __get_metadata_snap(pmd, result); + if (!pmd->fail_io) + r = __get_metadata_snap(pmd, result); up_read(&pmd->root_lock); return r; @@ -1239,10 +1325,11 @@ int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd, int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev, struct dm_thin_device **td) { - int r; + int r = -EINVAL; down_write(&pmd->root_lock); - r = __open_device(pmd, dev, 0, td); + if (!pmd->fail_io) + r = __open_device(pmd, dev, 0, td); up_write(&pmd->root_lock); return r; @@ -1262,7 +1349,7 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td) return td->id; } -static int __snapshotted_since(struct dm_thin_device *td, uint32_t time) +static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time) { return td->snapshotted_time > time; } @@ -1270,28 +1357,31 @@ static int __snapshotted_since(struct dm_thin_device *td, 
uint32_t time) int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block, int can_block, struct dm_thin_lookup_result *result) { - int r; + int r = -EINVAL; uint64_t block_time = 0; __le64 value; struct dm_pool_metadata *pmd = td->pmd; dm_block_t keys[2] = { td->id, block }; + struct dm_btree_info *info; if (can_block) { down_read(&pmd->root_lock); - r = dm_btree_lookup(&pmd->info, pmd->root, keys, &value); - if (!r) - block_time = le64_to_cpu(value); - up_read(&pmd->root_lock); - - } else if (down_read_trylock(&pmd->root_lock)) { - r = dm_btree_lookup(&pmd->nb_info, pmd->root, keys, &value); - if (!r) - block_time = le64_to_cpu(value); - up_read(&pmd->root_lock); - - } else + info = &pmd->info; + } else if (down_read_trylock(&pmd->root_lock)) + info = &pmd->nb_info; + else return -EWOULDBLOCK; + if (pmd->fail_io) + goto out; + + r = dm_btree_lookup(info, pmd->root, keys, &value); + if (!r) + block_time = le64_to_cpu(value); + +out: + up_read(&pmd->root_lock); + if (!r) { dm_block_t exception_block; uint32_t exception_time; @@ -1312,7 +1402,6 @@ static int __insert(struct dm_thin_device *td, dm_block_t block, struct dm_pool_metadata *pmd = td->pmd; dm_block_t keys[2] = { td->id, block }; - pmd->need_commit = 1; value = cpu_to_le64(pack_block_time(data_block, pmd->time)); __dm_bless_for_disk(&value); @@ -1321,10 +1410,9 @@ static int __insert(struct dm_thin_device *td, dm_block_t block, if (r) return r; - if (inserted) { + td->changed = 1; + if (inserted) td->mapped_blocks++; - td->changed = 1; - } return 0; } @@ -1332,10 +1420,11 @@ static int __insert(struct dm_thin_device *td, dm_block_t block, int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block, dm_block_t data_block) { - int r; + int r = -EINVAL; down_write(&td->pmd->root_lock); - r = __insert(td, block, data_block); + if (!td->pmd->fail_io) + r = __insert(td, block, data_block); up_write(&td->pmd->root_lock); return r; @@ -1353,31 +1442,51 @@ static int __remove(struct dm_thin_device *td, dm_block_t block) td->mapped_blocks--; td->changed = 1; - pmd->need_commit = 1; return 0; } int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block) { - int r; + int r = -EINVAL; down_write(&td->pmd->root_lock); - r = __remove(td, block); + if (!td->pmd->fail_io) + r = __remove(td, block); up_write(&td->pmd->root_lock); return r; } -int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result) +bool dm_thin_changed_this_transaction(struct dm_thin_device *td) { int r; - down_write(&pmd->root_lock); + down_read(&td->pmd->root_lock); + r = td->changed; + up_read(&td->pmd->root_lock); - r = dm_sm_new_block(pmd->data_sm, result); - pmd->need_commit = 1; + return r; +} + +bool dm_thin_aborted_changes(struct dm_thin_device *td) +{ + bool r; + down_read(&td->pmd->root_lock); + r = td->aborted_with_changes; + up_read(&td->pmd->root_lock); + + return r; +} + +int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result) +{ + int r = -EINVAL; + + down_write(&pmd->root_lock); + if (!pmd->fail_io) + r = dm_sm_new_block(pmd->data_sm, result); up_write(&pmd->root_lock); return r; @@ -1385,9 +1494,11 @@ int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result) int dm_pool_commit_metadata(struct dm_pool_metadata *pmd) { - int r; + int r = -EINVAL; down_write(&pmd->root_lock); + if (pmd->fail_io) + goto out; r = __commit_transaction(pmd); if (r <= 0) @@ -1402,12 +1513,41 @@ out: return r; } +static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd) +{ + struct 
dm_thin_device *td; + + list_for_each_entry(td, &pmd->thin_devices, list) + td->aborted_with_changes = td->changed; +} + +int dm_pool_abort_metadata(struct dm_pool_metadata *pmd) +{ + int r = -EINVAL; + + down_write(&pmd->root_lock); + if (pmd->fail_io) + goto out; + + __set_abort_with_changes_flags(pmd); + __destroy_persistent_data_objects(pmd); + r = __create_persistent_data_objects(pmd, false); + if (r) + pmd->fail_io = true; + +out: + up_write(&pmd->root_lock); + + return r; +} + int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result) { - int r; + int r = -EINVAL; down_read(&pmd->root_lock); - r = dm_sm_get_nr_free(pmd->data_sm, result); + if (!pmd->fail_io) + r = dm_sm_get_nr_free(pmd->data_sm, result); up_read(&pmd->root_lock); return r; @@ -1416,10 +1556,11 @@ int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *resul int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd, dm_block_t *result) { - int r; + int r = -EINVAL; down_read(&pmd->root_lock); - r = dm_sm_get_nr_free(pmd->metadata_sm, result); + if (!pmd->fail_io) + r = dm_sm_get_nr_free(pmd->metadata_sm, result); up_read(&pmd->root_lock); return r; @@ -1428,10 +1569,11 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd, int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result) { - int r; + int r = -EINVAL; down_read(&pmd->root_lock); - r = dm_sm_get_nr_blocks(pmd->metadata_sm, result); + if (!pmd->fail_io) + r = dm_sm_get_nr_blocks(pmd->metadata_sm, result); up_read(&pmd->root_lock); return r; @@ -1448,10 +1590,11 @@ int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result) int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result) { - int r; + int r = -EINVAL; down_read(&pmd->root_lock); - r = dm_sm_get_nr_blocks(pmd->data_sm, result); + if (!pmd->fail_io) + r = dm_sm_get_nr_blocks(pmd->data_sm, result); up_read(&pmd->root_lock); return r; @@ -1459,13 +1602,17 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result) int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result) { + int r = -EINVAL; struct dm_pool_metadata *pmd = td->pmd; down_read(&pmd->root_lock); - *result = td->mapped_blocks; + if (!pmd->fail_io) { + *result = td->mapped_blocks; + r = 0; + } up_read(&pmd->root_lock); - return 0; + return r; } static int __highest_block(struct dm_thin_device *td, dm_block_t *result) @@ -1487,11 +1634,12 @@ static int __highest_block(struct dm_thin_device *td, dm_block_t *result) int dm_thin_get_highest_mapped_block(struct dm_thin_device *td, dm_block_t *result) { - int r; + int r = -EINVAL; struct dm_pool_metadata *pmd = td->pmd; down_read(&pmd->root_lock); - r = __highest_block(td, result); + if (!pmd->fail_io) + r = __highest_block(td, result); up_read(&pmd->root_lock); return r; @@ -1514,20 +1662,25 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) return -EINVAL; } - r = dm_sm_extend(pmd->data_sm, new_count - old_count); - if (!r) - pmd->need_commit = 1; - - return r; + return dm_sm_extend(pmd->data_sm, new_count - old_count); } int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) { - int r; + int r = -EINVAL; down_write(&pmd->root_lock); - r = __resize_data_dev(pmd, new_count); + if (!pmd->fail_io) + r = __resize_data_dev(pmd, new_count); up_write(&pmd->root_lock); return r; } + +void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd) +{ + 
down_write(&pmd->root_lock); + pmd->read_only = true; + dm_bm_set_read_only(pmd->bm); + up_write(&pmd->root_lock); +} diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index b88918ccdaf6..0cecc3702885 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -38,7 +38,8 @@ typedef uint64_t dm_thin_id; * Reopens or creates a new, empty metadata volume. */ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev, - sector_t data_block_size); + sector_t data_block_size, + bool format_device); int dm_pool_metadata_close(struct dm_pool_metadata *pmd); @@ -79,6 +80,16 @@ int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd, int dm_pool_commit_metadata(struct dm_pool_metadata *pmd); /* + * Discards all uncommitted changes. Rereads the superblock, rolling back + * to the last good transaction. Thin devices remain open. + * dm_thin_aborted_changes() tells you if they had uncommitted changes. + * + * If this call fails it's only useful to call dm_pool_metadata_close(). + * All other methods will fail with -EINVAL. + */ +int dm_pool_abort_metadata(struct dm_pool_metadata *pmd); + +/* * Set/get userspace transaction id. */ int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd, @@ -119,7 +130,7 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td); struct dm_thin_lookup_result { dm_block_t block; - int shared; + unsigned shared:1; }; /* @@ -147,6 +158,10 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block); /* * Queries. */ +bool dm_thin_changed_this_transaction(struct dm_thin_device *td); + +bool dm_thin_aborted_changes(struct dm_thin_device *td); + int dm_thin_get_highest_mapped_block(struct dm_thin_device *td, dm_block_t *highest_mapped); @@ -171,6 +186,12 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); */ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); +/* + * Flicks the underlying block manager into read only mode, so you know + * that nothing is changing. + */ +void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); + /*----------------------------------------------------------------*/ #endif diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 68694da0d21d..af1fc3b2c2ad 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1,10 +1,11 @@ /* - * Copyright (C) 2011 Red Hat UK. + * Copyright (C) 2011-2012 Red Hat UK. * * This file is released under the GPL. */ #include "dm-thin-metadata.h" +#include "dm.h" #include <linux/device-mapper.h> #include <linux/dm-io.h> @@ -19,7 +20,7 @@ /* * Tunable constants */ -#define ENDIO_HOOK_POOL_SIZE 10240 +#define ENDIO_HOOK_POOL_SIZE 1024 #define DEFERRED_SET_SIZE 64 #define MAPPING_POOL_SIZE 1024 #define PRISON_CELLS 1024 @@ -496,12 +497,27 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, */ struct dm_thin_new_mapping; +/* + * The pool runs in 3 modes. Ordered in degraded order for comparisons. 
+ */ +enum pool_mode { + PM_WRITE, /* metadata may be changed */ + PM_READ_ONLY, /* metadata may not be changed */ + PM_FAIL, /* all I/O fails */ +}; + struct pool_features { + enum pool_mode mode; + unsigned zero_new_blocks:1; unsigned discard_enabled:1; unsigned discard_passdown:1; }; +struct thin_c; +typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio); +typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m); + struct pool { struct list_head list; struct dm_target *ti; /* Only set if a pool target is bound */ @@ -510,10 +526,9 @@ struct pool { struct block_device *md_dev; struct dm_pool_metadata *pmd; - uint32_t sectors_per_block; - unsigned block_shift; - dm_block_t offset_mask; dm_block_t low_water_blocks; + uint32_t sectors_per_block; + int sectors_per_block_shift; struct pool_features pf; unsigned low_water_triggered:1; /* A dm event has been sent */ @@ -526,8 +541,8 @@ struct pool { struct work_struct worker; struct delayed_work waker; - unsigned ref_count; unsigned long last_commit_jiffies; + unsigned ref_count; spinlock_t lock; struct bio_list deferred_bios; @@ -543,8 +558,17 @@ struct pool { struct dm_thin_new_mapping *next_mapping; mempool_t *mapping_pool; mempool_t *endio_hook_pool; + + process_bio_fn process_bio; + process_bio_fn process_discard; + + process_mapping_fn process_prepared_mapping; + process_mapping_fn process_prepared_discard; }; +static enum pool_mode get_pool_mode(struct pool *pool); +static void set_pool_mode(struct pool *pool, enum pool_mode mode); + /* * Target context for a pool. */ @@ -679,16 +703,28 @@ static void requeue_io(struct thin_c *tc) static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) { - return bio->bi_sector >> tc->pool->block_shift; + sector_t block_nr = bio->bi_sector; + + if (tc->pool->sectors_per_block_shift < 0) + (void) sector_div(block_nr, tc->pool->sectors_per_block); + else + block_nr >>= tc->pool->sectors_per_block_shift; + + return block_nr; } static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) { struct pool *pool = tc->pool; + sector_t bi_sector = bio->bi_sector; bio->bi_bdev = tc->pool_dev->bdev; - bio->bi_sector = (block << pool->block_shift) + - (bio->bi_sector & pool->offset_mask); + if (tc->pool->sectors_per_block_shift < 0) + bio->bi_sector = (block * pool->sectors_per_block) + + sector_div(bi_sector, pool->sectors_per_block); + else + bio->bi_sector = (block << pool->sectors_per_block_shift) | + (bi_sector & (pool->sectors_per_block - 1)); } static void remap_to_origin(struct thin_c *tc, struct bio *bio) @@ -696,21 +732,39 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio) bio->bi_bdev = tc->origin_dev->bdev; } +static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) +{ + return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && + dm_thin_changed_this_transaction(tc->td); +} + static void issue(struct thin_c *tc, struct bio *bio) { struct pool *pool = tc->pool; unsigned long flags; + if (!bio_triggers_commit(tc, bio)) { + generic_make_request(bio); + return; + } + /* - * Batch together any FUA/FLUSH bios we find and then issue - * a single commit for them in process_deferred_bios(). + * Complete bio with an error if earlier I/O caused changes to + * the metadata that can't be committed e.g, due to I/O errors + * on the metadata device. 
*/ - if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) { - spin_lock_irqsave(&pool->lock, flags); - bio_list_add(&pool->deferred_flush_bios, bio); - spin_unlock_irqrestore(&pool->lock, flags); - } else - generic_make_request(bio); + if (dm_thin_aborted_changes(tc->td)) { + bio_io_error(bio); + return; + } + + /* + * Batch together any bios that trigger commits and then issue a + * single commit for them in process_deferred_bios(). + */ + spin_lock_irqsave(&pool->lock, flags); + bio_list_add(&pool->deferred_flush_bios, bio); + spin_unlock_irqrestore(&pool->lock, flags); } static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio) @@ -847,6 +901,14 @@ static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell wake_worker(pool); } +static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) +{ + if (m->bio) + m->bio->bi_end_io = m->saved_bi_end_io; + cell_error(m->cell); + list_del(&m->list); + mempool_free(m, m->tc->pool->mapping_pool); +} static void process_prepared_mapping(struct dm_thin_new_mapping *m) { struct thin_c *tc = m->tc; @@ -859,7 +921,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) if (m->err) { cell_error(m->cell); - return; + goto out; } /* @@ -871,7 +933,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) if (r) { DMERR("dm_thin_insert_block() failed"); cell_error(m->cell); - return; + goto out; } /* @@ -886,22 +948,25 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) } else cell_defer(tc, m->cell, m->data_block); +out: list_del(&m->list); mempool_free(m, tc->pool->mapping_pool); } -static void process_prepared_discard(struct dm_thin_new_mapping *m) +static void process_prepared_discard_fail(struct dm_thin_new_mapping *m) { - int r; struct thin_c *tc = m->tc; - r = dm_thin_remove_block(tc->td, m->virt_block); - if (r) - DMERR("dm_thin_remove_block() failed"); + bio_io_error(m->bio); + cell_defer_except(tc, m->cell); + cell_defer_except(tc, m->cell2); + mempool_free(m, tc->pool->mapping_pool); +} + +static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m) +{ + struct thin_c *tc = m->tc; - /* - * Pass the discard down to the underlying device? 
- */ if (m->pass_discard) remap_and_issue(tc, m->bio, m->data_block); else @@ -912,8 +977,20 @@ static void process_prepared_discard(struct dm_thin_new_mapping *m) mempool_free(m, tc->pool->mapping_pool); } +static void process_prepared_discard(struct dm_thin_new_mapping *m) +{ + int r; + struct thin_c *tc = m->tc; + + r = dm_thin_remove_block(tc->td, m->virt_block); + if (r) + DMERR("dm_thin_remove_block() failed"); + + process_prepared_discard_passdown(m); +} + static void process_prepared(struct pool *pool, struct list_head *head, - void (*fn)(struct dm_thin_new_mapping *)) + process_mapping_fn *fn) { unsigned long flags; struct list_head maps; @@ -925,7 +1002,7 @@ static void process_prepared(struct pool *pool, struct list_head *head, spin_unlock_irqrestore(&pool->lock, flags); list_for_each_entry_safe(m, tmp, &maps, list) - fn(m); + (*fn)(m); } /* @@ -933,9 +1010,7 @@ static void process_prepared(struct pool *pool, struct list_head *head, */ static int io_overlaps_block(struct pool *pool, struct bio *bio) { - return !(bio->bi_sector & pool->offset_mask) && - (bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT)); - + return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT); } static int io_overwrites_block(struct pool *pool, struct bio *bio) @@ -1093,6 +1168,35 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, } } +static int commit(struct pool *pool) +{ + int r; + + r = dm_pool_commit_metadata(pool->pmd); + if (r) + DMERR("commit failed, error = %d", r); + + return r; +} + +/* + * A non-zero return indicates read_only or fail_io mode. + * Many callers don't care about the return value. + */ +static int commit_or_fallback(struct pool *pool) +{ + int r; + + if (get_pool_mode(pool) != PM_WRITE) + return -EINVAL; + + r = commit(pool); + if (r) + set_pool_mode(pool, PM_READ_ONLY); + + return r; +} + static int alloc_data_block(struct thin_c *tc, dm_block_t *result) { int r; @@ -1121,12 +1225,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) * Try to commit to see if that will free up some * more space. */ - r = dm_pool_commit_metadata(pool->pmd); - if (r) { - DMERR("%s: dm_pool_commit_metadata() failed, error = %d", - __func__, r); - return r; - } + (void) commit_or_fallback(pool); r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); if (r) @@ -1218,7 +1317,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio) */ m = get_next_mapping(pool); m->tc = tc; - m->pass_discard = (!lookup_result.shared) & pool->pf.discard_passdown; + m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown; m->virt_block = block; m->data_block = lookup_result.block; m->cell = cell; @@ -1234,15 +1333,10 @@ static void process_discard(struct thin_c *tc, struct bio *bio) } } else { /* - * This path is hit if people are ignoring - * limits->discard_granularity. It ignores any - * part of the discard that is in a subsequent - * block. + * The DM core makes sure that the discard doesn't span + * a block boundary. So we submit the discard of a + * partial block appropriately. 
*/ - sector_t offset = bio->bi_sector - (block << pool->block_shift); - unsigned remaining = (pool->sectors_per_block - offset) << 9; - bio->bi_size = min(bio->bi_size, remaining); - cell_release_singleton(cell, bio); cell_release_singleton(cell2, bio); if ((!lookup_result.shared) && pool->pf.discard_passdown) @@ -1310,7 +1404,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, if (bio_detain(pool->prison, &key, bio, &cell)) return; - if (bio_data_dir(bio) == WRITE) + if (bio_data_dir(bio) == WRITE && bio->bi_size) break_sharing(tc, bio, block, &key, lookup_result, cell); else { struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr; @@ -1362,6 +1456,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block default: DMERR("%s: alloc_data_block() failed, error = %d", __func__, r); + set_pool_mode(tc->pool, PM_READ_ONLY); cell_error(cell); break; } @@ -1419,6 +1514,49 @@ static void process_bio(struct thin_c *tc, struct bio *bio) } } +static void process_bio_read_only(struct thin_c *tc, struct bio *bio) +{ + int r; + int rw = bio_data_dir(bio); + dm_block_t block = get_bio_block(tc, bio); + struct dm_thin_lookup_result lookup_result; + + r = dm_thin_find_block(tc->td, block, 1, &lookup_result); + switch (r) { + case 0: + if (lookup_result.shared && (rw == WRITE) && bio->bi_size) + bio_io_error(bio); + else + remap_and_issue(tc, bio, lookup_result.block); + break; + + case -ENODATA: + if (rw != READ) { + bio_io_error(bio); + break; + } + + if (tc->origin_dev) { + remap_to_origin_and_issue(tc, bio); + break; + } + + zero_fill_bio(bio); + bio_endio(bio, 0); + break; + + default: + DMERR("dm_thin_find_block() failed, error = %d", r); + bio_io_error(bio); + break; + } +} + +static void process_bio_fail(struct thin_c *tc, struct bio *bio) +{ + bio_io_error(bio); +} + static int need_commit_due_to_time(struct pool *pool) { return jiffies < pool->last_commit_jiffies || @@ -1430,7 +1568,6 @@ static void process_deferred_bios(struct pool *pool) unsigned long flags; struct bio *bio; struct bio_list bios; - int r; bio_list_init(&bios); @@ -1457,9 +1594,9 @@ static void process_deferred_bios(struct pool *pool) } if (bio->bi_rw & REQ_DISCARD) - process_discard(tc, bio); + pool->process_discard(tc, bio); else - process_bio(tc, bio); + pool->process_bio(tc, bio); } /* @@ -1475,10 +1612,7 @@ static void process_deferred_bios(struct pool *pool) if (bio_list_empty(&bios) && !need_commit_due_to_time(pool)) return; - r = dm_pool_commit_metadata(pool->pmd); - if (r) { - DMERR("%s: dm_pool_commit_metadata() failed, error = %d", - __func__, r); + if (commit_or_fallback(pool)) { while ((bio = bio_list_pop(&bios))) bio_io_error(bio); return; @@ -1493,8 +1627,8 @@ static void do_worker(struct work_struct *ws) { struct pool *pool = container_of(ws, struct pool, worker); - process_prepared(pool, &pool->prepared_mappings, process_prepared_mapping); - process_prepared(pool, &pool->prepared_discards, process_prepared_discard); + process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping); + process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard); process_deferred_bios(pool); } @@ -1511,6 +1645,52 @@ static void do_waker(struct work_struct *ws) /*----------------------------------------------------------------*/ +static enum pool_mode get_pool_mode(struct pool *pool) +{ + return pool->pf.mode; +} + +static void set_pool_mode(struct pool *pool, enum pool_mode mode) +{ + int r; + + pool->pf.mode = mode; + + switch (mode) { + case 
PM_FAIL: + DMERR("switching pool to failure mode"); + pool->process_bio = process_bio_fail; + pool->process_discard = process_bio_fail; + pool->process_prepared_mapping = process_prepared_mapping_fail; + pool->process_prepared_discard = process_prepared_discard_fail; + break; + + case PM_READ_ONLY: + DMERR("switching pool to read-only mode"); + r = dm_pool_abort_metadata(pool->pmd); + if (r) { + DMERR("aborting transaction failed"); + set_pool_mode(pool, PM_FAIL); + } else { + dm_pool_metadata_read_only(pool->pmd); + pool->process_bio = process_bio_read_only; + pool->process_discard = process_discard; + pool->process_prepared_mapping = process_prepared_mapping_fail; + pool->process_prepared_discard = process_prepared_discard_passdown; + } + break; + + case PM_WRITE: + pool->process_bio = process_bio; + pool->process_discard = process_discard; + pool->process_prepared_mapping = process_prepared_mapping; + pool->process_prepared_discard = process_prepared_discard; + break; + } +} + +/*----------------------------------------------------------------*/ + /* * Mapping functions. */ @@ -1556,6 +1736,12 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio, struct dm_thin_lookup_result result; map_context->ptr = thin_hook_bio(tc, bio); + + if (get_pool_mode(tc->pool) == PM_FAIL) { + bio_io_error(bio); + return DM_MAPIO_SUBMITTED; + } + if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) { thin_defer_bio(tc, bio); return DM_MAPIO_SUBMITTED; @@ -1592,14 +1778,35 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio, break; case -ENODATA: + if (get_pool_mode(tc->pool) == PM_READ_ONLY) { + /* + * This block isn't provisioned, and we have no way + * of doing so. Just error it. + */ + bio_io_error(bio); + r = DM_MAPIO_SUBMITTED; + break; + } + /* fall through */ + + case -EWOULDBLOCK: /* * In future, the failed dm_thin_find_block above could * provide the hint to load the metadata into cache. */ - case -EWOULDBLOCK: thin_defer_bio(tc, bio); r = DM_MAPIO_SUBMITTED; break; + + default: + /* + * Must always call bio_io_error on failure. + * dm_thin_find_block can fail with -EINVAL if the + * pool is switched to fail-io mode. + */ + bio_io_error(bio); + r = DM_MAPIO_SUBMITTED; + break; } return r; @@ -1636,15 +1843,26 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti) { struct pool_c *pt = ti->private; + /* + * We want to make sure that degraded pools are never upgraded. + */ + enum pool_mode old_mode = pool->pf.mode; + enum pool_mode new_mode = pt->pf.mode; + + if (old_mode > new_mode) + new_mode = old_mode; + pool->ti = ti; pool->low_water_blocks = pt->low_water_blocks; pool->pf = pt->pf; + set_pool_mode(pool, new_mode); /* * If discard_passdown was enabled verify that the data device * supports discards. Disable discard_passdown if not; otherwise * -EOPNOTSUPP will be returned. */ + /* FIXME: pull this out into a sep fn. */ if (pt->pf.discard_passdown) { struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); if (!q || !blk_queue_discard(q)) { @@ -1670,6 +1888,7 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti) /* Initialize pool features. 
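The "never upgrade" check in bind_control_target() above works because enum pool_mode is declared in order of increasing severity (PM_WRITE < PM_READ_ONLY < PM_FAIL), so keeping the larger of the two values always keeps the more degraded mode. A hypothetical helper, not part of the patch, that states the same rule:

/*
 * Illustrative only: pick the more degraded of two pool modes, relying on
 * the PM_WRITE < PM_READ_ONLY < PM_FAIL declaration order.
 */
static enum pool_mode worst_pool_mode(enum pool_mode a, enum pool_mode b)
{
        return a > b ? a : b;
}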
*/ static void pool_features_init(struct pool_features *pf) { + pf->mode = PM_WRITE; pf->zero_new_blocks = 1; pf->discard_enabled = 1; pf->discard_passdown = 1; @@ -1700,14 +1919,16 @@ static struct kmem_cache *_endio_hook_cache; static struct pool *pool_create(struct mapped_device *pool_md, struct block_device *metadata_dev, - unsigned long block_size, char **error) + unsigned long block_size, + int read_only, char **error) { int r; void *err_p; struct pool *pool; struct dm_pool_metadata *pmd; + bool format_device = read_only ? false : true; - pmd = dm_pool_metadata_open(metadata_dev, block_size); + pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device); if (IS_ERR(pmd)) { *error = "Error creating metadata object"; return (struct pool *)pmd; @@ -1722,8 +1943,10 @@ static struct pool *pool_create(struct mapped_device *pool_md, pool->pmd = pmd; pool->sectors_per_block = block_size; - pool->block_shift = ffs(block_size) - 1; - pool->offset_mask = block_size - 1; + if (block_size & (block_size - 1)) + pool->sectors_per_block_shift = -1; + else + pool->sectors_per_block_shift = __ffs(block_size); pool->low_water_blocks = 0; pool_features_init(&pool->pf); pool->prison = prison_create(PRISON_CELLS); @@ -1822,25 +2045,29 @@ static void __pool_dec(struct pool *pool) static struct pool *__pool_find(struct mapped_device *pool_md, struct block_device *metadata_dev, - unsigned long block_size, char **error, - int *created) + unsigned long block_size, int read_only, + char **error, int *created) { struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev); if (pool) { - if (pool->pool_md != pool_md) + if (pool->pool_md != pool_md) { + *error = "metadata device already in use by a pool"; return ERR_PTR(-EBUSY); + } __pool_inc(pool); } else { pool = __pool_table_lookup(pool_md); if (pool) { - if (pool->md_dev != metadata_dev) + if (pool->md_dev != metadata_dev) { + *error = "different pool cannot replace a pool"; return ERR_PTR(-EINVAL); + } __pool_inc(pool); } else { - pool = pool_create(pool_md, metadata_dev, block_size, error); + pool = pool_create(pool_md, metadata_dev, block_size, read_only, error); *created = 1; } } @@ -1891,19 +2118,23 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, arg_name = dm_shift_arg(as); argc--; - if (!strcasecmp(arg_name, "skip_block_zeroing")) { + if (!strcasecmp(arg_name, "skip_block_zeroing")) pf->zero_new_blocks = 0; - continue; - } else if (!strcasecmp(arg_name, "ignore_discard")) { + + else if (!strcasecmp(arg_name, "ignore_discard")) pf->discard_enabled = 0; - continue; - } else if (!strcasecmp(arg_name, "no_discard_passdown")) { + + else if (!strcasecmp(arg_name, "no_discard_passdown")) pf->discard_passdown = 0; - continue; - } - ti->error = "Unrecognised pool feature requested"; - r = -EINVAL; + else if (!strcasecmp(arg_name, "read_only")) + pf->mode = PM_READ_ONLY; + + else { + ti->error = "Unrecognised pool feature requested"; + r = -EINVAL; + break; + } } return r; @@ -1967,7 +2198,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) if (kstrtoul(argv[2], 10, &block_size) || !block_size || block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS || block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS || - !is_power_of_2(block_size)) { + block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) { ti->error = "Invalid block size"; r = -EINVAL; goto out; @@ -1996,7 +2227,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) } pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, 
- block_size, &ti->error, &pool_created); + block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created); if (IS_ERR(pool)) { r = PTR_ERR(pool); goto out_free_pt; @@ -2014,6 +2245,15 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) goto out_flags_changed; } + /* + * The block layer requires discard_granularity to be a power of 2. + */ + if (pf.discard_enabled && !is_power_of_2(block_size)) { + ti->error = "Discard support must be disabled when the block size is not a power of 2"; + r = -EINVAL; + goto out_flags_changed; + } + pt->pool = pool; pt->ti = ti; pt->metadata_dev = metadata_dev; @@ -2033,7 +2273,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) * stacking of discard limits (this keeps the pool and * thin devices' discard limits consistent). */ - ti->discards_supported = 1; + ti->discards_supported = true; } ti->private = pt; @@ -2093,7 +2333,8 @@ static int pool_preresume(struct dm_target *ti) int r; struct pool_c *pt = ti->private; struct pool *pool = pt->pool; - dm_block_t data_size, sb_data_size; + sector_t data_size = ti->len; + dm_block_t sb_data_size; /* * Take control of the pool object. @@ -2102,7 +2343,8 @@ static int pool_preresume(struct dm_target *ti) if (r) return r; - data_size = ti->len >> pool->block_shift; + (void) sector_div(data_size, pool->sectors_per_block); + r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size); if (r) { DMERR("failed to retrieve data device size"); @@ -2111,22 +2353,19 @@ static int pool_preresume(struct dm_target *ti) if (data_size < sb_data_size) { DMERR("pool target too small, is %llu blocks (expected %llu)", - data_size, sb_data_size); + (unsigned long long)data_size, sb_data_size); return -EINVAL; } else if (data_size > sb_data_size) { r = dm_pool_resize_data_dev(pool->pmd, data_size); if (r) { DMERR("failed to resize data device"); + /* FIXME Stricter than necessary: Rollback transaction instead here */ + set_pool_mode(pool, PM_READ_ONLY); return r; } - r = dm_pool_commit_metadata(pool->pmd); - if (r) { - DMERR("%s: dm_pool_commit_metadata() failed, error = %d", - __func__, r); - return r; - } + (void) commit_or_fallback(pool); } return 0; @@ -2149,19 +2388,12 @@ static void pool_resume(struct dm_target *ti) static void pool_postsuspend(struct dm_target *ti) { - int r; struct pool_c *pt = ti->private; struct pool *pool = pt->pool; cancel_delayed_work(&pool->waker); flush_workqueue(pool->wq); - - r = dm_pool_commit_metadata(pool->pmd); - if (r < 0) { - DMERR("%s: dm_pool_commit_metadata() failed, error = %d", - __func__, r); - /* FIXME: invalidate device? 
error the next FUA or FLUSH bio ?*/ - } + (void) commit_or_fallback(pool); } static int check_arg_count(unsigned argc, unsigned args_required) @@ -2295,12 +2527,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct if (r) return r; - r = dm_pool_commit_metadata(pool->pmd); - if (r) { - DMERR("%s: dm_pool_commit_metadata() failed, error = %d", - __func__, r); - return r; - } + (void) commit_or_fallback(pool); r = dm_pool_reserve_metadata_snap(pool->pmd); if (r) @@ -2361,25 +2588,41 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv) else DMWARN("Unrecognised thin pool target message received: %s", argv[0]); - if (!r) { - r = dm_pool_commit_metadata(pool->pmd); - if (r) - DMERR("%s message: dm_pool_commit_metadata() failed, error = %d", - argv[0], r); - } + if (!r) + (void) commit_or_fallback(pool); return r; } +static void emit_flags(struct pool_features *pf, char *result, + unsigned sz, unsigned maxlen) +{ + unsigned count = !pf->zero_new_blocks + !pf->discard_enabled + + !pf->discard_passdown + (pf->mode == PM_READ_ONLY); + DMEMIT("%u ", count); + + if (!pf->zero_new_blocks) + DMEMIT("skip_block_zeroing "); + + if (!pf->discard_enabled) + DMEMIT("ignore_discard "); + + if (!pf->discard_passdown) + DMEMIT("no_discard_passdown "); + + if (pf->mode == PM_READ_ONLY) + DMEMIT("read_only "); +} + /* * Status line is: * <transaction id> <used metadata sectors>/<total metadata sectors> * <used data sectors>/<total data sectors> <held metadata root> */ static int pool_status(struct dm_target *ti, status_type_t type, - char *result, unsigned maxlen) + unsigned status_flags, char *result, unsigned maxlen) { - int r, count; + int r; unsigned sz = 0; uint64_t transaction_id; dm_block_t nr_free_blocks_data; @@ -2394,6 +2637,15 @@ static int pool_status(struct dm_target *ti, status_type_t type, switch (type) { case STATUSTYPE_INFO: + if (get_pool_mode(pool) == PM_FAIL) { + DMEMIT("Fail"); + break; + } + + /* Commit to ensure statistics aren't out-of-date */ + if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) + (void) commit_or_fallback(pool); + r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); if (r) @@ -2429,9 +2681,19 @@ static int pool_status(struct dm_target *ti, status_type_t type, (unsigned long long)nr_blocks_data); if (held_root) - DMEMIT("%llu", held_root); + DMEMIT("%llu ", held_root); + else + DMEMIT("- "); + + if (pool->pf.mode == PM_READ_ONLY) + DMEMIT("ro "); + else + DMEMIT("rw "); + + if (pool->pf.discard_enabled && pool->pf.discard_passdown) + DMEMIT("discard_passdown"); else - DMEMIT("-"); + DMEMIT("no_discard_passdown"); break; @@ -2441,20 +2703,7 @@ static int pool_status(struct dm_target *ti, status_type_t type, format_dev_t(buf2, pt->data_dev->bdev->bd_dev), (unsigned long)pool->sectors_per_block, (unsigned long long)pt->low_water_blocks); - - count = !pool->pf.zero_new_blocks + !pool->pf.discard_enabled + - !pt->pf.discard_passdown; - DMEMIT("%u ", count); - - if (!pool->pf.zero_new_blocks) - DMEMIT("skip_block_zeroing "); - - if (!pool->pf.discard_enabled) - DMEMIT("ignore_discard "); - - if (!pt->pf.discard_passdown) - DMEMIT("no_discard_passdown "); - + emit_flags(&pt->pf, result, sz, maxlen); break; } @@ -2492,7 +2741,8 @@ static void set_discard_limits(struct pool *pool, struct queue_limits *limits) /* * This is just a hint, and not enforced. We have to cope with - * bios that overlap 2 blocks. + * bios that cover a block partially. 
A discard that spans a block + * boundary is not sent to this target. */ limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; limits->discard_zeroes_data = pool->pf.zero_new_blocks; @@ -2513,7 +2763,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 2, 0}, + .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -2618,20 +2868,31 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) } __pool_inc(tc->pool); + if (get_pool_mode(tc->pool) == PM_FAIL) { + ti->error = "Couldn't open thin device, Pool is in fail mode"; + goto bad_thin_open; + } + r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); if (r) { ti->error = "Couldn't open thin internal device"; goto bad_thin_open; } - ti->split_io = tc->pool->sectors_per_block; + r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); + if (r) + goto bad_thin_open; + ti->num_flush_requests = 1; + ti->flush_supported = true; /* In case the pool supports discards, pass them on. */ if (tc->pool->pf.discard_enabled) { - ti->discards_supported = 1; + ti->discards_supported = true; ti->num_discard_requests = 1; - ti->discard_zeroes_data_unsupported = 1; + ti->discard_zeroes_data_unsupported = true; + /* Discard requests must be split on a block boundary */ + ti->split_discard_requests = true; } dm_put(pool_md); @@ -2712,7 +2973,7 @@ static void thin_postsuspend(struct dm_target *ti) * <nr mapped sectors> <highest mapped sector> */ static int thin_status(struct dm_target *ti, status_type_t type, - char *result, unsigned maxlen) + unsigned status_flags, char *result, unsigned maxlen) { int r; ssize_t sz = 0; @@ -2720,6 +2981,11 @@ static int thin_status(struct dm_target *ti, status_type_t type, char buf[BDEVNAME_SIZE]; struct thin_c *tc = ti->private; + if (get_pool_mode(tc->pool) == PM_FAIL) { + DMEMIT("Fail"); + return 0; + } + if (!tc->td) DMEMIT("-"); else { @@ -2757,19 +3023,21 @@ static int thin_status(struct dm_target *ti, status_type_t type, static int thin_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { - dm_block_t blocks; + sector_t blocks; struct thin_c *tc = ti->private; + struct pool *pool = tc->pool; /* * We can't call dm_pool_get_data_dev_size() since that blocks. So * we follow a more convoluted path through to the pool's target. 
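thin_ctr() above shows the conversion pattern for targets that previously assigned ti->split_io directly: call dm_set_target_max_io_len(), which is added to drivers/md/dm.c further down, and fail the constructor if it rejects the length. A hypothetical constructor fragment for some other target would take the same shape (the 8-sector chunk size and the function name are made up):

/* Hypothetical target constructor fragment; the chunk size is illustrative. */
static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        sector_t chunk_sectors = 8;
        int r;

        /* replaces the old "ti->split_io = chunk_sectors" assignment */
        r = dm_set_target_max_io_len(ti, chunk_sectors);
        if (r)
                return r;

        ti->num_flush_requests = 1;

        return 0;
}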
*/ - if (!tc->pool->ti) + if (!pool->ti) return 0; /* nothing is bound */ - blocks = tc->pool->ti->len >> tc->pool->block_shift; + blocks = pool->ti->len; + (void) sector_div(blocks, pool->sectors_per_block); if (blocks) - return fn(ti, tc->pool_dev, 0, tc->pool->sectors_per_block * blocks, data); + return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); return 0; } @@ -2786,7 +3054,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type thin_target = { .name = "thin", - .version = {1, 1, 0}, + .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index fa365d39b612..254d19268ad2 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -515,7 +515,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio, * Status: V (valid) or C (corruption found) */ static int verity_status(struct dm_target *ti, status_type_t type, - char *result, unsigned maxlen) + unsigned status_flags, char *result, unsigned maxlen) { struct dm_verity *v = ti->private; unsigned sz = 0; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e24143cc2040..4e09b6ff5b49 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -968,22 +968,41 @@ static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti static sector_t max_io_len(sector_t sector, struct dm_target *ti) { sector_t len = max_io_len_target_boundary(sector, ti); + sector_t offset, max_len; /* - * Does the target need to split even further ? + * Does the target need to split even further? */ - if (ti->split_io) { - sector_t boundary; - sector_t offset = dm_target_offset(ti, sector); - boundary = ((offset + ti->split_io) & ~(ti->split_io - 1)) - - offset; - if (len > boundary) - len = boundary; + if (ti->max_io_len) { + offset = dm_target_offset(ti, sector); + if (unlikely(ti->max_io_len & (ti->max_io_len - 1))) + max_len = sector_div(offset, ti->max_io_len); + else + max_len = offset & (ti->max_io_len - 1); + max_len = ti->max_io_len - max_len; + + if (len > max_len) + len = max_len; } return len; } +int dm_set_target_max_io_len(struct dm_target *ti, sector_t len) +{ + if (len > UINT_MAX) { + DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)", + (unsigned long long)len, UINT_MAX); + ti->error = "Maximum size of target IO is too large"; + return -EINVAL; + } + + ti->max_io_len = (uint32_t) len; + + return 0; +} +EXPORT_SYMBOL_GPL(dm_set_target_max_io_len); + static void __map_bio(struct dm_target *ti, struct bio *clone, struct dm_target_io *tio) { @@ -1196,7 +1215,10 @@ static int __clone_and_map_discard(struct clone_info *ci) if (!ti->num_discard_requests) return -EOPNOTSUPP; - len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); + if (!ti->split_discard_requests) + len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti)); + else + len = min(ci->sector_count, max_io_len(ci->sector, ti)); __issue_target_requests(ci, ti, ti->num_discard_requests, len); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index b7dacd59d8d7..52eef493d266 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -23,6 +23,11 @@ #define DM_SUSPEND_NOFLUSH_FLAG (1 << 1) /* + * Status feature flags + */ +#define DM_STATUS_NOFLUSH_FLAG (1 << 0) + +/* * Type of table and mapped_device's mempool */ #define DM_TYPE_NONE 0 diff --git a/drivers/md/persistent-data/Makefile b/drivers/md/persistent-data/Makefile index cfa95f662230..d8e7cb767c1e 100644 --- 
a/drivers/md/persistent-data/Makefile +++ b/drivers/md/persistent-data/Makefile @@ -1,7 +1,6 @@ obj-$(CONFIG_DM_PERSISTENT_DATA) += dm-persistent-data.o dm-persistent-data-objs := \ dm-block-manager.o \ - dm-space-map-checker.o \ dm-space-map-common.o \ dm-space-map-disk.o \ dm-space-map-metadata.o \ diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 0317ecdc6e53..5ba277768d99 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -325,11 +325,6 @@ static struct dm_buffer *to_buffer(struct dm_block *b) return (struct dm_buffer *) b; } -static struct dm_bufio_client *to_bufio(struct dm_block_manager *bm) -{ - return (struct dm_bufio_client *) bm; -} - dm_block_t dm_block_location(struct dm_block *b) { return dm_bufio_get_block_number(to_buffer(b)); @@ -367,34 +362,60 @@ static void dm_block_manager_write_callback(struct dm_buffer *buf) /*---------------------------------------------------------------- * Public interface *--------------------------------------------------------------*/ +struct dm_block_manager { + struct dm_bufio_client *bufio; + bool read_only:1; +}; + struct dm_block_manager *dm_block_manager_create(struct block_device *bdev, unsigned block_size, unsigned cache_size, unsigned max_held_per_thread) { - return (struct dm_block_manager *) - dm_bufio_client_create(bdev, block_size, max_held_per_thread, - sizeof(struct buffer_aux), - dm_block_manager_alloc_callback, - dm_block_manager_write_callback); + int r; + struct dm_block_manager *bm; + + bm = kmalloc(sizeof(*bm), GFP_KERNEL); + if (!bm) { + r = -ENOMEM; + goto bad; + } + + bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread, + sizeof(struct buffer_aux), + dm_block_manager_alloc_callback, + dm_block_manager_write_callback); + if (IS_ERR(bm->bufio)) { + r = PTR_ERR(bm->bufio); + kfree(bm); + goto bad; + } + + bm->read_only = false; + + return bm; + +bad: + return ERR_PTR(r); } EXPORT_SYMBOL_GPL(dm_block_manager_create); void dm_block_manager_destroy(struct dm_block_manager *bm) { - return dm_bufio_client_destroy(to_bufio(bm)); + dm_bufio_client_destroy(bm->bufio); + kfree(bm); } EXPORT_SYMBOL_GPL(dm_block_manager_destroy); unsigned dm_bm_block_size(struct dm_block_manager *bm) { - return dm_bufio_get_block_size(to_bufio(bm)); + return dm_bufio_get_block_size(bm->bufio); } EXPORT_SYMBOL_GPL(dm_bm_block_size); dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm) { - return dm_bufio_get_device_size(to_bufio(bm)); + return dm_bufio_get_device_size(bm->bufio); } static int dm_bm_validate_buffer(struct dm_block_manager *bm, @@ -406,7 +427,7 @@ static int dm_bm_validate_buffer(struct dm_block_manager *bm, int r; if (!v) return 0; - r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(to_bufio(bm))); + r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio)); if (unlikely(r)) return r; aux->validator = v; @@ -430,7 +451,7 @@ int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b, void *p; int r; - p = dm_bufio_read(to_bufio(bm), b, (struct dm_buffer **) result); + p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result); if (unlikely(IS_ERR(p))) return PTR_ERR(p); @@ -463,7 +484,10 @@ int dm_bm_write_lock(struct dm_block_manager *bm, void *p; int r; - p = dm_bufio_read(to_bufio(bm), b, (struct dm_buffer **) result); + if (bm->read_only) + return -EPERM; + + p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result); if (unlikely(IS_ERR(p))) 
return PTR_ERR(p); @@ -496,7 +520,7 @@ int dm_bm_read_try_lock(struct dm_block_manager *bm, void *p; int r; - p = dm_bufio_get(to_bufio(bm), b, (struct dm_buffer **) result); + p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result); if (unlikely(IS_ERR(p))) return PTR_ERR(p); if (unlikely(!p)) @@ -529,7 +553,10 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, struct buffer_aux *aux; void *p; - p = dm_bufio_new(to_bufio(bm), b, (struct dm_buffer **) result); + if (bm->read_only) + return -EPERM; + + p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result); if (unlikely(IS_ERR(p))) return PTR_ERR(p); @@ -547,6 +574,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, return 0; } +EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero); int dm_bm_unlock(struct dm_block *b) { @@ -565,45 +593,30 @@ int dm_bm_unlock(struct dm_block *b) } EXPORT_SYMBOL_GPL(dm_bm_unlock); -int dm_bm_unlock_move(struct dm_block *b, dm_block_t n) -{ - struct buffer_aux *aux; - - aux = dm_bufio_get_aux_data(to_buffer(b)); - - if (aux->write_locked) { - dm_bufio_mark_buffer_dirty(to_buffer(b)); - bl_up_write(&aux->lock); - } else - bl_up_read(&aux->lock); - - dm_bufio_release_move(to_buffer(b), n); - return 0; -} - int dm_bm_flush_and_unlock(struct dm_block_manager *bm, struct dm_block *superblock) { int r; - r = dm_bufio_write_dirty_buffers(to_bufio(bm)); - if (unlikely(r)) - return r; - r = dm_bufio_issue_flush(to_bufio(bm)); - if (unlikely(r)) + if (bm->read_only) + return -EPERM; + + r = dm_bufio_write_dirty_buffers(bm->bufio); + if (unlikely(r)) { + dm_bm_unlock(superblock); return r; + } dm_bm_unlock(superblock); - r = dm_bufio_write_dirty_buffers(to_bufio(bm)); - if (unlikely(r)) - return r; - r = dm_bufio_issue_flush(to_bufio(bm)); - if (unlikely(r)) - return r; + return dm_bufio_write_dirty_buffers(bm->bufio); +} - return 0; +void dm_bm_set_read_only(struct dm_block_manager *bm) +{ + bm->read_only = true; } +EXPORT_SYMBOL_GPL(dm_bm_set_read_only); u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor) { diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h index 924833d2dfa6..be5bff61be28 100644 --- a/drivers/md/persistent-data/dm-block-manager.h +++ b/drivers/md/persistent-data/dm-block-manager.h @@ -97,14 +97,6 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm, dm_block_t b, int dm_bm_unlock(struct dm_block *b); /* - * An optimisation; we often want to copy a block's contents to a new - * block. eg, as part of the shadowing operation. It's far better for - * bufio to do this move behind the scenes than hold 2 locks and memcpy the - * data. - */ -int dm_bm_unlock_move(struct dm_block *b, dm_block_t n); - -/* * It's a common idiom to have a superblock that should be committed last. * * @superblock should be write-locked on entry. It will be unlocked during @@ -116,6 +108,19 @@ int dm_bm_unlock_move(struct dm_block *b, dm_block_t n); int dm_bm_flush_and_unlock(struct dm_block_manager *bm, struct dm_block *superblock); +/* + * Switches the bm to a read only mode. Once read-only mode + * has been entered the following functions will return -EPERM. + * + * dm_bm_write_lock + * dm_bm_write_lock_zero + * dm_bm_flush_and_unlock + * + * Additionally you should not use dm_bm_unlock_move, however no error will + * be returned if you do. 
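A hypothetical caller of the read-only behaviour documented above: once dm_bm_set_read_only() has been called, the write-side entry points return -EPERM, which a client should treat as "metadata is read-only" rather than as a device failure (the function and variable names here are illustrative only):

static int try_write_block(struct dm_block_manager *bm, dm_block_t b,
                           struct dm_block_validator *v)
{
        struct dm_block *blk;
        int r;

        r = dm_bm_write_lock(bm, b, v, &blk);
        if (r == -EPERM)
                return r;       /* read-only mode: degrade gracefully */
        if (r)
                return r;       /* genuine error (validation, I/O, ...) */

        /* ... modify dm_block_data(blk) here ... */

        return dm_bm_unlock(blk);
}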
+ */ +void dm_bm_set_read_only(struct dm_block_manager *bm); + u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor); /*----------------------------------------------------------------*/ diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c deleted file mode 100644 index fc90c11620ad..000000000000 --- a/drivers/md/persistent-data/dm-space-map-checker.c +++ /dev/null @@ -1,446 +0,0 @@ -/* - * Copyright (C) 2011 Red Hat, Inc. - * - * This file is released under the GPL. - */ - -#include "dm-space-map-checker.h" - -#include <linux/device-mapper.h> -#include <linux/export.h> -#include <linux/vmalloc.h> - -#ifdef CONFIG_DM_DEBUG_SPACE_MAPS - -#define DM_MSG_PREFIX "space map checker" - -/*----------------------------------------------------------------*/ - -struct count_array { - dm_block_t nr; - dm_block_t nr_free; - - uint32_t *counts; -}; - -static int ca_get_count(struct count_array *ca, dm_block_t b, uint32_t *count) -{ - if (b >= ca->nr) - return -EINVAL; - - *count = ca->counts[b]; - return 0; -} - -static int ca_count_more_than_one(struct count_array *ca, dm_block_t b, int *r) -{ - if (b >= ca->nr) - return -EINVAL; - - *r = ca->counts[b] > 1; - return 0; -} - -static int ca_set_count(struct count_array *ca, dm_block_t b, uint32_t count) -{ - uint32_t old_count; - - if (b >= ca->nr) - return -EINVAL; - - old_count = ca->counts[b]; - - if (!count && old_count) - ca->nr_free++; - - else if (count && !old_count) - ca->nr_free--; - - ca->counts[b] = count; - return 0; -} - -static int ca_inc_block(struct count_array *ca, dm_block_t b) -{ - if (b >= ca->nr) - return -EINVAL; - - ca_set_count(ca, b, ca->counts[b] + 1); - return 0; -} - -static int ca_dec_block(struct count_array *ca, dm_block_t b) -{ - if (b >= ca->nr) - return -EINVAL; - - BUG_ON(ca->counts[b] == 0); - ca_set_count(ca, b, ca->counts[b] - 1); - return 0; -} - -static int ca_create(struct count_array *ca, struct dm_space_map *sm) -{ - int r; - dm_block_t nr_blocks; - - r = dm_sm_get_nr_blocks(sm, &nr_blocks); - if (r) - return r; - - ca->nr = nr_blocks; - ca->nr_free = nr_blocks; - - if (!nr_blocks) - ca->counts = NULL; - else { - ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks); - if (!ca->counts) - return -ENOMEM; - } - - return 0; -} - -static void ca_destroy(struct count_array *ca) -{ - vfree(ca->counts); -} - -static int ca_load(struct count_array *ca, struct dm_space_map *sm) -{ - int r; - uint32_t count; - dm_block_t nr_blocks, i; - - r = dm_sm_get_nr_blocks(sm, &nr_blocks); - if (r) - return r; - - BUG_ON(ca->nr != nr_blocks); - - DMWARN("Loading debug space map from disk. 
This may take some time"); - for (i = 0; i < nr_blocks; i++) { - r = dm_sm_get_count(sm, i, &count); - if (r) { - DMERR("load failed"); - return r; - } - - ca_set_count(ca, i, count); - } - DMWARN("Load complete"); - - return 0; -} - -static int ca_extend(struct count_array *ca, dm_block_t extra_blocks) -{ - dm_block_t nr_blocks = ca->nr + extra_blocks; - uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks); - if (!counts) - return -ENOMEM; - - if (ca->counts) { - memcpy(counts, ca->counts, sizeof(*counts) * ca->nr); - ca_destroy(ca); - } - ca->nr = nr_blocks; - ca->nr_free += extra_blocks; - ca->counts = counts; - return 0; -} - -static int ca_commit(struct count_array *old, struct count_array *new) -{ - if (old->nr != new->nr) { - BUG_ON(old->nr > new->nr); - ca_extend(old, new->nr - old->nr); - } - - BUG_ON(old->nr != new->nr); - old->nr_free = new->nr_free; - memcpy(old->counts, new->counts, sizeof(*old->counts) * old->nr); - return 0; -} - -/*----------------------------------------------------------------*/ - -struct sm_checker { - struct dm_space_map sm; - - struct count_array old_counts; - struct count_array counts; - - struct dm_space_map *real_sm; -}; - -static void sm_checker_destroy(struct dm_space_map *sm) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - - dm_sm_destroy(smc->real_sm); - ca_destroy(&smc->old_counts); - ca_destroy(&smc->counts); - kfree(smc); -} - -static int sm_checker_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - int r = dm_sm_get_nr_blocks(smc->real_sm, count); - if (!r) - BUG_ON(smc->old_counts.nr != *count); - return r; -} - -static int sm_checker_get_nr_free(struct dm_space_map *sm, dm_block_t *count) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - int r = dm_sm_get_nr_free(smc->real_sm, count); - if (!r) { - /* - * Slow, but we know it's correct. 
- */ - dm_block_t b, n = 0; - for (b = 0; b < smc->old_counts.nr; b++) - if (smc->old_counts.counts[b] == 0 && - smc->counts.counts[b] == 0) - n++; - - if (n != *count) - DMERR("free block counts differ, checker %u, sm-disk:%u", - (unsigned) n, (unsigned) *count); - } - return r; -} - -static int sm_checker_new_block(struct dm_space_map *sm, dm_block_t *b) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - int r = dm_sm_new_block(smc->real_sm, b); - - if (!r) { - BUG_ON(*b >= smc->old_counts.nr); - BUG_ON(smc->old_counts.counts[*b] != 0); - BUG_ON(*b >= smc->counts.nr); - BUG_ON(smc->counts.counts[*b] != 0); - ca_set_count(&smc->counts, *b, 1); - } - - return r; -} - -static int sm_checker_inc_block(struct dm_space_map *sm, dm_block_t b) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - int r = dm_sm_inc_block(smc->real_sm, b); - int r2 = ca_inc_block(&smc->counts, b); - BUG_ON(r != r2); - return r; -} - -static int sm_checker_dec_block(struct dm_space_map *sm, dm_block_t b) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - int r = dm_sm_dec_block(smc->real_sm, b); - int r2 = ca_dec_block(&smc->counts, b); - BUG_ON(r != r2); - return r; -} - -static int sm_checker_get_count(struct dm_space_map *sm, dm_block_t b, uint32_t *result) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - uint32_t result2 = 0; - int r = dm_sm_get_count(smc->real_sm, b, result); - int r2 = ca_get_count(&smc->counts, b, &result2); - - BUG_ON(r != r2); - if (!r) - BUG_ON(*result != result2); - return r; -} - -static int sm_checker_count_more_than_one(struct dm_space_map *sm, dm_block_t b, int *result) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - int result2 = 0; - int r = dm_sm_count_is_more_than_one(smc->real_sm, b, result); - int r2 = ca_count_more_than_one(&smc->counts, b, &result2); - - BUG_ON(r != r2); - if (!r) - BUG_ON(!(*result) && result2); - return r; -} - -static int sm_checker_set_count(struct dm_space_map *sm, dm_block_t b, uint32_t count) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - uint32_t old_rc; - int r = dm_sm_set_count(smc->real_sm, b, count); - int r2; - - BUG_ON(b >= smc->counts.nr); - old_rc = smc->counts.counts[b]; - r2 = ca_set_count(&smc->counts, b, count); - BUG_ON(r != r2); - - return r; -} - -static int sm_checker_commit(struct dm_space_map *sm) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - int r; - - r = dm_sm_commit(smc->real_sm); - if (r) - return r; - - r = ca_commit(&smc->old_counts, &smc->counts); - if (r) - return r; - - return 0; -} - -static int sm_checker_extend(struct dm_space_map *sm, dm_block_t extra_blocks) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - int r = dm_sm_extend(smc->real_sm, extra_blocks); - if (r) - return r; - - return ca_extend(&smc->counts, extra_blocks); -} - -static int sm_checker_root_size(struct dm_space_map *sm, size_t *result) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - return dm_sm_root_size(smc->real_sm, result); -} - -static int sm_checker_copy_root(struct dm_space_map *sm, void *copy_to_here_le, size_t len) -{ - struct sm_checker *smc = container_of(sm, struct sm_checker, sm); - return dm_sm_copy_root(smc->real_sm, copy_to_here_le, len); -} - -/*----------------------------------------------------------------*/ - -static struct dm_space_map ops_ = { - .destroy = sm_checker_destroy, - .get_nr_blocks = sm_checker_get_nr_blocks, 
- .get_nr_free = sm_checker_get_nr_free, - .inc_block = sm_checker_inc_block, - .dec_block = sm_checker_dec_block, - .new_block = sm_checker_new_block, - .get_count = sm_checker_get_count, - .count_is_more_than_one = sm_checker_count_more_than_one, - .set_count = sm_checker_set_count, - .commit = sm_checker_commit, - .extend = sm_checker_extend, - .root_size = sm_checker_root_size, - .copy_root = sm_checker_copy_root -}; - -struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm) -{ - int r; - struct sm_checker *smc; - - if (IS_ERR_OR_NULL(sm)) - return ERR_PTR(-EINVAL); - - smc = kmalloc(sizeof(*smc), GFP_KERNEL); - if (!smc) - return ERR_PTR(-ENOMEM); - - memcpy(&smc->sm, &ops_, sizeof(smc->sm)); - r = ca_create(&smc->old_counts, sm); - if (r) { - kfree(smc); - return ERR_PTR(r); - } - - r = ca_create(&smc->counts, sm); - if (r) { - ca_destroy(&smc->old_counts); - kfree(smc); - return ERR_PTR(r); - } - - smc->real_sm = sm; - - r = ca_load(&smc->counts, sm); - if (r) { - ca_destroy(&smc->counts); - ca_destroy(&smc->old_counts); - kfree(smc); - return ERR_PTR(r); - } - - r = ca_commit(&smc->old_counts, &smc->counts); - if (r) { - ca_destroy(&smc->counts); - ca_destroy(&smc->old_counts); - kfree(smc); - return ERR_PTR(r); - } - - return &smc->sm; -} -EXPORT_SYMBOL_GPL(dm_sm_checker_create); - -struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm) -{ - int r; - struct sm_checker *smc; - - if (IS_ERR_OR_NULL(sm)) - return ERR_PTR(-EINVAL); - - smc = kmalloc(sizeof(*smc), GFP_KERNEL); - if (!smc) - return ERR_PTR(-ENOMEM); - - memcpy(&smc->sm, &ops_, sizeof(smc->sm)); - r = ca_create(&smc->old_counts, sm); - if (r) { - kfree(smc); - return ERR_PTR(r); - } - - r = ca_create(&smc->counts, sm); - if (r) { - ca_destroy(&smc->old_counts); - kfree(smc); - return ERR_PTR(r); - } - - smc->real_sm = sm; - return &smc->sm; -} -EXPORT_SYMBOL_GPL(dm_sm_checker_create_fresh); - -/*----------------------------------------------------------------*/ - -#else - -struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm) -{ - return sm; -} -EXPORT_SYMBOL_GPL(dm_sm_checker_create); - -struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm) -{ - return sm; -} -EXPORT_SYMBOL_GPL(dm_sm_checker_create_fresh); - -/*----------------------------------------------------------------*/ - -#endif diff --git a/drivers/md/persistent-data/dm-space-map-checker.h b/drivers/md/persistent-data/dm-space-map-checker.h deleted file mode 100644 index 444dccf6688c..000000000000 --- a/drivers/md/persistent-data/dm-space-map-checker.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (C) 2011 Red Hat, Inc. - * - * This file is released under the GPL. - */ - -#ifndef SNAPSHOTS_SPACE_MAP_CHECKER_H -#define SNAPSHOTS_SPACE_MAP_CHECKER_H - -#include "dm-space-map.h" - -/*----------------------------------------------------------------*/ - -/* - * This space map wraps a real on-disk space map, and verifies all of its - * operations. It uses a lot of memory, so only use if you have a specific - * problem that you're debugging. - * - * Ownership of @sm passes. 
- */ -struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm); -struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm); - -/*----------------------------------------------------------------*/ - -#endif diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c index ff3beed6ad2d..d77602d63c83 100644 --- a/drivers/md/persistent-data/dm-space-map-common.c +++ b/drivers/md/persistent-data/dm-space-map-common.c @@ -224,6 +224,7 @@ static int sm_ll_init(struct ll_disk *ll, struct dm_transaction_manager *tm) ll->nr_blocks = 0; ll->bitmap_root = 0; ll->ref_count_root = 0; + ll->bitmap_index_changed = false; return 0; } @@ -476,7 +477,15 @@ int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev) int sm_ll_commit(struct ll_disk *ll) { - return ll->commit(ll); + int r = 0; + + if (ll->bitmap_index_changed) { + r = ll->commit(ll); + if (!r) + ll->bitmap_index_changed = false; + } + + return r; } /*----------------------------------------------------------------*/ @@ -491,6 +500,7 @@ static int metadata_ll_load_ie(struct ll_disk *ll, dm_block_t index, static int metadata_ll_save_ie(struct ll_disk *ll, dm_block_t index, struct disk_index_entry *ie) { + ll->bitmap_index_changed = true; memcpy(ll->mi_le.index + index, ie, sizeof(*ie)); return 0; } diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h index 8f220821a9a9..b3078d5eda0c 100644 --- a/drivers/md/persistent-data/dm-space-map-common.h +++ b/drivers/md/persistent-data/dm-space-map-common.h @@ -78,6 +78,7 @@ struct ll_disk { open_index_fn open_index; max_index_entries_fn max_entries; commit_fn commit; + bool bitmap_index_changed:1; }; struct disk_sm_root { diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index 3d0ed5332883..f6d29e614ab7 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@ -4,7 +4,6 @@ * This file is released under the GPL. 
*/ -#include "dm-space-map-checker.h" #include "dm-space-map-common.h" #include "dm-space-map-disk.h" #include "dm-space-map.h" @@ -252,9 +251,8 @@ static struct dm_space_map ops = { .copy_root = sm_disk_copy_root }; -static struct dm_space_map *dm_sm_disk_create_real( - struct dm_transaction_manager *tm, - dm_block_t nr_blocks) +struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm, + dm_block_t nr_blocks) { int r; struct sm_disk *smd; @@ -285,27 +283,10 @@ bad: kfree(smd); return ERR_PTR(r); } - -struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm, - dm_block_t nr_blocks) -{ - struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks); - struct dm_space_map *smc; - - if (IS_ERR_OR_NULL(sm)) - return sm; - - smc = dm_sm_checker_create_fresh(sm); - if (IS_ERR(smc)) - dm_sm_destroy(sm); - - return smc; -} EXPORT_SYMBOL_GPL(dm_sm_disk_create); -static struct dm_space_map *dm_sm_disk_open_real( - struct dm_transaction_manager *tm, - void *root_le, size_t len) +struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm, + void *root_le, size_t len) { int r; struct sm_disk *smd; @@ -332,13 +313,6 @@ bad: kfree(smd); return ERR_PTR(r); } - -struct dm_space_map *dm_sm_disk_open(struct dm_transaction_manager *tm, - void *root_le, size_t len) -{ - return dm_sm_checker_create( - dm_sm_disk_open_real(tm, root_le, len)); -} EXPORT_SYMBOL_GPL(dm_sm_disk_open); /*----------------------------------------------------------------*/ diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c index e5604b32d91f..d247a35da3c6 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.c +++ b/drivers/md/persistent-data/dm-transaction-manager.c @@ -5,7 +5,6 @@ */ #include "dm-transaction-manager.h" #include "dm-space-map.h" -#include "dm-space-map-checker.h" #include "dm-space-map-disk.h" #include "dm-space-map-metadata.h" #include "dm-persistent-data-internal.h" @@ -220,13 +219,24 @@ static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig, if (r < 0) return r; - r = dm_bm_unlock_move(orig_block, new); - if (r < 0) { + /* + * It would be tempting to use dm_bm_unlock_move here, but some + * code, such as the space maps, keeps using the old data structures + * secure in the knowledge they won't be changed until the next + * transaction. Using unlock_move would force a synchronous read + * since the old block would no longer be in the cache. 
+ */ + r = dm_bm_write_lock_zero(tm->bm, new, v, result); + if (r) { dm_bm_unlock(orig_block); return r; } - return dm_bm_write_lock(tm->bm, new, v, result); + memcpy(dm_block_data(*result), dm_block_data(orig_block), + dm_bm_block_size(tm->bm)); + + dm_bm_unlock(orig_block); + return r; } int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig, @@ -311,98 +321,61 @@ struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm) static int dm_tm_create_internal(struct dm_block_manager *bm, dm_block_t sb_location, - struct dm_block_validator *sb_validator, - size_t root_offset, size_t root_max_len, struct dm_transaction_manager **tm, struct dm_space_map **sm, - struct dm_block **sblock, - int create) + int create, + void *sm_root, size_t sm_len) { int r; - struct dm_space_map *inner; - inner = dm_sm_metadata_init(); - if (IS_ERR(inner)) - return PTR_ERR(inner); + *sm = dm_sm_metadata_init(); + if (IS_ERR(*sm)) + return PTR_ERR(*sm); - *tm = dm_tm_create(bm, inner); + *tm = dm_tm_create(bm, *sm); if (IS_ERR(*tm)) { - dm_sm_destroy(inner); + dm_sm_destroy(*sm); return PTR_ERR(*tm); } if (create) { - r = dm_bm_write_lock_zero(dm_tm_get_bm(*tm), sb_location, - sb_validator, sblock); - if (r < 0) { - DMERR("couldn't lock superblock"); - goto bad1; - } - - r = dm_sm_metadata_create(inner, *tm, dm_bm_nr_blocks(bm), + r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm), sb_location); if (r) { DMERR("couldn't create metadata space map"); - goto bad2; - } - - *sm = dm_sm_checker_create(inner); - if (IS_ERR(*sm)) { - r = PTR_ERR(*sm); - goto bad2; + goto bad; } } else { - r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location, - sb_validator, sblock); - if (r < 0) { - DMERR("couldn't lock superblock"); - goto bad1; - } - - r = dm_sm_metadata_open(inner, *tm, - dm_block_data(*sblock) + root_offset, - root_max_len); + r = dm_sm_metadata_open(*sm, *tm, sm_root, sm_len); if (r) { DMERR("couldn't open metadata space map"); - goto bad2; - } - - *sm = dm_sm_checker_create(inner); - if (IS_ERR(*sm)) { - r = PTR_ERR(*sm); - goto bad2; + goto bad; } } return 0; -bad2: - dm_tm_unlock(*tm, *sblock); -bad1: +bad: dm_tm_destroy(*tm); - dm_sm_destroy(inner); + dm_sm_destroy(*sm); return r; } int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location, - struct dm_block_validator *sb_validator, struct dm_transaction_manager **tm, - struct dm_space_map **sm, struct dm_block **sblock) + struct dm_space_map **sm) { - return dm_tm_create_internal(bm, sb_location, sb_validator, - 0, 0, tm, sm, sblock, 1); + return dm_tm_create_internal(bm, sb_location, tm, sm, 1, NULL, 0); } EXPORT_SYMBOL_GPL(dm_tm_create_with_sm); int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location, - struct dm_block_validator *sb_validator, - size_t root_offset, size_t root_max_len, + void *sm_root, size_t root_len, struct dm_transaction_manager **tm, - struct dm_space_map **sm, struct dm_block **sblock) + struct dm_space_map **sm) { - return dm_tm_create_internal(bm, sb_location, sb_validator, root_offset, - root_max_len, tm, sm, sblock, 0); + return dm_tm_create_internal(bm, sb_location, tm, sm, 0, sm_root, root_len); } EXPORT_SYMBOL_GPL(dm_tm_open_with_sm); diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h index 6da784871db4..b5b139076ca5 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.h +++ b/drivers/md/persistent-data/dm-transaction-manager.h @@ -115,16 +115,17 @@ struct dm_block_manager 
*dm_tm_get_bm(struct dm_transaction_manager *tm); * * Returns a tm that has an open transaction to write the new disk sm. * Caller should store the new sm root and commit. + * + * The superblock location is passed so the metadata space map knows it + * shouldn't be used. */ int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location, - struct dm_block_validator *sb_validator, struct dm_transaction_manager **tm, - struct dm_space_map **sm, struct dm_block **sblock); + struct dm_space_map **sm); int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location, - struct dm_block_validator *sb_validator, - size_t root_offset, size_t root_max_len, + void *sm_root, size_t root_len, struct dm_transaction_manager **tm, - struct dm_space_map **sm, struct dm_block **sblock); + struct dm_space_map **sm); #endif /* _LINUX_DM_TRANSACTION_MANAGER_H */ diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 92144ed1ad46..4fdc04ef37c2 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -276,6 +276,7 @@ config TWL6030_PWM tristate "TWL6030 PWM (Pulse Width Modulator) Support" depends on TWL4030_CORE select HAVE_PWM + depends on !PWM default n help Say yes here if you want support for TWL6030 PWM. diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c index 0ba26fb12cf5..a447f4ec11fb 100644 --- a/drivers/mfd/timberdale.c +++ b/drivers/mfd/timberdale.c @@ -83,7 +83,7 @@ timberdale_xiic_platform_data = { static __devinitdata struct ocores_i2c_platform_data timberdale_ocores_platform_data = { - .regstep = 4, + .reg_shift = 2, .clock_khz = 62500, .devices = timberdale_i2c_board_info, .num_devices = ARRAY_SIZE(timberdale_i2c_board_info) diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 154f3ef07631..98a442da892a 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -64,6 +64,7 @@ config AB8500_PWM bool "AB8500 PWM support" depends on AB8500_CORE && ARCH_U8500 select HAVE_PWM + depends on !PWM help This driver exports functions to enable/disble/config/free Pulse Width Modulation in the Analog Baseband Chip AB8500. diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig new file mode 100644 index 000000000000..8fc3808d7a3e --- /dev/null +++ b/drivers/pwm/Kconfig @@ -0,0 +1,108 @@ +menuconfig PWM + bool "PWM Support" + depends on !MACH_JZ4740 && !PUV3_PWM + help + This enables PWM support through the generic PWM framework. + You only need to enable this, if you also want to enable + one or more of the PWM drivers below. + + If unsure, say N. + +if PWM + +config PWM_BFIN + tristate "Blackfin PWM support" + depends on BFIN_GPTIMERS + help + Generic PWM framework driver for Blackfin. + + To compile this driver as a module, choose M here: the module + will be called pwm-bfin. + +config PWM_IMX + tristate "i.MX pwm support" + depends on ARCH_MXC + help + Generic PWM framework driver for i.MX. + + To compile this driver as a module, choose M here: the module + will be called pwm-imx. + +config PWM_LPC32XX + tristate "LPC32XX PWM support" + depends on ARCH_LPC32XX + help + Generic PWM framework driver for LPC32XX. The LPC32XX SOC has two + PWM controllers. + + To compile this driver as a module, choose M here: the module + will be called pwm-lpc32xx. + +config PWM_MXS + tristate "Freescale MXS PWM support" + depends on ARCH_MXS && OF + select STMP_DEVICE + help + Generic PWM framework driver for Freescale MXS. + + To compile this driver as a module, choose M here: the module + will be called pwm-mxs. 
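All of the entries in this file build drivers for the same generic framework; a consumer reaches that framework through the long-standing pwm_request()/pwm_config()/pwm_enable() calls, which drivers/pwm/core.c below keeps working (the numeric-ID pwm_request() is retained for compatibility). A hypothetical in-kernel consumer, with made-up PWM number and periods:

#include <linux/err.h>
#include <linux/pwm.h>

static struct pwm_device *example_pwm;

/* Hypothetical consumer: 50% duty cycle on global PWM 0 (legacy numeric ID). */
static int example_pwm_start(void)
{
        int ret;

        example_pwm = pwm_request(0, "example");
        if (IS_ERR(example_pwm))
                return PTR_ERR(example_pwm);

        ret = pwm_config(example_pwm, 500000, 1000000); /* duty_ns, period_ns */
        if (ret)
                return ret;

        return pwm_enable(example_pwm);
}

static void example_pwm_stop(void)
{
        pwm_disable(example_pwm);
        pwm_free(example_pwm);
}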
+ +config PWM_PXA + tristate "PXA PWM support" + depends on ARCH_PXA + help + Generic PWM framework driver for PXA. + + To compile this driver as a module, choose M here: the module + will be called pwm-pxa. + +config PWM_SAMSUNG + tristate "Samsung pwm support" + depends on PLAT_SAMSUNG + help + Generic PWM framework driver for Samsung. + + To compile this driver as a module, choose M here: the module + will be called pwm-samsung. + +config PWM_TEGRA + tristate "NVIDIA Tegra PWM support" + depends on ARCH_TEGRA + help + Generic PWM framework driver for the PWFM controller found on NVIDIA + Tegra SoCs. + + To compile this driver as a module, choose M here: the module + will be called pwm-tegra. + +config PWM_TIECAP + tristate "ECAP PWM support" + depends on SOC_AM33XX + help + PWM driver support for the ECAP APWM controller found on AM33XX + TI SOC + + To compile this driver as a module, choose M here: the module + will be called pwm-tiecap. + +config PWM_TIEHRPWM + tristate "EHRPWM PWM support" + depends on SOC_AM33XX + help + PWM driver support for the EHRPWM controller found on AM33XX + TI SOC + + To compile this driver as a module, choose M here: the module + will be called pwm-tiehrpwm. + +config PWM_VT8500 + tristate "vt8500 pwm support" + depends on ARCH_VT8500 + help + Generic PWM framework driver for vt8500. + + To compile this driver as a module, choose M here: the module + will be called pwm-vt8500. + +endif diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile new file mode 100644 index 000000000000..e4b2c898964d --- /dev/null +++ b/drivers/pwm/Makefile @@ -0,0 +1,11 @@ +obj-$(CONFIG_PWM) += core.o +obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o +obj-$(CONFIG_PWM_IMX) += pwm-imx.o +obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o +obj-$(CONFIG_PWM_MXS) += pwm-mxs.o +obj-$(CONFIG_PWM_PXA) += pwm-pxa.o +obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o +obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o +obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o +obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o +obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c new file mode 100644 index 000000000000..ecb76909e946 --- /dev/null +++ b/drivers/pwm/core.c @@ -0,0 +1,713 @@ +/* + * Generic pwmlib implementation + * + * Copyright (C) 2011 Sascha Hauer <s.hauer@pengutronix.de> + * Copyright (C) 2011-2012 Avionic Design GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include <linux/module.h> +#include <linux/pwm.h> +#include <linux/radix-tree.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +#define MAX_PWMS 1024 + +static DEFINE_MUTEX(pwm_lookup_lock); +static LIST_HEAD(pwm_lookup_list); +static DEFINE_MUTEX(pwm_lock); +static LIST_HEAD(pwm_chips); +static DECLARE_BITMAP(allocated_pwms, MAX_PWMS); +static RADIX_TREE(pwm_tree, GFP_KERNEL); + +static struct pwm_device *pwm_to_device(unsigned int pwm) +{ + return radix_tree_lookup(&pwm_tree, pwm); +} + +static int alloc_pwms(int pwm, unsigned int count) +{ + unsigned int from = 0; + unsigned int start; + + if (pwm >= MAX_PWMS) + return -EINVAL; + + if (pwm >= 0) + from = pwm; + + start = bitmap_find_next_zero_area(allocated_pwms, MAX_PWMS, from, + count, 0); + + if (pwm >= 0 && start != pwm) + return -EEXIST; + + if (start + count > MAX_PWMS) + return -ENOSPC; + + return start; +} + +static void free_pwms(struct pwm_chip *chip) +{ + unsigned int i; + + for (i = 0; i < chip->npwm; i++) { + struct pwm_device *pwm = &chip->pwms[i]; + radix_tree_delete(&pwm_tree, pwm->pwm); + } + + bitmap_clear(allocated_pwms, chip->base, chip->npwm); + + kfree(chip->pwms); + chip->pwms = NULL; +} + +static struct pwm_chip *pwmchip_find_by_name(const char *name) +{ + struct pwm_chip *chip; + + if (!name) + return NULL; + + mutex_lock(&pwm_lock); + + list_for_each_entry(chip, &pwm_chips, list) { + const char *chip_name = dev_name(chip->dev); + + if (chip_name && strcmp(chip_name, name) == 0) { + mutex_unlock(&pwm_lock); + return chip; + } + } + + mutex_unlock(&pwm_lock); + + return NULL; +} + +static int pwm_device_request(struct pwm_device *pwm, const char *label) +{ + int err; + + if (test_bit(PWMF_REQUESTED, &pwm->flags)) + return -EBUSY; + + if (!try_module_get(pwm->chip->ops->owner)) + return -ENODEV; + + if (pwm->chip->ops->request) { + err = pwm->chip->ops->request(pwm->chip, pwm); + if (err) { + module_put(pwm->chip->ops->owner); + return err; + } + } + + set_bit(PWMF_REQUESTED, &pwm->flags); + pwm->label = label; + + return 0; +} + +static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc, + const struct of_phandle_args *args) +{ + struct pwm_device *pwm; + + if (pc->of_pwm_n_cells < 2) + return ERR_PTR(-EINVAL); + + if (args->args[0] >= pc->npwm) + return ERR_PTR(-EINVAL); + + pwm = pwm_request_from_chip(pc, args->args[0], NULL); + if (IS_ERR(pwm)) + return pwm; + + pwm_set_period(pwm, args->args[1]); + + return pwm; +} + +void of_pwmchip_add(struct pwm_chip *chip) +{ + if (!chip->dev || !chip->dev->of_node) + return; + + if (!chip->of_xlate) { + chip->of_xlate = of_pwm_simple_xlate; + chip->of_pwm_n_cells = 2; + } + + of_node_get(chip->dev->of_node); +} + +void of_pwmchip_remove(struct pwm_chip *chip) +{ + if (chip->dev && chip->dev->of_node) + of_node_put(chip->dev->of_node); +} + +/** + * pwm_set_chip_data() - set private chip data for a PWM + * @pwm: PWM device + * @data: pointer to chip-specific data + */ +int pwm_set_chip_data(struct pwm_device *pwm, void *data) +{ + if (!pwm) + return -EINVAL; + + pwm->chip_data = data; + + return 0; +} + +/** + * pwm_get_chip_data() - get private chip data for a PWM + * @pwm: PWM device + */ +void *pwm_get_chip_data(struct pwm_device *pwm) +{ + return pwm ? pwm->chip_data : NULL; +} + +/** + * pwmchip_add() - register a new PWM chip + * @chip: the PWM chip to add + * + * Register a new PWM chip. 
If chip->base < 0 then a dynamically assigned base + * will be used. + */ +int pwmchip_add(struct pwm_chip *chip) +{ + struct pwm_device *pwm; + unsigned int i; + int ret; + + if (!chip || !chip->dev || !chip->ops || !chip->ops->config || + !chip->ops->enable || !chip->ops->disable) + return -EINVAL; + + mutex_lock(&pwm_lock); + + ret = alloc_pwms(chip->base, chip->npwm); + if (ret < 0) + goto out; + + chip->pwms = kzalloc(chip->npwm * sizeof(*pwm), GFP_KERNEL); + if (!chip->pwms) { + ret = -ENOMEM; + goto out; + } + + chip->base = ret; + + for (i = 0; i < chip->npwm; i++) { + pwm = &chip->pwms[i]; + + pwm->chip = chip; + pwm->pwm = chip->base + i; + pwm->hwpwm = i; + + radix_tree_insert(&pwm_tree, pwm->pwm, pwm); + } + + bitmap_set(allocated_pwms, chip->base, chip->npwm); + + INIT_LIST_HEAD(&chip->list); + list_add(&chip->list, &pwm_chips); + + ret = 0; + + if (IS_ENABLED(CONFIG_OF)) + of_pwmchip_add(chip); + +out: + mutex_unlock(&pwm_lock); + return ret; +} +EXPORT_SYMBOL_GPL(pwmchip_add); + +/** + * pwmchip_remove() - remove a PWM chip + * @chip: the PWM chip to remove + * + * Removes a PWM chip. This function may return busy if the PWM chip provides + * a PWM device that is still requested. + */ +int pwmchip_remove(struct pwm_chip *chip) +{ + unsigned int i; + int ret = 0; + + mutex_lock(&pwm_lock); + + for (i = 0; i < chip->npwm; i++) { + struct pwm_device *pwm = &chip->pwms[i]; + + if (test_bit(PWMF_REQUESTED, &pwm->flags)) { + ret = -EBUSY; + goto out; + } + } + + list_del_init(&chip->list); + + if (IS_ENABLED(CONFIG_OF)) + of_pwmchip_remove(chip); + + free_pwms(chip); + +out: + mutex_unlock(&pwm_lock); + return ret; +} +EXPORT_SYMBOL_GPL(pwmchip_remove); + +/** + * pwm_request() - request a PWM device + * @pwm_id: global PWM device index + * @label: PWM device label + * + * This function is deprecated, use pwm_get() instead. + */ +struct pwm_device *pwm_request(int pwm, const char *label) +{ + struct pwm_device *dev; + int err; + + if (pwm < 0 || pwm >= MAX_PWMS) + return ERR_PTR(-EINVAL); + + mutex_lock(&pwm_lock); + + dev = pwm_to_device(pwm); + if (!dev) { + dev = ERR_PTR(-EPROBE_DEFER); + goto out; + } + + err = pwm_device_request(dev, label); + if (err < 0) + dev = ERR_PTR(err); + +out: + mutex_unlock(&pwm_lock); + + return dev; +} +EXPORT_SYMBOL_GPL(pwm_request); + +/** + * pwm_request_from_chip() - request a PWM device relative to a PWM chip + * @chip: PWM chip + * @index: per-chip index of the PWM to request + * @label: a literal description string of this PWM + * + * Returns the PWM at the given index of the given PWM chip. A negative error + * code is returned if the index is not valid for the specified PWM chip or + * if the PWM device cannot be requested. + */ +struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, + unsigned int index, + const char *label) +{ + struct pwm_device *pwm; + int err; + + if (!chip || index >= chip->npwm) + return ERR_PTR(-EINVAL); + + mutex_lock(&pwm_lock); + pwm = &chip->pwms[index]; + + err = pwm_device_request(pwm, label); + if (err < 0) + pwm = ERR_PTR(err); + + mutex_unlock(&pwm_lock); + return pwm; +} +EXPORT_SYMBOL_GPL(pwm_request_from_chip); + +/** + * pwm_free() - free a PWM device + * @pwm: PWM device + * + * This function is deprecated, use pwm_put() instead. 
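pwmchip_add() above rejects a chip that does not supply config, enable and disable callbacks, allocates one pwm_device per channel, and hands out a dynamically assigned number range when chip->base is negative. The drivers added later in this series all follow the same registration pattern; the sketch below is a hypothetical, stripped-down provider (all foo_* names invented) that shows just this boilerplate:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>

struct foo_pwm_chip {
	struct pwm_chip chip;
	void __iomem *base;		/* hypothetical register window */
};

static int foo_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
			  int duty_ns, int period_ns)
{
	/* program the hardware duty/period registers here */
	return 0;
}

static int foo_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	/* start the output */
	return 0;
}

static void foo_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
	/* stop the output */
}

static const struct pwm_ops foo_pwm_ops = {
	.config		= foo_pwm_config,	/* mandatory */
	.enable		= foo_pwm_enable,	/* mandatory */
	.disable	= foo_pwm_disable,	/* mandatory */
	.owner		= THIS_MODULE,
};

static int foo_pwm_probe(struct platform_device *pdev)
{
	struct foo_pwm_chip *foo;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->chip.dev = &pdev->dev;
	foo->chip.ops = &foo_pwm_ops;
	foo->chip.base = -1;		/* let alloc_pwms() pick a free range */
	foo->chip.npwm = 1;

	platform_set_drvdata(pdev, foo);

	return pwmchip_add(&foo->chip);
}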
+ */ +void pwm_free(struct pwm_device *pwm) +{ + pwm_put(pwm); +} +EXPORT_SYMBOL_GPL(pwm_free); + +/** + * pwm_config() - change a PWM device configuration + * @pwm: PWM device + * @duty_ns: "on" time (in nanoseconds) + * @period_ns: duration (in nanoseconds) of one cycle + */ +int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) +{ + if (!pwm || period_ns == 0 || duty_ns > period_ns) + return -EINVAL; + + return pwm->chip->ops->config(pwm->chip, pwm, duty_ns, period_ns); +} +EXPORT_SYMBOL_GPL(pwm_config); + +/** + * pwm_enable() - start a PWM output toggling + * @pwm: PWM device + */ +int pwm_enable(struct pwm_device *pwm) +{ + if (pwm && !test_and_set_bit(PWMF_ENABLED, &pwm->flags)) + return pwm->chip->ops->enable(pwm->chip, pwm); + + return pwm ? 0 : -EINVAL; +} +EXPORT_SYMBOL_GPL(pwm_enable); + +/** + * pwm_disable() - stop a PWM output toggling + * @pwm: PWM device + */ +void pwm_disable(struct pwm_device *pwm) +{ + if (pwm && test_and_clear_bit(PWMF_ENABLED, &pwm->flags)) + pwm->chip->ops->disable(pwm->chip, pwm); +} +EXPORT_SYMBOL_GPL(pwm_disable); + +static struct pwm_chip *of_node_to_pwmchip(struct device_node *np) +{ + struct pwm_chip *chip; + + mutex_lock(&pwm_lock); + + list_for_each_entry(chip, &pwm_chips, list) + if (chip->dev && chip->dev->of_node == np) { + mutex_unlock(&pwm_lock); + return chip; + } + + mutex_unlock(&pwm_lock); + + return ERR_PTR(-EPROBE_DEFER); +} + +/** + * of_pwm_request() - request a PWM via the PWM framework + * @np: device node to get the PWM from + * @con_id: consumer name + * + * Returns the PWM device parsed from the phandle and index specified in the + * "pwms" property of a device tree node or a negative error-code on failure. + * Values parsed from the device tree are stored in the returned PWM device + * object. + * + * If con_id is NULL, the first PWM device listed in the "pwms" property will + * be requested. Otherwise the "pwm-names" property is used to do a reverse + * lookup of the PWM index. This also means that the "pwm-names" property + * becomes mandatory for devices that look up the PWM device via the con_id + * parameter. + */ +static struct pwm_device *of_pwm_request(struct device_node *np, + const char *con_id) +{ + struct pwm_device *pwm = NULL; + struct of_phandle_args args; + struct pwm_chip *pc; + int index = 0; + int err; + + if (con_id) { + index = of_property_match_string(np, "pwm-names", con_id); + if (index < 0) + return ERR_PTR(index); + } + + err = of_parse_phandle_with_args(np, "pwms", "#pwm-cells", index, + &args); + if (err) { + pr_debug("%s(): can't parse \"pwms\" property\n", __func__); + return ERR_PTR(err); + } + + pc = of_node_to_pwmchip(args.np); + if (IS_ERR(pc)) { + pr_debug("%s(): PWM chip not found\n", __func__); + pwm = ERR_CAST(pc); + goto put; + } + + if (args.args_count != pc->of_pwm_n_cells) { + pr_debug("%s: wrong #pwm-cells for %s\n", np->full_name, + args.np->full_name); + pwm = ERR_PTR(-EINVAL); + goto put; + } + + pwm = pc->of_xlate(pc, &args); + if (IS_ERR(pwm)) + goto put; + + /* + * If a consumer name was not given, try to look it up from the + * "pwm-names" property if it exists. Otherwise use the name of + * the user device node. 
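of_pwm_request() above resolves the consumer's "pwms" phandle; with the default of_pwm_simple_xlate() the specifier carries two cells, the per-chip index and the period in nanoseconds (for instance pwms = <&pwm 2 5000000>), and an optional "pwm-names" property supplies the con_id. Assuming the pwm_get_period() and pwm_put() helpers from include/linux/pwm.h in this series, a sketch of a DT-instantiated consumer (node contents hypothetical) could look like:

static int example_dt_consumer_probe(struct platform_device *pdev)
{
	struct pwm_device *pwm;
	unsigned int period;
	int ret;

	/* "backlight" is matched against the node's pwm-names entries */
	pwm = pwm_get(&pdev->dev, "backlight");
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);	/* may be -EPROBE_DEFER */

	/* the period was taken from the second cell of the pwms specifier */
	period = pwm_get_period(pwm);

	ret = pwm_config(pwm, period / 2, period);
	if (!ret)
		ret = pwm_enable(pwm);
	if (ret)
		pwm_put(pwm);

	return ret;
}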
+ */ + if (!con_id) { + err = of_property_read_string_index(np, "pwm-names", index, + &con_id); + if (err < 0) + con_id = np->name; + } + + pwm->label = con_id; + +put: + of_node_put(args.np); + + return pwm; +} + +/** + * pwm_add_table() - register PWM device consumers + * @table: array of consumers to register + * @num: number of consumers in table + */ +void __init pwm_add_table(struct pwm_lookup *table, size_t num) +{ + mutex_lock(&pwm_lookup_lock); + + while (num--) { + list_add_tail(&table->list, &pwm_lookup_list); + table++; + } + + mutex_unlock(&pwm_lookup_lock); +} + +/** + * pwm_get() - look up and request a PWM device + * @dev: device for PWM consumer + * @con_id: consumer name + * + * Lookup is first attempted using DT. If the device was not instantiated from + * a device tree, a PWM chip and a relative index is looked up via a table + * supplied by board setup code (see pwm_add_table()). + * + * Once a PWM chip has been found the specified PWM device will be requested + * and is ready to be used. + */ +struct pwm_device *pwm_get(struct device *dev, const char *con_id) +{ + struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); + const char *dev_id = dev ? dev_name(dev): NULL; + struct pwm_chip *chip = NULL; + unsigned int index = 0; + unsigned int best = 0; + struct pwm_lookup *p; + unsigned int match; + + /* look up via DT first */ + if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) + return of_pwm_request(dev->of_node, con_id); + + /* + * We look up the provider in the static table typically provided by + * board setup code. We first try to lookup the consumer device by + * name. If the consumer device was passed in as NULL or if no match + * was found, we try to find the consumer by directly looking it up + * by name. + * + * If a match is found, the provider PWM chip is looked up by name + * and a PWM device is requested using the PWM device per-chip index. + * + * The lookup algorithm was shamelessly taken from the clock + * framework: + * + * We do slightly fuzzy matching here: + * An entry with a NULL ID is assumed to be a wildcard. + * If an entry has a device ID, it must match + * If an entry has a connection ID, it must match + * Then we take the most specific entry - with the following order + * of precedence: dev+con > dev only > con only. 
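For boards that are not instantiated from a device tree, pwm_get() falls back to the lookup table registered through pwm_add_table(), applying the precedence just described (dev_id plus con_id beats dev_id only, which beats con_id only). A hedged board-setup sketch, with every name hypothetical, might register such an entry as follows:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pwm.h>

static struct pwm_lookup board_pwm_lookup[] = {
	{
		.provider = "tegra-pwm",	/* dev_name() of the provider device */
		.index	  = 2,			/* per-chip PWM index */
		.dev_id	  = "pwm-backlight",	/* consumer device name, may be NULL */
		.con_id	  = "backlight",	/* consumer connection id, may be NULL */
	},
};

/* called from the machine's init hook (hypothetical) */
static void __init board_pwm_init(void)
{
	pwm_add_table(board_pwm_lookup, ARRAY_SIZE(board_pwm_lookup));
}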
+ */ + mutex_lock(&pwm_lookup_lock); + + list_for_each_entry(p, &pwm_lookup_list, list) { + match = 0; + + if (p->dev_id) { + if (!dev_id || strcmp(p->dev_id, dev_id)) + continue; + + match += 2; + } + + if (p->con_id) { + if (!con_id || strcmp(p->con_id, con_id)) + continue; + + match += 1; + } + + if (match > best) { + chip = pwmchip_find_by_name(p->provider); + index = p->index; + + if (match != 3) + best = match; + else + break; + } + } + + if (chip) + pwm = pwm_request_from_chip(chip, index, con_id ?: dev_id); + + mutex_unlock(&pwm_lookup_lock); + + return pwm; +} +EXPORT_SYMBOL_GPL(pwm_get); + +/** + * pwm_put() - release a PWM device + * @pwm: PWM device + */ +void pwm_put(struct pwm_device *pwm) +{ + if (!pwm) + return; + + mutex_lock(&pwm_lock); + + if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) { + pr_warning("PWM device already freed\n"); + goto out; + } + + if (pwm->chip->ops->free) + pwm->chip->ops->free(pwm->chip, pwm); + + pwm->label = NULL; + + module_put(pwm->chip->ops->owner); +out: + mutex_unlock(&pwm_lock); +} +EXPORT_SYMBOL_GPL(pwm_put); + +#ifdef CONFIG_DEBUG_FS +static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s) +{ + unsigned int i; + + for (i = 0; i < chip->npwm; i++) { + struct pwm_device *pwm = &chip->pwms[i]; + + seq_printf(s, " pwm-%-3d (%-20.20s):", i, pwm->label); + + if (test_bit(PWMF_REQUESTED, &pwm->flags)) + seq_printf(s, " requested"); + + if (test_bit(PWMF_ENABLED, &pwm->flags)) + seq_printf(s, " enabled"); + + seq_printf(s, "\n"); + } +} + +static void *pwm_seq_start(struct seq_file *s, loff_t *pos) +{ + mutex_lock(&pwm_lock); + s->private = ""; + + return seq_list_start(&pwm_chips, *pos); +} + +static void *pwm_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + s->private = "\n"; + + return seq_list_next(v, &pwm_chips, pos); +} + +static void pwm_seq_stop(struct seq_file *s, void *v) +{ + mutex_unlock(&pwm_lock); +} + +static int pwm_seq_show(struct seq_file *s, void *v) +{ + struct pwm_chip *chip = list_entry(v, struct pwm_chip, list); + + seq_printf(s, "%s%s/%s, %d PWM device%s\n", (char *)s->private, + chip->dev->bus ? chip->dev->bus->name : "no-bus", + dev_name(chip->dev), chip->npwm, + (chip->npwm != 1) ? "s" : ""); + + if (chip->ops->dbg_show) + chip->ops->dbg_show(chip, s); + else + pwm_dbg_show(chip, s); + + return 0; +} + +static const struct seq_operations pwm_seq_ops = { + .start = pwm_seq_start, + .next = pwm_seq_next, + .stop = pwm_seq_stop, + .show = pwm_seq_show, +}; + +static int pwm_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &pwm_seq_ops); +} + +static const struct file_operations pwm_debugfs_ops = { + .owner = THIS_MODULE, + .open = pwm_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init pwm_debugfs_init(void) +{ + debugfs_create_file("pwm", S_IFREG | S_IRUGO, NULL, NULL, + &pwm_debugfs_ops); + + return 0; +} + +subsys_initcall(pwm_debugfs_init); +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/pwm/pwm-bfin.c b/drivers/pwm/pwm-bfin.c new file mode 100644 index 000000000000..d53c4e7941ef --- /dev/null +++ b/drivers/pwm/pwm-bfin.c @@ -0,0 +1,162 @@ +/* + * Blackfin Pulse Width Modulation (PWM) core + * + * Copyright (c) 2011 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/slab.h> + +#include <asm/gptimers.h> +#include <asm/portmux.h> + +struct bfin_pwm_chip { + struct pwm_chip chip; +}; + +struct bfin_pwm { + unsigned short pin; +}; + +static const unsigned short pwm_to_gptimer_per[] = { + P_TMR0, P_TMR1, P_TMR2, P_TMR3, P_TMR4, P_TMR5, + P_TMR6, P_TMR7, P_TMR8, P_TMR9, P_TMR10, P_TMR11, +}; + +static int bfin_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct bfin_pwm *priv; + int ret; + + if (pwm->hwpwm >= ARRAY_SIZE(pwm_to_gptimer_per)) + return -EINVAL; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->pin = pwm_to_gptimer_per[pwm->hwpwm]; + + ret = peripheral_request(priv->pin, NULL); + if (ret) { + kfree(priv); + return ret; + } + + pwm_set_chip_data(pwm, priv); + + return 0; +} + +static void bfin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct bfin_pwm *priv = pwm_get_chip_data(pwm); + + if (priv) { + peripheral_free(priv->pin); + kfree(priv); + } +} + +static int bfin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct bfin_pwm *priv = pwm_get_chip_data(pwm); + unsigned long period, duty; + unsigned long long val; + + if (duty_ns < 0 || duty_ns > period_ns) + return -EINVAL; + + val = (unsigned long long)get_sclk() * period_ns; + do_div(val, NSEC_PER_SEC); + period = val; + + val = (unsigned long long)period * duty_ns; + do_div(val, period_ns); + duty = period - val; + + if (duty >= period) + duty = period - 1; + + set_gptimer_config(priv->pin, TIMER_MODE_PWM | TIMER_PERIOD_CNT); + set_gptimer_pwidth(priv->pin, duty); + set_gptimer_period(priv->pin, period); + + return 0; +} + +static int bfin_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct bfin_pwm *priv = pwm_get_chip_data(pwm); + + enable_gptimer(priv->pin); + + return 0; +} + +static void bfin_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct bfin_pwm *priv = pwm_get_chip_data(pwm); + + disable_gptimer(priv->pin); +} + +static struct pwm_ops bfin_pwm_ops = { + .request = bfin_pwm_request, + .free = bfin_pwm_free, + .config = bfin_pwm_config, + .enable = bfin_pwm_enable, + .disable = bfin_pwm_disable, + .owner = THIS_MODULE, +}; + +static int bfin_pwm_probe(struct platform_device *pdev) +{ + struct bfin_pwm_chip *pwm; + int ret; + + pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); + if (!pwm) { + dev_err(&pdev->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + platform_set_drvdata(pdev, pwm); + + pwm->chip.dev = &pdev->dev; + pwm->chip.ops = &bfin_pwm_ops; + pwm->chip.base = -1; + pwm->chip.npwm = 12; + + ret = pwmchip_add(&pwm->chip); + if (ret < 0) { + dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); + return ret; + } + + return 0; +} + +static int __devexit bfin_pwm_remove(struct platform_device *pdev) +{ + struct bfin_pwm_chip *pwm = platform_get_drvdata(pdev); + + return pwmchip_remove(&pwm->chip); +} + +static struct platform_driver bfin_pwm_driver = { + .driver = { + .name = "bfin-pwm", + }, + .probe = bfin_pwm_probe, + .remove = __devexit_p(bfin_pwm_remove), +}; + +module_platform_driver(bfin_pwm_driver); + +MODULE_LICENSE("GPL"); diff --git a/arch/arm/plat-mxc/pwm.c b/drivers/pwm/pwm-imx.c index c0cab2270dd1..2a0b35333972 100644 --- a/arch/arm/plat-mxc/pwm.c +++ b/drivers/pwm/pwm-imx.c @@ -39,33 +39,28 @@ #define MX3_PWMCR_CLKSRC_IPG (1 << 16) #define MX3_PWMCR_EN (1 << 0) - - 
-struct pwm_device { - struct list_head node; - struct platform_device *pdev; - - const char *label; +struct imx_chip { struct clk *clk; int clk_enabled; void __iomem *mmio_base; - unsigned int use_count; - unsigned int pwm_id; + struct pwm_chip chip; }; -int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) +#define to_imx_chip(chip) container_of(chip, struct imx_chip, chip) + +static int imx_pwm_config(struct pwm_chip *chip, + struct pwm_device *pwm, int duty_ns, int period_ns) { - if (pwm == NULL || period_ns == 0 || duty_ns > period_ns) - return -EINVAL; + struct imx_chip *imx = to_imx_chip(chip); if (!(cpu_is_mx1() || cpu_is_mx21())) { unsigned long long c; unsigned long period_cycles, duty_cycles, prescale; u32 cr; - c = clk_get_rate(pwm->clk); + c = clk_get_rate(imx->clk); c = c * period_ns; do_div(c, 1000000000); period_cycles = c; @@ -86,8 +81,8 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) else period_cycles = 0; - writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR); - writel(period_cycles, pwm->mmio_base + MX3_PWMPR); + writel(duty_cycles, imx->mmio_base + MX3_PWMSAR); + writel(period_cycles, imx->mmio_base + MX3_PWMPR); cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN | @@ -98,7 +93,7 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) else cr |= MX3_PWMCR_CLKSRC_IPG_HIGH; - writel(cr, pwm->mmio_base + MX3_PWMCR); + writel(cr, imx->mmio_base + MX3_PWMCR); } else if (cpu_is_mx1() || cpu_is_mx21()) { /* The PWM subsystem allows for exact frequencies. However, * I cannot connect a scope on my device to the PWM line and @@ -116,191 +111,120 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) * both the prescaler (/1 .. /128) and then by CLKSEL * (/2 .. /16). 
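On the MX1/MX21 path that follows, the duty setting is a pure ratio against whatever the period register currently holds. With illustrative numbers: if MX1_PWMP reads 1023 and a 25% duty cycle is requested (say duty_ns = 250000 of period_ns = 1000000), then p = 1023 * 250000 / 1000000 = 255 and the value written to MX1_PWMS is 1023 - 255 = 768.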
*/ - u32 max = readl(pwm->mmio_base + MX1_PWMP); + u32 max = readl(imx->mmio_base + MX1_PWMP); u32 p = max * duty_ns / period_ns; - writel(max - p, pwm->mmio_base + MX1_PWMS); + writel(max - p, imx->mmio_base + MX1_PWMS); } else { BUG(); } return 0; } -EXPORT_SYMBOL(pwm_config); -int pwm_enable(struct pwm_device *pwm) +static int imx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { + struct imx_chip *imx = to_imx_chip(chip); int rc = 0; - if (!pwm->clk_enabled) { - rc = clk_prepare_enable(pwm->clk); + if (!imx->clk_enabled) { + rc = clk_prepare_enable(imx->clk); if (!rc) - pwm->clk_enabled = 1; + imx->clk_enabled = 1; } return rc; } -EXPORT_SYMBOL(pwm_enable); -void pwm_disable(struct pwm_device *pwm) +static void imx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { - writel(0, pwm->mmio_base + MX3_PWMCR); + struct imx_chip *imx = to_imx_chip(chip); - if (pwm->clk_enabled) { - clk_disable_unprepare(pwm->clk); - pwm->clk_enabled = 0; - } -} -EXPORT_SYMBOL(pwm_disable); + writel(0, imx->mmio_base + MX3_PWMCR); -static DEFINE_MUTEX(pwm_lock); -static LIST_HEAD(pwm_list); - -struct pwm_device *pwm_request(int pwm_id, const char *label) -{ - struct pwm_device *pwm; - int found = 0; - - mutex_lock(&pwm_lock); - - list_for_each_entry(pwm, &pwm_list, node) { - if (pwm->pwm_id == pwm_id) { - found = 1; - break; - } + if (imx->clk_enabled) { + clk_disable_unprepare(imx->clk); + imx->clk_enabled = 0; } - - if (found) { - if (pwm->use_count == 0) { - pwm->use_count++; - pwm->label = label; - } else - pwm = ERR_PTR(-EBUSY); - } else - pwm = ERR_PTR(-ENOENT); - - mutex_unlock(&pwm_lock); - return pwm; } -EXPORT_SYMBOL(pwm_request); -void pwm_free(struct pwm_device *pwm) -{ - mutex_lock(&pwm_lock); - - if (pwm->use_count) { - pwm->use_count--; - pwm->label = NULL; - } else - pr_warning("PWM device already freed\n"); - - mutex_unlock(&pwm_lock); -} -EXPORT_SYMBOL(pwm_free); +static struct pwm_ops imx_pwm_ops = { + .enable = imx_pwm_enable, + .disable = imx_pwm_disable, + .config = imx_pwm_config, + .owner = THIS_MODULE, +}; -static int __devinit mxc_pwm_probe(struct platform_device *pdev) +static int __devinit imx_pwm_probe(struct platform_device *pdev) { - struct pwm_device *pwm; + struct imx_chip *imx; struct resource *r; int ret = 0; - pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL); - if (pwm == NULL) { + imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL); + if (imx == NULL) { dev_err(&pdev->dev, "failed to allocate memory\n"); return -ENOMEM; } - pwm->clk = clk_get(&pdev->dev, "pwm"); + imx->clk = devm_clk_get(&pdev->dev, "pwm"); - if (IS_ERR(pwm->clk)) { - ret = PTR_ERR(pwm->clk); - goto err_free; - } + if (IS_ERR(imx->clk)) + return PTR_ERR(imx->clk); - pwm->clk_enabled = 0; + imx->chip.ops = &imx_pwm_ops; + imx->chip.dev = &pdev->dev; + imx->chip.base = -1; + imx->chip.npwm = 1; - pwm->use_count = 0; - pwm->pwm_id = pdev->id; - pwm->pdev = pdev; + imx->clk_enabled = 0; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (r == NULL) { dev_err(&pdev->dev, "no memory resource defined\n"); - ret = -ENODEV; - goto err_free_clk; - } - - r = request_mem_region(r->start, resource_size(r), pdev->name); - if (r == NULL) { - dev_err(&pdev->dev, "failed to request memory resource\n"); - ret = -EBUSY; - goto err_free_clk; + return -ENODEV; } - pwm->mmio_base = ioremap(r->start, resource_size(r)); - if (pwm->mmio_base == NULL) { - dev_err(&pdev->dev, "failed to ioremap() registers\n"); - ret = -ENODEV; - goto err_free_mem; - } + imx->mmio_base = 
devm_request_and_ioremap(&pdev->dev, r); + if (imx->mmio_base == NULL) + return -EADDRNOTAVAIL; - mutex_lock(&pwm_lock); - list_add_tail(&pwm->node, &pwm_list); - mutex_unlock(&pwm_lock); + ret = pwmchip_add(&imx->chip); + if (ret < 0) + return ret; - platform_set_drvdata(pdev, pwm); + platform_set_drvdata(pdev, imx); return 0; - -err_free_mem: - release_mem_region(r->start, resource_size(r)); -err_free_clk: - clk_put(pwm->clk); -err_free: - kfree(pwm); - return ret; } -static int __devexit mxc_pwm_remove(struct platform_device *pdev) +static int __devexit imx_pwm_remove(struct platform_device *pdev) { - struct pwm_device *pwm; - struct resource *r; + struct imx_chip *imx; - pwm = platform_get_drvdata(pdev); - if (pwm == NULL) + imx = platform_get_drvdata(pdev); + if (imx == NULL) return -ENODEV; - mutex_lock(&pwm_lock); - list_del(&pwm->node); - mutex_unlock(&pwm_lock); - - iounmap(pwm->mmio_base); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(r->start, resource_size(r)); - - clk_put(pwm->clk); - - kfree(pwm); - return 0; + return pwmchip_remove(&imx->chip); } -static struct platform_driver mxc_pwm_driver = { +static struct platform_driver imx_pwm_driver = { .driver = { .name = "mxc_pwm", }, - .probe = mxc_pwm_probe, - .remove = __devexit_p(mxc_pwm_remove), + .probe = imx_pwm_probe, + .remove = __devexit_p(imx_pwm_remove), }; -static int __init mxc_pwm_init(void) +static int __init imx_pwm_init(void) { - return platform_driver_register(&mxc_pwm_driver); + return platform_driver_register(&imx_pwm_driver); } -arch_initcall(mxc_pwm_init); +arch_initcall(imx_pwm_init); -static void __exit mxc_pwm_exit(void) +static void __exit imx_pwm_exit(void) { - platform_driver_unregister(&mxc_pwm_driver); + platform_driver_unregister(&imx_pwm_driver); } -module_exit(mxc_pwm_exit); +module_exit(imx_pwm_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c new file mode 100644 index 000000000000..adb87f0c1633 --- /dev/null +++ b/drivers/pwm/pwm-lpc32xx.c @@ -0,0 +1,148 @@ +/* + * Copyright 2012 Alexandre Pereira da Silva <aletes.xgr@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2. 
+ * + */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/slab.h> + +struct lpc32xx_pwm_chip { + struct pwm_chip chip; + struct clk *clk; + void __iomem *base; +}; + +#define PWM_ENABLE (1 << 31) +#define PWM_RELOADV(x) (((x) & 0xFF) << 8) +#define PWM_DUTY(x) ((x) & 0xFF) + +#define to_lpc32xx_pwm_chip(_chip) \ + container_of(_chip, struct lpc32xx_pwm_chip, chip) + +static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip); + unsigned long long c; + int period_cycles, duty_cycles; + + c = clk_get_rate(lpc32xx->clk) / 256; + c = c * period_ns; + do_div(c, NSEC_PER_SEC); + + /* Handle high and low extremes */ + if (c == 0) + c = 1; + if (c > 255) + c = 0; /* 0 set division by 256 */ + period_cycles = c; + + c = 256 * duty_ns; + do_div(c, period_ns); + duty_cycles = c; + + writel(PWM_ENABLE | PWM_RELOADV(period_cycles) | PWM_DUTY(duty_cycles), + lpc32xx->base + (pwm->hwpwm << 2)); + + return 0; +} + +static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip); + + return clk_enable(lpc32xx->clk); +} + +static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip); + + writel(0, lpc32xx->base + (pwm->hwpwm << 2)); + clk_disable(lpc32xx->clk); +} + +static const struct pwm_ops lpc32xx_pwm_ops = { + .config = lpc32xx_pwm_config, + .enable = lpc32xx_pwm_enable, + .disable = lpc32xx_pwm_disable, + .owner = THIS_MODULE, +}; + +static int lpc32xx_pwm_probe(struct platform_device *pdev) +{ + struct lpc32xx_pwm_chip *lpc32xx; + struct resource *res; + int ret; + + lpc32xx = devm_kzalloc(&pdev->dev, sizeof(*lpc32xx), GFP_KERNEL); + if (!lpc32xx) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + lpc32xx->base = devm_request_and_ioremap(&pdev->dev, res); + if (!lpc32xx->base) + return -EADDRNOTAVAIL; + + lpc32xx->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(lpc32xx->clk)) + return PTR_ERR(lpc32xx->clk); + + lpc32xx->chip.dev = &pdev->dev; + lpc32xx->chip.ops = &lpc32xx_pwm_ops; + lpc32xx->chip.npwm = 2; + + ret = pwmchip_add(&lpc32xx->chip); + if (ret < 0) { + dev_err(&pdev->dev, "failed to add PWM chip, error %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, lpc32xx); + + return 0; +} + +static int __devexit lpc32xx_pwm_remove(struct platform_device *pdev) +{ + struct lpc32xx_pwm_chip *lpc32xx = platform_get_drvdata(pdev); + + clk_disable(lpc32xx->clk); + return pwmchip_remove(&lpc32xx->chip); +} + +static struct of_device_id lpc32xx_pwm_dt_ids[] = { + { .compatible = "nxp,lpc3220-pwm", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, lpc32xx_pwm_dt_ids); + +static struct platform_driver lpc32xx_pwm_driver = { + .driver = { + .name = "lpc32xx-pwm", + .of_match_table = of_match_ptr(lpc32xx_pwm_dt_ids), + }, + .probe = lpc32xx_pwm_probe, + .remove = __devexit_p(lpc32xx_pwm_remove), +}; +module_platform_driver(lpc32xx_pwm_driver); + +MODULE_ALIAS("platform:lpc32xx-pwm"); +MODULE_AUTHOR("Alexandre Pereira da Silva <aletes.xgr@gmail.com>"); +MODULE_DESCRIPTION("LPC32XX PWM Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-mxs.c 
b/drivers/pwm/pwm-mxs.c new file mode 100644 index 000000000000..e5852646f082 --- /dev/null +++ b/drivers/pwm/pwm-mxs.c @@ -0,0 +1,203 @@ +/* + * Copyright 2012 Freescale Semiconductor, Inc. + * + * The code contained herein is licensed under the GNU General Public + * License. You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/slab.h> +#include <linux/stmp_device.h> + +#define SET 0x4 +#define CLR 0x8 +#define TOG 0xc + +#define PWM_CTRL 0x0 +#define PWM_ACTIVE0 0x10 +#define PWM_PERIOD0 0x20 +#define PERIOD_PERIOD(p) ((p) & 0xffff) +#define PERIOD_PERIOD_MAX 0x10000 +#define PERIOD_ACTIVE_HIGH (3 << 16) +#define PERIOD_INACTIVE_LOW (2 << 18) +#define PERIOD_CDIV(div) (((div) & 0x7) << 20) +#define PERIOD_CDIV_MAX 8 + +struct mxs_pwm_chip { + struct pwm_chip chip; + struct device *dev; + struct clk *clk; + void __iomem *base; +}; + +#define to_mxs_pwm_chip(_chip) container_of(_chip, struct mxs_pwm_chip, chip) + +static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip); + int ret, div = 0; + unsigned int period_cycles, duty_cycles; + unsigned long rate; + unsigned long long c; + + rate = clk_get_rate(mxs->clk); + while (1) { + c = rate / (1 << div); + c = c * period_ns; + do_div(c, 1000000000); + if (c < PERIOD_PERIOD_MAX) + break; + div++; + if (div > PERIOD_CDIV_MAX) + return -EINVAL; + } + + period_cycles = c; + c *= duty_ns; + do_div(c, period_ns); + duty_cycles = c; + + /* + * If the PWM channel is disabled, make sure to turn on the clock + * before writing the register. Otherwise, keep it enabled. + */ + if (!test_bit(PWMF_ENABLED, &pwm->flags)) { + ret = clk_prepare_enable(mxs->clk); + if (ret) + return ret; + } + + writel(duty_cycles << 16, + mxs->base + PWM_ACTIVE0 + pwm->hwpwm * 0x20); + writel(PERIOD_PERIOD(period_cycles) | PERIOD_ACTIVE_HIGH | + PERIOD_INACTIVE_LOW | PERIOD_CDIV(div), + mxs->base + PWM_PERIOD0 + pwm->hwpwm * 0x20); + + /* + * If the PWM is not enabled, turn the clock off again to save power. 
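To make the divider search in mxs_pwm_config() above concrete with illustrative numbers: with a 24 MHz clock and period_ns = 10000000 (100 Hz), div = 0 would need 240000 counter cycles and div = 1 still 120000, both at or above PERIOD_PERIOD_MAX (65536), so the loop settles on div = 2 with period_cycles = 60000; a 50% duty request then yields duty_cycles = 30000 before the registers are written.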
+ */ + if (!test_bit(PWMF_ENABLED, &pwm->flags)) + clk_disable_unprepare(mxs->clk); + + return 0; +} + +static int mxs_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip); + int ret; + + ret = clk_prepare_enable(mxs->clk); + if (ret) + return ret; + + writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + SET); + + return 0; +} + +static void mxs_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct mxs_pwm_chip *mxs = to_mxs_pwm_chip(chip); + + writel(1 << pwm->hwpwm, mxs->base + PWM_CTRL + CLR); + + clk_disable_unprepare(mxs->clk); +} + +static const struct pwm_ops mxs_pwm_ops = { + .config = mxs_pwm_config, + .enable = mxs_pwm_enable, + .disable = mxs_pwm_disable, + .owner = THIS_MODULE, +}; + +static int mxs_pwm_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct mxs_pwm_chip *mxs; + struct resource *res; + struct pinctrl *pinctrl; + int ret; + + mxs = devm_kzalloc(&pdev->dev, sizeof(*mxs), GFP_KERNEL); + if (!mxs) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mxs->base = devm_request_and_ioremap(&pdev->dev, res); + if (!mxs->base) + return -EADDRNOTAVAIL; + + pinctrl = devm_pinctrl_get_select_default(&pdev->dev); + if (IS_ERR(pinctrl)) + return PTR_ERR(pinctrl); + + mxs->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(mxs->clk)) + return PTR_ERR(mxs->clk); + + mxs->chip.dev = &pdev->dev; + mxs->chip.ops = &mxs_pwm_ops; + mxs->chip.base = -1; + ret = of_property_read_u32(np, "fsl,pwm-number", &mxs->chip.npwm); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get pwm number: %d\n", ret); + return ret; + } + + ret = pwmchip_add(&mxs->chip); + if (ret < 0) { + dev_err(&pdev->dev, "failed to add pwm chip %d\n", ret); + return ret; + } + + mxs->dev = &pdev->dev; + platform_set_drvdata(pdev, mxs); + + stmp_reset_block(mxs->base); + + return 0; +} + +static int __devexit mxs_pwm_remove(struct platform_device *pdev) +{ + struct mxs_pwm_chip *mxs = platform_get_drvdata(pdev); + + return pwmchip_remove(&mxs->chip); +} + +static struct of_device_id mxs_pwm_dt_ids[] = { + { .compatible = "fsl,imx23-pwm", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mxs_pwm_dt_ids); + +static struct platform_driver mxs_pwm_driver = { + .driver = { + .name = "mxs-pwm", + .of_match_table = of_match_ptr(mxs_pwm_dt_ids), + }, + .probe = mxs_pwm_probe, + .remove = __devexit_p(mxs_pwm_remove), +}; +module_platform_driver(mxs_pwm_driver); + +MODULE_ALIAS("platform:mxs-pwm"); +MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); +MODULE_DESCRIPTION("Freescale MXS PWM Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c new file mode 100644 index 000000000000..bd5867a1c700 --- /dev/null +++ b/drivers/pwm/pwm-pxa.c @@ -0,0 +1,218 @@ +/* + * drivers/pwm/pwm-pxa.c + * + * simple driver for PWM (Pulse Width Modulator) controller + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * 2008-02-13 initial version + * eric miao <eric.miao@marvell.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/pwm.h> + +#include <asm/div64.h> + +#define HAS_SECONDARY_PWM 0x10 +#define PWM_ID_BASE(d) ((d) & 0xf) + +static const struct platform_device_id pwm_id_table[] = { + /* PWM has_secondary_pwm? */ + { "pxa25x-pwm", 0 }, + { "pxa27x-pwm", 0 | HAS_SECONDARY_PWM }, + { "pxa168-pwm", 1 }, + { "pxa910-pwm", 1 }, + { }, +}; +MODULE_DEVICE_TABLE(platform, pwm_id_table); + +/* PWM registers and bits definitions */ +#define PWMCR (0x00) +#define PWMDCR (0x04) +#define PWMPCR (0x08) + +#define PWMCR_SD (1 << 6) +#define PWMDCR_FD (1 << 10) + +struct pxa_pwm_chip { + struct pwm_chip chip; + struct device *dev; + + struct clk *clk; + int clk_enabled; + void __iomem *mmio_base; +}; + +static inline struct pxa_pwm_chip *to_pxa_pwm_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct pxa_pwm_chip, chip); +} + +/* + * period_ns = 10^9 * (PRESCALE + 1) * (PV + 1) / PWM_CLK_RATE + * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE + */ +static int pxa_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip); + unsigned long long c; + unsigned long period_cycles, prescale, pv, dc; + unsigned long offset; + int rc; + + if (period_ns == 0 || duty_ns > period_ns) + return -EINVAL; + + offset = pwm->hwpwm ? 0x10 : 0; + + c = clk_get_rate(pc->clk); + c = c * period_ns; + do_div(c, 1000000000); + period_cycles = c; + + if (period_cycles < 1) + period_cycles = 1; + prescale = (period_cycles - 1) / 1024; + pv = period_cycles / (prescale + 1) - 1; + + if (prescale > 63) + return -EINVAL; + + if (duty_ns == period_ns) + dc = PWMDCR_FD; + else + dc = (pv + 1) * duty_ns / period_ns; + + /* NOTE: the clock to PWM has to be enabled first + * before writing to the registers + */ + rc = clk_prepare_enable(pc->clk); + if (rc < 0) + return rc; + + writel(prescale, pc->mmio_base + offset + PWMCR); + writel(dc, pc->mmio_base + offset + PWMDCR); + writel(pv, pc->mmio_base + offset + PWMPCR); + + clk_disable_unprepare(pc->clk); + return 0; +} + +static int pxa_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip); + int rc = 0; + + if (!pc->clk_enabled) { + rc = clk_prepare_enable(pc->clk); + if (!rc) + pc->clk_enabled++; + } + return rc; +} + +static void pxa_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct pxa_pwm_chip *pc = to_pxa_pwm_chip(chip); + + if (pc->clk_enabled) { + clk_disable_unprepare(pc->clk); + pc->clk_enabled--; + } +} + +static struct pwm_ops pxa_pwm_ops = { + .config = pxa_pwm_config, + .enable = pxa_pwm_enable, + .disable = pxa_pwm_disable, + .owner = THIS_MODULE, +}; + +static int __devinit pwm_probe(struct platform_device *pdev) +{ + const struct platform_device_id *id = platform_get_device_id(pdev); + struct pxa_pwm_chip *pwm; + struct resource *r; + int ret = 0; + + pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); + if (pwm == NULL) { + dev_err(&pdev->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + pwm->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pwm->clk)) + return PTR_ERR(pwm->clk); + + pwm->clk_enabled = 0; + + pwm->chip.dev = &pdev->dev; + pwm->chip.ops = &pxa_pwm_ops; + pwm->chip.base = -1; + pwm->chip.npwm = 
(id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + + pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r); + if (pwm->mmio_base == NULL) + return -EADDRNOTAVAIL; + + ret = pwmchip_add(&pwm->chip); + if (ret < 0) { + dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, pwm); + return 0; +} + +static int __devexit pwm_remove(struct platform_device *pdev) +{ + struct pxa_pwm_chip *chip; + + chip = platform_get_drvdata(pdev); + if (chip == NULL) + return -ENODEV; + + return pwmchip_remove(&chip->chip); +} + +static struct platform_driver pwm_driver = { + .driver = { + .name = "pxa25x-pwm", + .owner = THIS_MODULE, + }, + .probe = pwm_probe, + .remove = __devexit_p(pwm_remove), + .id_table = pwm_id_table, +}; + +static int __init pwm_init(void) +{ + return platform_driver_register(&pwm_driver); +} +arch_initcall(pwm_init); + +static void __exit pwm_exit(void) +{ + platform_driver_unregister(&pwm_driver); +} +module_exit(pwm_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/arch/arm/plat-samsung/pwm.c b/drivers/pwm/pwm-samsung.c index d3583050fb05..d10386528c9c 100644 --- a/arch/arm/plat-samsung/pwm.c +++ b/drivers/pwm/pwm-samsung.c @@ -1,4 +1,4 @@ -/* arch/arm/plat-s3c/pwm.c +/* drivers/pwm/pwm-samsung.c * * Copyright (c) 2007 Ben Dooks * Copyright (c) 2008 Simtec Electronics @@ -11,6 +11,8 @@ * the Free Software Foundation; either version 2 of the License. */ +#define pr_fmt(fmt) "pwm-samsung: " fmt + #include <linux/export.h> #include <linux/kernel.h> #include <linux/platform_device.h> @@ -24,8 +26,7 @@ #include <plat/regs-timer.h> -struct pwm_device { - struct list_head list; +struct s3c_chip { struct platform_device *pdev; struct clk *clk_div; @@ -36,81 +37,36 @@ struct pwm_device { unsigned int duty_ns; unsigned char tcon_base; - unsigned char use_count; unsigned char pwm_id; + struct pwm_chip chip; }; +#define to_s3c_chip(chip) container_of(chip, struct s3c_chip, chip) + #define pwm_dbg(_pwm, msg...) 
dev_dbg(&(_pwm)->pdev->dev, msg) static struct clk *clk_scaler[2]; -static inline int pwm_is_tdiv(struct pwm_device *pwm) -{ - return clk_get_parent(pwm->clk) == pwm->clk_div; -} - -static DEFINE_MUTEX(pwm_lock); -static LIST_HEAD(pwm_list); - -struct pwm_device *pwm_request(int pwm_id, const char *label) -{ - struct pwm_device *pwm; - int found = 0; - - mutex_lock(&pwm_lock); - - list_for_each_entry(pwm, &pwm_list, list) { - if (pwm->pwm_id == pwm_id) { - found = 1; - break; - } - } - - if (found) { - if (pwm->use_count == 0) { - pwm->use_count = 1; - pwm->label = label; - } else - pwm = ERR_PTR(-EBUSY); - } else - pwm = ERR_PTR(-ENOENT); - - mutex_unlock(&pwm_lock); - return pwm; -} - -EXPORT_SYMBOL(pwm_request); - - -void pwm_free(struct pwm_device *pwm) +static inline int pwm_is_tdiv(struct s3c_chip *chip) { - mutex_lock(&pwm_lock); - - if (pwm->use_count) { - pwm->use_count--; - pwm->label = NULL; - } else - printk(KERN_ERR "PWM%d device already freed\n", pwm->pwm_id); - - mutex_unlock(&pwm_lock); + return clk_get_parent(chip->clk) == chip->clk_div; } -EXPORT_SYMBOL(pwm_free); - #define pwm_tcon_start(pwm) (1 << (pwm->tcon_base + 0)) #define pwm_tcon_invert(pwm) (1 << (pwm->tcon_base + 2)) #define pwm_tcon_autoreload(pwm) (1 << (pwm->tcon_base + 3)) #define pwm_tcon_manulupdate(pwm) (1 << (pwm->tcon_base + 1)) -int pwm_enable(struct pwm_device *pwm) +static int s3c_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { + struct s3c_chip *s3c = to_s3c_chip(chip); unsigned long flags; unsigned long tcon; local_irq_save(flags); tcon = __raw_readl(S3C2410_TCON); - tcon |= pwm_tcon_start(pwm); + tcon |= pwm_tcon_start(s3c); __raw_writel(tcon, S3C2410_TCON); local_irq_restore(flags); @@ -118,31 +74,28 @@ int pwm_enable(struct pwm_device *pwm) return 0; } -EXPORT_SYMBOL(pwm_enable); - -void pwm_disable(struct pwm_device *pwm) +static void s3c_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) { + struct s3c_chip *s3c = to_s3c_chip(chip); unsigned long flags; unsigned long tcon; local_irq_save(flags); tcon = __raw_readl(S3C2410_TCON); - tcon &= ~pwm_tcon_start(pwm); + tcon &= ~pwm_tcon_start(s3c); __raw_writel(tcon, S3C2410_TCON); local_irq_restore(flags); } -EXPORT_SYMBOL(pwm_disable); - -static unsigned long pwm_calc_tin(struct pwm_device *pwm, unsigned long freq) +static unsigned long pwm_calc_tin(struct s3c_chip *s3c, unsigned long freq) { unsigned long tin_parent_rate; unsigned int div; - tin_parent_rate = clk_get_rate(clk_get_parent(pwm->clk_div)); - pwm_dbg(pwm, "tin parent at %lu\n", tin_parent_rate); + tin_parent_rate = clk_get_rate(clk_get_parent(s3c->clk_div)); + pwm_dbg(s3c, "tin parent at %lu\n", tin_parent_rate); for (div = 2; div <= 16; div *= 2) { if ((tin_parent_rate / (div << 16)) < freq) @@ -154,8 +107,10 @@ static unsigned long pwm_calc_tin(struct pwm_device *pwm, unsigned long freq) #define NS_IN_HZ (1000000000UL) -int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) +static int s3c_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) { + struct s3c_chip *s3c = to_s3c_chip(chip); unsigned long tin_rate; unsigned long tin_ns; unsigned long period; @@ -174,38 +129,38 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) if (duty_ns > period_ns) return -EINVAL; - if (period_ns == pwm->period_ns && - duty_ns == pwm->duty_ns) + if (period_ns == s3c->period_ns && + duty_ns == s3c->duty_ns) return 0; /* The TCMP and TCNT can be read without a lock, they're not * shared between the timers. 
*/ - tcmp = __raw_readl(S3C2410_TCMPB(pwm->pwm_id)); - tcnt = __raw_readl(S3C2410_TCNTB(pwm->pwm_id)); + tcmp = __raw_readl(S3C2410_TCMPB(s3c->pwm_id)); + tcnt = __raw_readl(S3C2410_TCNTB(s3c->pwm_id)); period = NS_IN_HZ / period_ns; - pwm_dbg(pwm, "duty_ns=%d, period_ns=%d (%lu)\n", + pwm_dbg(s3c, "duty_ns=%d, period_ns=%d (%lu)\n", duty_ns, period_ns, period); /* Check to see if we are changing the clock rate of the PWM */ - if (pwm->period_ns != period_ns) { - if (pwm_is_tdiv(pwm)) { - tin_rate = pwm_calc_tin(pwm, period); - clk_set_rate(pwm->clk_div, tin_rate); + if (s3c->period_ns != period_ns) { + if (pwm_is_tdiv(s3c)) { + tin_rate = pwm_calc_tin(s3c, period); + clk_set_rate(s3c->clk_div, tin_rate); } else - tin_rate = clk_get_rate(pwm->clk); + tin_rate = clk_get_rate(s3c->clk); - pwm->period_ns = period_ns; + s3c->period_ns = period_ns; - pwm_dbg(pwm, "tin_rate=%lu\n", tin_rate); + pwm_dbg(s3c, "tin_rate=%lu\n", tin_rate); tin_ns = NS_IN_HZ / tin_rate; tcnt = period_ns / tin_ns; } else - tin_ns = NS_IN_HZ / clk_get_rate(pwm->clk); + tin_ns = NS_IN_HZ / clk_get_rate(s3c->clk); /* Note, counters count down */ @@ -216,7 +171,7 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) if (tcmp == tcnt) tcmp--; - pwm_dbg(pwm, "tin_ns=%lu, tcmp=%ld/%lu\n", tin_ns, tcmp, tcnt); + pwm_dbg(s3c, "tin_ns=%lu, tcmp=%ld/%lu\n", tin_ns, tcmp, tcnt); if (tcmp < 0) tcmp = 0; @@ -225,15 +180,15 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) local_irq_save(flags); - __raw_writel(tcmp, S3C2410_TCMPB(pwm->pwm_id)); - __raw_writel(tcnt, S3C2410_TCNTB(pwm->pwm_id)); + __raw_writel(tcmp, S3C2410_TCMPB(s3c->pwm_id)); + __raw_writel(tcnt, S3C2410_TCNTB(s3c->pwm_id)); tcon = __raw_readl(S3C2410_TCON); - tcon |= pwm_tcon_manulupdate(pwm); - tcon |= pwm_tcon_autoreload(pwm); + tcon |= pwm_tcon_manulupdate(s3c); + tcon |= pwm_tcon_autoreload(s3c); __raw_writel(tcon, S3C2410_TCON); - tcon &= ~pwm_tcon_manulupdate(pwm); + tcon &= ~pwm_tcon_manulupdate(s3c); __raw_writel(tcon, S3C2410_TCON); local_irq_restore(flags); @@ -241,24 +196,17 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) return 0; } -EXPORT_SYMBOL(pwm_config); - -static int pwm_register(struct pwm_device *pwm) -{ - pwm->duty_ns = -1; - pwm->period_ns = -1; - - mutex_lock(&pwm_lock); - list_add_tail(&pwm->list, &pwm_list); - mutex_unlock(&pwm_lock); - - return 0; -} +static struct pwm_ops s3c_pwm_ops = { + .enable = s3c_pwm_enable, + .disable = s3c_pwm_disable, + .config = s3c_pwm_config, + .owner = THIS_MODULE, +}; static int s3c_pwm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct pwm_device *pwm; + struct s3c_chip *s3c; unsigned long flags; unsigned long tcon; unsigned int id = pdev->id; @@ -269,83 +217,75 @@ static int s3c_pwm_probe(struct platform_device *pdev) return -ENXIO; } - pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL); - if (pwm == NULL) { + s3c = devm_kzalloc(&pdev->dev, sizeof(*s3c), GFP_KERNEL); + if (s3c == NULL) { dev_err(dev, "failed to allocate pwm_device\n"); return -ENOMEM; } - pwm->pdev = pdev; - pwm->pwm_id = id; - /* calculate base of control bits in TCON */ - pwm->tcon_base = id == 0 ? 0 : (id * 4) + 4; + s3c->tcon_base = id == 0 ? 
0 : (id * 4) + 4; + s3c->chip.ops = &s3c_pwm_ops; + s3c->chip.base = -1; + s3c->chip.npwm = 1; - pwm->clk = clk_get(dev, "pwm-tin"); - if (IS_ERR(pwm->clk)) { + s3c->clk = devm_clk_get(dev, "pwm-tin"); + if (IS_ERR(s3c->clk)) { dev_err(dev, "failed to get pwm tin clk\n"); - ret = PTR_ERR(pwm->clk); - goto err_alloc; + return PTR_ERR(s3c->clk); } - pwm->clk_div = clk_get(dev, "pwm-tdiv"); - if (IS_ERR(pwm->clk_div)) { + s3c->clk_div = devm_clk_get(dev, "pwm-tdiv"); + if (IS_ERR(s3c->clk_div)) { dev_err(dev, "failed to get pwm tdiv clk\n"); - ret = PTR_ERR(pwm->clk_div); - goto err_clk_tin; + return PTR_ERR(s3c->clk_div); } - clk_enable(pwm->clk); - clk_enable(pwm->clk_div); + clk_enable(s3c->clk); + clk_enable(s3c->clk_div); local_irq_save(flags); tcon = __raw_readl(S3C2410_TCON); - tcon |= pwm_tcon_invert(pwm); + tcon |= pwm_tcon_invert(s3c); __raw_writel(tcon, S3C2410_TCON); local_irq_restore(flags); - - ret = pwm_register(pwm); - if (ret) { + ret = pwmchip_add(&s3c->chip); + if (ret < 0) { dev_err(dev, "failed to register pwm\n"); goto err_clk_tdiv; } - pwm_dbg(pwm, "config bits %02x\n", - (__raw_readl(S3C2410_TCON) >> pwm->tcon_base) & 0x0f); + pwm_dbg(s3c, "config bits %02x\n", + (__raw_readl(S3C2410_TCON) >> s3c->tcon_base) & 0x0f); dev_info(dev, "tin at %lu, tdiv at %lu, tin=%sclk, base %d\n", - clk_get_rate(pwm->clk), - clk_get_rate(pwm->clk_div), - pwm_is_tdiv(pwm) ? "div" : "ext", pwm->tcon_base); + clk_get_rate(s3c->clk), + clk_get_rate(s3c->clk_div), + pwm_is_tdiv(s3c) ? "div" : "ext", s3c->tcon_base); - platform_set_drvdata(pdev, pwm); + platform_set_drvdata(pdev, s3c); return 0; err_clk_tdiv: - clk_disable(pwm->clk_div); - clk_disable(pwm->clk); - clk_put(pwm->clk_div); - - err_clk_tin: - clk_put(pwm->clk); - - err_alloc: - kfree(pwm); + clk_disable(s3c->clk_div); + clk_disable(s3c->clk); return ret; } static int __devexit s3c_pwm_remove(struct platform_device *pdev) { - struct pwm_device *pwm = platform_get_drvdata(pdev); + struct s3c_chip *s3c = platform_get_drvdata(pdev); + int err; + + err = pwmchip_remove(&s3c->chip); + if (err < 0) + return err; - clk_disable(pwm->clk_div); - clk_disable(pwm->clk); - clk_put(pwm->clk_div); - clk_put(pwm->clk); - kfree(pwm); + clk_disable(s3c->clk_div); + clk_disable(s3c->clk); return 0; } @@ -353,26 +293,26 @@ static int __devexit s3c_pwm_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int s3c_pwm_suspend(struct platform_device *pdev, pm_message_t state) { - struct pwm_device *pwm = platform_get_drvdata(pdev); + struct s3c_chip *s3c = platform_get_drvdata(pdev); /* No one preserve these values during suspend so reset them * Otherwise driver leaves PWM unconfigured if same values * passed to pwm_config */ - pwm->period_ns = 0; - pwm->duty_ns = 0; + s3c->period_ns = 0; + s3c->duty_ns = 0; return 0; } static int s3c_pwm_resume(struct platform_device *pdev) { - struct pwm_device *pwm = platform_get_drvdata(pdev); + struct s3c_chip *s3c = platform_get_drvdata(pdev); unsigned long tcon; /* Restore invertion */ tcon = __raw_readl(S3C2410_TCON); - tcon |= pwm_tcon_invert(pwm); + tcon |= pwm_tcon_invert(s3c); __raw_writel(tcon, S3C2410_TCON); return 0; @@ -402,13 +342,13 @@ static int __init pwm_init(void) clk_scaler[1] = clk_get(NULL, "pwm-scaler1"); if (IS_ERR(clk_scaler[0]) || IS_ERR(clk_scaler[1])) { - printk(KERN_ERR "%s: failed to get scaler clocks\n", __func__); + pr_err("failed to get scaler clocks\n"); return -EINVAL; } ret = platform_driver_register(&s3c_pwm_driver); if (ret) - printk(KERN_ERR "%s: failed to add pwm 
driver\n", __func__); + pr_err("failed to add pwm driver\n"); return ret; } diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c new file mode 100644 index 000000000000..02ce18d5e49a --- /dev/null +++ b/drivers/pwm/pwm-tegra.c @@ -0,0 +1,261 @@ +/* + * drivers/pwm/pwm-tegra.c + * + * Tegra pulse-width-modulation controller driver + * + * Copyright (c) 2010, NVIDIA Corporation. + * Based on arch/arm/plat-mxc/pwm.c by Sascha Hauer <s.hauer@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/pwm.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define PWM_ENABLE (1 << 31) +#define PWM_DUTY_WIDTH 8 +#define PWM_DUTY_SHIFT 16 +#define PWM_SCALE_WIDTH 13 +#define PWM_SCALE_SHIFT 0 + +#define NUM_PWM 4 + +struct tegra_pwm_chip { + struct pwm_chip chip; + struct device *dev; + + struct clk *clk; + + void __iomem *mmio_base; +}; + +static inline struct tegra_pwm_chip *to_tegra_pwm_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct tegra_pwm_chip, chip); +} + +static inline u32 pwm_readl(struct tegra_pwm_chip *chip, unsigned int num) +{ + return readl(chip->mmio_base + (num << 4)); +} + +static inline void pwm_writel(struct tegra_pwm_chip *chip, unsigned int num, + unsigned long val) +{ + writel(val, chip->mmio_base + (num << 4)); +} + +static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip); + unsigned long long c; + unsigned long rate, hz; + u32 val = 0; + int err; + + /* + * Convert from duty_ns / period_ns to a fixed number of duty ticks + * per (1 << PWM_DUTY_WIDTH) cycles and make sure to round to the + * nearest integer during division. + */ + c = duty_ns * ((1 << PWM_DUTY_WIDTH) - 1) + period_ns / 2; + do_div(c, period_ns); + + val = (u32)c << PWM_DUTY_SHIFT; + + /* + * Compute the prescaler value for which (1 << PWM_DUTY_WIDTH) + * cycles at the PWM clock rate will take period_ns nanoseconds. + */ + rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH; + hz = 1000000000ul / period_ns; + + rate = (rate + (hz / 2)) / hz; + + /* + * Since the actual PWM divider is the register's frequency divider + * field minus 1, we need to decrement to get the correct value to + * write to the register. + */ + if (rate > 0) + rate--; + + /* + * Make sure that the rate will fit in the register's frequency + * divider field. + */ + if (rate >> PWM_SCALE_WIDTH) + return -EINVAL; + + val |= rate << PWM_SCALE_SHIFT; + + /* + * If the PWM channel is disabled, make sure to turn on the clock + * before writing the register. Otherwise, keep it enabled. 
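Working through tegra_pwm_config() above with illustrative numbers: for a 26 MHz PWM clock, period_ns = 1000000 and duty_ns = 500000, the duty field becomes (500000 * 255 + 500000) / 1000000 = 128, the prescaler becomes ((26000000 >> 8) + 500) / 1000 = 102, decremented to 101 before the check against the 13-bit scale field, and the programmed period works out to 256 * 102 / 26 MHz, roughly 1.004 ms.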
+ */ + if (!test_bit(PWMF_ENABLED, &pwm->flags)) { + err = clk_prepare_enable(pc->clk); + if (err < 0) + return err; + } else + val |= PWM_ENABLE; + + pwm_writel(pc, pwm->hwpwm, val); + + /* + * If the PWM is not enabled, turn the clock off again to save power. + */ + if (!test_bit(PWMF_ENABLED, &pwm->flags)) + clk_disable_unprepare(pc->clk); + + return 0; +} + +static int tegra_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip); + int rc = 0; + u32 val; + + rc = clk_prepare_enable(pc->clk); + if (rc < 0) + return rc; + + val = pwm_readl(pc, pwm->hwpwm); + val |= PWM_ENABLE; + pwm_writel(pc, pwm->hwpwm, val); + + return 0; +} + +static void tegra_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip); + u32 val; + + val = pwm_readl(pc, pwm->hwpwm); + val &= ~PWM_ENABLE; + pwm_writel(pc, pwm->hwpwm, val); + + clk_disable_unprepare(pc->clk); +} + +static const struct pwm_ops tegra_pwm_ops = { + .config = tegra_pwm_config, + .enable = tegra_pwm_enable, + .disable = tegra_pwm_disable, + .owner = THIS_MODULE, +}; + +static int tegra_pwm_probe(struct platform_device *pdev) +{ + struct tegra_pwm_chip *pwm; + struct resource *r; + int ret; + + pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); + if (!pwm) { + dev_err(&pdev->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + pwm->dev = &pdev->dev; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no memory resources defined\n"); + return -ENODEV; + } + + pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r); + if (!pwm->mmio_base) { + dev_err(&pdev->dev, "failed to ioremap() region\n"); + return -EADDRNOTAVAIL; + } + + platform_set_drvdata(pdev, pwm); + + pwm->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pwm->clk)) + return PTR_ERR(pwm->clk); + + pwm->chip.dev = &pdev->dev; + pwm->chip.ops = &tegra_pwm_ops; + pwm->chip.base = -1; + pwm->chip.npwm = NUM_PWM; + + ret = pwmchip_add(&pwm->chip); + if (ret < 0) { + dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); + return ret; + } + + return 0; +} + +static int __devexit tegra_pwm_remove(struct platform_device *pdev) +{ + struct tegra_pwm_chip *pc = platform_get_drvdata(pdev); + int i; + + if (WARN_ON(!pc)) + return -ENODEV; + + for (i = 0; i < NUM_PWM; i++) { + struct pwm_device *pwm = &pc->chip.pwms[i]; + + if (!test_bit(PWMF_ENABLED, &pwm->flags)) + if (clk_prepare_enable(pc->clk) < 0) + continue; + + pwm_writel(pc, i, 0); + + clk_disable_unprepare(pc->clk); + } + + return pwmchip_remove(&pc->chip); +} + +#ifdef CONFIG_OF +static struct of_device_id tegra_pwm_of_match[] = { + { .compatible = "nvidia,tegra20-pwm" }, + { .compatible = "nvidia,tegra30-pwm" }, + { } +}; + +MODULE_DEVICE_TABLE(of, tegra_pwm_of_match); +#endif + +static struct platform_driver tegra_pwm_driver = { + .driver = { + .name = "tegra-pwm", + .of_match_table = of_match_ptr(tegra_pwm_of_match), + }, + .probe = tegra_pwm_probe, + .remove = __devexit_p(tegra_pwm_remove), +}; + +module_platform_driver(tegra_pwm_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("NVIDIA Corporation"); +MODULE_ALIAS("platform:tegra-pwm"); diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c new file mode 100644 index 000000000000..3c2ad284ee3e --- /dev/null +++ b/drivers/pwm/pwm-tiecap.c @@ -0,0 +1,232 @@ +/* + * ECAP PWM driver + * + * Copyright (C) 2012 Texas Instruments, Inc. 
- http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/pm_runtime.h> +#include <linux/pwm.h> + +/* ECAP registers and bits definitions */ +#define CAP1 0x08 +#define CAP2 0x0C +#define CAP3 0x10 +#define CAP4 0x14 +#define ECCTL2 0x2A +#define ECCTL2_APWM_MODE BIT(9) +#define ECCTL2_SYNC_SEL_DISA (BIT(7) | BIT(6)) +#define ECCTL2_TSCTR_FREERUN BIT(4) + +struct ecap_pwm_chip { + struct pwm_chip chip; + unsigned int clk_rate; + void __iomem *mmio_base; +}; + +static inline struct ecap_pwm_chip *to_ecap_pwm_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct ecap_pwm_chip, chip); +} + +/* + * period_ns = 10^9 * period_cycles / PWM_CLK_RATE + * duty_ns = 10^9 * duty_cycles / PWM_CLK_RATE + */ +static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); + unsigned long long c; + unsigned long period_cycles, duty_cycles; + unsigned int reg_val; + + if (period_ns < 0 || duty_ns < 0 || period_ns > NSEC_PER_SEC) + return -ERANGE; + + c = pc->clk_rate; + c = c * period_ns; + do_div(c, NSEC_PER_SEC); + period_cycles = (unsigned long)c; + + if (period_cycles < 1) { + period_cycles = 1; + duty_cycles = 1; + } else { + c = pc->clk_rate; + c = c * duty_ns; + do_div(c, NSEC_PER_SEC); + duty_cycles = (unsigned long)c; + } + + pm_runtime_get_sync(pc->chip.dev); + + reg_val = readw(pc->mmio_base + ECCTL2); + + /* Configure APWM mode & disable sync option */ + reg_val |= ECCTL2_APWM_MODE | ECCTL2_SYNC_SEL_DISA; + + writew(reg_val, pc->mmio_base + ECCTL2); + + if (!test_bit(PWMF_ENABLED, &pwm->flags)) { + /* Update active registers if not running */ + writel(duty_cycles, pc->mmio_base + CAP2); + writel(period_cycles, pc->mmio_base + CAP1); + } else { + /* + * Update shadow registers to configure period and + * compare values. 
This helps current PWM period to + * complete on reconfiguring + */ + writel(duty_cycles, pc->mmio_base + CAP4); + writel(period_cycles, pc->mmio_base + CAP3); + } + + pm_runtime_put_sync(pc->chip.dev); + return 0; +} + +static int ecap_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); + unsigned int reg_val; + + /* Leave clock enabled on enabling PWM */ + pm_runtime_get_sync(pc->chip.dev); + + /* + * Enable 'Free run Time stamp counter mode' to start counter + * and 'APWM mode' to enable APWM output + */ + reg_val = readw(pc->mmio_base + ECCTL2); + reg_val |= ECCTL2_TSCTR_FREERUN | ECCTL2_APWM_MODE; + writew(reg_val, pc->mmio_base + ECCTL2); + return 0; +} + +static void ecap_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct ecap_pwm_chip *pc = to_ecap_pwm_chip(chip); + unsigned int reg_val; + + /* + * Disable 'Free run Time stamp counter mode' to stop counter + * and 'APWM mode' to put APWM output to low + */ + reg_val = readw(pc->mmio_base + ECCTL2); + reg_val &= ~(ECCTL2_TSCTR_FREERUN | ECCTL2_APWM_MODE); + writew(reg_val, pc->mmio_base + ECCTL2); + + /* Disable clock on PWM disable */ + pm_runtime_put_sync(pc->chip.dev); +} + +static void ecap_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) +{ + if (test_bit(PWMF_ENABLED, &pwm->flags)) { + dev_warn(chip->dev, "Removing PWM device without disabling\n"); + pm_runtime_put_sync(chip->dev); + } +} + +static const struct pwm_ops ecap_pwm_ops = { + .free = ecap_pwm_free, + .config = ecap_pwm_config, + .enable = ecap_pwm_enable, + .disable = ecap_pwm_disable, + .owner = THIS_MODULE, +}; + +static int __devinit ecap_pwm_probe(struct platform_device *pdev) +{ + int ret; + struct resource *r; + struct clk *clk; + struct ecap_pwm_chip *pc; + + pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); + if (!pc) { + dev_err(&pdev->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + clk = devm_clk_get(&pdev->dev, "fck"); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + return PTR_ERR(clk); + } + + pc->clk_rate = clk_get_rate(clk); + if (!pc->clk_rate) { + dev_err(&pdev->dev, "failed to get clock rate\n"); + return -EINVAL; + } + + pc->chip.dev = &pdev->dev; + pc->chip.ops = &ecap_pwm_ops; + pc->chip.base = -1; + pc->chip.npwm = 1; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + + pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); + if (!pc->mmio_base) { + dev_err(&pdev->dev, "failed to ioremap() registers\n"); + return -EADDRNOTAVAIL; + } + + ret = pwmchip_add(&pc->chip); + if (ret < 0) { + dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); + return ret; + } + + pm_runtime_enable(&pdev->dev); + platform_set_drvdata(pdev, pc); + return 0; +} + +static int __devexit ecap_pwm_remove(struct platform_device *pdev) +{ + struct ecap_pwm_chip *pc = platform_get_drvdata(pdev); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return pwmchip_remove(&pc->chip); +} + +static struct platform_driver ecap_pwm_driver = { + .driver = { + .name = "ecap", + }, + .probe = ecap_pwm_probe, + .remove = __devexit_p(ecap_pwm_remove), +}; + +module_platform_driver(ecap_pwm_driver); + +MODULE_DESCRIPTION("ECAP PWM driver"); +MODULE_AUTHOR("Texas Instruments"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c new file mode 100644 index 000000000000..010d232cb0c8 --- /dev/null +++ 
b/drivers/pwm/pwm-tiehrpwm.c @@ -0,0 +1,411 @@ +/* + * EHRPWM PWM driver + * + * Copyright (C) 2012 Texas Instruments, Inc. - http://www.ti.com/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pwm.h> +#include <linux/io.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/pm_runtime.h> + +/* EHRPWM registers and bits definitions */ + +/* Time base module registers */ +#define TBCTL 0x00 +#define TBPRD 0x0A + +#define TBCTL_RUN_MASK (BIT(15) | BIT(14)) +#define TBCTL_STOP_NEXT 0 +#define TBCTL_STOP_ON_CYCLE BIT(14) +#define TBCTL_FREE_RUN (BIT(15) | BIT(14)) +#define TBCTL_PRDLD_MASK BIT(3) +#define TBCTL_PRDLD_SHDW 0 +#define TBCTL_PRDLD_IMDT BIT(3) +#define TBCTL_CLKDIV_MASK (BIT(12) | BIT(11) | BIT(10) | BIT(9) | \ + BIT(8) | BIT(7)) +#define TBCTL_CTRMODE_MASK (BIT(1) | BIT(0)) +#define TBCTL_CTRMODE_UP 0 +#define TBCTL_CTRMODE_DOWN BIT(0) +#define TBCTL_CTRMODE_UPDOWN BIT(1) +#define TBCTL_CTRMODE_FREEZE (BIT(1) | BIT(0)) + +#define TBCTL_HSPCLKDIV_SHIFT 7 +#define TBCTL_CLKDIV_SHIFT 10 + +#define CLKDIV_MAX 7 +#define HSPCLKDIV_MAX 7 +#define PERIOD_MAX 0xFFFF + +/* compare module registers */ +#define CMPA 0x12 +#define CMPB 0x14 + +/* Action qualifier module registers */ +#define AQCTLA 0x16 +#define AQCTLB 0x18 +#define AQSFRC 0x1A +#define AQCSFRC 0x1C + +#define AQCTL_CBU_MASK (BIT(9) | BIT(8)) +#define AQCTL_CBU_FRCLOW BIT(8) +#define AQCTL_CBU_FRCHIGH BIT(9) +#define AQCTL_CBU_FRCTOGGLE (BIT(9) | BIT(8)) +#define AQCTL_CAU_MASK (BIT(5) | BIT(4)) +#define AQCTL_CAU_FRCLOW BIT(4) +#define AQCTL_CAU_FRCHIGH BIT(5) +#define AQCTL_CAU_FRCTOGGLE (BIT(5) | BIT(4)) +#define AQCTL_PRD_MASK (BIT(3) | BIT(2)) +#define AQCTL_PRD_FRCLOW BIT(2) +#define AQCTL_PRD_FRCHIGH BIT(3) +#define AQCTL_PRD_FRCTOGGLE (BIT(3) | BIT(2)) +#define AQCTL_ZRO_MASK (BIT(1) | BIT(0)) +#define AQCTL_ZRO_FRCLOW BIT(0) +#define AQCTL_ZRO_FRCHIGH BIT(1) +#define AQCTL_ZRO_FRCTOGGLE (BIT(1) | BIT(0)) + +#define AQSFRC_RLDCSF_MASK (BIT(7) | BIT(6)) +#define AQSFRC_RLDCSF_ZRO 0 +#define AQSFRC_RLDCSF_PRD BIT(6) +#define AQSFRC_RLDCSF_ZROPRD BIT(7) +#define AQSFRC_RLDCSF_IMDT (BIT(7) | BIT(6)) + +#define AQCSFRC_CSFB_MASK (BIT(3) | BIT(2)) +#define AQCSFRC_CSFB_FRCDIS 0 +#define AQCSFRC_CSFB_FRCLOW BIT(2) +#define AQCSFRC_CSFB_FRCHIGH BIT(3) +#define AQCSFRC_CSFB_DISSWFRC (BIT(3) | BIT(2)) +#define AQCSFRC_CSFA_MASK (BIT(1) | BIT(0)) +#define AQCSFRC_CSFA_FRCDIS 0 +#define AQCSFRC_CSFA_FRCLOW BIT(0) +#define AQCSFRC_CSFA_FRCHIGH BIT(1) +#define AQCSFRC_CSFA_DISSWFRC (BIT(1) | BIT(0)) + +#define NUM_PWM_CHANNEL 2 /* EHRPWM channels */ + +struct ehrpwm_pwm_chip { + struct pwm_chip chip; + unsigned int clk_rate; + void __iomem *mmio_base; +}; + +static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct 
ehrpwm_pwm_chip, chip); +} + +static void ehrpwm_write(void *base, int offset, unsigned int val) +{ + writew(val & 0xFFFF, base + offset); +} + +static void ehrpwm_modify(void *base, int offset, + unsigned short mask, unsigned short val) +{ + unsigned short regval; + + regval = readw(base + offset); + regval &= ~mask; + regval |= val & mask; + writew(regval, base + offset); +} + +/** + * set_prescale_div - Set up the prescaler divider function + * @rqst_prescaler: prescaler value min + * @prescale_div: prescaler value set + * @tb_clk_div: Time Base Control prescaler bits + */ +static int set_prescale_div(unsigned long rqst_prescaler, + unsigned short *prescale_div, unsigned short *tb_clk_div) +{ + unsigned int clkdiv, hspclkdiv; + + for (clkdiv = 0; clkdiv <= CLKDIV_MAX; clkdiv++) { + for (hspclkdiv = 0; hspclkdiv <= HSPCLKDIV_MAX; hspclkdiv++) { + + /* + * calculations for prescaler value : + * prescale_div = HSPCLKDIVIDER * CLKDIVIDER. + * HSPCLKDIVIDER = 2 ** hspclkdiv + * CLKDIVIDER = (1), if clkdiv == 0 *OR* + * (2 * clkdiv), if clkdiv != 0 + * + * Configure prescale_div value such that period + * register value is less than 65535. + */ + + *prescale_div = (1 << clkdiv) * + (hspclkdiv ? (hspclkdiv * 2) : 1); + if (*prescale_div > rqst_prescaler) { + *tb_clk_div = (clkdiv << TBCTL_CLKDIV_SHIFT) | + (hspclkdiv << TBCTL_HSPCLKDIV_SHIFT); + return 0; + } + } + } + return 1; +} + +static void configure_chans(struct ehrpwm_pwm_chip *pc, int chan, + unsigned long duty_cycles) +{ + int cmp_reg, aqctl_reg; + unsigned short aqctl_val, aqctl_mask; + + /* + * Channels can be configured from action qualifier module. + * Channel 0 configured with compare A register and for + * up-counter mode. + * Channel 1 configured with compare B register and for + * up-counter mode. 
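For a concrete trace of set_prescale_div() above, assume a hypothetical 100 MHz functional clock and a 10 ms period: period_cycles is 1,000,000, so the requested prescaler is 1,000,000 / 65535 = 15. The nested search stops at CLKDIV = 1, HSPCLKDIV = 4, because (1 << 1) * (4 * 2) = 16 is the first divider product greater than 15; tb_clk_div becomes (1 << 10) | (4 << 7), and the value written to TBPRD is 1,000,000 / 16 = 62,500, which fits below PERIOD_MAX.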
+ */ + if (chan == 1) { + aqctl_reg = AQCTLB; + cmp_reg = CMPB; + /* Configure PWM Low from compare B value */ + aqctl_val = AQCTL_CBU_FRCLOW; + aqctl_mask = AQCTL_CBU_MASK; + } else { + cmp_reg = CMPA; + aqctl_reg = AQCTLA; + /* Configure PWM Low from compare A value*/ + aqctl_val = AQCTL_CAU_FRCLOW; + aqctl_mask = AQCTL_CAU_MASK; + } + + /* Configure PWM High from period value and zero value */ + aqctl_val |= AQCTL_PRD_FRCHIGH | AQCTL_ZRO_FRCHIGH; + aqctl_mask |= AQCTL_PRD_MASK | AQCTL_ZRO_MASK; + ehrpwm_modify(pc->mmio_base, aqctl_reg, aqctl_mask, aqctl_val); + + ehrpwm_write(pc->mmio_base, cmp_reg, duty_cycles); +} + +/* + * period_ns = 10^9 * (ps_divval * period_cycles) / PWM_CLK_RATE + * duty_ns = 10^9 * (ps_divval * duty_cycles) / PWM_CLK_RATE + */ +static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); + unsigned long long c; + unsigned long period_cycles, duty_cycles; + unsigned short ps_divval, tb_divval; + + if (period_ns < 0 || duty_ns < 0 || period_ns > NSEC_PER_SEC) + return -ERANGE; + + c = pc->clk_rate; + c = c * period_ns; + do_div(c, NSEC_PER_SEC); + period_cycles = (unsigned long)c; + + if (period_cycles < 1) { + period_cycles = 1; + duty_cycles = 1; + } else { + c = pc->clk_rate; + c = c * duty_ns; + do_div(c, NSEC_PER_SEC); + duty_cycles = (unsigned long)c; + } + + /* Configure clock prescaler to support Low frequency PWM wave */ + if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval, + &tb_divval)) { + dev_err(chip->dev, "Unsupported values\n"); + return -EINVAL; + } + + pm_runtime_get_sync(chip->dev); + + /* Update clock prescaler values */ + ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CLKDIV_MASK, tb_divval); + + /* Update period & duty cycle with presacler division */ + period_cycles = period_cycles / ps_divval; + duty_cycles = duty_cycles / ps_divval; + + /* Configure shadow loading on Period register */ + ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_PRDLD_MASK, TBCTL_PRDLD_SHDW); + + ehrpwm_write(pc->mmio_base, TBPRD, period_cycles); + + /* Configure ehrpwm counter for up-count mode */ + ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_CTRMODE_MASK, + TBCTL_CTRMODE_UP); + + /* Configure the channel for duty cycle */ + configure_chans(pc, pwm->hwpwm, duty_cycles); + pm_runtime_put_sync(chip->dev); + return 0; +} + +static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); + unsigned short aqcsfrc_val, aqcsfrc_mask; + + /* Leave clock enabled on enabling PWM */ + pm_runtime_get_sync(chip->dev); + + /* Disabling Action Qualifier on PWM output */ + if (pwm->hwpwm) { + aqcsfrc_val = AQCSFRC_CSFB_FRCDIS; + aqcsfrc_mask = AQCSFRC_CSFB_MASK; + } else { + aqcsfrc_val = AQCSFRC_CSFA_FRCDIS; + aqcsfrc_mask = AQCSFRC_CSFA_MASK; + } + + /* Changes to shadow mode */ + ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK, + AQSFRC_RLDCSF_ZRO); + + ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); + + /* Enable time counter for free_run */ + ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN); + return 0; +} + +static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); + unsigned short aqcsfrc_val, aqcsfrc_mask; + + /* Action Qualifier puts PWM output low forcefully */ + if (pwm->hwpwm) { + aqcsfrc_val = AQCSFRC_CSFB_FRCLOW; + aqcsfrc_mask = AQCSFRC_CSFB_MASK; + } else { + aqcsfrc_val = 
AQCSFRC_CSFA_FRCLOW; + aqcsfrc_mask = AQCSFRC_CSFA_MASK; + } + + /* + * Changes to immediate action on Action Qualifier. This puts + * Action Qualifier control on PWM output from next TBCLK + */ + ehrpwm_modify(pc->mmio_base, AQSFRC, AQSFRC_RLDCSF_MASK, + AQSFRC_RLDCSF_IMDT); + + ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val); + + /* Stop Time base counter */ + ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT); + + /* Disable clock on PWM disable */ + pm_runtime_put_sync(chip->dev); +} + +static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) +{ + if (test_bit(PWMF_ENABLED, &pwm->flags)) { + dev_warn(chip->dev, "Removing PWM device without disabling\n"); + pm_runtime_put_sync(chip->dev); + } +} + +static const struct pwm_ops ehrpwm_pwm_ops = { + .free = ehrpwm_pwm_free, + .config = ehrpwm_pwm_config, + .enable = ehrpwm_pwm_enable, + .disable = ehrpwm_pwm_disable, + .owner = THIS_MODULE, +}; + +static int __devinit ehrpwm_pwm_probe(struct platform_device *pdev) +{ + int ret; + struct resource *r; + struct clk *clk; + struct ehrpwm_pwm_chip *pc; + + pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); + if (!pc) { + dev_err(&pdev->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + clk = devm_clk_get(&pdev->dev, "fck"); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "failed to get clock\n"); + return PTR_ERR(clk); + } + + pc->clk_rate = clk_get_rate(clk); + if (!pc->clk_rate) { + dev_err(&pdev->dev, "failed to get clock rate\n"); + return -EINVAL; + } + + pc->chip.dev = &pdev->dev; + pc->chip.ops = &ehrpwm_pwm_ops; + pc->chip.base = -1; + pc->chip.npwm = NUM_PWM_CHANNEL; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + + pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); + if (!pc->mmio_base) { + dev_err(&pdev->dev, "failed to ioremap() registers\n"); + return -EADDRNOTAVAIL; + } + + ret = pwmchip_add(&pc->chip); + if (ret < 0) { + dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); + return ret; + } + + pm_runtime_enable(&pdev->dev); + platform_set_drvdata(pdev, pc); + return 0; +} + +static int __devexit ehrpwm_pwm_remove(struct platform_device *pdev) +{ + struct ehrpwm_pwm_chip *pc = platform_get_drvdata(pdev); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return pwmchip_remove(&pc->chip); +} + +static struct platform_driver ehrpwm_pwm_driver = { + .driver = { + .name = "ehrpwm", + }, + .probe = ehrpwm_pwm_probe, + .remove = __devexit_p(ehrpwm_pwm_remove), +}; + +module_platform_driver(ehrpwm_pwm_driver); + +MODULE_DESCRIPTION("EHRPWM PWM driver"); +MODULE_AUTHOR("Texas Instruments"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c new file mode 100644 index 000000000000..548021439f0c --- /dev/null +++ b/drivers/pwm/pwm-vt8500.c @@ -0,0 +1,177 @@ +/* + * drivers/pwm/pwm-vt8500.c + * + * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/pwm.h> +#include <linux/delay.h> + +#include <asm/div64.h> + +#define VT8500_NR_PWMS 4 + +struct vt8500_chip { + struct pwm_chip chip; + void __iomem *base; +}; + +#define to_vt8500_chip(chip) container_of(chip, struct vt8500_chip, chip) + +#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) +static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask) +{ + int loops = msecs_to_loops(10); + while ((readb(reg) & bitmask) && --loops) + cpu_relax(); + + if (unlikely(!loops)) + pr_warning("Waiting for status bits 0x%x to clear timed out\n", + bitmask); +} + +static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct vt8500_chip *vt8500 = to_vt8500_chip(chip); + unsigned long long c; + unsigned long period_cycles, prescale, pv, dc; + + c = 25000000/2; /* wild guess --- need to implement clocks */ + c = c * period_ns; + do_div(c, 1000000000); + period_cycles = c; + + if (period_cycles < 1) + period_cycles = 1; + prescale = (period_cycles - 1) / 4096; + pv = period_cycles / (prescale + 1) - 1; + if (pv > 4095) + pv = 4095; + + if (prescale > 1023) + return -EINVAL; + + c = (unsigned long long)pv * duty_ns; + do_div(c, period_ns); + dc = c; + + pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 1)); + writel(prescale, vt8500->base + 0x4 + (pwm->hwpwm << 4)); + + pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 2)); + writel(pv, vt8500->base + 0x8 + (pwm->hwpwm << 4)); + + pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 3)); + writel(dc, vt8500->base + 0xc + (pwm->hwpwm << 4)); + + return 0; +} + +static int vt8500_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct vt8500_chip *vt8500 = to_vt8500_chip(chip); + + pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0)); + writel(5, vt8500->base + (pwm->hwpwm << 4)); + return 0; +} + +static void vt8500_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct vt8500_chip *vt8500 = to_vt8500_chip(chip); + + pwm_busy_wait(vt8500->base + 0x40 + pwm->hwpwm, (1 << 0)); + writel(0, vt8500->base + (pwm->hwpwm << 4)); +} + +static struct pwm_ops vt8500_pwm_ops = { + .enable = vt8500_pwm_enable, + .disable = vt8500_pwm_disable, + .config = vt8500_pwm_config, + .owner = THIS_MODULE, +}; + +static int __devinit pwm_probe(struct platform_device *pdev) +{ + struct vt8500_chip *chip; + struct resource *r; + int ret; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (chip == NULL) { + dev_err(&pdev->dev, "failed to allocate memory\n"); + return -ENOMEM; + } + + chip->chip.dev = &pdev->dev; + chip->chip.ops = &vt8500_pwm_ops; + chip->chip.base = -1; + chip->chip.npwm = VT8500_NR_PWMS; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (r == NULL) { + dev_err(&pdev->dev, "no memory resource defined\n"); + return -ENODEV; + } + + chip->base = devm_request_and_ioremap(&pdev->dev, r); + if (chip->base == NULL) + return -EADDRNOTAVAIL; + + ret = pwmchip_add(&chip->chip); + if (ret < 0) + return ret; + + platform_set_drvdata(pdev, chip); + return ret; +} + +static int __devexit pwm_remove(struct platform_device *pdev) +{ + struct vt8500_chip *chip; + + chip = platform_get_drvdata(pdev); + if (chip == NULL) + return -ENODEV; + + return pwmchip_remove(&chip->chip); +} + +static struct platform_driver pwm_driver = { + .driver = { + .name = 
"vt8500-pwm", + .owner = THIS_MODULE, + }, + .probe = pwm_probe, + .remove = __devexit_p(pwm_remove), +}; + +static int __init pwm_init(void) +{ + return platform_driver_register(&pwm_driver); +} +arch_initcall(pwm_init); + +static void __exit pwm_exit(void) +{ + platform_driver_unregister(&pwm_driver); +} +module_exit(pwm_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 2979292650d6..cf282763a8dc 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -245,7 +245,7 @@ config BACKLIGHT_CARILLO_RANCH config BACKLIGHT_PWM tristate "Generic PWM based Backlight Driver" - depends on HAVE_PWM + depends on PWM help If you have a LCD backlight adjustable by PWM, say Y to enable this driver. diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 342b7d7cbb63..995f0164c9b0 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -26,11 +26,13 @@ struct pwm_bl_data { struct device *dev; unsigned int period; unsigned int lth_brightness; + unsigned int *levels; int (*notify)(struct device *, int brightness); void (*notify_after)(struct device *, int brightness); int (*check_fb)(struct device *, struct fb_info *); + void (*exit)(struct device *); }; static int pwm_backlight_update_status(struct backlight_device *bl) @@ -52,9 +54,18 @@ static int pwm_backlight_update_status(struct backlight_device *bl) pwm_config(pb->pwm, 0, pb->period); pwm_disable(pb->pwm); } else { - brightness = pb->lth_brightness + - (brightness * (pb->period - pb->lth_brightness) / max); - pwm_config(pb->pwm, brightness, pb->period); + int duty_cycle; + + if (pb->levels) { + duty_cycle = pb->levels[brightness]; + max = pb->levels[max]; + } else { + duty_cycle = brightness; + } + + duty_cycle = pb->lth_brightness + + (duty_cycle * (pb->period - pb->lth_brightness) / max); + pwm_config(pb->pwm, duty_cycle, pb->period); pwm_enable(pb->pwm); } @@ -83,17 +94,98 @@ static const struct backlight_ops pwm_backlight_ops = { .check_fb = pwm_backlight_check_fb, }; +#ifdef CONFIG_OF +static int pwm_backlight_parse_dt(struct device *dev, + struct platform_pwm_backlight_data *data) +{ + struct device_node *node = dev->of_node; + struct property *prop; + int length; + u32 value; + int ret; + + if (!node) + return -ENODEV; + + memset(data, 0, sizeof(*data)); + + /* determine the number of brightness levels */ + prop = of_find_property(node, "brightness-levels", &length); + if (!prop) + return -EINVAL; + + data->max_brightness = length / sizeof(u32); + + /* read brightness levels from DT property */ + if (data->max_brightness > 0) { + size_t size = sizeof(*data->levels) * data->max_brightness; + + data->levels = devm_kzalloc(dev, size, GFP_KERNEL); + if (!data->levels) + return -ENOMEM; + + ret = of_property_read_u32_array(node, "brightness-levels", + data->levels, + data->max_brightness); + if (ret < 0) + return ret; + + ret = of_property_read_u32(node, "default-brightness-level", + &value); + if (ret < 0) + return ret; + + if (value >= data->max_brightness) { + dev_warn(dev, "invalid default brightness level: %u, using %u\n", + value, data->max_brightness - 1); + value = data->max_brightness - 1; + } + + data->dft_brightness = value; + data->max_brightness--; + } + + /* + * TODO: Most users of this driver use a number of GPIOs to control + * backlight power. Support for specifying these needs to be + * added. 
+ */ + + return 0; +} + +static struct of_device_id pwm_backlight_of_match[] = { + { .compatible = "pwm-backlight" }, + { } +}; + +MODULE_DEVICE_TABLE(of, pwm_backlight_of_match); +#else +static int pwm_backlight_parse_dt(struct device *dev, + struct platform_pwm_backlight_data *data) +{ + return -ENODEV; +} +#endif + static int pwm_backlight_probe(struct platform_device *pdev) { - struct backlight_properties props; struct platform_pwm_backlight_data *data = pdev->dev.platform_data; + struct platform_pwm_backlight_data defdata; + struct backlight_properties props; struct backlight_device *bl; struct pwm_bl_data *pb; + unsigned int max; int ret; if (!data) { - dev_err(&pdev->dev, "failed to find platform data\n"); - return -EINVAL; + ret = pwm_backlight_parse_dt(&pdev->dev, &defdata); + if (ret < 0) { + dev_err(&pdev->dev, "failed to find platform data\n"); + return ret; + } + + data = &defdata; } if (data->init) { @@ -109,21 +201,42 @@ static int pwm_backlight_probe(struct platform_device *pdev) goto err_alloc; } - pb->period = data->pwm_period_ns; + if (data->levels) { + max = data->levels[data->max_brightness]; + pb->levels = data->levels; + } else + max = data->max_brightness; + pb->notify = data->notify; pb->notify_after = data->notify_after; pb->check_fb = data->check_fb; - pb->lth_brightness = data->lth_brightness * - (data->pwm_period_ns / data->max_brightness); + pb->exit = data->exit; pb->dev = &pdev->dev; - pb->pwm = pwm_request(data->pwm_id, "backlight"); + pb->pwm = pwm_get(&pdev->dev, NULL); if (IS_ERR(pb->pwm)) { - dev_err(&pdev->dev, "unable to request PWM for backlight\n"); - ret = PTR_ERR(pb->pwm); - goto err_alloc; - } else - dev_dbg(&pdev->dev, "got pwm for backlight\n"); + dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n"); + + pb->pwm = pwm_request(data->pwm_id, "pwm-backlight"); + if (IS_ERR(pb->pwm)) { + dev_err(&pdev->dev, "unable to request legacy PWM\n"); + ret = PTR_ERR(pb->pwm); + goto err_alloc; + } + } + + dev_dbg(&pdev->dev, "got pwm for backlight\n"); + + /* + * The DT case will set the pwm_period_ns field to 0 and store the + * period, parsed from the DT, in the PWM device. For the non-DT case, + * set the period from platform data. 
+ */ + if (data->pwm_period_ns > 0) + pwm_set_period(pb->pwm, data->pwm_period_ns); + + pb->period = pwm_get_period(pb->pwm); + pb->lth_brightness = data->lth_brightness * (pb->period / max); memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; @@ -143,7 +256,7 @@ static int pwm_backlight_probe(struct platform_device *pdev) return 0; err_bl: - pwm_free(pb->pwm); + pwm_put(pb->pwm); err_alloc: if (data->exit) data->exit(&pdev->dev); @@ -152,16 +265,15 @@ err_alloc: static int pwm_backlight_remove(struct platform_device *pdev) { - struct platform_pwm_backlight_data *data = pdev->dev.platform_data; struct backlight_device *bl = platform_get_drvdata(pdev); struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev); backlight_device_unregister(bl); pwm_config(pb->pwm, 0, pb->period); pwm_disable(pb->pwm); - pwm_free(pb->pwm); - if (data->exit) - data->exit(&pdev->dev); + pwm_put(pb->pwm); + if (pb->exit) + pb->exit(&pdev->dev); return 0; } @@ -195,11 +307,12 @@ static SIMPLE_DEV_PM_OPS(pwm_backlight_pm_ops, pwm_backlight_suspend, static struct platform_driver pwm_backlight_driver = { .driver = { - .name = "pwm-backlight", - .owner = THIS_MODULE, + .name = "pwm-backlight", + .owner = THIS_MODULE, #ifdef CONFIG_PM - .pm = &pwm_backlight_pm_ops, + .pm = &pwm_backlight_pm_ops, #endif + .of_match_table = of_match_ptr(pwm_backlight_of_match), }, .probe = pwm_backlight_probe, .remove = pwm_backlight_remove, diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h index abfb2682de7f..2be8a2dbc868 100644 --- a/include/asm-generic/dma-coherent.h +++ b/include/asm-generic/dma-coherent.h @@ -29,6 +29,7 @@ dma_mark_declared_memory_occupied(struct device *dev, #else #define dma_alloc_from_coherent(dev, size, handle, ret) (0) #define dma_release_from_coherent(dev, order, vaddr) (0) +#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0) #endif #endif diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index 2e248d8924dc..de8bf89940f8 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -176,4 +176,59 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) +extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +/** + * dma_mmap_attrs - map a coherent DMA allocation into user space + * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices + * @vma: vm_area_struct describing requested user mapping + * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs + * @handle: device-view address returned from dma_alloc_attrs + * @size: size of memory originally requested in dma_alloc_attrs + * @attrs: attributes of mapping properties requested in dma_alloc_attrs + * + * Map a coherent DMA buffer previously allocated by dma_alloc_attrs + * into user space. The coherent DMA buffer must not be freed by the + * driver until the user space mapping has been released. 
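A minimal consumer sketch for the mmap helpers above, assuming a hypothetical character driver that earlier obtained cpu_addr, dma_handle and size from dma_alloc_coherent(); the foo_* names are illustrative only.

struct foo_dev {
	struct device *dev;
	void *cpu_addr;
	dma_addr_t dma_handle;
	size_t size;
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_dev *foo = file->private_data;

	/* hand the previously allocated coherent buffer to user space */
	return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
				 foo->dma_handle, foo->size);
}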
+ */ +static inline int +dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + if (ops->mmap) + return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); +} + +#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) + +static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size) +{ + DEFINE_DMA_ATTRS(attrs); + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); +} + +int +dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size); + +static inline int +dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, + dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + BUG_ON(!ops); + if (ops->get_sgtable) + return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, + attrs); + return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); +} + +#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) + #endif diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h index 68733587e700..c20b00181530 100644 --- a/include/drm/exynos_drm.h +++ b/include/drm/exynos_drm.h @@ -107,11 +107,6 @@ struct drm_exynos_vidi_connection { uint64_t edid; }; -struct drm_exynos_plane_set_zpos { - __u32 plane_id; - __s32 zpos; -}; - /* memory type definitions. */ enum e_drm_exynos_gem_mem_type { /* Physically Continuous memory and used as default. 
*/ @@ -164,7 +159,6 @@ struct drm_exynos_g2d_exec { #define DRM_EXYNOS_GEM_MMAP 0x02 /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ #define DRM_EXYNOS_GEM_GET 0x04 -#define DRM_EXYNOS_PLANE_SET_ZPOS 0x06 #define DRM_EXYNOS_VIDI_CONNECTION 0x07 /* G2D */ @@ -184,9 +178,6 @@ struct drm_exynos_g2d_exec { #define DRM_IOCTL_EXYNOS_GEM_GET DRM_IOWR(DRM_COMMAND_BASE + \ DRM_EXYNOS_GEM_GET, struct drm_exynos_gem_info) -#define DRM_IOCTL_EXYNOS_PLANE_SET_ZPOS DRM_IOWR(DRM_COMMAND_BASE + \ - DRM_EXYNOS_PLANE_SET_ZPOS, struct drm_exynos_plane_set_zpos) - #define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \ DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 98f34b886f95..38d27a10aa5d 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -66,14 +66,13 @@ typedef int (*dm_request_endio_fn) (struct dm_target *ti, struct request *clone, int error, union map_info *map_context); -typedef void (*dm_flush_fn) (struct dm_target *ti); typedef void (*dm_presuspend_fn) (struct dm_target *ti); typedef void (*dm_postsuspend_fn) (struct dm_target *ti); typedef int (*dm_preresume_fn) (struct dm_target *ti); typedef void (*dm_resume_fn) (struct dm_target *ti); typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, - char *result, unsigned int maxlen); + unsigned status_flags, char *result, unsigned maxlen); typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); @@ -139,7 +138,6 @@ struct target_type { dm_map_request_fn map_rq; dm_endio_fn end_io; dm_request_endio_fn rq_end_io; - dm_flush_fn flush; dm_presuspend_fn presuspend; dm_postsuspend_fn postsuspend; dm_preresume_fn preresume; @@ -188,8 +186,8 @@ struct dm_target { sector_t begin; sector_t len; - /* Always a power of 2 */ - sector_t split_io; + /* If non-zero, maximum size of I/O submitted to a target. */ + uint32_t max_io_len; /* * A number of zero-length barrier requests that will be submitted @@ -214,15 +212,27 @@ struct dm_target { char *error; /* + * Set if this target needs to receive flushes regardless of + * whether or not its underlying devices have support. + */ + bool flush_supported:1; + + /* * Set if this target needs to receive discards regardless of * whether or not its underlying devices have support. */ - unsigned discards_supported:1; + bool discards_supported:1; + + /* + * Set if the target required discard request to be split + * on max_io_len boundary. + */ + bool split_discard_requests:1; /* * Set if this target does not return zeroes on discarded blocks. */ - unsigned discard_zeroes_data_unsupported:1; + bool discard_zeroes_data_unsupported:1; }; /* Each target can link one of these into the table */ @@ -360,6 +370,11 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback int dm_table_complete(struct dm_table *t); /* + * Target may require that it is never sent I/O larger than len. + */ +int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len); + +/* * Table reference counting. 
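A sketch of how a target constructor might use the dm_set_target_max_io_len() helper declared above; the target name and the fixed 8-sector limit are purely illustrative, real targets derive the length from their on-disk layout.

static int chunked_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	int r;

	/* never let the core hand this target more than 8 sectors at once */
	r = dm_set_target_max_io_len(ti, 8);
	if (r) {
		ti->error = "unsupported chunk size";
		return r;
	}

	return 0;
}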
*/ struct dm_table *dm_get_live_table(struct mapped_device *md); diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index 75fd5573516e..91e3a360f611 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h @@ -267,9 +267,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 22 +#define DM_VERSION_MINOR 23 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2011-10-19)" +#define DM_VERSION_EXTRA "-ioctl (2012-07-25)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ @@ -307,6 +307,8 @@ enum { /* * Set this to suspend without flushing queued ios. + * Also disables flushing uncommitted changes in the thin target before + * generating statistics for DM_TABLE_STATUS and DM_DEV_WAIT. */ #define DM_NOFLUSH_FLAG (1 << 11) /* In */ diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h index 547ab568d3ae..f83f793223ff 100644 --- a/include/linux/dma-attrs.h +++ b/include/linux/dma-attrs.h @@ -15,6 +15,8 @@ enum dma_attr { DMA_ATTR_WEAK_ORDERING, DMA_ATTR_WRITE_COMBINE, DMA_ATTR_NON_CONSISTENT, + DMA_ATTR_NO_KERNEL_MAPPING, + DMA_ATTR_SKIP_CPU_SYNC, DMA_ATTR_MAX, }; diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index dfc099e56a66..94af41858513 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -18,6 +18,9 @@ struct dma_map_ops { int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, struct dma_attrs *attrs); + int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *, + dma_addr_t, size_t, struct dma_attrs *attrs); + dma_addr_t (*map_page)(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, diff --git a/include/linux/edac.h b/include/linux/edac.h index 91ba3bae42ee..bab9f8473dc1 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -13,9 +13,11 @@ #define _LINUX_EDAC_H_ #include <linux/atomic.h> +#include <linux/device.h> #include <linux/kobject.h> #include <linux/completion.h> #include <linux/workqueue.h> +#include <linux/debugfs.h> struct device; @@ -49,7 +51,19 @@ static inline void opstate_init(void) #define EDAC_MC_LABEL_LEN 31 #define MC_PROC_NAME_MAX_LEN 7 -/* memory devices */ +/** + * enum dev_type - describe the type of memory DRAM chips used at the stick + * @DEV_UNKNOWN: Can't be determined, or MC doesn't support detect it + * @DEV_X1: 1 bit for data + * @DEV_X2: 2 bits for data + * @DEV_X4: 4 bits for data + * @DEV_X8: 8 bits for data + * @DEV_X16: 16 bits for data + * @DEV_X32: 32 bits for data + * @DEV_X64: 64 bits for data + * + * Typical values are x4 and x8. + */ enum dev_type { DEV_UNKNOWN = 0, DEV_X1, @@ -167,18 +181,30 @@ enum mem_type { #define MEM_FLAG_DDR3 BIT(MEM_DDR3) #define MEM_FLAG_RDDR3 BIT(MEM_RDDR3) -/* chipset Error Detection and Correction capabilities and mode */ +/** + * enum edac-type - Error Detection and Correction capabilities and mode + * @EDAC_UNKNOWN: Unknown if ECC is available + * @EDAC_NONE: Doesn't support ECC + * @EDAC_RESERVED: Reserved ECC type + * @EDAC_PARITY: Detects parity errors + * @EDAC_EC: Error Checking - no correction + * @EDAC_SECDED: Single bit error correction, Double detection + * @EDAC_S2ECD2ED: Chipkill x2 devices - do these exist? 
+ * @EDAC_S4ECD4ED: Chipkill x4 devices + * @EDAC_S8ECD8ED: Chipkill x8 devices + * @EDAC_S16ECD16ED: Chipkill x16 devices + */ enum edac_type { - EDAC_UNKNOWN = 0, /* Unknown if ECC is available */ - EDAC_NONE, /* Doesn't support ECC */ - EDAC_RESERVED, /* Reserved ECC type */ - EDAC_PARITY, /* Detects parity errors */ - EDAC_EC, /* Error Checking - no correction */ - EDAC_SECDED, /* Single bit error correction, Double detection */ - EDAC_S2ECD2ED, /* Chipkill x2 devices - do these exist? */ - EDAC_S4ECD4ED, /* Chipkill x4 devices */ - EDAC_S8ECD8ED, /* Chipkill x8 devices */ - EDAC_S16ECD16ED, /* Chipkill x16 devices */ + EDAC_UNKNOWN = 0, + EDAC_NONE, + EDAC_RESERVED, + EDAC_PARITY, + EDAC_EC, + EDAC_SECDED, + EDAC_S2ECD2ED, + EDAC_S4ECD4ED, + EDAC_S8ECD8ED, + EDAC_S16ECD16ED, }; #define EDAC_FLAG_UNKNOWN BIT(EDAC_UNKNOWN) @@ -191,18 +217,30 @@ enum edac_type { #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) -/* scrubbing capabilities */ +/** + * enum scrub_type - scrubbing capabilities + * @SCRUB_UNKNOWN Unknown if scrubber is available + * @SCRUB_NONE: No scrubber + * @SCRUB_SW_PROG: SW progressive (sequential) scrubbing + * @SCRUB_SW_SRC: Software scrub only errors + * @SCRUB_SW_PROG_SRC: Progressive software scrub from an error + * @SCRUB_SW_TUNABLE: Software scrub frequency is tunable + * @SCRUB_HW_PROG: HW progressive (sequential) scrubbing + * @SCRUB_HW_SRC: Hardware scrub only errors + * @SCRUB_HW_PROG_SRC: Progressive hardware scrub from an error + * SCRUB_HW_TUNABLE: Hardware scrub frequency is tunable + */ enum scrub_type { - SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */ - SCRUB_NONE, /* No scrubber */ - SCRUB_SW_PROG, /* SW progressive (sequential) scrubbing */ - SCRUB_SW_SRC, /* Software scrub only errors */ - SCRUB_SW_PROG_SRC, /* Progressive software scrub from an error */ - SCRUB_SW_TUNABLE, /* Software scrub frequency is tunable */ - SCRUB_HW_PROG, /* HW progressive (sequential) scrubbing */ - SCRUB_HW_SRC, /* Hardware scrub only errors */ - SCRUB_HW_PROG_SRC, /* Progressive hardware scrub from an error */ - SCRUB_HW_TUNABLE /* Hardware scrub frequency is tunable */ + SCRUB_UNKNOWN = 0, + SCRUB_NONE, + SCRUB_SW_PROG, + SCRUB_SW_SRC, + SCRUB_SW_PROG_SRC, + SCRUB_SW_TUNABLE, + SCRUB_HW_PROG, + SCRUB_HW_SRC, + SCRUB_HW_PROG_SRC, + SCRUB_HW_TUNABLE }; #define SCRUB_FLAG_SW_PROG BIT(SCRUB_SW_PROG) @@ -374,23 +412,21 @@ struct edac_mc_layer { #define EDAC_MAX_LAYERS 3 /** - * EDAC_DIMM_PTR - Macro responsible to find a pointer inside a pointer array + * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer array * for the element given by [layer0,layer1,layer2] position * * @layers: a struct edac_mc_layer array, describing how many elements * were allocated for each layer - * @var: name of the var where we want to get the pointer - * (like mci->dimms) * @n_layers: Number of layers at the @layers array * @layer0: layer0 position * @layer1: layer1 position. Unused if n_layers < 2 * @layer2: layer2 position. 
Unused if n_layers < 3 * - * For 1 layer, this macro returns &var[layer0] + * For 1 layer, this macro returns &var[layer0] - &var * For 2 layers, this macro is similar to allocate a bi-dimensional array - * and to return "&var[layer0][layer1]" + * and to return "&var[layer0][layer1] - &var" * For 3 layers, this macro is similar to allocate a tri-dimensional array - * and to return "&var[layer0][layer1][layer2]" + * and to return "&var[layer0][layer1][layer2] - &var" * * A loop could be used here to make it more generic, but, as we only have * 3 layers, this is a little faster. @@ -398,23 +434,52 @@ struct edac_mc_layer { * a NULL is returned, causing an OOPS during the memory allocation routine, * with would point to the developer that he's doing something wrong. */ -#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \ - typeof(var) __p; \ +#define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \ + int __i; \ if ((nlayers) == 1) \ - __p = &var[layer0]; \ + __i = layer0; \ else if ((nlayers) == 2) \ - __p = &var[(layer1) + ((layers[1]).size * (layer0))]; \ + __i = (layer1) + ((layers[1]).size * (layer0)); \ else if ((nlayers) == 3) \ - __p = &var[(layer2) + ((layers[2]).size * ((layer1) + \ - ((layers[1]).size * (layer0))))]; \ + __i = (layer2) + ((layers[2]).size * ((layer1) + \ + ((layers[1]).size * (layer0)))); \ else \ + __i = -EINVAL; \ + __i; \ +}) + +/** + * EDAC_DIMM_PTR - Macro responsible to get a pointer inside a pointer array + * for the element given by [layer0,layer1,layer2] position + * + * @layers: a struct edac_mc_layer array, describing how many elements + * were allocated for each layer + * @var: name of the var where we want to get the pointer + * (like mci->dimms) + * @n_layers: Number of layers at the @layers array + * @layer0: layer0 position + * @layer1: layer1 position. Unused if n_layers < 2 + * @layer2: layer2 position. 
Unused if n_layers < 3 + * + * For 1 layer, this macro returns &var[layer0] + * For 2 layers, this macro is similar to allocate a bi-dimensional array + * and to return "&var[layer0][layer1]" + * For 3 layers, this macro is similar to allocate a tri-dimensional array + * and to return "&var[layer0][layer1][layer2]" + */ +#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \ + typeof(*var) __p; \ + int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \ + if (___i < 0) \ __p = NULL; \ + else \ + __p = (var)[___i]; \ __p; \ }) - -/* FIXME: add the proper per-location error counts */ struct dimm_info { + struct device dev; + char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ /* Memory location data */ @@ -456,6 +521,8 @@ struct rank_info { }; struct csrow_info { + struct device dev; + /* Used only by edac_mc_find_csrow_by_page() */ unsigned long first_page; /* first page number in csrow */ unsigned long last_page; /* last page number in csrow */ @@ -469,44 +536,26 @@ struct csrow_info { struct mem_ctl_info *mci; /* the parent */ - struct kobject kobj; /* sysfs kobject for this csrow */ - /* channel information for this csrow */ u32 nr_channels; - struct rank_info *channels; + struct rank_info **channels; }; -struct mcidev_sysfs_group { - const char *name; /* group name */ - const struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */ -}; - -struct mcidev_sysfs_group_kobj { - struct list_head list; /* list for all instances within a mc */ - - struct kobject kobj; /* kobj for the group */ - - const struct mcidev_sysfs_group *grp; /* group description table */ - struct mem_ctl_info *mci; /* the parent */ -}; - -/* mcidev_sysfs_attribute structure - * used for driver sysfs attributes and in mem_ctl_info - * sysfs top level entries +/* + * struct errcount_attribute - used to store the several error counts */ -struct mcidev_sysfs_attribute { - /* It should use either attr or grp */ - struct attribute attr; - const struct mcidev_sysfs_group *grp; /* Points to a group of attributes */ - - /* Ops for show/store values at the attribute - not used on group */ - ssize_t (*show)(struct mem_ctl_info *,char *); - ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); +struct errcount_attribute_data { + int n_layers; + int pos[EDAC_MAX_LAYERS]; + int layer0, layer1, layer2; }; /* MEMORY controller information structure */ struct mem_ctl_info { + struct device dev; + struct bus_type bus; + struct list_head link; /* for global list of mem_ctl_info structs */ struct module *owner; /* Module owner of this control struct */ @@ -548,10 +597,18 @@ struct mem_ctl_info { unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, unsigned long page); int mc_idx; - struct csrow_info *csrows; + struct csrow_info **csrows; unsigned nr_csrows, num_cschannel; - /* Memory Controller hierarchy */ + /* + * Memory Controller hierarchy + * + * There are basically two types of memory controller: the ones that + * sees memory sticks ("dimms"), and the ones that sees memory ranks. + * All old memory controllers enumerate memories per rank, but most + * of the recent drivers enumerate memories per DIMM, instead. + * When the memory controller is per rank, mem_is_per_rank is true. + */ unsigned n_layers; struct edac_mc_layer *layers; bool mem_is_per_rank; @@ -560,14 +617,14 @@ struct mem_ctl_info { * DIMM info. 
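To see how the two DIMM location macros above compose, take a hypothetical controller described by two layers with layers[0].size = 4 (csrows) and layers[1].size = 2 (channels); the values below assume exactly that layout.

	int off = EDAC_DIMM_OFF(layers, 2, 3, 1, 0);		/* 1 + 2 * 3 == 7 */
	struct dimm_info *dimm =
		EDAC_DIMM_PTR(layers, mci->dimms, 2, 3, 1, 0);	/* == mci->dimms[7] */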
Will eventually remove the entire csrows_info some day */ unsigned tot_dimms; - struct dimm_info *dimms; + struct dimm_info **dimms; /* * FIXME - what about controllers on other busses? - IDs must be * unique. dev pointer should be sufficiently unique, but * BUS:SLOT.FUNC numbers may not be unique. */ - struct device *dev; + struct device *pdev; const char *mod_name; const char *mod_ver; const char *ctl_name; @@ -586,12 +643,6 @@ struct mem_ctl_info { struct completion complete; - /* edac sysfs device control */ - struct kobject edac_mci_kobj; - - /* list for all grp instances within a mc */ - struct list_head grp_kobj_list; - /* Additional top controller level attributes, but specified * by the low level driver. * @@ -609,6 +660,13 @@ struct mem_ctl_info { /* the internal state of this controller instance */ int op_state; + +#ifdef CONFIG_EDAC_DEBUG + struct dentry *debugfs; + u8 fake_inject_layer[EDAC_MAX_LAYERS]; + u32 fake_inject_ue; + u16 fake_inject_count; +#endif }; #endif diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 7edcf1031718..db04ec5121cb 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -152,7 +152,7 @@ static inline void fw_card_put(struct fw_card *card) struct fw_attribute_group { struct attribute_group *groups[2]; struct attribute_group group; - struct attribute *attrs[12]; + struct attribute *attrs[13]; }; enum fw_device_state { @@ -321,7 +321,7 @@ struct fw_transaction { struct fw_address_handler { u64 offset; - size_t length; + u64 length; fw_address_callback_t address_callback; void *callback_data; struct list_head link; diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h index 4d5e57ff6614..1c06b5c7c308 100644 --- a/include/linux/i2c-ocores.h +++ b/include/linux/i2c-ocores.h @@ -12,7 +12,8 @@ #define _LINUX_I2C_OCORES_H struct ocores_i2c_platform_data { - u32 regstep; /* distance between registers */ + u32 reg_shift; /* register offset shift value */ + u32 reg_io_width; /* register io read/write width */ u32 clock_khz; /* input clock in kHz */ u8 num_devices; /* number of devices in the devices list */ struct i2c_board_info const *devices; /* devices connected to the bus */ diff --git a/include/linux/input/edt-ft5x06.h b/include/linux/input/edt-ft5x06.h new file mode 100644 index 000000000000..8a1e0d1a0124 --- /dev/null +++ b/include/linux/input/edt-ft5x06.h @@ -0,0 +1,24 @@ +#ifndef _EDT_FT5X06_H +#define _EDT_FT5X06_H + +/* + * Copyright (c) 2012 Simon Budig, <simon.budig@kernelconcepts.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
+ */ + +struct edt_ft5x06_platform_data { + int irq_pin; + int reset_pin; + + /* startup defaults for operational parameters */ + bool use_parameters; + u8 gain; + u8 threshold; + u8 offset; + u8 report_rate; +}; + +#endif /* _EDT_FT5X06_H */ diff --git a/include/linux/of.h b/include/linux/of.h index 0e9cf9eec085..42c2a58328c1 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -386,6 +386,13 @@ static inline int of_property_read_u64(const struct device_node *np, return -ENOSYS; } +static inline int of_property_match_string(struct device_node *np, + const char *propname, + const char *string) +{ + return -ENOSYS; +} + static inline struct device_node *of_parse_phandle(struct device_node *np, const char *phandle_name, int index) @@ -393,6 +400,15 @@ static inline struct device_node *of_parse_phandle(struct device_node *np, return NULL; } +static inline int of_parse_phandle_with_args(struct device_node *np, + const char *list_name, + const char *cells_name, + int index, + struct of_phandle_args *out_args) +{ + return -ENOSYS; +} + static inline int of_alias_get_id(struct device_node *np, const char *stem) { return -ENOSYS; diff --git a/arch/arm/plat-nomadik/include/plat/i2c.h b/include/linux/platform_data/i2c-nomadik.h index 8ba70ffc31ec..c2303c3e4803 100644 --- a/arch/arm/plat-nomadik/include/plat/i2c.h +++ b/include/linux/platform_data/i2c-nomadik.h @@ -5,8 +5,8 @@ * it under the terms of the GNU General Public License version 2, as * published by the Free Software Foundation. */ -#ifndef __PLAT_I2C_H -#define __PLAT_I2C_H +#ifndef __PDATA_I2C_NOMADIK_H +#define __PDATA_I2C_NOMADIK_H enum i2c_freq_mode { I2C_FREQ_MODE_STANDARD, /* up to 100 Kb/s */ @@ -36,4 +36,4 @@ struct nmk_i2c_controller { enum i2c_freq_mode sm; }; -#endif /* __PLAT_I2C_H */ +#endif /* __PDATA_I2C_NOMADIK_H */ diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 7c775751392c..21d076c5089e 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -1,7 +1,10 @@ #ifndef __LINUX_PWM_H #define __LINUX_PWM_H +#include <linux/of.h> + struct pwm_device; +struct seq_file; /* * pwm_request - request a PWM device @@ -28,4 +31,118 @@ int pwm_enable(struct pwm_device *pwm); */ void pwm_disable(struct pwm_device *pwm); +#ifdef CONFIG_PWM +struct pwm_chip; + +enum { + PWMF_REQUESTED = 1 << 0, + PWMF_ENABLED = 1 << 1, +}; + +struct pwm_device { + const char *label; + unsigned long flags; + unsigned int hwpwm; + unsigned int pwm; + struct pwm_chip *chip; + void *chip_data; + + unsigned int period; /* in nanoseconds */ +}; + +static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) +{ + if (pwm) + pwm->period = period; +} + +static inline unsigned int pwm_get_period(struct pwm_device *pwm) +{ + return pwm ? 
pwm->period : 0; +} + +/** + * struct pwm_ops - PWM controller operations + * @request: optional hook for requesting a PWM + * @free: optional hook for freeing a PWM + * @config: configure duty cycles and period length for this PWM + * @enable: enable PWM output toggling + * @disable: disable PWM output toggling + * @dbg_show: optional routine to show contents in debugfs + * @owner: helps prevent removal of modules exporting active PWMs + */ +struct pwm_ops { + int (*request)(struct pwm_chip *chip, + struct pwm_device *pwm); + void (*free)(struct pwm_chip *chip, + struct pwm_device *pwm); + int (*config)(struct pwm_chip *chip, + struct pwm_device *pwm, + int duty_ns, int period_ns); + int (*enable)(struct pwm_chip *chip, + struct pwm_device *pwm); + void (*disable)(struct pwm_chip *chip, + struct pwm_device *pwm); +#ifdef CONFIG_DEBUG_FS + void (*dbg_show)(struct pwm_chip *chip, + struct seq_file *s); +#endif + struct module *owner; +}; + +/** + * struct pwm_chip - abstract a PWM controller + * @dev: device providing the PWMs + * @list: list node for internal use + * @ops: callbacks for this PWM controller + * @base: number of first PWM controlled by this chip + * @npwm: number of PWMs controlled by this chip + * @pwms: array of PWM devices allocated by the framework + */ +struct pwm_chip { + struct device *dev; + struct list_head list; + const struct pwm_ops *ops; + int base; + unsigned int npwm; + + struct pwm_device *pwms; + + struct pwm_device * (*of_xlate)(struct pwm_chip *pc, + const struct of_phandle_args *args); + unsigned int of_pwm_n_cells; +}; + +int pwm_set_chip_data(struct pwm_device *pwm, void *data); +void *pwm_get_chip_data(struct pwm_device *pwm); + +int pwmchip_add(struct pwm_chip *chip); +int pwmchip_remove(struct pwm_chip *chip); +struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, + unsigned int index, + const char *label); + +struct pwm_device *pwm_get(struct device *dev, const char *consumer); +void pwm_put(struct pwm_device *pwm); + +struct pwm_lookup { + struct list_head list; + const char *provider; + unsigned int index; + const char *dev_id; + const char *con_id; +}; + +#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id) \ + { \ + .provider = _provider, \ + .index = _index, \ + .dev_id = _dev_id, \ + .con_id = _con_id, \ + } + +void pwm_add_table(struct pwm_lookup *table, size_t num); + +#endif + #endif /* __LINUX_PWM_H */ diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index 63d2df43e61a..56f4a866539a 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h @@ -12,6 +12,7 @@ struct platform_pwm_backlight_data { unsigned int dft_brightness; unsigned int lth_brightness; unsigned int pwm_period_ns; + unsigned int *levels; int (*init)(struct device *dev); int (*notify)(struct device *dev, int brightness); void (*notify_after)(struct device *dev, int brightness); diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index ac9586dadfa5..7b600da9a635 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -214,6 +214,10 @@ void sg_free_table(struct sg_table *); int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); +int sg_alloc_table_from_pages(struct sg_table *sgt, + struct page **pages, unsigned int n_pages, + unsigned long offset, unsigned long size, + gfp_t gfp_mask); size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen); 
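The additions to pwm.h above give non-DT platforms a way to hand out PWMs by name. A board-code and consumer sketch follows; the provider string, device name, channel index and duty/period values are purely illustrative.

static struct pwm_lookup board_pwm_lookup[] = {
	/* channel 0 of the chip registered as "tegra-pwm" goes to the backlight */
	PWM_LOOKUP("tegra-pwm", 0, "pwm-backlight", NULL),
};

static void __init board_pwm_init(void)
{
	pwm_add_table(board_pwm_lookup, ARRAY_SIZE(board_pwm_lookup));
}

/* consumer side, typically called from a driver's probe() */
static int consumer_probe(struct device *dev)
{
	struct pwm_device *pwm = pwm_get(dev, NULL);
	int ret;

	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	ret = pwm_config(pwm, 1000000, 5000000);	/* 1 ms duty, 5 ms period */
	if (!ret)
		ret = pwm_enable(pwm);
	if (ret)
		pwm_put(pwm);

	return ret;
}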
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index dcdfc2bda922..6071e911c7f4 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -32,7 +32,7 @@ struct vm_struct { struct page **pages; unsigned int nr_pages; phys_addr_t phys_addr; - void *caller; + const void *caller; }; /* @@ -62,7 +62,7 @@ extern void *vmalloc_32_user(unsigned long size); extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); extern void *__vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, - pgprot_t prot, int node, void *caller); + pgprot_t prot, int node, const void *caller); extern void vfree(const void *addr); extern void *vmap(struct page **pages, unsigned int count, @@ -85,14 +85,15 @@ static inline size_t get_vm_area_size(const struct vm_struct *area) extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); extern struct vm_struct *get_vm_area_caller(unsigned long size, - unsigned long flags, void *caller); + unsigned long flags, const void *caller); extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, unsigned long start, unsigned long end); extern struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, - void *caller); + const void *caller); extern struct vm_struct *remove_vm_area(const void *addr); +extern struct vm_struct *find_vm_area(const void *addr); extern int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages); diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h new file mode 100644 index 000000000000..260470e72483 --- /dev/null +++ b/include/ras/ras_event.h @@ -0,0 +1,102 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ras +#define TRACE_INCLUDE_FILE ras_event + +#if !defined(_TRACE_HW_EVENT_MC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HW_EVENT_MC_H + +#include <linux/tracepoint.h> +#include <linux/edac.h> +#include <linux/ktime.h> + +/* + * Hardware Events Report + * + * Those events are generated when hardware detected a corrected or + * uncorrected event, and are meant to replace the current API to report + * errors defined on both EDAC and MCE subsystems. + * + * FIXME: Add events for handling memory errors originated from the + * MCE subsystem. 
+ */ + +/* + * Hardware-independent Memory Controller specific events + */ + +/* + * Default error mechanisms for Memory Controller errors (CE and UE) + */ +TRACE_EVENT(mc_event, + + TP_PROTO(const unsigned int err_type, + const char *error_msg, + const char *label, + const int error_count, + const u8 mc_index, + const s8 top_layer, + const s8 mid_layer, + const s8 low_layer, + unsigned long address, + const u8 grain_bits, + unsigned long syndrome, + const char *driver_detail), + + TP_ARGS(err_type, error_msg, label, error_count, mc_index, + top_layer, mid_layer, low_layer, address, grain_bits, + syndrome, driver_detail), + + TP_STRUCT__entry( + __field( unsigned int, error_type ) + __string( msg, error_msg ) + __string( label, label ) + __field( u16, error_count ) + __field( u8, mc_index ) + __field( s8, top_layer ) + __field( s8, middle_layer ) + __field( s8, lower_layer ) + __field( long, address ) + __field( u8, grain_bits ) + __field( long, syndrome ) + __string( driver_detail, driver_detail ) + ), + + TP_fast_assign( + __entry->error_type = err_type; + __assign_str(msg, error_msg); + __assign_str(label, label); + __entry->error_count = error_count; + __entry->mc_index = mc_index; + __entry->top_layer = top_layer; + __entry->middle_layer = mid_layer; + __entry->lower_layer = low_layer; + __entry->address = address; + __entry->grain_bits = grain_bits; + __entry->syndrome = syndrome; + __assign_str(driver_detail, driver_detail); + ), + + TP_printk("%d %s error%s:%s%s on %s (mc:%d location:%d:%d:%d address:0x%08lx grain:%d syndrome:0x%08lx%s%s)", + __entry->error_count, + (__entry->error_type == HW_EVENT_ERR_CORRECTED) ? "Corrected" : + ((__entry->error_type == HW_EVENT_ERR_FATAL) ? + "Fatal" : "Uncorrected"), + __entry->error_count > 1 ? "s" : "", + ((char *)__get_str(msg))[0] ? " " : "", + __get_str(msg), + __get_str(label), + __entry->mc_index, + __entry->top_layer, + __entry->middle_layer, + __entry->lower_layer, + __entry->address, + 1 << __entry->grain_bits, + __entry->syndrome, + ((char *)__get_str(driver_detail))[0] ? " " : "", + __get_str(driver_detail)) +); + +#endif /* _TRACE_HW_EVENT_MC_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 6096e89bee55..e719adf695bf 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -319,6 +319,70 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) EXPORT_SYMBOL(sg_alloc_table); /** + * sg_alloc_table_from_pages - Allocate and initialize an sg table from + * an array of pages + * @sgt: The sg table header to use + * @pages: Pointer to an array of page pointers + * @n_pages: Number of pages in the pages array + * @offset: Offset from start of the first page to the start of a buffer + * @size: Number of valid bytes in the buffer (after offset) + * @gfp_mask: GFP allocation mask + * + * Description: + * Allocate and initialize an sg table from a list of pages. Contiguous + * ranges of the pages are squashed into a single scatterlist node. A user + * may provide an offset at a start and a size of valid data in a buffer + * specified by the page array. The returned sg table is released by + * sg_free_table. 
+ * + * Returns: + * 0 on success, negative error on failure + */ +int sg_alloc_table_from_pages(struct sg_table *sgt, + struct page **pages, unsigned int n_pages, + unsigned long offset, unsigned long size, + gfp_t gfp_mask) +{ + unsigned int chunks; + unsigned int i; + unsigned int cur_page; + int ret; + struct scatterlist *s; + + /* compute number of contiguous chunks */ + chunks = 1; + for (i = 1; i < n_pages; ++i) + if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) + ++chunks; + + ret = sg_alloc_table(sgt, chunks, gfp_mask); + if (unlikely(ret)) + return ret; + + /* merging chunks and putting them into the scatterlist */ + cur_page = 0; + for_each_sg(sgt->sgl, s, sgt->orig_nents, i) { + unsigned long chunk_size; + unsigned int j; + + /* look for the end of the current chunk */ + for (j = cur_page + 1; j < n_pages; ++j) + if (page_to_pfn(pages[j]) != + page_to_pfn(pages[j - 1]) + 1) + break; + + chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; + sg_set_page(s, pages[cur_page], min(size, chunk_size), offset); + size -= chunk_size; + offset = 0; + cur_page = j; + } + + return 0; +} +EXPORT_SYMBOL(sg_alloc_table_from_pages); + +/** * sg_miter_start - start mapping iteration over a sg list * @miter: sg mapping iter to be started * @sgl: sg list to iterate over diff --git a/mm/vmalloc.c b/mm/vmalloc.c index c7ac8e1b3ac7..e03f4c7307a5 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -1280,7 +1280,7 @@ DEFINE_RWLOCK(vmlist_lock); struct vm_struct *vmlist; static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, - unsigned long flags, void *caller) + unsigned long flags, const void *caller) { vm->flags = flags; vm->addr = (void *)va->va_start; @@ -1306,7 +1306,7 @@ static void insert_vmalloc_vmlist(struct vm_struct *vm) } static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, - unsigned long flags, void *caller) + unsigned long flags, const void *caller) { setup_vmalloc_vm(vm, va, flags, caller); insert_vmalloc_vmlist(vm); @@ -1314,7 +1314,7 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long align, unsigned long flags, unsigned long start, - unsigned long end, int node, gfp_t gfp_mask, void *caller) + unsigned long end, int node, gfp_t gfp_mask, const void *caller) { struct vmap_area *va; struct vm_struct *area; @@ -1375,7 +1375,7 @@ EXPORT_SYMBOL_GPL(__get_vm_area); struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, - void *caller) + const void *caller) { return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL, caller); @@ -1397,13 +1397,21 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) } struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, - void *caller) + const void *caller) { return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, -1, GFP_KERNEL, caller); } -static struct vm_struct *find_vm_area(const void *addr) +/** + * find_vm_area - find a continuous kernel virtual area + * @addr: base address + * + * Search for the kernel VM area starting at @addr, and return it. + * It is up to the caller to do all required locking to keep the returned + * pointer valid. 
+ */ +struct vm_struct *find_vm_area(const void *addr) { struct vmap_area *va; @@ -1568,9 +1576,9 @@ EXPORT_SYMBOL(vmap); static void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, pgprot_t prot, - int node, void *caller); + int node, const void *caller); static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, - pgprot_t prot, int node, void *caller) + pgprot_t prot, int node, const void *caller) { const int order = 0; struct page **pages; @@ -1643,7 +1651,7 @@ fail: */ void *__vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, - pgprot_t prot, int node, void *caller) + pgprot_t prot, int node, const void *caller) { struct vm_struct *area; void *addr; @@ -1699,7 +1707,7 @@ fail: */ static void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, pgprot_t prot, - int node, void *caller) + int node, const void *caller) { return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, gfp_mask, prot, node, caller);
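The mm/vmalloc.c hunk above un-statics find_vm_area() (declared in vmalloc.h earlier in this section) and constifies the caller pointers. A minimal sketch of a possible use of the newly exported lookup follows; the helper name is invented for illustration.

#include <linux/vmalloc.h>

/*
 * Hypothetical helper: given an address returned by vmalloc(), look up the
 * backing vm_struct with find_vm_area() and report how many pages back it.
 * Per the kernel-doc above, the caller must ensure the area cannot be freed
 * while the returned pointer is in use.
 */
static unsigned int example_vmalloc_nr_pages(const void *vaddr)
{
	struct vm_struct *area = find_vm_area(vaddr);

	return area ? area->nr_pages : 0;
}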
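Earlier in this section, lib/scatterlist.c gained sg_alloc_table_from_pages(). The sketch below shows roughly how a driver that already holds a pinned page array might wrap it in an sg table; the surrounding function is hypothetical, and only sg_alloc_table_from_pages() and sg_free_table() come from the patch.

#include <linux/scatterlist.h>

/*
 * Hypothetical caller: build an sg table for a buffer that starts 'offset'
 * bytes into the first page of a pinned page array. Physically contiguous
 * pages are merged into single scatterlist entries by the helper itself.
 */
static int example_pages_to_sgt(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned long offset,
				unsigned long size)
{
	int ret;

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					GFP_KERNEL);
	if (ret)
		return ret;

	/*
	 * sgt->sgl can now be handed to dma_map_sg() or similar; release the
	 * table with sg_free_table(sgt) once the mapping is torn down.
	 */
	return 0;
}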
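The new ras:mc_event tracepoint defined in include/ras/ras_event.h above would be emitted from a memory-controller error path roughly as below. The values and the reporting function are made up; the compilation unit that instantiates the tracepoint still needs the usual CREATE_TRACE_POINTS setup before including the header, which is omitted here.

#include <linux/edac.h>
#include <ras/ras_event.h>	/* declares trace_mc_event() */

/* Hypothetical reporting path for a single corrected error. */
static void example_report_ce(unsigned long address)
{
	trace_mc_event(HW_EVENT_ERR_CORRECTED,	/* err_type */
		       "read ECC error",	/* error_msg */
		       "DIMM_A0",		/* DIMM label */
		       1,			/* error_count */
		       0,			/* mc_index */
		       0, 0, -1,		/* top/mid/low layer */
		       address,
		       12,			/* grain_bits: 4 KiB grain */
		       0,			/* syndrome */
		       "");			/* driver_detail */
}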